| content (stringlengths 0–1.05M) | origin (stringclasses, 2 values) | type (stringclasses, 2 values) |
|---|---|---|
import factory
from faker import Faker
import random
from .models import Rating
from item.models import Item
from actor.models import Actor
fake = Faker()
class RatingFactory(factory.django.DjangoModelFactory):
class Meta:
model = Rating
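# Note: this snippet appears truncated — `random`, `Item` and `Actor` are
# imported but unused. A typical completion (hypothetical field names, not
# from the original source) would declare factory fields such as:
#
#     score = factory.LazyFunction(lambda: random.randint(1, 5))
#     comment = factory.LazyFunction(fake.sentence)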
|
nilq/baby-python
|
python
|
import pandas as pd
def _exchanges():
    # Linked to stocks via `股票.exchange = exchanges.exchange`
    # Source: CNINFO (深证信) stock information, listing venue
return pd.DataFrame({
'exchange': ['深交所主板', '上交所', '深交所中小板', '深交所创业板', '上交所科创板', '深证B股', '上海B股', '指数'],
'canonical_name': ['XSHE', 'XSHG', 'XSHE', 'XSHE', 'XSHG', 'XSHE', 'XSHG', 'XSHG'],
'country_code': ['CN'] * 8
})
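if __name__ == '__main__':
    # Usage sketch (hypothetical stock rows): resolve a listing venue to its
    # canonical exchange code by merging on `exchange`, as the comment in
    # _exchanges() describes.
    stocks = pd.DataFrame({'code': ['000001'], 'exchange': ['深交所主板']})
    print(stocks.merge(_exchanges(), on='exchange', how='left'))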
|
nilq/baby-python
|
python
|
from django.db import models
from django.utils import timezone  # timezone-aware "now" for DateTimeField defaults
from django.db.models.functions import Lower
from guardian.models import UserObjectPermissionBase, GroupObjectPermissionBase
from main.models import User
from main.validators import validate_item_name
class AccountHolder(models.Model):
name = models.CharField(unique=True, max_length=200, validators=[validate_item_name])
def __str__(self):
return self.name
class Meta:
ordering = [Lower('name')]
class Category(models.Model):
name = models.CharField(unique=True, max_length=200, validators=[validate_item_name])
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "categories"
ordering = [Lower('name')]
class BaseBooking(models.Model):
user = models.ForeignKey(User, on_delete=models.PROTECT, null=True, blank=False)
category = models.ForeignKey(Category, on_delete=models.PROTECT, null=True, blank=False)
account_holder = models.ForeignKey(AccountHolder, on_delete=models.PROTECT, null=True, blank=False)
amount = models.DecimalField(decimal_places=2, max_digits=15)
description = models.CharField(unique=False, null=True, blank=True, max_length=500)
    last_update = models.DateTimeField('last update', null=False, blank=False, default=timezone.now)
class Meta:
abstract = True
class Booking(BaseBooking):
parent_identifier = models.CharField(unique=True, null=True, blank=True, max_length=32)
booking_date = models.DateField('booking date', null=False, blank=False)
    def __str__(self):
        return f"{self.booking_date.year}-{self.booking_date.month}-{self.booking_date.day}" \
               f" : {self.account_holder} : {self.amount}"
class BookingUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(Booking, on_delete=models.CASCADE)
class BookingGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey(Booking, on_delete=models.CASCADE)
class PeriodicBooking(BaseBooking):
start_date = models.DateField(null=False, blank=False)
end_date = models.DateField(null=True, blank=True)
interval = models.IntegerField(default=1, null=False, blank=False)
identifier = models.CharField(unique=True, null=True, blank=False, max_length=32)
booking_day_of_month = models.IntegerField('DOM', default=1, null=False, blank=False)
class PeriodicBookingUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(PeriodicBooking, on_delete=models.CASCADE)
class PeriodicBookingGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey(PeriodicBooking, on_delete=models.CASCADE)
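# Usage sketch: the *ObjectPermission models above follow django-guardian's
# direct-foreign-key performance pattern. Per-object permissions can then be
# granted via guardian.shortcuts, e.g. (hypothetical user/booking objects):
#
#     from guardian.shortcuts import assign_perm
#     assign_perm('view_booking', some_user, some_booking)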
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Time : 2020/12/1 09:29
# @Author : ooooo
from typing import *
from bisect import bisect_left, bisect_right
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
if len(nums) == 0 or target > nums[-1] or target < nums[0]:
return [-1, -1]
l, r = bisect_left(nums, target), bisect_right(nums, target)
        # bisect_left returns the first index with nums[i] >= target and
        # bisect_right the first with nums[i] > target, so an absent target
        # shows up as nums[l] != target.
        if nums[l] != target:
return [-1, -1]
return [l, r - 1]
if __name__ == '__main__':
s = Solution()
print(s.searchRange([5, 7, 7, 8, 9, 10], 8))
print(s.searchRange([5, 7, 7, 8, 8, 10], 8))
print(s.searchRange([5, 7, 7, 8, 8, 10], 6))
|
nilq/baby-python
|
python
|
"""
from marshmallow import Schema, EXCLUDE
from marshmallow.fields import Str
from marshmallow.validate import Length
class ProfileSchema(Schema):
username = Str(required=True, validate=[Length(min=1, max=16)])
full_name = Str(required=True)
personal_address = Str(required=True)
profession = Str(required=True)
institution = Str(required=True)
institution_address = Str(required=True)
class Meta:
unknown = EXCLUDE
"""
|
nilq/baby-python
|
python
|
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
import logging
import struct
import networkx as nx
from operator import attrgetter
from ryu import cfg
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER, HANDSHAKE_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import arp
from ryu.lib.packet import lldp
from ryu.lib.packet import ether_types
from ryu.topology import event, switches
from ryu.topology.api import get_switch, get_link
import network_awareness
import network_monitor
import network_delay_detector
CONF = cfg.CONF
class ShortestForwarding(app_manager.RyuApp):
"""
    ShortestForwarding is a Ryu app that forwards packets along the
    shortest path. The shortest-path computation is delegated to the
    network_awareness, network_monitor and network_delay_detector modules.
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
"network_awareness": network_awareness.NetworkAwareness,
"network_monitor": network_monitor.NetworkMonitor,
"network_delay_detector": network_delay_detector.NetworkDelayDetector}
WEIGHT_MODEL = {'hop': 'weight', 'delay': "delay", "bw": "bw"}
def __init__(self, *args, **kwargs):
super(ShortestForwarding, self).__init__(*args, **kwargs)
self.name = 'shortest_forwarding'
self.awareness = kwargs["network_awareness"]
self.monitor = kwargs["network_monitor"]
self.delay_detector = kwargs["network_delay_detector"]
self.datapaths = {}
self.weight = self.WEIGHT_MODEL[CONF.weight]
self.gid = 0
def set_weight_mode(self, weight):
"""
        Set the weight mode used for path calculation.
"""
self.weight = weight
if self.weight == self.WEIGHT_MODEL['hop']:
self.awareness.get_shortest_paths(weight=self.weight)
return True
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
"""
Collect datapath information.
"""
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
            if datapath.id not in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def add_flow(self, dp, p, match, actions, idle_timeout=0, hard_timeout=0):
"""
Send a flow entry to datapath.
"""
ofproto = dp.ofproto
parser = dp.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=dp, priority=p,
idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
match=match, instructions=inst)
dp.send_msg(mod)
def send_flow_mod(self, datapath, flow_info, src_port, dst_port, group_id=0):
"""
Build flow entry, and send it to datapath.
"""
parser = datapath.ofproto_parser
ofproto = datapath.ofproto
actions = []
if group_id == 0:
actions.append(parser.OFPActionOutput(dst_port))
else:
actions.append(parser.OFPActionGroup(group_id))
if src_port == 0:
match = parser.OFPMatch(
eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2])
self.add_flow(datapath, 1, match, actions,
idle_timeout=0, hard_timeout=0)
else:
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2])
self.add_flow(datapath, 1, match, actions,
idle_timeout=0, hard_timeout=0)
def _build_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
"""
Build packet out object.
"""
actions = []
if dst_port:
actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port))
msg_data = None
if buffer_id == datapath.ofproto.OFP_NO_BUFFER:
if data is None:
return None
msg_data = data
out = datapath.ofproto_parser.OFPPacketOut(
datapath=datapath, buffer_id=buffer_id,
data=msg_data, in_port=src_port, actions=actions)
return out
def send_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
"""
        Send a packet_out message to the assigned datapath.
"""
out = self._build_packet_out(datapath, buffer_id,
src_port, dst_port, data)
if out:
datapath.send_msg(out)
def get_port(self, dst_ip, access_table):
"""
        Get the access port of the dst host.
        access_table: {(sw, port): (ip, mac)}
"""
k = []
v = []
if access_table:
k = list(access_table.keys())
v = list(access_table.values())
if isinstance(v[0], tuple):
for key in k:
if dst_ip == access_table[key][0]:
dst_port = key[1]
return dst_port
return None
def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid):
"""
Get port pair of link, so that controller can install flow entry.
"""
if (src_dpid, dst_dpid) in link_to_port:
return link_to_port[(src_dpid, dst_dpid)]
else:
self.logger.info("dpid:%s->dpid:%s is not in links" % (
src_dpid, dst_dpid))
return None
def flood(self, msg):
"""
        Flood the ARP packet to the access ports
        that have no host record.
"""
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
for dpid in self.awareness.access_ports:
for port in self.awareness.access_ports[dpid]:
if (dpid, port) not in self.awareness.access_table.keys():
datapath = self.datapaths[dpid]
out = self._build_packet_out(
datapath, ofproto.OFP_NO_BUFFER,
ofproto.OFPP_CONTROLLER, port, msg.data)
datapath.send_msg(out)
self.logger.debug("Flooding msg")
def arp_forwarding(self, msg, src_ip, dst_ip):
""" Send ARP packet to the destination host,
if the dst host record is existed,
else, flow it to the unknow access port.
"""
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
result = self.awareness.get_host_location(dst_ip)
if result: # host record in access table.
datapath_dst, out_port = result[0], result[1]
datapath = self.datapaths[datapath_dst]
out = self._build_packet_out(datapath, ofproto.OFP_NO_BUFFER,
ofproto.OFPP_CONTROLLER,
out_port, msg.data)
datapath.send_msg(out)
self.logger.debug("Reply ARP to knew host")
else:
self.flood(msg)
def get_path(self, src, dst, weight):
"""
Get shortest path from network awareness module.
"""
shortest_paths = self.awareness.shortest_paths
graph = self.awareness.graph
if weight == self.WEIGHT_MODEL['hop']:
paths = shortest_paths.get(src).get(dst)
#print('get_path:', src, dst, paths)
return paths
elif weight == self.WEIGHT_MODEL['delay']:
            # If the paths already exist, return them; otherwise calculate and cache them.
try:
paths = shortest_paths.get(src).get(dst)
return paths[0]
            except Exception:
paths = self.awareness.k_shortest_paths(graph, src, dst,
weight=weight)
shortest_paths.setdefault(src, {})
shortest_paths[src].setdefault(dst, paths)
return paths[0]
elif weight == self.WEIGHT_MODEL['bw']:
            # All paths are computed when self.monitor.get_best_path_by_bw
            # is called, so we only need to call it once per period and can
            # read the path directly afterwards.
try:
                # If the path already exists, return it.
path = self.monitor.best_paths.get(src).get(dst)
return path
            except Exception:
                # Otherwise, calculate it and return.
result = self.monitor.get_best_path_by_bw(graph,
shortest_paths)
paths = result[1]
best_path = paths.get(src).get(dst)
return best_path
def get_sw(self, dpid, in_port, src, dst):
"""
Get pair of source and destination switches.
"""
src_sw = dpid
dst_sw = None
src_location = self.awareness.get_host_location(src)
if in_port in self.awareness.access_ports[dpid]:
if (dpid, in_port) == src_location:
src_sw = src_location[0]
else:
return None
dst_location = self.awareness.get_host_location(dst)
if dst_location:
dst_sw = dst_location[0]
return src_sw, dst_sw
def send_group_mod(self, datapath, group_id_1, out_port_1, out_port_2, watch_port_2=0):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
actions_1 = [ofp_parser.OFPActionOutput(out_port_1)]
watch_port_1 = out_port_1
actions_2 = [ofp_parser.OFPActionOutput(out_port_2)]
        if watch_port_2 == 0:
            watch_port_2 = out_port_2
buckets = [ofp_parser.OFPBucket(watch_port=watch_port_1, watch_group=0,
actions=actions_1),
ofp_parser.OFPBucket(watch_port=watch_port_2, watch_group=0,
actions=actions_2)]
group_id = group_id_1
req = ofp_parser.OFPGroupMod(datapath, ofp.OFPGC_ADD,
ofp.OFPGT_FF, group_id, buckets)
datapath.send_msg(req)
def install_flow(self, datapaths, link_to_port, cir_list, access_table,
paths, flow_info, buffer_id, data=None):
        '''
        Install flow entries for the round trip: forward and back.
        @parameter: path=[dpid1, dpid2...]
                    flow_info=(eth_type, src_ip, dst_ip, in_port)
        '''
if len(paths) > 1:
path, path_ = paths[0], paths[1]
else:
path = paths[0]
#------ working path install
if path is None or len(path) == 0:
self.logger.info("Path error!")
return
in_port = flow_info[3]
first_dp = datapaths[path[0]]
out_port = first_dp.ofproto.OFPP_LOCAL
back_info = (flow_info[0], flow_info[2], flow_info[1])
###~~~~new~~~~~
b = 0
len_cir = len(cir_list)
len_pa = len(path)
path_cir = []
bp_cir = []
cir_cnt = 0
cir_dir = [] # -1 means anticlockwise
bp_exclue = []
        if len(path) < 2:
return
print('cir_list:', cir_list)
##------first_dp-----------
port_pair = self.get_port_pair_from_link(link_to_port,
path[0], path[1])
out_port = port_pair[0]
# backward_wildcard
self.send_flow_mod(first_dp, back_info, 0, in_port)
for j in range(len_cir):
if path[0] in cir_list[j] and path[1] in cir_list[j]:
print('first_cir:', cir_list[j])
bp_cir = cir_list[j]
p = bp_cir.index(path[0])
try:
if path[1] == bp_cir[p+1]:
bp = bp_cir[p-1]
cir_dir.append(-1)
else:
bp = bp_cir[p+1]
cir_dir.append(1)
except IndexError:
if path[1] == bp_cir[0]:
bp = bp_cir[p-1]
cir_dir.append(-1)
else:
bp = bp_cir[0]
cir_dir.append(1)
port_pair = self.get_port_pair_from_link(link_to_port,
path[0], bp)
bp_port = port_pair[0]
# forward_ffg
self.send_group_mod(first_dp, self.gid, out_port, bp_port)
self.send_flow_mod(first_dp, flow_info, in_port, out_port, self.gid)
# match return packets
self.send_flow_mod(first_dp, flow_info, out_port, bp_port)
path_cir.append(bp_cir)
#bp_exclue[0].append(path[0])
#bp_exclue[0].append(path[1])
cir_cnt = 1
b = 1
break
# forward_no_bp
if b == 0:
self.send_flow_mod(first_dp, flow_info, in_port, out_port)
b = 0
##------last_dp-----------
last_dp = datapaths[path[-1]]
port_pair = self.get_port_pair_from_link(link_to_port,
path[-2], path[-1])
src_port = port_pair[1]
dst_port = self.get_port(flow_info[2], access_table)
        # forward_wildcard
self.send_flow_mod(last_dp, flow_info, 0, dst_port)
for j in range(len_cir):
if path[-2] in cir_list[j] and path[-1] in cir_list[j]:
bp_cir = cir_list[j]
print('last_cir:', bp_cir)
p = bp_cir.index(path[-1])
for k in range(len(path_cir)):
if path[-2] in path_cir[k] and path[-1] in path_cir[k]:
bp_cir = path_cir[k]
#bp_exclue[cir_cnt].append(path[-2])
#bp_exclue[cir_cnt].append(path[-1])
break
else:
if k == len(path_cir)-1:
path_cir.append(cir_list[j])
bp_cir = cir_list[j]
cir_cnt += 1
#bp_exclue[cir_cnt] = [path[-2], path[-1]]
if path[-2] == bp_cir[p-1]:
cir_dir.append(-1)
else:
cir_dir.append(1)
else:
continue
try:
if path[-2] == bp_cir[p+1]:
bp = bp_cir[p-1]
else:
bp = bp_cir[p+1]
except IndexError:
if path[-2] == bp_cir[0]:
bp = bp_cir[p-1]
else:
bp = bp_cir[0]
port_pair = self.get_port_pair_from_link(link_to_port,
path[-1], bp)
bp_port = port_pair[0]
# backward_ffg
self.send_group_mod(last_dp, self.gid, src_port, bp_port)
self.send_flow_mod(last_dp, back_info, dst_port, src_port, self.gid)
# match return packets
self.send_flow_mod(last_dp, back_info, src_port, bp_port)
b = 1
break
# backward_no_bp
if b == 0:
self.send_flow_mod(last_dp, back_info, dst_port, src_port)
b = 0
##-------inter_dp----------
cir_01 = []
ad = 0
if len_pa > 2:
for i in range(1, len_pa-1):
datapath = datapaths[path[i]]
print('~~~~ path[i]:', path[i])
port_pair = self.get_port_pair_from_link(link_to_port,
path[i-1], path[i])
port_next = self.get_port_pair_from_link(link_to_port,
path[i], path[i+1])
src_port, dst_port = port_pair[1], port_next[0]
for j in range(len_cir):
#p = cir_list[j].index(path[i])
if path[i-1] in cir_list[j] and path[i] in cir_list[j] and path[i+1] not in cir_list[j]:
p = cir_list[j].index(path[i])
f = 0
print('inter_circle_10:', cir_list[j])
try:
if path[i-1] == cir_list[j][p+1]:
bp = cir_list[j][p-1]
else:
bp = cir_list[j][p+1]
except IndexError:
if path[i-1] == cir_list[j][0]:
bp = cir_list[j][p-1]
else:
bp = cir_list[j][0]
bp_port = self.get_port_pair_from_link(link_to_port,
path[i], bp)[0]
for m in range(len_cir):
if path[i] in cir_list[m] and path[i+1] in cir_list[m]:
bp_cir_ = cir_list[m]
print ('bp_cir__101', bp_cir_)
p_ = bp_cir_.index(path[i])
if bp_cir_ in path_cir:
pass
else:
path_cir.append(bp_cir_)
cir_cnt += 1
try:
if path[i+1] == bp_cir_[p_+1]:
cir_dir.append(-1)
else:
cir_dir.append(1)
except IndexError:
if path[i+1] == bp_cir_[0]:
cir_dir.append(-1)
else:
cir_dir.append(1)
if path[i-1] in bp_cir_:
print('inter_circle_1011')
f = 1
# forward_wildcard_ffg
self.send_group_mod(datapath, self.gid, dst_port,
datapath.ofproto.OFPP_IN_PORT, src_port)
self.send_flow_mod(datapath, flow_info, bp_port, dst_port)
self.send_flow_mod(datapath, flow_info, src_port, dst_port,
self.gid)
# match return packets
self.send_flow_mod(datapath, flow_info, dst_port, src_port)
# backward_ffg
self.send_group_mod(datapath, self.gid+1, src_port, bp_port)
self.send_flow_mod(datapath, back_info, dst_port, src_port,
self.gid+1)
datapath_ = datapaths[path[i-1]]
p_ = bp_cir_.index(path[i-1])
try:
if path[i] == bp_cir_[p_+1]:
bp_ = bp_cir_[p_-1]
else:
bp_ = bp_cir_[p_+1]
except IndexError:
if path[i+1] == bp_cir_[0]:
bp_ = bp_cir_[p_-1]
else:
bp_ = bp_cir_[0]
bp_port_ = self.get_port_pair_from_link(link_to_port,
path[i-1], bp_)[0]
self.send_flow_mod(datapath_, flow_info, port_pair[0], bp_port_)
h = 0
for n in range(i):
datapath_ = datapaths[path[n]]
if h == 1:
src_port_ = self.get_port_pair_from_link(link_to_port,
path[n], path[n-1])[0]
dst_port_ = self.get_port_pair_from_link(link_to_port,
path[n], path[n+1])[0]
self.send_flow_mod(datapath_, flow_info, dst_port_, src_port_)
continue
if path[n] in bp_cir_:
p_ = bp_cir_.index(path[n])
try:
if path[n+1] == bp_cir_[p_+1]:
bp_ = bp_cir_[p_-1]
else:
bp_ = bp_cir_[p_+1]
except IndexError:
if path[n+1] == bp_cir_[0]:
bp_ = bp_cir_[p_-1]
else:
bp_ = bp_cir_[0]
bp_port_ = self.get_port_pair_from_link(link_to_port,
path[n], bp_)[0]
dst_port_ = self.get_port_pair_from_link(link_to_port,
path[n], path[n+1])[0]
self.send_flow_mod(datapath_, flow_info, dst_port_, bp_port_)
h = 1
continue
break
else:
print('inter_circle_1010')
f = 1
p_ = bp_cir_.index(path[i])
try:
if path[i+1] == bp_cir_[p_+1]:
bp_ = bp_cir_[p_-1]
else:
bp_ = bp_cir_[p_+1]
except IndexError:
if path[i+1] == bp_cir_[0]:
bp_ = bp_cir_[p_-1]
else:
bp_ = bp_cir_[0]
bp_port_ = self.get_port_pair_from_link(link_to_port,
path[i], bp_)[0]
# forward_wildcard_ffg
self.send_group_mod(datapath, self.gid, dst_port, bp_port_)
self.send_flow_mod(datapath, flow_info, src_port, dst_port,
self.gid)
self.send_flow_mod(datapath, flow_info, bp_port, dst_port,
self.gid)
# match_fir_return
self.send_flow_mod(datapath, back_info, src_port, bp_port)
# match_sec_return
self.send_flow_mod(datapath, flow_info, dst_port, bp_port_)
# backward_ffg
self.send_group_mod(datapath, self.gid+1, src_port, bp_port)
self.send_flow_mod(datapath, back_info, dst_port, src_port,
self.gid+1)
self.send_flow_mod(datapath, back_info, bp_port_, src_port,
self.gid+1)
break
else:
if m == len_cir-1 :
f =1
print('inter_cir_100')
# forward_wildcard
self.send_flow_mod(datapath, flow_info, src_port, dst_port)
self.send_flow_mod(datapath, flow_info, bp_port, dst_port)
# backward_ffg
self.send_group_mod(datapath, self.gid, src_port, bp_port)
self.send_flow_mod(datapath, back_info, dst_port, src_port,
self.gid)
# match return packets
self.send_flow_mod(datapath, back_info, src_port, bp_port)
if f == 1:
break
elif path[i-1] in cir_list[j] and path[i] in cir_list[j] and path[i+1] in cir_list[j]:
print('inter_circle_11:', cir_list[j])
bp_cir_ = cir_list[j]
# forward_ffg
self.send_group_mod(datapath, self.gid, dst_port,
datapath.ofproto.OFPP_IN_PORT, src_port)
self.send_flow_mod(datapath, flow_info, src_port, dst_port,
self.gid)
# match return packets
self.send_flow_mod(datapath, flow_info, dst_port, src_port)
# backward_ffg
self.send_group_mod(datapath, self.gid+1, src_port,
datapath.ofproto.OFPP_IN_PORT, dst_port)
self.send_flow_mod(datapath, back_info, dst_port, src_port,
self.gid+1)
# match return packets
self.send_flow_mod(datapath, back_info, src_port, dst_port)
#datapath_ = datapaths[path[i-1]]
#p_ = bp_cir_.index(path[i-1])
#try:
# if path[i] == bp_cir_[p_+1]:
# bp_ = bp_cir_[p_-1]
# else:
# bp_ = bp_cir_[p_+1]
#except IndexError:
# if path[i+1] == bp_cir_[0]:
# bp_ = bp_cir_[p_-1]
# else:
# bp_ = bp_cir_[0]
#bp_port_ = self.get_port_pair_from_link(link_to_port,
# path[i-1], bp_)[0]
#self.send_flow_mod(datapath_, flow_info, port_pair[0], bp_port_)
break
elif path[i-1] not in cir_list[j] and path[i] in cir_list[j] and path[i+1] in cir_list[j]:
cir_01 = cir_list[j]
if j == len_cir-1:
p = cir_list[j].index(path[i])
print('inter_circle_01:', cir_01)
bp_cir = cir_01
if bp_cir in path_cir:
pass
else:
path_cir.append(bp_cir)
cir_cnt += 1
try:
if path[i+1] == bp_cir[p+1]:
bp = bp_cir[p-1]
cir_dir.append(-1)
else:
bp = bp_cir[p+1]
cir_dir.append(1)
except IndexError:
if path[i+1] == bp_cir[0]:
bp = bp_cir[p-1]
cir_dir.append(-1)
else:
bp = bp_cir[0]
cir_dir.append(1)
bp_port = self.get_port_pair_from_link(link_to_port,
path[i], bp)[0]
print('inter_dp, p, bp,bp_port:', path[i], p, bp, bp_port)
# forward_ffg
self.send_group_mod(datapath, self.gid, dst_port, bp_port)
self.send_flow_mod(datapath, flow_info, src_port, dst_port,
self.gid)
# match return packets
self.send_flow_mod(datapath, flow_info, dst_port, bp_port)
# backward_wildcard
self.send_flow_mod(datapath, back_info, bp_port, src_port)
self.send_flow_mod(datapath, back_info, dst_port, src_port)
break
elif j == len_cir-1:
if len(cir_01) == 0:
print('inter_circle_00')
self.send_flow_mod(datapath, flow_info, src_port, dst_port)
self.send_flow_mod(datapath, back_info, dst_port, src_port)
else:
print('inter_circle_01:', cir_01)
p = cir_01.index(path[i])
bp_cir = cir_01
if bp_cir in path_cir:
pass
else:
path_cir.append(bp_cir)
cir_cnt += 1
try:
if path[i+1] == bp_cir[p+1]:
bp = bp_cir[p-1]
cir_dir.append(-1)
else:
bp = bp_cir[p+1]
cir_dir.append(1)
except IndexError:
if path[i+1] == bp_cir[0]:
bp = bp_cir[p-1]
cir_dir.append(-1)
else:
bp = bp_cir[0]
cir_dir.append(1)
bp_port = self.get_port_pair_from_link(link_to_port,
path[i], bp)[0]
print('inter_dp, p, bp,bp_port:', path[i], p, bp, bp_port)
# forward_ffg
self.send_group_mod(datapath, self.gid, dst_port, bp_port)
self.send_flow_mod(datapath, flow_info, src_port, dst_port,
self.gid)
# match return packets
self.send_flow_mod(datapath, flow_info, dst_port, bp_port)
# backward_wildcard
self.send_flow_mod(datapath, back_info, bp_port, src_port)
self.send_flow_mod(datapath, back_info, dst_port, src_port)
##--------bp_dp---------------
print('\npath_cir:\n', path_cir)
for j in range(len(path_cir)):
for i in path_cir[j]:
if i in path:
pass
else:
p = path_cir[j].index(i)
print("bp_i, path_cir, p, dir:", i, path_cir[j], p, cir_dir[j] )
#print('i:', i)
try:
port = self.get_port_pair_from_link(link_to_port,
path_cir[j][p-cir_dir[j]], path_cir[j][p])
except IndexError:
port = self.get_port_pair_from_link(link_to_port,
path_cir[j][0], path_cir[j][p])
try:
port_next = self.get_port_pair_from_link(link_to_port,
path_cir[j][p], path_cir[j][p+cir_dir[j]])
except IndexError:
port_next = self.get_port_pair_from_link(link_to_port,
path_cir[j][p], path_cir[j][0])
if port and port_next:
src_port, dst_port = port[1], port_next[0]
datapath = datapaths[path_cir[j][p]]
self.send_flow_mod(datapath, flow_info, src_port, dst_port)
self.send_flow_mod(datapath, back_info, dst_port, src_port)
self.logger.debug("inter_link of bp flow install")
def shortest_forwarding(self, msg, eth_type, ip_src, ip_dst):
"""
        Calculate the shortest forwarding path and install flow entries along it.
"""
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
result = self.get_sw(datapath.id, in_port, ip_src, ip_dst)
if result:
src_sw, dst_sw = result[0], result[1]
if dst_sw:
                # The path has already been calculated; just fetch it.
paths = self.get_path(src_sw, dst_sw, weight=self.weight)
print('paths', paths)
path_0 = paths[0]
self.logger.info("[PATH]%s<-->%s: %s" % (ip_src, ip_dst, path_0))
self.logger.info('gid%s' % self.gid)
flow_info = (eth_type, ip_src, ip_dst, in_port)
                # Install flow entries to the datapaths along the path.
self.install_flow(self.datapaths,
self.awareness.link_to_port,
self.awareness.cir_list,
self.awareness.access_table, paths,
flow_info, msg.buffer_id, msg.data)
return
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
'''
        In the packet_in handler, we need to learn the access_table via ARP.
        Therefore, the first packet from an UNKNOWN host MUST be ARP.
'''
msg = ev.msg
datapath = msg.datapath
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
arp_pkt = pkt.get_protocol(arp.arp)
ip_pkt = pkt.get_protocol(ipv4.ipv4)
lldp_pkt = pkt.get_protocol(lldp.lldp)
eth = pkt.get_protocol(ethernet.ethernet)
#if isinstance(lldp_pkt, lldp.lldp):
# print ('^^^ LLDP ^^^^')
if isinstance(arp_pkt, arp.arp):
print('\nARP: packet in switch', datapath.id, 'in_port:', in_port,
'arp_src:', arp_pkt.src_ip, 'arp_dst:', arp_pkt.dst_ip)
self.logger.debug("ARP processing")
self.arp_forwarding(msg, arp_pkt.src_ip, arp_pkt.dst_ip)
if isinstance(ip_pkt, ipv4.ipv4):
self.logger.debug("IPV4 processing")
in_port = msg.match['in_port']
if len(pkt.get_protocols(ethernet.ethernet)):
print('\n***** IPv4: packet in switch', datapath.id, 'in_port:', in_port,
'src:', ip_pkt.src, 'dst:', ip_pkt.dst)
self.gid += 2
eth_type = pkt.get_protocols(ethernet.ethernet)[0].ethertype
self.shortest_forwarding(msg, eth_type, ip_pkt.src, ip_pkt.dst)
@set_ev_cls(ofp_event.EventOFPErrorMsg, [HANDSHAKE_DISPATCHER,
MAIN_DISPATCHER, CONFIG_DISPATCHER])
def _error_msg_handler(self, ev):
msg = ev.msg
dpid = msg.datapath.id
err_type = int(msg.type)
err_code = int(msg.code)
        print('error_msg:', dpid, err_type, err_code)
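# Usage sketch (assumed file layout): start the app with ryu-manager and
# topology discovery enabled, with the three context modules importable from
# the same directory:
#
#     ryu-manager --observe-links shortest_forwarding.py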
|
nilq/baby-python
|
python
|
import os
import sys
from flake8.api import legacy as flake8
from collectd_haproxy import compat
DIRS_TO_TEST = ("collectd_haproxy", "tests")
MAX_COMPLEXITY = 11
# flake8 does not work on python 2.6 or lower
__test__ = sys.version_info >= (2, 7)
def test_style():
for path in DIRS_TO_TEST:
python_files = list(get_python_files(path))
yield create_style_assert(path, python_files)
def get_python_files(path):
path = os.path.join(os.path.dirname(__file__), "../", path)
for root, dirs, files in os.walk(path):
for filename in files:
if not filename.endswith(".py"):
continue
yield os.path.join(root, filename)
def create_style_assert(path, python_files):
def test_function():
assert_conforms_to_style(python_files)
test_name = "test_style__%s" % path
test_function.__name__ = test_name
test_function.description = test_name
return test_function
def assert_conforms_to_style(python_files):
checker = flake8.get_style_guide(max_complexity=MAX_COMPLEXITY)
checker.options.jobs = 1
checker.options.verbose = True
report = checker.check_files(python_files)
warnings = report.get_statistics("W")
errors = report.get_statistics("E")
assert not (warnings or errors), "\n" + "\n".join([
"Warnings:",
"\n".join(warnings),
"Errors:",
"\n".join(errors),
])
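# Note: test_style() above is a nose-style generator test (one yield per
# directory); pytest 4+ no longer collects yield tests, so this module is
# meant to be run with nose (nosetests) or an equivalent runner.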
|
nilq/baby-python
|
python
|
from causalml.propensity import ElasticNetPropensityModel
from causalml.metrics import roc_auc_score
from .const import RANDOM_SEED
def test_elasticnet_propensity_model(generate_regression_data):
y, X, treatment, tau, b, e = generate_regression_data()
pm = ElasticNetPropensityModel(random_state=RANDOM_SEED)
ps = pm.fit_predict(X, treatment)
assert roc_auc_score(treatment, ps) > .5
|
nilq/baby-python
|
python
|
# --------------
import numpy as np
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
# `path` (the census CSV location) is assumed to be predefined by the exercise environment.
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print(data.shape)
census=np.concatenate((data, new_record),axis = 0)
print(census.shape)
# --------------
#Code starts here
import numpy as np
age=census[:,0]
print(age)
max_age = np.max(age)
print(max_age)
min_age = np.min(age)
print(min_age)
age_mean = np.mean(age)
print(age_mean)
age_std = np.std(age)
print(age_std)
# --------------
#Code starts here
import numpy as np
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
print('Race_0: ', len_0)
print('Race_1: ', len_1)
print('Race_2: ', len_2)
print('Race_3: ', len_3)
print('Race_4: ', len_4)
race_list=[len_0, len_1,len_2, len_3, len_4]
minority_race=race_list.index(min(race_list))
# --------------
#Code starts here
senior_citizens = census[census[:, 0] > 60]
print(senior_citizens)
working_hours_sum =senior_citizens.sum(axis=0)[6]
print(working_hours_sum)
senior_citizens_len=len(senior_citizens)
print(senior_citizens_len)
avg_working_hours=working_hours_sum/senior_citizens_len
print((avg_working_hours))
# --------------
#Code starts here
high = census[census[:,1] >10]
low = census[census[:,1] <=10]
avg_pay_high=high[:,7].mean()
print(avg_pay_high)
avg_pay_low=low[:,7].mean()
print(avg_pay_low)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import logging
from typing import List
import eduid_msg
from eduid_common.api.exceptions import MsgTaskFailed
from eduid_common.config.base import MsgConfigMixin
__author__ = 'lundberg'
logger = logging.getLogger(__name__)
TEMPLATES_RELATION = {
'mobile-validator': 'mobile-confirm',
'mobile-reset-password': 'mobile-reset-password',
'nin-validator': 'nin-confirm',
'nin-reset-password': 'nin-reset-password',
}
LANGUAGE_MAPPING = {
'en': 'en_US',
'sv': 'sv_SE',
}
class MsgRelay(object):
def __init__(self, config: MsgConfigMixin):
self.conf = config
eduid_msg.init_app(config.celery)
# these have to be imported _after_ eduid_msg.init_app()
from eduid_msg.tasks import get_postal_address, get_relations_to, pong, send_message, sendsms
self._get_postal_address = get_postal_address
self._get_relations_to = get_relations_to
self._send_message = send_message
self._send_sms = sendsms
self._pong = pong
@staticmethod
def get_language(lang: str) -> str:
return LANGUAGE_MAPPING.get(lang, 'en_US')
def get_postal_address(self, nin: str, timeout: int = 25) -> dict:
"""
:param nin: Swedish national identity number
:param timeout: Max wait time for task to finish
:return: Official name and postal address
The expected address format is:
OrderedDict([
(u'Name', OrderedDict([
(u'GivenNameMarking', u'20'),
(u'GivenName', u'personal name'),
(u'SurName', u'thesurname')
])),
(u'OfficialAddress', OrderedDict([
(u'Address2', u'StreetName 103'),
(u'PostalCode', u'74141'),
(u'City', u'STOCKHOLM')
]))
])
"""
rtask = self._get_postal_address.apply_async(args=[nin])
try:
ret = rtask.get(timeout=timeout)
if ret is not None:
return ret
raise MsgTaskFailed('No postal address returned from Navet')
except Exception as e:
rtask.forget()
raise MsgTaskFailed(f'get_postal_address task failed: {e}')
def get_relations_to(self, nin: str, relative_nin: str, timeout: int = 25) -> List[str]:
"""
Get a list of the NAVET 'Relations' type codes between a NIN and a relatives NIN.
Known codes:
M = spouse (make/maka)
B = child (barn)
FA = father
MO = mother
VF = some kind of legal guardian status. Children typically have ['B', 'VF'] it seems.
:param nin: Swedish National Identity Number
:param relative_nin: Another Swedish National Identity Number
:param timeout: Max wait time for task to finish
:return: List of codes. Empty list if the NINs are not related.
"""
rtask = self._get_relations_to.apply_async(args=[nin, relative_nin])
try:
ret = rtask.get(timeout=timeout)
if ret is not None:
return ret
            raise MsgTaskFailed('No relations data returned from Navet')
except Exception as e:
rtask.forget()
raise MsgTaskFailed(f'get_relations_to task failed: {e}')
def sendsms(self, recipient: str, message: str, reference: str, timeout: int = 25) -> None:
"""
:param recipient: the recipient of the sms
:param message: message as a string (160 chars per sms)
:param reference: Audit reference to help cross reference audit log and events
:param timeout: Max wait time for task to finish
"""
logger.info(f'Trying to send SMS with reference: {reference}')
logger.debug(f'Recipient: {recipient}. Message: {message}')
rtask = self._send_sms.apply_async(args=[recipient, message, reference])
try:
res = rtask.get(timeout=timeout)
logger.info(f'SMS with reference {reference} sent. Task result: {res}')
except Exception as e:
rtask.forget()
raise MsgTaskFailed(f'sendsms task failed: {repr(e)}')
def ping(self, timeout: int = 1) -> str:
rtask = self._pong.apply_async()
try:
return rtask.get(timeout=timeout)
except Exception as e:
rtask.forget()
raise MsgTaskFailed(f'ping task failed: {repr(e)}')
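# Usage sketch (hypothetical config object): MsgConfigMixin carries the
# celery settings consumed by eduid_msg.init_app().
#
#     relay = MsgRelay(config)
#     address = relay.get_postal_address('190001011234')  # example NIN
#     relay.sendsms('+46700000000', 'hello', reference='audit-ref-1')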
|
nilq/baby-python
|
python
|
from __future__ import division
import numpy as np
import numpy.ma as ma
cimport numpy as np
from libc.stdint cimport int32_t
cimport cython
from libc.stdio cimport printf
@cython.embedsignature(True)
@cython.cdivision(True)
@cython.wraparound(False)
@cython.boundscheck(False)
def get_fit(object theta, object height):
"""
fits 3 lines to a vertical theta profile
parameters
----------
theta, height: numpy 1d array of floating point numbers
    returns:
    --------
    RSS: numpy 2d array of residual sums of squares for each (j, k) split
    J, K: integer indices of the two breakpoints that minimize the RSS
    """
theta=np.ascontiguousarray(theta)
theta=theta.astype(np.float64)
cdef double* thetaPtr= <double*> np.PyArray_DATA(theta)
height=np.ascontiguousarray(height)
height=height.astype(np.float64)
cdef double* heightPtr= <double*> np.PyArray_DATA(height)
cdef np.float64_t[:] fitvals=np.empty([theta.size],dtype=np.float64)
cdef np.float64_t[:,:] RSS=np.empty([290, 290],dtype=np.float64)
cdef int i, j, k, J, K
#cdef double num_b_11, num_b_12, num_b_13, dem_b_11, dem_b_12
#cdef double num_b_21, num_b_22, dem_b_21, dem_b_22, num_a_21, num_a_22
#cdef double num_b_31, num_b_32, dem_b_31, dem_b_32, num_a_31, num_a_32
#cdef double b_1, a_1, b_2, a_2, b_3, a_3, num_b, dem_b, num_b2, dem_b2, num_b_3, dem_b_3
#def get_fit(theta, height):
# """
# Fitting the local theta profile with three lines
#
# """
#RSS = np.empty((290, 290))+ np.nan
#print RSS[0,0]
for j in range(290):
if j > 2:
for k in range(290):
if k>j+1 and k<289:
b_1 = (np.sum(np.multiply(height[:j], theta[:j])) - 1/j*np.sum(height[:j])*np.sum(theta[:j]))/(np.sum(height[:j]**2) - 1/j*np.sum(height[:j])**2)
a_1 = np.sum(np.multiply(height[:j], theta[:j]))/np.sum(height[:j]) - b_1*np.sum(height[:j]**2)/np.sum(height[:j])
b_2 = (np.sum(theta[j:k]) - (k-j)*(a_1+b_1*height[j]))/(np.sum(height[j:k]) - (k-j)*height[j])
a_2 = np.sum(np.multiply(height[j:k], theta[j:k]))/np.sum(height[j:k]) - b_2*np.sum(height[j:k]**2)/np.sum(height[j:k])
b_3 = (np.sum(theta[k:290]) - (290-k)*(a_2+b_2*height[k]))/(np.sum(height[k:290]) - (290-k)*height[k])
a_3 = np.sum(np.multiply(height[k:290], theta[k:290]))/np.sum(height[k:290]) - b_3*np.sum(height[k:290]**2)/np.sum(height[k:290])
RSS[j, k] = np.sum(np.add(theta[2:j], -(a_1+ b_1*height[2:j]))**2) + np.sum(np.add(theta[j:k], -(a_2+ b_2*height[j:k]))**2) + np.sum(np.add(theta[k:290], -(a_3+ b_3*height[k:290]))**2)
                    if j==3 and k==5:
                        RSS_min = RSS[j, k]
                        J, K = j, k
if RSS[j, k]<RSS_min:
RSS_min = RSS[j, k]
J, K = j, k
return RSS, J, K
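# Build sketch: this is a Cython module (.pyx), so it must be compiled before
# it can be imported, e.g. (assumed filename):
#
#     cythonize -i get_fit.pyx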
|
nilq/baby-python
|
python
|
from market import application
from flask import render_template, redirect, url_for, flash, request
from market.models import Item, User
from market.forms import RegisterForm, LoginForm, PurchaseItemForm, SellItemForm
from market import db  # we can import directly from market because db is located in the dunder init file
from flask_login import login_user, logout_user, login_required, current_user
@application.route("/")
@application.route('/home')
def home_page():
return render_template('home.html')
@application.route('/about/<username>')
def about_page(username):
return f'<h1>This is the about page of {username}..</h1>'
@application.route('/market', methods=['GET','POST'])
@login_required
def market_page():
purchase_form=PurchaseItemForm()
selling_form=SellItemForm()
    if request.method=='POST':  # avoid the form-resubmission warning when both GET and POST are handled
#purchased item logic
purchased_item=request.form.get('purchased_item')
p_item_object=Item.query.filter_by(name=purchased_item).first() #filtering the item object based on name of purchased item
if p_item_object:
if current_user.can_purchase(p_item_object):
p_item_object.assign_ownership(current_user)
                flash(f'Congratulations! You purchased {p_item_object.name} for ₹{p_item_object.price}', category='success')
else:
flash(f"Unfortunately, you don't have enough money to purchase {p_item_object.name}", category='danger')
#sell item logic
sold_item=request.form.get('sold_item')
s_item_object=Item.query.filter_by(name=sold_item).first()
if s_item_object:
if current_user.can_sell(s_item_object):
s_item_object.sell(current_user)
                flash(f'Congratulations! You sold {s_item_object.name} for ₹{s_item_object.price}!', category='success')
else:
flash(f"Unfortunately, something went wrong with selling {s_item_object.name}", category='danger')
return redirect(url_for('market_page'))
if request.method=='GET':
items = Item.query.filter_by(owner=None) #display in available items only if there is no owner
owned_items=Item.query.filter_by(owner=current_user.id)
return render_template('market.html', items=items, purchase_form=purchase_form, owned_items= owned_items, selling_form=selling_form)
@application.route('/register', methods=['GET','POST'])
def register_page():
form=RegisterForm()
if form.validate_on_submit(): #checks if the validation conditions are met when user clicks submit button
user_to_create=User(username=form.username.data,
email_address=form.email_address.data,
password=form.password1.data)
db.session.add(user_to_create)
db.session.commit()
login_user(user_to_create)
flash(f'Account created successfully. You are logged in now as {user_to_create.username}', category='success')
return redirect(url_for('market_page'))
if form.errors != {}: #if there are errors
for err_msg in form.errors.values():
flash(f'There was an error with creating a user: {err_msg}', category='danger')
return render_template('register.html',form=form)
@application.route('/login', methods=['GET','POST'])
def login_page():
form=LoginForm()
if form.validate_on_submit():
attempted_user=User.query.filter_by(username=form.username.data).first()
if attempted_user and attempted_user.check_password_correction(
attempted_password=form.password.data):
login_user(attempted_user)
flash(f'Success! You are logged in as: {attempted_user.username}', category='success')
return redirect(url_for('market_page'))
else:
            flash('Username and password do not match! Please try again!', category='danger')
return render_template('login.html',form=form)
@application.route('/logout')
def logout_page():
logout_user()
flash('You have been logged out!', category='info')
return redirect(url_for('home_page'))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
File: zenodo_api_access.py
Created Date: September 22nd 2019
Author: ZL Deng <dawnmsg(at)gmail.com>
---------------------------------------
Last Modified: 22nd September 2019 7:45:14 pm
'''
import requests
import json
import click
from os import path
@click.command()
@click.argument("token", type=str)
# @click.option("-t", "--type",
# required=True,
# type=click.Choice(["dataset", "software", "publication"]),
# help="The type of the data to uploade")
@click.argument("metadata", type=click.Path(exists=True))
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option("-s", "--sandbox", is_flag=True,
help="Test in sandbox for uploading")
def upload(token, metadata, files, sandbox):
global BASE_URL
BASE_URL = "https://sandbox.zenodo.org" if sandbox else "https://zenodo.org"
global ACCESS_TOKEN
ACCESS_TOKEN = token
deposit_id = get_deposit_id(metadata)
for file in files:
filename = path.basename(file)
upload_data = {'filename': filename}
upload_file = {'file': open(file, 'rb')}
r = requests.post("{}/api/deposit/depositions/{}/files".format(BASE_URL, deposit_id),
params={
'access_token': ACCESS_TOKEN},
data=upload_data,
files=upload_file)
print("Uploading {}".format(filename))
if r.status_code >= 400:
raise RuntimeError("Error occurred while uploading {}, status code: {}".format(filename,
str(r.status_code)))
if click.confirm('''Do you want to publish the uploaded files?
Note, once a deposition is published, you can no longer delete it.'''):
publish(deposit_id)
print("Your deposition has been published!")
print(
"You can check your deposition here: {}/record/{}".format(BASE_URL, deposit_id))
return
print("Uploading done!")
print("You can check your deposition here: {}/record/{}".format(BASE_URL, deposit_id))
def get_deposit_id(metadata):
headers = {"Content-Type": "application/json"}
with open(metadata, "r") as fh:
metadata_content = json.load(fh)
metadata_content = json.dumps(metadata_content, ensure_ascii=True)
r = requests.post("{}/api/deposit/depositions".format(BASE_URL),
params={'access_token': ACCESS_TOKEN},
data=metadata_content,
json={},
headers=headers)
if r.status_code >= 400:
raise RuntimeError("Error occurred while creating deposit ID, status code: {}".format(
str(r.status_code)))
deposit_id = r.json()['id']
return deposit_id
def publish(deposit_id):
r = requests.post("{}/api/deposit/depositions/{}/actions/publish".format(BASE_URL, deposit_id),
params={'access_token': ACCESS_TOKEN})
if r.status_code >= 400:
raise RuntimeError("Error occurred while publishing your deposition, status code: {}".format(
str(r.status_code)))
if __name__ == '__main__':
upload()
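# Usage sketch: upload one or more files described by a metadata JSON file,
# optionally against the Zenodo sandbox first:
#
#     python zenodo_api_access.py <ACCESS_TOKEN> metadata.json data.csv --sandbox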
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Plot results from simulations optimizing 2D randomly-generated synthetic
objective functions.
"""
import numpy as np
import scipy.io as io
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib import rcParams
rcParams.update({'font.size': 18})
plt.close('all')
def plot_avg_objective_vals(filename_part1, filename_part2, num_trials, num_runs,
fig_num, plot_mean_SD = True, line_plot = False,
color = 'blue', alpha = 0.5, norm = False,
plot_SD = True, mean_linestyle = '-',
mean_linewidth = 1.5, plot_SE = True):
"""
Function to calculate means and standard deviations of objective function
values over the different runs, and add them to the given figure. Also,
includes an option for just plotting each sequence separately.
Options:
1) filenames of data files are assumed to be in the form
filename_part1_x_filename_part2, where x is the number corresponding to a
particular simulation run.
2) num_trials: number of trials to plot from each simulation
3) num_runs: number of repetitions of the experiment
4) fig_num: index of new figure
5) plot_mean_SD: whether to plot mean of trials and a measure of the
deviation from the mean
6) line_plot: if this is set to true, then plot trajectory of each
individual run
7) color: color of lines and shaded area
8) alpha: for setting transparency of shaded area (if any)
9) norm: if true, then normalize each objective function to lie between
0 and 1
10) plot_SD: if false, do not plot shaded area corresponding to standard
deviation or standard error. This is useful for just plotting the mean
of all the trials.
11) mean_linestyle and mean_linewidth: arguments for plotting the mean,
in case you want to change them from the defaults.
12) plot_SE: if True, then plot standard error instead of standard deviation.
"""
plt.figure(fig_num)
# Obtain the objective values over the runs.
obj_vals = np.empty((num_trials, num_runs))
for run in range(num_runs):
# Load and unpack results:
results = io.loadmat(filename_part1 + str(run) + filename_part2)
obj = results['objective_values'].flatten()[: num_trials]
if norm: # Normalize objective function values
obj_function = io.loadmat('Sampled_functions_2D/30_by_30/Sampled_objective_' + \
str(run) + '.mat')
obj_function = obj_function['sample'].flatten()
obj = (obj - np.min(obj_function)) / \
(np.max(obj_function) - np.min(obj_function))
obj_vals[:, run] = obj
if line_plot:
plt.plot(np.arange(1, num_trials + 1), obj_vals[:, run],
color = color)
if plot_mean_SD: # If plotting mean and deviation
mean = np.mean(obj_vals, axis = 1)
stdev = np.std(obj_vals, axis = 1)
if plot_SE: # If plotting standard error rather than standard dev.
stdev /= np.sqrt(num_runs)
# Plot the mean over the trials:
plt.plot(np.arange(1, num_trials + 1), mean, color = color,
linestyle = mean_linestyle, linewidth = mean_linewidth)
# Add deviation to plot
if plot_SD:
plt.fill_between(np.arange(1, num_trials + 1), mean - stdev,
mean + stdev, alpha = alpha, color = color)
#%% Plot an example objective function.
num_pts = [30, 30]
x_vals = np.linspace(0, 1, num_pts[0])
y_vals = np.linspace(0, 1, num_pts[1])
Y, X = np.meshgrid(x_vals, y_vals)
# Folder in which samples were saved:
save_folder = 'Sampled_functions_2D/30_by_30/'
obj_number = 1 # Objective function to plot
data = io.loadmat(save_folder + 'Sampled_objective_' + str(obj_number) + '.mat')
sample = data['sample']
# Normalize the sample:
sample = (sample - np.min(sample)) / (np.max(sample) - np.min(sample))
points_to_sample = data['points_to_sample']
fig = plt.figure(figsize = (7.2, 4.76))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(Y, X, sample, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.xlabel('x', labelpad = 10)
plt.ylabel('y', labelpad = 10)
ax.set_zlabel('\nObjective value', labelpad = 19)
plt.colorbar(surf, pad = 0.15, ticks = [0, 0.2, 0.4, 0.6, 0.8, 1])
plt.xticks([0, 0.5, 1])
plt.yticks([0, 0.5, 1])
ax.set_zticks([0, 0.5, 1])
ax.tick_params(axis='z', which='major', pad=13)
##%% Calculate and save the posterior mean that we will plot in the next cell,
## so that it can be loaded without being recalculated each time.
#
#from Preference_GP_learning import feedback
#
## Load data from experiment:
#
#buffer_size = 1
#save_folder = 'Buffer_dueling_mixed_initiative/'
#filename = save_folder + 'Opt_2D_900_buffer_' + str(buffer_size) + \
# '_vary_obj_run_' + str(obj_number) + '.mat'
#
#data = io.loadmat(filename)
#
## Load preference feedback:
#data_pt_idxs = data['data_pt_idxs']
#labels = data['labels'][:, 1]
#
## Load coactive feedback:
#virtual_pt_idxs = data['virtual_pt_idxs']
#virtual_labels = data['virtual_labels'][:, 1]
#
#preference_noise = data['preference_noise'][0][0]
#lengthscales = data['lengthscale'][0][0] * np.ones(2)
#signal_variance = data['signal_variance'][0][0]
#GP_noise_var = data['GP_noise_var'][0][0]
#
## Determine dimensionality of state space:
#if len(points_to_sample.shape) == 1:
# state_dim = 1
#else:
# state_dim = points_to_sample.shape[1]
#
#num_pts_sample = points_to_sample.shape[0]
#
## Instantiate the prior covariance matrix, using a squared exponential
## kernel in each dimension of the input space:
#GP_prior_cov = signal_variance * np.ones((num_pts_sample, num_pts_sample))
#
#for i in range(num_pts_sample):
#
# pt1 = points_to_sample[i, :]
#
# for j in range(num_pts_sample):
#
# pt2 = points_to_sample[j, :]
#
# for dim in range(state_dim):
#
# lengthscale = lengthscales[dim]
#
# if lengthscale > 0:
# GP_prior_cov[i, j] *= np.exp(-0.5 * ((pt2[dim] - pt1[dim]) / \
# lengthscale)**2)
#
# elif lengthscale == 0 and pt1[dim] != pt2[dim]:
#
# GP_prior_cov[i, j] = 0
#
#GP_prior_cov += GP_noise_var * np.eye(num_pts_sample)
#
#GP_prior_cov_inv = np.linalg.inv(GP_prior_cov)
#
## Update the Gaussian process preference model:
#posterior_model = feedback(np.vstack((data_pt_idxs, virtual_pt_idxs)),
# np.concatenate((labels, virtual_labels)), GP_prior_cov_inv,
# preference_noise)
#
## Posterior mean:
#post_mean = posterior_model['mean'].reshape(tuple(num_pts))
#
#io.savemat('Post_mean_for_plot.mat', {'post_mean': post_mean})
#%% Plot the posterior mean by loading a saved file, rather than re-fitting the model:
rcParams.update({'font.size': 18})
post_mean = io.loadmat('Post_mean_for_plot.mat')['post_mean']
# Plot posterior mean:
fig = plt.figure(figsize = (7.2, 4.76))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(Y, X, post_mean, cmap=cm.coolwarm, linewidth=0,
antialiased=False)
plt.xlabel('x', labelpad = 10)
plt.ylabel('y', labelpad = 10)
ax.set_zlabel('\nPosterior Utility', labelpad = 19)
plt.colorbar(surf, pad = 0.15)
plt.xticks([0, 0.5, 1])
plt.yticks([0, 0.5, 1])
ax.set_zticks([0, 0.03])
ax.tick_params(axis='z', which='major', pad=13)
#%% Make a plot with all learning curves on one plot (mean +/- standard error).
# Plot multi-dueling bandits cases.
rcParams.update({'font.size': 12})
# Color-blind friendly palette: https://gist.github.com/thriveth/8560036
CB_colors = ['#377eb8', '#4daf4a', '#ff7f00',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
colors = CB_colors[:3]
fig_num = 3
num_runs = 100 # Times experiment was repeated
filename_part2 = '.mat'
num_trials = 150 # Total number of posterior samples/trials
# Plot multi-dueling cases:
num_samples_values = [2, 3]
alpha = 0.4
for i, num_samples in enumerate(num_samples_values):
# Folder into which results are saved:
save_folder = 'GP_preference_multi_dueling/'
filename_part1 = save_folder + 'Opt_2D_900_' + str(num_samples) + '_samples_' \
+ 'vary_obj_run_'
# Plot mean +/- stdev:
plot_avg_objective_vals(filename_part1, filename_part2, num_trials, num_runs,
fig_num, plot_mean_SD = True, line_plot = False,
color = colors[i], norm = True, alpha = alpha,
mean_linestyle = 'dotted', mean_linewidth = 2)
# Folder into which results are saved:
save_folder = 'Multi_dueling_mixed_initiative/'
filename_part1 = save_folder + 'Opt_2D_900_' + str(num_samples) + '_samples_' \
+ 'vary_obj_run_'
# Plot mean +/- stdev:
plot_avg_objective_vals(filename_part1, filename_part2, num_trials, num_runs,
fig_num, plot_mean_SD = True, line_plot = False,
color = colors[i], norm = True, alpha = alpha,
mean_linewidth = 2)
# Plot preference buffer trials, multi-dueling:
buffer_size = 1
# Folder into which results are saved:
save_folder = 'Buffer_dueling/'
filename_part1 = save_folder + 'Opt_2D_900_buffer_' + str(buffer_size) + \
'_vary_obj_run_'
# Plot mean +/- stdev:
plot_avg_objective_vals(filename_part1, filename_part2, num_trials, num_runs,
fig_num, plot_mean_SD = True, line_plot = False,
color = colors[2], norm = True, alpha = alpha,
mean_linestyle = 'dotted', mean_linewidth = 2)
# Plot preference buffer trials, mixed-initiative:
# Folder into which results are saved:
save_folder = 'Buffer_dueling_mixed_initiative/'
filename_part1 = save_folder + 'Opt_2D_900_buffer_' + str(buffer_size) + \
'_vary_obj_run_'
# Plot mean +/- stdev:
plot_avg_objective_vals(filename_part1, filename_part2, num_trials, num_runs,
fig_num, plot_mean_SD = True, line_plot = False,
color = colors[2], norm = True, alpha = alpha,
mean_linewidth = 2)
plt.xlabel('Number of objective function evaluations')
plt.ylabel('Objective function value')
plt.ylim([0.4, 1])
plt.legend(['n = 2, b = 0', 'n = 2, b = 0, coactive',
'n = 3, b = 0', 'n = 3, b = 0, coactive',
'n = 1, b = 1', 'n = 1, b = 1, coactive'])
#%% Plot color-blind-friendly palette:
#CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
# '#f781bf', '#a65628', '#984ea3',
# '#999999', '#e41a1c', '#dede00']
#plt.figure()
#
#for i, color in enumerate(CB_color_cycle):
#
# plt.plot([0, 1], [i, i], c = color)
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
"Topic :: System :: Logging",
"Topic :: System :: Monitoring",
"Topic :: Text Processing :: Filters"
]
setup(
name="logmole",
version="0.9.1",
author="Rico Koschmitzky",
author_email="contact@ricokoschmitzky.com",
classifiers=classifiers,
packages=find_packages("src"),
package_dir={"": "src"},
url='https://github.com/rkoschmitzky/logmole',
license="MIT",
description='An Extendable and Versatile Logparsing System',
test_suite="tests"
)
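# Usage sketch: install from the repository root, or run the declared test
# suite:
#
#     pip install .
#     python setup.py test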
|
nilq/baby-python
|
python
|
import numpy as np
import cv2 as cv
def dist(p1x, p1y, p2x, p2y):
return np.sqrt((p1x-p2x)**2 + (p1y-p2y)**2)
class Map:
def __init__(self, length, height, thickness):
self.length = length
self.height = height
self.wallThickness = thickness
self.map = np.zeros((self.height, self.length, 3), dtype=np.uint8)
# Walls
for r in range(self.height):
for c in range(self.length):
if (r >= 0 and r < self.wallThickness) or (c >= 0 and c <self.wallThickness) or \
(r >= self.height-self.wallThickness and r < self.height) or \
(c >= self.length-self.wallThickness and c < self.length):
self.map[r][c][:] = (255, 255, 255)
def addCircle(self, posR, posC, radius):
self.map = cv.circle(self.map, (posC, posR), radius, (255, 255, 255), self.wallThickness)
def addBox(self, CornerR, CornerC, height, length):
self.map = cv.rectangle(self.map, (CornerC, CornerR), (CornerC+length, CornerR+height), (255, 255, 255), self.wallThickness)
def display(self):
cv.imshow("SLAM Environment", self.map)
def createMap(length, height, thickness):
return Map(length, height, thickness)
def main():
length = 960
height = 9*length//16
thickness = 5
room = Map(length, height, thickness)
room.addBox(200, 300, 100, 50)
room.addCircle(100, 100, 50)
    room.display()
    cv.waitKey(0)  # imshow needs waitKey to actually render and hold the window
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import re
with open('/Users/kosta/dev/advent-of-code-17/day12/input.txt') as f:
    links = f.readlines()
graph = {}
def traverse_graph(node):
if node == 0:
return True
node = graph[node]
node['is_visited'] = True
for edge in node['edges']:
if not graph[edge]['is_visited']:
if traverse_graph(edge):
return True
return False
for link in links:
    edges = re.findall(r'(\d+)\s<->\s(.*)', link)[0]
node = int(edges[0])
edges = list(map(int, edges[1].split(',')))
graph[node] = {'is_visited': False, 'edges': edges}
def clear_graph(graph):
for key in graph:
graph[key]['is_visited'] = False
total = 0
for node in graph:
if traverse_graph(node):
total += 1
clear_graph(graph)
print(total)
|
nilq/baby-python
|
python
|
""" Useful neuroimaging coordinate map makers and utilities """
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from nibabel.affines import from_matvec
from ...fixes.nibabel import io_orientation
from .coordinate_system import CoordSysMaker, is_coordsys, is_coordsys_maker
from .coordinate_map import CoordMapMaker
from ...externals.six import string_types
# Legacy repr printing from numpy.
from nipy.testing import legacy_printing as setup_module # noqa
class XYZSpace(object):
""" Class contains logic for spaces with XYZ coordinate systems
>>> sp = XYZSpace('hijo')
>>> print(sp)
hijo: [('x', 'hijo-x=L->R'), ('y', 'hijo-y=P->A'), ('z', 'hijo-z=I->S')]
>>> csm = sp.to_coordsys_maker()
>>> cs = csm(3)
>>> cs
CoordinateSystem(coord_names=('hijo-x=L->R', 'hijo-y=P->A', 'hijo-z=I->S'), name='hijo', coord_dtype=float64)
>>> cs in sp
True
"""
x_suffix = 'x=L->R'
y_suffix = 'y=P->A'
z_suffix = 'z=I->S'
def __init__(self, name):
self.name = name
@property
def x(self):
""" x-space coordinate name """
return "%s-%s" % (self.name, self.x_suffix)
@property
def y(self):
""" y-space coordinate name """
return "%s-%s" % (self.name, self.y_suffix)
@property
def z(self):
""" z-space coordinate name """
return "%s-%s" % (self.name, self.z_suffix)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self.name)
def __str__(self):
return "%s: %s" % (self.name, sorted(self.as_map().items()))
def __eq__(self, other):
""" Equality defined as having the same xyz names """
try:
otuple = other.as_tuple()
except AttributeError:
return False
return self.as_tuple() == otuple
def __ne__(self, other):
return not self == other
def as_tuple(self):
""" Return xyz names as tuple
>>> sp = XYZSpace('hijo')
>>> sp.as_tuple()
('hijo-x=L->R', 'hijo-y=P->A', 'hijo-z=I->S')
"""
return self.x, self.y, self.z
def as_map(self):
""" Return xyz names as dictionary
>>> sp = XYZSpace('hijo')
>>> sorted(sp.as_map().items())
[('x', 'hijo-x=L->R'), ('y', 'hijo-y=P->A'), ('z', 'hijo-z=I->S')]
"""
return dict(zip('xyz', self.as_tuple()))
def register_to(self, mapping):
""" Update `mapping` with key=self.x, value='x' etc pairs
The mapping will then have keys that are names we (``self``) identify as
being x, or y, or z, values are 'x' or 'y' or 'z'.
Note that this is the opposite way round for keys, values, compared to
the ``as_map`` method.
Parameters
----------
mapping : mapping
such as a dict
Returns
-------
None
Examples
--------
>>> sp = XYZSpace('hijo')
>>> mapping = {}
>>> sp.register_to(mapping)
>>> sorted(mapping.items())
[('hijo-x=L->R', 'x'), ('hijo-y=P->A', 'y'), ('hijo-z=I->S', 'z')]
"""
mapping.update(dict(zip(self.as_tuple(), 'xyz')))
def to_coordsys_maker(self, extras=()):
""" Make a coordinate system maker for this space
Parameters
----------
        extras : sequence
names for any further axes after x, y, z
Returns
-------
csm : CoordinateSystemMaker
Examples
--------
>>> sp = XYZSpace('hijo')
>>> csm = sp.to_coordsys_maker()
>>> csm(3)
CoordinateSystem(coord_names=('hijo-x=L->R', 'hijo-y=P->A', 'hijo-z=I->S'), name='hijo', coord_dtype=float64)
"""
return CoordSysMaker(self.as_tuple() + tuple(extras), name=self.name)
def __contains__(self, obj):
""" True if `obj` can be thought of as being 'in' this space
`obj` is an object that is in some kind of space - it can be a
coordinate system, a coordinate map, or an object with a ``coordmap``
attribute. We test the output coordinate system of `obj` against our
own space definition.
A coordinate system is in our space if it has all the axes of our space.
Parameters
----------
obj : object
Usually a coordinate system, a coordinate map, or an Image (with a
``coordmap`` attribute)
Returns
-------
tf : bool
True if `obj` is 'in' this space
Examples
--------
>>> from nipy.core.api import Image, AffineTransform, CoordinateSystem
>>> sp = XYZSpace('hijo')
>>> names = sp.as_tuple()
>>> cs = CoordinateSystem(names)
>>> cs in sp
True
>>> cs = CoordinateSystem(names + ('another_name',))
>>> cs in sp
True
>>> cmap = AffineTransform('ijk', names, np.eye(4))
>>> cmap in sp
True
>>> img = Image(np.zeros((3,4,5)), cmap)
>>> img in sp
True
"""
try:
obj = obj.coordmap
except AttributeError:
pass
try:
obj = obj.function_range
except AttributeError:
pass
my_names = self.as_tuple()
return set(my_names).issubset(obj.coord_names)
# Generic coordinate map maker for voxels (function_domain). Unlike nifti
# loading, by default the 4th axis is not time (because we don't know what it
# is).
voxel_csm = CoordSysMaker('ijklmnop', 'voxels')
# Module level mapping from key=name to values in 'x' or 'y' or 'z'
known_names = {}
known_spaces = []
# Standard spaces defined
for _name in ('unknown', 'scanner', 'aligned', 'mni', 'talairach'):
_space = XYZSpace(_name)
known_spaces.append(_space)
_space.register_to(known_names)
_csm = _space.to_coordsys_maker('tuvw')
_cmm = CoordMapMaker(voxel_csm, _csm)
# Put these into the module namespace
exec('%s_space = _space' % _name)
exec('%s_csm = _csm' % _name)
exec('vox2%s = _cmm' % _name)
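# For example, when _name == 'mni' the three exec calls above define the
# module-level names ``mni_space``, ``mni_csm`` and ``vox2mni``.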
def known_space(obj, spaces=None):
""" If `obj` is in a known space, return the space, otherwise return None
Parameters
----------
obj : object
Object that can be tested against an XYZSpace with ``obj in sp``
spaces : None or sequence, optional
spaces to test against. If None, use the module level ``known_spaces``
list to test against.
Returns
-------
sp : None or XYZSpace
If `obj` is not in any of the `known_spaces`, return None. Otherwise
return the first matching space in `known_spaces`
Examples
--------
>>> from nipy.core.api import CoordinateSystem
>>> sp0 = XYZSpace('hijo')
>>> sp1 = XYZSpace('hija')
Make a matching coordinate system
>>> cs = sp0.to_coordsys_maker()(3)
Test whether this coordinate system is in either of ``(sp0, sp1)``
>>> known_space(cs, (sp0, sp1))
XYZSpace('hijo')
So, yes, it's in ``sp0``. How about another generic CoordinateSystem?
>>> known_space(CoordinateSystem('xyz'), (sp0, sp1)) is None
True
So, no, that is not in either of ``(sp0, sp1)``
"""
if spaces is None:
# use module level global
spaces = known_spaces
for sp in spaces:
if obj in sp:
return sp
return None
def get_world_cs(world_id, ndim=3, extras='tuvw', spaces=None):
""" Get world coordinate system from `world_id`
Parameters
----------
    world_id : str, XYZSpace, CoordSysMaker or CoordinateSystem
Object defining a world output system. If str, then should be a name of
an XYZSpace in the list `spaces`.
ndim : int, optional
Number of dimensions in this world. Default is 3
extras : sequence, optional
Coordinate (axis) names for axes > 3 that are not named by `world_id`
spaces : None or sequence, optional
List of known (named) spaces to compare a str `world_id` to. If None,
use the module level ``known_spaces``
Returns
-------
world_cs : CoordinateSystem
A world coordinate system
Examples
--------
>>> get_world_cs('mni')
CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S'), name='mni', coord_dtype=float64)
>>> get_world_cs(mni_space, 4)
CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64)
>>> from nipy.core.api import CoordinateSystem
>>> get_world_cs(CoordinateSystem('xyz'))
CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64)
"""
if is_coordsys(world_id):
if world_id.ndim != ndim:
raise SpaceError("Need %d-dimensional CoordinateSystem" % ndim)
return world_id
if spaces is None:
spaces = known_spaces
if isinstance(world_id, string_types):
space_names = [s.name for s in spaces]
if world_id not in space_names:
            raise SpaceError('Unknown space "%s"; known spaces are %s'
% (world_id, ', '.join(space_names)))
world_id = spaces[space_names.index(world_id)]
if is_xyz_space(world_id):
world_id = world_id.to_coordsys_maker(extras)
if is_coordsys_maker(world_id):
return world_id(ndim)
raise ValueError('Expecting CoordinateSystem, CoordSysMaker, '
'XYZSpace, or str, got %s' % world_id)
class SpaceError(Exception):
pass
class SpaceTypeError(SpaceError):
pass
class AxesError(SpaceError):
pass
class AffineError(SpaceError):
pass
def xyz_affine(coordmap, name2xyz=None):
""" Return (4, 4) affine mapping voxel coordinates to XYZ from `coordmap`
If no (4, 4) affine "makes sense"(TM) for this `coordmap` then raise errors
listed below. A (4, 4) affine makes sense if the first three output axes
    are recognizably X, Y, and Z in that order AND there are corresponding
    input dimensions, AND the corresponding input dimensions are the first three
    input dimensions (in any order). Thus the input axes have to be 3D.
Parameters
----------
coordmap : ``CoordinateMap`` instance
name2xyz : None or mapping, optional
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Returns
-------
xyz_aff : (4,4) array
voxel to X, Y, Z affine mapping
Raises
------
SpaceTypeError : if this is not an affine coordinate map
AxesError : if not all of x, y, z recognized in `coordmap` output, or they
are in the wrong order, or the x, y, z axes do not correspond to the first
three input axes.
AffineError : if axes dropped from the affine contribute to x, y, z
coordinates.
Notes
-----
We could also try and "make sense" (TM) of a coordmap that had X, Y and Z
outputs, but not in that order, nor all in the first three axes. In that
case we could just permute the affine to get the output order we need. But,
that could become confusing if the returned affine has different output
coordinates than the passed `coordmap`. And it's more complicated. So,
let's not do that for now.
Examples
--------
>>> cmap = vox2mni(np.diag([2,3,4,5,1]))
>>> cmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 5., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> xyz_affine(cmap)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
"""
if name2xyz is None:
name2xyz = known_names
try:
affine = coordmap.affine
except AttributeError:
raise SpaceTypeError('Need affine coordinate map')
order = xyz_order(coordmap.function_range, name2xyz)
if order[:3] != [0, 1, 2]:
raise AxesError('First 3 output axes must be X, Y, Z')
# Check equivalent input axes
ornt = io_orientation(affine)
if set(ornt[:3, 0]) != set((0, 1, 2)):
raise AxesError('First 3 input axes must correspond to X, Y, Z')
# Check that dropped dimensions don't provide xyz coordinate info
extra_cols = affine[:3,3:-1]
if not np.allclose(extra_cols, 0):
raise AffineError('Dropped dimensions not orthogonal to xyz')
return from_matvec(affine[:3,:3], affine[:3,-1])
def xyz_order(coordsys, name2xyz=None):
""" Vector of orders for sorting coordsys axes in xyz first order
Parameters
----------
coordsys : ``CoordinateSystem`` instance
name2xyz : None or mapping, optional
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Returns
-------
xyz_order : list
Ordering of axes to get xyz first ordering. See the examples.
Raises
------
AxesError : if there are not all of x, y and z axes
Examples
--------
>>> from nipy.core.api import CoordinateSystem
>>> xyzt_cs = mni_csm(4) # coordsys with t (time) last
>>> xyzt_cs
CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64)
>>> xyz_order(xyzt_cs)
[0, 1, 2, 3]
>>> tzyx_cs = CoordinateSystem(xyzt_cs.coord_names[::-1], 'reversed')
>>> tzyx_cs
CoordinateSystem(coord_names=('t', 'mni-z=I->S', 'mni-y=P->A', 'mni-x=L->R'), name='reversed', coord_dtype=float64)
>>> xyz_order(tzyx_cs)
[3, 2, 1, 0]
"""
if name2xyz is None:
name2xyz = known_names
names = coordsys.coord_names
N = len(names)
axvals = np.zeros(N, dtype=int)
for i, name in enumerate(names):
try:
xyz_char = name2xyz[name]
except KeyError:
axvals[i] = N+i
else:
axvals[i] = 'xyz'.index(xyz_char)
if not set(axvals).issuperset(range(3)):
raise AxesError("Not all of x, y, z recognized in coordinate map")
return list(np.argsort(axvals))
def is_xyz_space(obj):
""" True if `obj` appears to be an XYZ space definition """
return (hasattr(obj, 'x') and
hasattr(obj, 'y') and
hasattr(obj, 'z') and
hasattr(obj, 'to_coordsys_maker'))
def is_xyz_affable(coordmap, name2xyz=None):
""" Return True if the coordap has an xyz affine
Parameters
----------
coordmap : ``CoordinateMap`` instance
Coordinate map to test
name2xyz : None or mapping, optional
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Returns
-------
tf : bool
True if `coordmap` has an xyz affine, False otherwise
Examples
--------
>>> cmap = vox2mni(np.diag([2,3,4,5,1]))
>>> cmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 5., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> is_xyz_affable(cmap)
True
>>> time0_cmap = cmap.reordered_domain([3,0,1,2])
>>> time0_cmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 0., 2., 0., 0., 0.],
[ 0., 0., 3., 0., 0.],
[ 0., 0., 0., 4., 0.],
[ 5., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> is_xyz_affable(time0_cmap)
False
"""
try:
xyz_affine(coordmap, name2xyz)
except SpaceError:
return False
return True
|
nilq/baby-python
|
python
|
from unittest import TestCase
from chibi_command.nix.systemd_run import System_run
class Test_systemd_run( TestCase ):
def test_should_work( self ):
result = System_run().preview()
self.assertEqual(
result,
( f'systemd-run --unit={System_run.kw["unit"]} '
'--property=Delegate=yes --user --scope' ) )
def test_set_command( self ):
        result = System_run( 'lxc-ls', '-f' ).preview()
self.assertEqual(
result,
( f'systemd-run --unit={System_run.kw["unit"]} '
'--property=Delegate=yes --user --scope lxc-ls -f' ) )
|
nilq/baby-python
|
python
|
values = (1, 2, 4, 5, 6, 6)  # renamed to avoid shadowing the built-in `tuple`
print(f'{values = }')
print(f'{values.count(6) = }')
|
nilq/baby-python
|
python
|
import unittest
import time
from app import create_app, db
from app.models import Permission, Role, User
class UserModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_password_setter(self):
u = User(password='password')
self.assertTrue(u.password_hash is not None)
def test_no_password_getter(self):
u = User(password='password')
with self.assertRaises(AttributeError):
u.password()
def test_password_verification(self):
u = User(password='password')
self.assertTrue(u.verify_password('password'))
self.assertFalse(u.verify_password('notpassword'))
def test_password_salts_are_random(self):
u = User(password='password')
u2 = User(password='password')
self.assertTrue(u.password_hash != u2.password_hash)
def test_valid_confirmation_token(self):
u = User(password='password')
db.session.add(u)
db.session.commit()
token = u.generate_confirmation_token()
self.assertTrue(u.confirm_account(token))
def test_invalid_confirmation_token(self):
u1 = User(password='password')
u2 = User(password='notpassword')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_confirmation_token()
self.assertFalse(u2.confirm_account(token))
def test_expired_confirmation_token(self):
u = User(password='password')
db.session.add(u)
db.session.commit()
token = u.generate_confirmation_token(1)
time.sleep(2)
self.assertFalse(u.confirm_account(token))
def test_valid_reset_token(self):
u = User(password='password')
db.session.add(u)
db.session.commit()
token = u.generate_password_reset_token()
self.assertTrue(u.reset_password(token, 'notpassword'))
self.assertTrue(u.verify_password('notpassword'))
def test_invalid_reset_token(self):
u1 = User(password='password')
u2 = User(password='notpassword')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_password_reset_token()
self.assertFalse(u2.reset_password(token, 'notnotpassword'))
self.assertTrue(u2.verify_password('notpassword'))
def test_valid_email_change_token(self):
u = User(email='user@example.com', password='password')
db.session.add(u)
db.session.commit()
token = u.generate_email_change_token('otheruser@example.org')
self.assertTrue(u.change_email(token))
self.assertTrue(u.email == 'otheruser@example.org')
def test_invalid_email_change_token(self):
u1 = User(email='user@example.com', password='password')
u2 = User(email='otheruser@example.org', password='notpassword')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_email_change_token('otherotheruser@example.net')
self.assertFalse(u2.change_email(token))
self.assertTrue(u2.email == 'otheruser@example.org')
def test_duplicate_email_change_token(self):
u1 = User(email='user@example.com', password='password')
u2 = User(email='otheruser@example.org', password='notpassword')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u2.generate_email_change_token('user@example.com')
self.assertFalse(u2.change_email(token))
self.assertTrue(u2.email == 'otheruser@example.org')
def test_roles_and_permissions(self):
Role.insert_roles()
u = User(email='user@example.com', password='password')
self.assertFalse(u.can(Permission.ADMINISTER))
def test_make_administrator(self):
Role.insert_roles()
u = User(email='user@example.com', password='password')
self.assertFalse(u.can(Permission.ADMINISTER))
u.role = Role.query.filter_by(
permissions=Permission.ADMINISTER).first()
self.assertTrue(u.can(Permission.ADMINISTER))
def test_administrator(self):
Role.insert_roles()
r = Role.query.filter_by(permissions=Permission.ADMINISTER).first()
u = User(email='user@example.com', password='password', role=r)
self.assertTrue(u.can(Permission.ADMINISTER))
self.assertTrue(u.is_admin())
|
nilq/baby-python
|
python
|
# coding: utf-8
import typing
from rolling.model.measure import Unit
class GlobalTranslation:
def __init__(self) -> None:
self._translation: typing.Dict[typing.Any, str] = {
Unit.LITTER: "litres",
Unit.CUBIC: "mètre cubes",
Unit.GRAM: "grammes",
Unit.KILOGRAM: "kilo-grammes",
Unit.UNIT: "unités",
}
self._short_translation: typing.Dict[typing.Any, str] = {
Unit.LITTER: "l",
Unit.CUBIC: "m³",
Unit.GRAM: "g",
Unit.KILOGRAM: "kg",
Unit.UNIT: "u",
}
def get(self, key: typing.Any, short: bool = False) -> str:
if short:
return self._short_translation[key]
return self._translation[key]
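# Minimal usage sketch (example values assumed, not part of the original module):
if __name__ == "__main__":
    translation = GlobalTranslation()
    print(translation.get(Unit.KILOGRAM))              # -> "kilo-grammes"
    print(translation.get(Unit.KILOGRAM, short=True))  # -> "kg"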
|
nilq/baby-python
|
python
|
"""The tests for the Xiaogui ble_parser."""
from ble_monitor.ble_parser import BleParser
class TestXiaogui:
"""Tests for the Xiaogui parser"""
def test_xiaogui_tzc4_stab(self):
"""Test Xiaogui parser for Xiaogui TZC4 (stabilized weight)."""
data_string = "043e1d0201030094e0e5295a5f1110ffc0a30276138b0002215f5a29e5e094bd"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Xiaogui"
assert sensor_msg["type"] == "TZC4"
assert sensor_msg["mac"] == "5F5A29E5E094"
assert sensor_msg["packet"] == 41761
assert sensor_msg["data"]
assert sensor_msg["non-stabilized weight"] == 63.0
assert sensor_msg["weight"] == 63.0
assert sensor_msg["impedance"] == 500.3
assert sensor_msg["stabilized"] == 1
assert sensor_msg["rssi"] == -67
def test_xiaogui_tzc4_non_stab(self):
"""Test Xiaogui parser for Xiaogui TZC4 (not stabilized weight)."""
data_string = "043e1d0201030094e0e5295a5f1110ffc05d008c00000002205f5a29e5e094bf"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Xiaogui"
assert sensor_msg["type"] == "TZC4"
assert sensor_msg["mac"] == "5F5A29E5E094"
assert sensor_msg["packet"] == 23840
assert sensor_msg["data"]
assert sensor_msg["non-stabilized weight"] == 14.0
assert "weight" not in sensor_msg
assert "impedance" not in sensor_msg
assert sensor_msg["stabilized"] == 0
assert sensor_msg["rssi"] == -65
def test_xiaogui_maxxmee_qjj_stab(self):
"""Test Xiaogui parser for MaxxMee Mod QJ-J (stabilized weight)."""
data_string = "043e1d0201030094e0e5295a5f1110ffc07d2c4700000a01255f5a29e5e094bd"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Xiaogui"
assert sensor_msg["type"] == "QJ-J"
assert sensor_msg["mac"] == "5F5A29E5E094"
assert sensor_msg["packet"] == 32037
assert sensor_msg["data"]
assert sensor_msg["non-stabilized weight"] == 113.35
assert sensor_msg["weight"] == 113.35
assert sensor_msg["stabilized"] == 1
assert sensor_msg["rssi"] == -67
def test_xiaogui_maxxmee_qjj_non_stab(self):
"""Test Xiaogui parser for MaxxMee Mod QJ-J (not stabilized weight)."""
data_string = "043e1d0201030094e0e5295a5f1110ffc024000000000a01245f5a29e5e094bd"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Xiaogui"
assert sensor_msg["type"] == "QJ-J"
assert sensor_msg["mac"] == "5F5A29E5E094"
assert sensor_msg["packet"] == 9252
assert sensor_msg["data"]
assert sensor_msg["non-stabilized weight"] == 0.0
assert "weight" not in sensor_msg
assert "impedance" not in sensor_msg
assert sensor_msg["stabilized"] == 0
assert sensor_msg["rssi"] == -67
|
nilq/baby-python
|
python
|
from apiv1 import blueprint as apiv1
from flask import Flask
app = Flask(__name__)
app.debug = True
app.secret_key = 'cc_development'
app.register_blueprint(apiv1)
if __name__ == "__main__":
app.run()
|
nilq/baby-python
|
python
|
from flask import render_template, url_for, flash, redirect, request, abort, Blueprint
from flask_login import login_user, logout_user, current_user, login_required
from thewarden import db
from thewarden.users.forms import (RegistrationForm, LoginForm,
UpdateAccountForm, RequestResetForm,
ResetPasswordForm, ApiKeysForm)
from werkzeug.security import check_password_hash, generate_password_hash
from thewarden.models import User, Trades, AccountInfo
from thewarden.users.utils import send_reset_email, fx_list, regenerate_nav
users = Blueprint("users", __name__)
@users.route("/register", methods=["GET", "POST"])
def register():
if current_user.is_authenticated:
return redirect(url_for("main.home"))
form = RegistrationForm()
if form.validate_on_submit():
hash = generate_password_hash(form.password.data)
user = User(username=form.username.data,
email=form.email.data,
password=hash)
db.session.add(user)
db.session.commit()
flash(f"Account created for {form.username.data}.", "success")
return redirect(url_for("users.login"))
return render_template("register.html", title="Register", form=form)
@users.route("/login", methods=["GET", "POST"])
def login():
if current_user.is_authenticated:
return redirect(url_for("main.home"))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
            # request.args.get is handy here: it returns None when the key is
            # missing, instead of raising like a plain dictionary lookup.
next_page = request.args.get("next") # get the original page
if next_page:
return redirect(next_page)
else:
return redirect(url_for("main.home"))
else:
flash("Login failed. Please check e-mail and password", "danger")
return render_template("login.html", title="Login", form=form)
@users.route("/logout")
def logout():
logout_user()
return redirect(url_for("main.home"))
@users.route("/account", methods=["GET", "POST"])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
# Recalculate the NAV
current_user.image_file = form.basefx.data
current_user.email = form.email.data
db.session.commit()
regenerate_nav()
flash(
f"Account updated and NAV recalculated to use " +
f"{form.basefx.data} as a base currency", "success")
return redirect(url_for("users.account"))
elif request.method == "GET":
form.email.data = current_user.email
# Check if the current value is in list of fx
# If not, default to USD
fx = fx_list()
found = [item for item in fx if current_user.image_file in item]
if found != []:
form.basefx.data = current_user.image_file
else:
form.basefx.data = "USD"
return render_template("account.html", title="Account", form=form)
@users.route("/delacc", methods=["GET"])
@login_required
# Takes one argument {id} - user id for deletion
def delacc():
if request.method == "GET":
id = request.args.get("id")
trade = Trades.query.filter_by(id=id)
if trade[0].user_id != current_user.username:
abort(403)
AccountInfo.query.filter_by(account_id=id).delete()
db.session.commit()
flash("Account deleted", "danger")
return redirect(url_for("transactions.tradeaccounts"))
else:
return redirect(url_for("transactions.tradeaccounts"))
@users.route("/reset_password", methods=["GET", "POST"])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for("main.home"))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
send_reset_email(user)
flash(
"An email has been sent with instructions to reset your" +
" password.",
"info",
)
return redirect(url_for("users.login"))
return render_template("reset_request.html",
title="Reset Password",
form=form)
@users.route("/reset_password/<token>", methods=["GET", "POST"])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for("main.home"))
user = User.verify_reset_token(token)
if user is None:
flash("That is an invalid or expired token", "warning")
return redirect(url_for("users.reset_request"))
form = ResetPasswordForm()
if form.validate_on_submit():
hash = generate_password_hash(form.password.data)
user.password = hash
db.session.commit()
flash("Your password has been updated! You are now able to log in",
"success")
return redirect(url_for("users.login"))
return render_template("reset_token.html",
title="Reset Password",
form=form)
@users.route("/services", methods=["GET"])
def services():
return render_template("services.html", title="Services Available")
# API Keys Management
@users.route("/apikeys_management", methods=["GET", "POST"])
def apikeys_management():
from thewarden.pricing_engine.pricing import api_keys_class
api_keys_json = api_keys_class.loader()
form = ApiKeysForm()
if request.method == "GET":
form.dojo_key.data = api_keys_json['dojo']['api_key']
form.dojo_onion.data = api_keys_json['dojo']['onion']
form.bitmex_key.data = api_keys_json['bitmex']['api_key']
form.bitmex_secret.data = api_keys_json['bitmex']['api_secret']
form.aa_key.data = api_keys_json['alphavantage']['api_key']
return render_template("apikeys_management.html",
title="API Keys Management",
form=form)
if request.method == "POST":
api_keys_json['dojo']['api_key'] = form.dojo_key.data
api_keys_json['dojo']['onion'] = form.dojo_onion.data
api_keys_json['bitmex']['api_key'] = form.bitmex_key.data
api_keys_json['bitmex']['api_secret'] = form.bitmex_secret.data
api_keys_json['alphavantage']['api_key'] = form.aa_key.data
api_keys_class.saver(api_keys_json)
flash("Keys Updated Successfully", "success")
return render_template("apikeys_management.html",
title="API Keys Management",
form=form)
# API Keys Management
@users.route("/pricing_status", methods=["GET"])
def pricing_status():
return render_template("pricing_status.html",
title="Status of Pricing services")
|
nilq/baby-python
|
python
|
from python import radar
import matplotlib.pyplot as plt
import glob
import os
import imageio
import cv2
import numpy as np
import scipy.io as sio
from skimage import io
Rad_img=True
if Rad_img:
i=0
ncols=4
else:
i=-1
ncols=3
#scene = 3
scene = 'city_3_7'
data_dir_image_info = '/home/ms75986/Desktop/Qualcomm/RADIATE/radiate_sdk/data/radiate/'+scene+'/20-final-rad-info-polar-test-10/radar-cart-img_annotated_nw_orig/'
data_dir_original = '/home/ms75986/Desktop/Qualcomm/RADIATE/radiate_sdk/data/radiate/'+scene+'/Navtech_Polar/radar-cart-img_annotated_nw_orig/'
data_dir_sparse = '/home/ms75986/Desktop/Qualcomm/RADIATE/radiate_sdk/data/radiate/'+scene+'/reconstruct-polar-same-meas-20/radar-cart-img_annotated_nw_orig/'#reconstruct-same-meas-20_annotated/'#reconstruct/reshaped_annotated/'
data_dir_prev_info = '/home/ms75986/Desktop/Qualcomm/RADIATE/radiate_sdk/data/radiate/'+scene+'/20-final-rad-info-polar-test-12/radar-cart-img_annotated_nw_orig/'
data_path = os.path.join(data_dir_image_info,'*png')
files = sorted(glob.glob(data_path))
for num,images in enumerate(files):
#if Rad_img==True:
# if num<1:
# continue
#print(images)
images = data_dir_image_info + str(num+1)+'.png'
    X_image_info = cv2.imread(images)  # pass cv2.IMREAD_GRAYSCALE for grayscale
original_file = data_dir_original + str(num+1)+'.png' #images[100:]
print(original_file)
X_original = cv2.imread(original_file)#, cv2.IMREAD_GRAYSCALE)
sparse_file = data_dir_sparse + str(num+1)+'.png' #images[100:]
print(sparse_file)
X_sparse = cv2.imread(sparse_file)#, cv2.IMREAD_GRAYSCALE)
if Rad_img:
prev_file = data_dir_prev_info + str(num+1)+'.png' #images[100:]
X_info_prev = cv2.imread(prev_file)#, cv2.IMREAD_GRAYSCALE)
fig, axs = plt.subplots(nrows=1, ncols=ncols, figsize=(20,20))
if Rad_img:
axs[i].axis('off')
full_title = images[100:] + ' Rad-Info-1'# prev Image info'
axs[i].title.set_text(full_title)
axs[i].imshow(X_info_prev, cmap='gray', vmin=0, vmax=255)
full_title = images[100:] + ' Rad-info-2'
axs[i+1].axis('off')
axs[i+1].title.set_text(full_title)
axs[i+1].imshow(X_image_info, cmap='gray', vmin=0, vmax=255)
axs[i+2].axis('off')
axs[i+2].title.set_text('Sparse-baseline')
axs[i+2].imshow(X_sparse, cmap='gray', vmin=0, vmax=255)
axs[i+3].axis('off')
axs[i+3].title.set_text('orig-radar-network')
axs[i+3].imshow(X_original, cmap='gray', vmin=0, vmax=255)
#plt.savefig('test.png')
plt.show()
#break
|
nilq/baby-python
|
python
|
from django.db import models
class User(models.Model):
name = models.CharField(max_length=30)
surname = models.CharField(max_length=30)
password = models.CharField(max_length=12, blank=True)
email = models.CharField(max_length=50, blank=True)
telephone = models.CharField(max_length=15)
isAdmin = models.BooleanField(default=False)
|
nilq/baby-python
|
python
|
from dku_error_analysis_decision_tree.node import Node, NumericalNode, CategoricalNode
from dku_error_analysis_utils import safe_str
from mealy import ErrorAnalyzerConstants
import pandas as pd
from collections import deque
class InteractiveTree(object):
"""
A decision tree
ATTRIBUTES
df: pd.DataFrame, the dataset
target: str, the name of the target feature
nodes: dict, a map from ids to the corresponding nodes in the tree
num_features: set, a set containing the numerical feature names
ranked_features: list of dict with three keys:
* name - name of the feature
* numerical - whether the feature is numerical
* rank - the feature importance
bin_edges: dict, mapping numerical features to a list containing the bin edges for whole data
leaves: set, set of leaves id
"""
def __init__(self, df, target, ranked_features, num_features):
self.df = df.dropna(subset=[target]) # TODO
self.target = target
self.num_features = num_features
self.nodes = {}
self.leaves = set()
self.add_node(Node(0, -1))
self.ranked_features = []
for idx, ranked_feature in enumerate(ranked_features):
self.ranked_features.append({
"rank": idx,
"name": ranked_feature,
"numerical": ranked_feature in num_features
})
self.bin_edges = {}
def to_dot_string(self, size=(50, 50)):
dot_str = 'digraph Tree {{\n size="{0},{1}!";\nnode [shape=box, style="filled, rounded", color="black", fontname=helvetica] ;\n'.format(size[0], size[1])
dot_str += 'edge [fontname=helvetica] ;\ngraph [ranksep=equally, splines=polyline] ;\n'
ids = deque()
ids.append(0)
while ids:
node = self.get_node(ids.popleft())
dot_str += node.to_dot_string() + "\n"
if node.parent_id >= 0:
edge_width = max(1, ErrorAnalyzerConstants.GRAPH_MAX_EDGE_WIDTH * node.global_error)
dot_str += '{} -> {} [penwidth={}];\n'.format(node.parent_id, node.id, edge_width)
ids += node.children_ids
dot_str += '{rank=same ; '+ '; '.join(map(safe_str, self.leaves)) + '} ;\n'
dot_str += "}"
return dot_str
def set_node_info(self, node_id, class_samples):
node = self.get_node(node_id)
if node_id == 0:
node.set_node_info(self.df.shape[0], class_samples, 1)
else:
root = self.get_node(0)
global_error = class_samples[ErrorAnalyzerConstants.WRONG_PREDICTION] / root.local_error[1]
node.set_node_info(root.samples[0], class_samples, global_error)
def jsonify_nodes(self):
jsonified_tree = {}
for key, node in self.nodes.items():
jsonified_tree[str(key)] = node.jsonify()
return jsonified_tree
def add_node(self, node):
self.nodes[node.id] = node
self.leaves.add(node.id)
parent_node = self.get_node(node.parent_id)
if parent_node is not None:
parent_node.children_ids.append(node.id)
self.leaves.discard(node.parent_id)
def get_node(self, i):
return self.nodes.get(i)
def add_split_no_siblings(self, node_type, parent_id, feature, value, left_node_id, right_child_id):
if node_type == Node.TYPES.NUM:
left = NumericalNode(left_node_id, parent_id, feature, end=value)
right = NumericalNode(right_child_id, parent_id, feature, beginning=value)
else:
left = CategoricalNode(left_node_id, parent_id, feature, value)
right = CategoricalNode(right_child_id, parent_id, feature, list(value), others=True)
self.add_node(left)
self.add_node(right)
def get_filtered_df(self, node_id, df=None):
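        # Walk up from `node_id` to the root, applying each ancestor's filter.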
df = self.df if df is None else df
while node_id > 0:
node = self.get_node(node_id)
df = node.apply_filter(df)
node_id = node.parent_id
return df
def get_stats(self, i, col, nr_bins, enforced_bins=None): #TODO
filtered_df = self.get_filtered_df(i)
column = filtered_df[col]
target_column = filtered_df[self.target]
if col in self.num_features:
if column.empty:
bins = column
else:
if col not in self.bin_edges or len(self.bin_edges[col]) != nr_bins + 1:
_, bin_edges = pd.cut(self.df[col], bins=min(nr_bins, self.df[col].nunique()),
retbins=True, include_lowest=True, right=False)
self.bin_edges[col] = bin_edges
bins = column if column.empty else pd.cut(column, bins=self.bin_edges[col], right=False)
return InteractiveTree.get_stats_numerical_node(bins, target_column)
if i == 0:
nr_bins = -1
return InteractiveTree.get_stats_categorical_node(column, target_column, nr_bins, enforced_bins)
@staticmethod
def get_stats_numerical_node(binned_column, target_column):
stats = {
"bin_edge": [],
"target_distrib": {ErrorAnalyzerConstants.WRONG_PREDICTION: [], ErrorAnalyzerConstants.CORRECT_PREDICTION: []},
"mid": [],
"count": []
}
if not binned_column.empty:
target_grouped = target_column.groupby(binned_column)
target_distrib = target_grouped.apply(lambda x: x.value_counts())
col_distrib = target_grouped.count()
for interval, count in col_distrib.items():
target_distrib_dict = target_distrib[interval].to_dict() if count > 0 else {}
stats["target_distrib"][ErrorAnalyzerConstants.WRONG_PREDICTION].append(target_distrib_dict.get(ErrorAnalyzerConstants.WRONG_PREDICTION, 0))
stats["target_distrib"][ErrorAnalyzerConstants.CORRECT_PREDICTION].append(target_distrib_dict.get(ErrorAnalyzerConstants.CORRECT_PREDICTION, 0))
stats["count"].append(count)
stats["mid"].append(interval.mid)
if len(stats["bin_edge"]) == 0:
stats["bin_edge"].append(interval.left)
stats["bin_edge"].append(interval.right)
return stats
@staticmethod
def get_stats_categorical_node(column, target_column, nr_bins, bins):
stats = {
"bin_value": [],
"target_distrib": {ErrorAnalyzerConstants.WRONG_PREDICTION: [], ErrorAnalyzerConstants.CORRECT_PREDICTION: []},
"count": []
}
if not column.empty:
if bins:
nr_bins = len(bins)
target_grouped = target_column.groupby(column.fillna("No values").apply(safe_str))
target_distrib = target_grouped.value_counts(dropna=False)
col_distrib = target_grouped.count().sort_values(ascending=False)
values = col_distrib.index if not bins else bins
for value in values:
target_distrib_dict = target_distrib[value].to_dict()
stats["target_distrib"][ErrorAnalyzerConstants.WRONG_PREDICTION].append(target_distrib_dict.get(ErrorAnalyzerConstants.WRONG_PREDICTION, 0))
stats["target_distrib"][ErrorAnalyzerConstants.CORRECT_PREDICTION].append(target_distrib_dict.get(ErrorAnalyzerConstants.CORRECT_PREDICTION, 0))
stats["count"].append(col_distrib[value])
stats["bin_value"].append(value)
if len(stats["bin_value"]) == nr_bins:
return stats
return stats
|
nilq/baby-python
|
python
|
from io import StringIO
from differently.cli import entry
def test__text_vs_text() -> None:
writer = StringIO()
assert entry(["examples/1.md", "examples/2.md"], writer) == 0
assert (
writer.getvalue()
== """# "differently" example file = # "differently" example file
=
To run this example, install `differently` then run: = To run this example, install `differently` then run:
=
```bash = ```bash
differently 1.md 2.md = differently 1.md 2.md
``` = ```
=
This line says "foo" in 1.md. ~ This line says "bar" in 2.md.
=
Now, a deletion: = Now, a deletion:
x
Hello from 1.md. x
=
The line above should appear in 1.md but deleted in = The line above should appear in 1.md but deleted in
the diff because it's not in 2.md. = the diff because it's not in 2.md.
=
And finally, this next line doesn't exist in 1.md but = And finally, this next line doesn't exist in 1.md but
should be added in the diff because it's in 2.md: = should be added in the diff because it's in 2.md:
>
> Hello from 2.md.
"""
)
def test__json_vs_yaml_as_json() -> None:
writer = StringIO()
assert (
entry(
[
"examples/1.json",
"examples/2.yml",
"--in-format",
"json,yaml",
"--out-format",
"json",
],
writer,
)
== 0
)
assert (
writer.getvalue()
== """{ = {
"array_of_dictionaries": [ = "array_of_dictionaries": [
{ = {
"name": "Bobby Pringles", ~ "name": "Bobby Salami",
"occupation": "Fire Starter" ~ "occupation": "Fire Fighter"
}, = },
{ = {
"name": "Susan Cheddar", = "name": "Susan Cheddar",
"occupation": "Transporter Chief" = "occupation": "Transporter Chief"
}, = },
{ = {
"name": "Jade Rat", = "name": "Jade Rat",
"occupation": "Lightning Conductor" ~ "occupation": "Lightning Chaser"
} = }
], = ],
"array_of_strings": [ = "array_of_strings": [
"This is the first line.", = "This is the first line.",
"This is the second line.", = "This is the second line.",
> "This is the second-point-five line.",
"This is the third line." = "This is the third line."
], = ],
"dictionary": { = "dictionary": {
> "flavour": "Cheese and Onion",
"greeting": "Hello", = "greeting": "Hello",
"sound": "Fire Truck", x
"username": "operator" ~ "username": "root"
} = }
} = }
"""
)
def test_multiple_in_no_out() -> None:
writer = StringIO()
assert entry(["--in-format", "json,yaml"], writer) == 1
assert (
writer.getvalue()
== 'You must include "--out-format" when you specify multiple values for "--in-format".\n'
)
def test_version() -> None:
writer = StringIO()
assert entry(["--version"], writer) == 0
assert writer.getvalue() == "-1.-1.-1\n"
|
nilq/baby-python
|
python
|
# .--. .-'. .--. .--. .--. .--. .`-. .%
#:::::.\::::::::.\::::::::.\::::::::.\::::::::.\::::::::.\::::::::.\::::::%
#' `--' `.-' `--' `--' `--' `-.' `--' %
# Information %
#' .--. .'-. .--. .--. .--. .-'. .--. %
#:::::'/::::::::'/::::::::'/::::::::'/::::::::'/::::::::'/::::::::'/::::::%
# `--' `-.' `--' `--' `--' `--' `.-' `%
#File type: Nexus Project Python Function File
#File name: robotCommunication (robotCommunication_BBB.py)
#Description: Robot communication file for the Pi. Talks to the BBB connected to the Khan chassis and BBB Khan cape.
#Inputs/Resources: serial
#Output/Created files: N/A
#Written by: Keith Tiemann
#Created: 1/3/2015
#Last modified: 1/3/2016
#Version: 1.0.0
#Example usage: N/A
#Notes: N/A
#=========================================================================%
# Imports %
#=========================================================================%
import serial
import time
#=========================================================================%
# Functions %
#=========================================================================%
port = None
def setupPins():
    # Module-level port so the other functions below can reference it;
    # without this, `port` was local to setupPins and the rest raised NameError.
    global port
    port = serial.Serial("/dev/ttyAMA0", baudrate=9600, timeout=None)
    port.close()
    port.open()
def cleanupPins():
    port.close()
def receiveCode():
    string = port.read()
    time.sleep(0.1)
    remaining_bytes = port.inWaiting()
    string += port.read(remaining_bytes)
    return string
def sendCode(string):
    if port.isOpen():
        port.write(string)
        time.sleep(0.1)
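# Hedged usage sketch (the header lists no example; the command string is
# hypothetical and this requires the serial hardware to be attached):
if __name__ == "__main__":
    setupPins()
    sendCode("STATUS\n")
    print(receiveCode())
    cleanupPins()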
# .--. .-'. .--. .--. .--. .--. .`-. .%
#:::::.\::::::::.\::::::::.\::::::::.\::::::::.\::::::::.\::::::::.\::::::%
#' `--' `.-' `--' `--' `--' `-.' `--' %
# End %
#' .--. .'-. .--. .--. .--. .-'. .--. %
#:::::'/::::::::'/::::::::'/::::::::'/::::::::'/::::::::'/::::::::'/::::::%
# `--' `-.' `--' `--' `--' `--' `.-' `%
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 3 11:51:03 2018
@author: robertcarson
"""
import numpy as np
a = 3.0 * np.ones((5, 2))
a[:, 0] = 1.0
print(a)
a[a < 3.0] = 4.0
print(a)
'''
Let's do a rotation example next using Bunge angles and then a simple passive rotation of our
coordinate system. The difference between a passive and an active rotation pretty much comes
down to whether we want to rotate our coordinate system or simply the body itself. If we
are rotating the body then it's an active rotation. If we are rotating the coordinate
system it's a passive rotation. Also, the active and passive rotation matrices are related
by a simple transpose operation on the rotation matrix.
We're going to be going row by row here, so it makes sense to keep the standard
row memory stride setup.
'''
bunge = np.ones((3, 4))
s1 = np.sin(bunge[0, :])
c1 = np.cos(bunge[0, :])
s2 = np.sin(bunge[1, :])
c2 = np.cos(bunge[1, :])
s3 = np.sin(bunge[2, :])
c3 = np.cos(bunge[2, :])
nelems = bunge.shape[1]
#We're going to make this a column memory stride setup since we'll be using the
#first two dimensions the most often.
rmat = np.zeros((3, 3, nelems), order='F')
'''
We could also do this using iterators like the above. However, we would take a
performance hit, because we would be striding across memory instead of operating on
consecutive memory.
Also, if we'd wanted to we could have also have just calculated the necessary sines and
cosines in this loop instead of doing it all at once like we did above.
However, if we'd done that then we'd would want to change the bunge array so that it was
using column strides for its memory layout.
'''
for i in range(nelems):
rmat[0, 0, i] = c1[i] * c3[i] - s1[i] * s3[i] * c2[i]
rmat[0, 1, i] = -c1[i] * s3[i] - s1[i] * c2[i] * c3[i]
rmat[0, 2, i] = s1[i] * s2[i]
rmat[1, 0, i] = s1[i] * c3[i] + c1[i] * c2[i] * s3[i]
rmat[1, 1, i] = -s1[i] * s3[i] + c1[i] * c2[i] * c3[i]
rmat[1, 2, i] = -c1[i] * s2[i]
rmat[2, 0, i] = s2[i] * s3[i]
rmat[2, 1, i] = s2[i] * c3[i]
rmat[2, 2, i] = c2[i]
print(rmat[:, :, 0])
eye2d = np.eye(3)
mat_rot = np.zeros((3, 3, nelems), order='F')
crd_sys_rot = np.zeros((3, 3, nelems), order='F')
for i in range(nelems):
mat_rot[:,:,i] = rmat[:,:,i].dot(eye2d.dot(rmat[:,:,i]).T)
#Since we are just multiplying my identity here our
#coordinate system is just equal to our Rotation matrix
crd_sys_rot[:,:,i] = rmat[:,:,i].dot(eye2d)
print(crd_sys_rot[:,:,0])
print(mat_rot[:,:,0])
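# Quick sketch verifying the transpose relationship mentioned above: a rotation
# matrix is orthonormal, so composing the passive rotation with its transpose
# (the active rotation) should recover the identity to floating point error.
for i in range(nelems):
    assert np.allclose(rmat[:, :, i].dot(rmat[:, :, i].T), eye2d)
print('passive/active transpose check passed')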
|
nilq/baby-python
|
python
|
from platon_env.base.host import Host
# host = Host('10.10.8.209', 'juzhen', 'Juzhen123!')
from platon_env.utils.md5 import md5
# host = Host('192.168.16.121', 'juzix', password='123456')
host = Host('192.168.21.42', 'shing', password='aa123456')
base_dir = '/home/shing'
def test_pid():
pid = host.pid('cpu')
assert type(pid) is str
def test_ssh():
# result = host.ssh('ls')
# assert type(result) is str
host.ssh('mkdir tests')
dir_list = host.ssh('ls')
assert 'tests' in dir_list
def test_is_exist():
assert host.file_exist(base_dir)
assert host.file_exist(base_dir + "/hello") is False
def test_put_via_tmp():
platon_bin = 'file/platon'
tmp_file = host.fast_put(platon_bin)
tem_dir, md5_value = tmp_file.split('/')[0], tmp_file.split('/')[1]
assert tem_dir == host.tmp_dir and md5_value == md5(platon_bin)
result = host.fast_put('file/platon', 'platon_evn/platon')
assert result is None
def test_save_to_file():
result = host.write_file('hello world', '/home/juzix/test.txt')
assert result is None
def test_add_to_platon():
pass
def test_add_to_alaya():
pass
def test_add_to_private_chain():
pass
|
nilq/baby-python
|
python
|
import numpy as np
import gym
from gym import spaces
import math
import cv2
import random
import time
import pybullet
import pybullet_data
from src.mini_cheetah_class import Mini_Cheetah
from src.dynamics_randomization import DynamicsRandomizer
class Terrain():
def __init__(self,render = True,on_rack = False, terrain_type = 'plane'):
self._is_render = render
self._on_rack = on_rack
if self._is_render:
pybullet.connect(pybullet.GUI)
else:
pybullet.connect(pybullet.DIRECT)
#Robot Positions
self._robot_init_pos =[0,0,0.4]
self._robot_init_ori = [0, 0, 0, 1]
#Simulation Parameters
self.dt = 0.005
self._frame_skip = 25
pybullet.resetSimulation()
        pybullet.setPhysicsEngineParameter(numSolverIterations=300)
pybullet.setTimeStep(self.dt/self._frame_skip)
pybullet.setGravity(0, 0, -9.8)
# Load Terrain
if(terrain_type == 'plane' or terrain_type == 'stairs'):
self.plane = pybullet.loadURDF("%s/plane.urdf" % pybullet_data.getDataPath())
pybullet.changeVisualShape(self.plane,-1,rgbaColor=[1,1,1,0.9])
if(terrain_type=='stairs'):
boxHalfLength = 0.15
boxHalfWidth = 1
boxHalfHeight = 0.05
sh_colBox = pybullet.createCollisionShape(pybullet.GEOM_BOX,halfExtents=[boxHalfLength,boxHalfWidth,boxHalfHeight])
boxOrigin = 1
n_steps = 15
self.stairs = []
for i in range(n_steps):
step =pybullet.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,basePosition = [boxOrigin + i*2*boxHalfLength,0,boxHalfHeight + i*2*boxHalfHeight],baseOrientation=[0.0,0.0,0.0,1])
self.stairs.append(step)
pybullet.changeDynamics(step, -1, lateralFriction=0.8)
elif(terrain_type == 'distorted'):
numHeightfieldRows = 256
numHeightfieldColumns = 256
heightPerturbationRange = 0.06
heightfieldData = [0]*numHeightfieldRows*numHeightfieldColumns
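            # Assign the same random height to each 2x2 patch of cells so that
            # neighbouring entries share a height and the terrain stays locally smooth.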
for j in range (int(numHeightfieldColumns/2)):
for i in range (int(numHeightfieldRows/2) ):
height = random.uniform(0,heightPerturbationRange)
heightfieldData[2*i+2*j*numHeightfieldRows]=height
heightfieldData[2*i+1+2*j*numHeightfieldRows]=height
heightfieldData[2*i+(2*j+1)*numHeightfieldRows]=height
heightfieldData[2*i+1+(2*j+1)*numHeightfieldRows]=height
terrainShape = pybullet.createCollisionShape(shapeType = pybullet.GEOM_HEIGHTFIELD, meshScale=[.05,.05,1], heightfieldTextureScaling=(numHeightfieldRows-1)/2, heightfieldData=heightfieldData, numHeightfieldRows=numHeightfieldRows, numHeightfieldColumns=numHeightfieldColumns)
self.plane = pybullet.createMultiBody(0, terrainShape)
#Load Robot
self.robot = Mini_Cheetah(pybullet)
self.DynaRandom = DynamicsRandomizer(pybullet,self.robot)
#Set Camera
self._cam_dist = 1.0
self._cam_yaw = 0.0
self._cam_pitch = 0.0
pybullet.resetDebugVisualizerCamera(self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0])
if self._on_rack:
self.robot._set_on_rack()
def _simulate(self,torques):
for _ in range(self._frame_skip):
self.robot._apply_motor_torques(torques)
pybullet.stepSimulation()
def _reset_world(self):
# reset the robot
self.robot._reset_base()
self.robot._reset_legs()
# reset any disturbances in the terrain also (eg. obstacles)
pass
def _get_observation(self):
FPV_image = self._get_FPV_image()
_,base_orientation = self.robot._get_base_pose()
motor_angles, motor_velocities = self.robot._get_motor_states()
# flatten the observation and return accordingly
return FPV_image
def _get_FPV_image(self):
#FPV Camera Properties
width = 128
height = 128
fov = 60
aspect = width / height
near = 0.02
far = 20
#View camera transformatios
pos,ori = self.robot._get_base_pose()
ori = -1*np.array(ori)
camera_point, _ = pybullet.multiplyTransforms(pos, ori, [0.2+near,0,0], [0,0,0,1])
target_point, _ = pybullet.multiplyTransforms(pos, ori, [0.2+far,0,0], [0,0,0,1])
up_vector, _ = pybullet.multiplyTransforms(pos, ori, [0,0,1], [0,0,0,1])
view_matrix = pybullet.computeViewMatrix(camera_point, target_point, up_vector)
projection_matrix = pybullet.computeProjectionMatrixFOV(fov, aspect, near, far)
# Get depth values using the OpenGL renderer
images = pybullet.getCameraImage(width,
height,
view_matrix,
projection_matrix,
shadow=True,
renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
#rgb and depth components
rgb_opengl = np.reshape(images[2], (height, width, 4))
depth_buffer_opengl = np.reshape(images[3], [width, height])
depth_opengl = far * near / (far - (far - near) * depth_buffer_opengl)
seg_opengl = np.reshape(images[4], [width, height]) * 1. / 255.
# converting to openCV colour space
rgb_image = cv2.cvtColor(rgb_opengl, cv2.COLOR_BGR2RGB)
return rgb_image
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
import sys
import re
import os
if len(sys.argv) < 2 or not re.match(r"\d{4}-\d\d-\d\d", sys.argv[1]):
print "Usage: git daylog 2013-01-01 ..."
sys.exit(1)
day = sys.argv[1]
after = "--after=%s 00:00" % day
before = "--before=%s 23:59" % day
os.execlp("git", "git", "log", after, before, *sys.argv[2:])
|
nilq/baby-python
|
python
|
"""Extractor for hpfanficarchive.com."""
from fanfic_scraper.base_fanfic import BaseFanfic, BaseChapter
from urllib.parse import urlparse, urljoin, parse_qs
from bs4 import BeautifulSoup, Comment
from collections import defaultdict
import re
import os
from datetime import datetime
def chapter_nav(tag):
test = (tag.name == 'select')
test = (test and 'chap_select' in tag['id'])
return test
class HPFanficArchive(BaseFanfic):
def get_fanfic_title(self, r):
soup = BeautifulSoup(r.text, 'html5lib')
for div in soup.find_all('div', {'id': 'pagetitle'}):
ch_regex = re.compile(r'^viewstory.php\?sid=')
title = div.find_all('a', href=ch_regex)[0]
title = title.get_text()
break
return title
def get_story_url(self, storyid):
base_url = 'http://www.hpfanficarchive.com/stories/viewstory.php?sid='
return base_url + storyid
def extract_chapters(self):
"""Extract chapters function (backbone)."""
fanfic_name = self.name
url = self.url
urlscheme = urlparse(url)
# Set story_id from url
self.fanfic_id = parse_qs(urlscheme.query,
keep_blank_values=True)['sid'][0]
# Get chapters
r = self.send_request(url)
soup = BeautifulSoup(r.text, 'html5lib')
self.title = self.get_fanfic_title(r)
chapters = defaultdict(HPFanficArchiveChapter)
try:
ch_regex = re.compile(r'^viewstory.php\?sid=')
chapter_list = soup.find_all('a', href=ch_regex)
for link in chapter_list:
chapter = link.get('href')
if 'chapter' in chapter:
chapter_link = urljoin(
urlscheme.scheme + "://" + urlscheme.netloc,
'stories/' + str(chapter))
ch_qs = parse_qs(urlparse(chapter_link).query)
chapter_num = ch_qs['chapter'][0]
chapter_num = int(chapter_num)
chapters[chapter_num] = HPFanficArchiveChapter(
self, chapter_num, chapter_link)
return chapters
        except Exception:
            return chapters
def get_update_date(self):
r = self.send_request(self.url)
soup = BeautifulSoup(r.text, 'lxml')
for c in soup.find_all(text=lambda text: isinstance(text, Comment)):
if c in [' UPDATED START ']:
update_date = c.next_element.strip()
update_date = datetime.strptime(update_date, '%B %d, %Y')
break
return update_date
class HPFanficArchiveChapter(BaseChapter):
"""Base chapter class."""
def get_fanfic_title(self, r):
soup = BeautifulSoup(r.text, 'html5lib')
regex = re.compile(r'^viewstory.php\?sid=')
for div in soup.find_all('div', {'id': 'pagetitle'}):
title = div.find_all('a', href=regex)[0]
title = title.get_text()
break
return title
def get_fanfic_author(self, r):
soup = BeautifulSoup(r.text, 'html5lib')
regex = re.compile(r'^viewuser.php\?uid=')
for div in soup.find_all('div', {'id': 'pagetitle'}):
author = div.find_all('a', href=regex)[0]
author = author.get_text()
break
return author
def get_fanfic_category(self, r):
soup = BeautifulSoup(r.text, 'html5lib')
category = ''
regex = re.compile(r'^browse.php\?type=categories')
desc = soup.find_all('div', {'class': 'content'})[2]
cat = desc.find_all('a', href=regex)
cat2 = []
for a in cat:
cat2.append(a.get_text())
s = ', '
category = s.join(cat2)
return category
def get_fanfic_genre(self, r):
soup = BeautifulSoup(r.text, 'html5lib')
category = ''
regex = re.compile(r'type_id=1')
desc = soup.find_all('div', {'class': 'content'})[2]
cat = desc.find_all('a', href=regex)
cat2 = []
for a in cat:
cat2.append(a.get_text())
s = ', '
category = s.join(cat2)
return category
def get_fanfic_description(self, r):
soup = BeautifulSoup(r.text, 'html5lib')
desc = soup.find_all('div', {'class': 'content'})[2]
para = desc.find_all('p')
temp = []
for p in para:
temp.append(p.get_text())
desc = "".join(temp)
return desc
def get_update_date(self, r):
soup = BeautifulSoup(r.text, 'lxml')
for c in soup.find_all(text=lambda text: isinstance(text, Comment)):
if c in [' UPDATED START ']:
update_date = c.next_element.strip()
break
return update_date
def get_publish_date(self, r):
soup = BeautifulSoup(r.text, 'lxml')
for c in soup.find_all(text=lambda text: isinstance(text, Comment)):
if c in [' PUBLISHED START ']:
publish_date = c.next_element.strip()
break
return publish_date
def get_chapter_title(self, r):
soup = BeautifulSoup(r.text, 'html5lib')
chapters = soup.find_all('select', {'name': 'chapter'})[0]
chapter_list = chapters.find_all('option')
for option in chapter_list:
if int(option.get('value')) == self.chapter_num:
chapter_title = option.get_text()
break
return chapter_title
def get_chapter_count(self, r):
"""Extract chapters function (backbone)."""
soup = BeautifulSoup(r.text, 'html5lib')
chapters = 0
try:
ch_regex = re.compile(r'^viewstory.php\?sid=')
chapter_list = soup.find_all('a', href=ch_regex)
for link in chapter_list:
chapter = link.get('href')
if 'chapter' in chapter:
chapters = chapters + 1
return chapters
        except Exception:
            return chapters
def get_chapter_html(self, r):
soup = BeautifulSoup(r.text, 'html5lib')
story = soup.find_all('div', {'id': 'story'})[0]
return str(story)
def render_p(self, value):
return '<p>' + value + '</p>'
def story_info(self):
r = self.send_request(self.fanfic_url)
title = self.get_fanfic_title(r)
author = self.get_fanfic_author(r)
category = self.get_fanfic_category(r)
genre = self.get_fanfic_genre(r)
desc = self.get_fanfic_description(r)
update_date = self.get_update_date(r)
publish_date = self.get_publish_date(r)
chapter_count = self.get_chapter_count(r)
info = {}
info['StoryId'] = self.fanfic_id
info['Title'] = title
info['Author'] = author
info['Description'] = desc
info['Publish_Date'] = publish_date
info['Update_Date'] = update_date
info['Count'] = chapter_count
return info
def download_chapter(self):
filename = self.fanfic_name + '-%03d.htm' % (self.chapter_num)
print(self.chapter_url)
r = self.send_request(self.fanfic_url)
title = self.get_fanfic_title(r)
author = self.get_fanfic_author(r)
category = self.get_fanfic_category(r)
genre = self.get_fanfic_genre(r)
desc = self.get_fanfic_description(r)
update_date = self.get_update_date(r)
publish_date = self.get_publish_date(r)
chapter_count = self.get_chapter_count(r)
r = self.send_request(self.chapter_url)
chapter_title = self.get_chapter_title(r)
story = self.get_chapter_html(r)
# print(title)
# print(author)
# print('Categories: '+category)
# print('Genres: '+genre)
# print("Summary: ", textwrap.fill(desc))
# print('Chapter '+chapter_title)
# print('Published: '+publish_date)
# print('Updated: '+update_date)
# print(chapter_count)
# print(story)
target = os.path.join(self.fanfic_download_location, filename)
if os.path.isfile(target):
os.remove(target)
f1 = open(target, "w")
f1.write('<html>')
f1.write('<body>')
f1.write(self.render_p(title))
f1.write(self.render_p(author))
f1.write(self.render_p('Categories: ' + category))
f1.write(self.render_p('Summary: ' + desc))
f1.write(self.render_p('Chapter ' + chapter_title))
if self.chapter_num == 1:
f1.write(self.render_p('Published: ' + publish_date))
if self.chapter_num == chapter_count:
f1.write(self.render_p('Updated: ' + update_date))
f1.write(self.render_p('========='))
        f1.write(story)
        f1.write('</body>')
        f1.write('</html>')
        f1.flush()
        os.fsync(f1.fileno())
        f1.close()  # was `f1.close`, which never actually closed the file
|
nilq/baby-python
|
python
|
# Filename: ZerkGameState.py
# Author: Greg M. Krsak
# License: MIT
# Contact: greg.krsak@gmail.com
#
# Zerk is an Interactive Fiction (IF) style interpreter, inspired by Infocom's
# Zork series. Zerk allows the use of custom maps, which are JSON-formatted.
#
# This file contains game state constants, which are implemented as if they
# were a global, C-style enum.
#
Starting = 1
Started = 2
Playing = 3
FinishedWon = 4
FinishedLost = 5
Quitting = 6
Quit = 7
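# Hedged usage sketch (module import name assumed for illustration):
#   import ZerkGameState
#   state = ZerkGameState.Starting
#   if state == ZerkGameState.Starting:
#       state = ZerkGameState.Started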
|
nilq/baby-python
|
python
|
from ws import ws
import unittest
import json
class FlaskrTestCase(unittest.TestCase):
def setUp(self):
ws.app.config['TESTING'] = True
self.app = ws.app.test_client()
def test_hello(self):
response = self.app.get('/')
        self.assertEqual(200, response.status_code)
def test_create_project(self):
pass
# req_data = dict({'project_name': 'test_project_1'})
# req_data['sources'] = [{"type": "cdr", "url": "http://...", "index_name": "name of the index",
# "elastic_search_doctype": "the type in elastic search", "elastic_search_query": {},
# "start_date": "date-in-iso-format-at-any-resolution",
# "end_date": "date-in-iso-format-at-any-resolution"}]
# response = self.app.post('/projects', data=json.dumps(req_data))
# print('create')
# print(response)
def test_add_tag_entity(self):
req_data = dict({'project_name': 'dig3-ht'})
req_data['sources'] = [{"type": "cdr", "url": "http://...", "index_name": "name of the index",
"elastic_search_doctype": "the type in elastic search", "elastic_search_query": {},
"start_date": "date-in-iso-format-at-any-resolution",
"end_date": "date-in-iso-format-at-any-resolution"}]
print(json.dumps(req_data))
response = self.app.post('/projects', data=json.dumps(req_data))
req_data = dict()
req_data['human_annotation'] = 0
req_data['tags'] = 'test-tag'
response = self.app.post("/projects/dig3-ht/entities/092F55350A6125D8550D7652F867EBB9EB027C8EADA2CC1BAC0BEB1F48FE6D2B/tags", data=json.dumps(req_data))
req_data['human_annotation'] = 1
print(json.dumps(req_data))
response = self.app.post(
"/projects/dig3-ht/entities/CAFAE7C3F6B4A45A6ADB342A8C09051E34DDE45D4ECD7A9620BDFFCE55702C58/tags",
data=json.dumps(req_data))
print(response)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from .base import *
from birdway import Type, ArgumentModifier, Composite
from .string_literal import StringLiteral
class Parameter(SyntaxNodeABC, PrettyAutoRepr, Identified):
def __init__(self):
self.type = Type.UNKNOWN
self.modifier = ArgumentModifier.NONE
self.name = str()
self.description = str()
@classmethod
def _parse(cls, parser):
parameter = cls()
if parser.peek(0) == UnaryOperator(operator=Unary.ISDEF):
parser.eat()
parameter.modifier = ArgumentModifier.OPTIONAL
elif parser.peek(0) == UnaryOperator(operator=Unary.ISNTDEF):
raise BirdwaySyntaxError(
"The unique modifier ‘!’ can't be used on parameters"
)
elif parser.peek(0) == BinaryOperator(operator=Binary.MULTIPLICATION):
parser.eat()
parameter.modifier = ArgumentModifier.MULTIPLE
match parser.peek(0):
case TypeName(type=t):
parser.eat()
parameter.type = t
case other:
raise BirdwaySyntaxError(
f"""expected type{
' or modifier' if parameter.modifier == ArgumentModifier.NONE else ''
}, got {other} at line {other._line}"""
)
match parser.peek(0):
case Identifier(name=ident):
parser.eat()
parameter.name = ident
case other:
raise BirdwaySyntaxError(
f"expected identifier, got {other} at line {other._line}"
)
if parser.peek(0) == FormattedStringDelimiter():
parser.eat()
parameter.description = parser.parse_formatted_string()
elif parser.peek(0) == StringDelimiter():
parser.eat()
parameter.description = StringLiteral._parse(parser)
return parameter
def _initialise(self):
if self.modifier == ArgumentModifier.OPTIONAL:
T = Composite.Nullable(self.type)
init = "= NULL"
else:
raise NotImplementedError()
return f"{ctype(T)} {self.id} {init};\n"
|
nilq/baby-python
|
python
|
"""Subjects interface
Access to the subjects endpoint.
The user is not expected to use this class directly. It is an attribute of the
:class:`Archivist` class.
For example instantiate an Archivist instance and execute the methods of the class:
.. code-block:: python
with open(".auth_token", mode="r") as tokenfile:
authtoken = tokenfile.read().strip()
# Initialize connection to Archivist
arch = Archivist(
"https://rkvst.poc.jitsuin.io",
auth=authtoken,
)
asset = arch.subjects.create(...)
"""
from .constants import (
SUBJECTS_SUBPATH,
SUBJECTS_LABEL,
)
DEFAULT_PAGE_SIZE = 500
class _SubjectsClient:
"""SubjectsClient
Access to subjects entities using a CRUD interface. This class is usually
accessed as an attribute of the Archivist class.
Args:
archivist (Archivist): :class:`Archivist` instance
"""
def __init__(self, archivist):
self._archivist = archivist
def create(self, display_name, wallet_pub_keys, tessera_pub_keys):
"""Create subject
Creates subject with defined attributes.
Args:
display_name (str): display name of subject.
wallet_pub_keys (list): wallet public keys
tessera_pub_keys (list): tessera public keys
Returns:
:class:`Subject` instance
"""
return self.create_from_data(
self.__query(
display_name=display_name,
wallet_pub_keys=wallet_pub_keys,
tessera_pub_keys=tessera_pub_keys,
),
)
def create_from_data(self, data):
"""Create subject
Creates subject with request body from data stream.
Suitable for reading data from a file using json.load or yaml.load
Args:
data (dict): request body of subject.
Returns:
:class:`Subject` instance
"""
return Subject(
**self._archivist.post(
f"{SUBJECTS_SUBPATH}/{SUBJECTS_LABEL}",
data,
)
)
def read(self, identity):
"""Read Subject
Reads subject.
Args:
identity (str): subjects identity e.g. subjects/xxxxxxxxxxxxxxxxxxxxxxx
Returns:
:class:`Subject` instance
"""
return Subject(
**self._archivist.get(
SUBJECTS_SUBPATH,
identity,
)
)
def update(
self,
identity,
*,
display_name=None,
wallet_pub_keys=None,
tessera_pub_keys=None,
):
"""Update Subject
Update subject.
Args:
identity (str): subjects identity e.g. subjects/xxxxxxxxxxxxxxxxxxxxxxx
display_name (str): display name of subject.
wallet_pub_keys (list): wallet public keys
tessera_pub_keys (list): tessera public keys
Returns:
:class:`Subject` instance
"""
return Subject(
**self._archivist.patch(
SUBJECTS_SUBPATH,
identity,
self.__query(
display_name=display_name,
wallet_pub_keys=wallet_pub_keys,
tessera_pub_keys=tessera_pub_keys,
),
)
)
def delete(self, identity):
"""Delete Subject
Deletes subject.
Args:
identity (str): subjects identity e.g. subjects/xxxxxxxxxxxxxxxxxxxxxxx
Returns:
the response from the delete endpoint (typically empty)
"""
return self._archivist.delete(SUBJECTS_SUBPATH, identity)
@staticmethod
def __query(*, display_name=None, wallet_pub_keys=None, tessera_pub_keys=None):
query = {}
if display_name is not None:
query["display_name"] = display_name
if wallet_pub_keys is not None:
query["wallet_pub_key"] = wallet_pub_keys
if tessera_pub_keys is not None:
query["tessera_pub_key"] = tessera_pub_keys
return query
def count(self, *, display_name=None):
"""Count subjects.
Counts number of subjects that match criteria.
Args:
display_name (str): display name (optional)
Returns:
integer count of subjects.
"""
return self._archivist.count(
f"{SUBJECTS_SUBPATH}/{SUBJECTS_LABEL}",
query=self.__query(display_name=display_name),
)
def list(
self,
*,
page_size=DEFAULT_PAGE_SIZE,
display_name=None,
):
"""List subjects.
List subjects that match criteria.
TODO: filtering on display_name does not currently work
Args:
display_name (str): display name (optional)
page_size (int): optional page size. (Rarely used).
Returns:
iterable that returns :class:`Subject` instances
"""
return (
Subject(**a)
for a in self._archivist.list(
f"{SUBJECTS_SUBPATH}/{SUBJECTS_LABEL}",
SUBJECTS_LABEL,
page_size=page_size,
query=self.__query(display_name=display_name),
)
)
class Subject(dict):
"""Subject object"""
|
nilq/baby-python
|
python
|
import os
import requests
import json
import hikari
import lightbulb
from dotenv import load_dotenv, find_dotenv
from datetime import datetime
from geopy.geocoders import Nominatim
weather_plugin = lightbulb.Plugin("Weather")
class Weather:
"""Weather class that interacts with OpenWeatherMap API
for weather information
"""
def __init__(self):
load_dotenv(dotenv_path=find_dotenv(usecwd=True))
self._weather_token = os.environ.get('WEATHER_TOKEN')
self.name = os.environ.get('BOT_NAME')
self.location = os.environ.get('DEFAULT_LOCATION')
def t_convert(self, t, time_format = "%m/%d %H:%M"):
"""Converting UNIX time to human readable time
Args:
t (int): UNIX timestamp
time_format (str, optional): Date format. Defaults to "%m/%d %H:%M".
Returns:
str: Human readable time
"""
return datetime.utcfromtimestamp(t).strftime(time_format)
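# Example: t_convert(0) returns "01/01 00:00", the UTC epoch start.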
def get_weather(self, location, exclude):
"""Get weather for a given location using OpenWeatherMap OneCall API
API reference: https://openweathermap.org/api/one-call-api
Args:
location (string): Target location (e.g. London, New York, Paris)
exclude (string): Fields to exclude from OneCall API response
Returns:
dict: OneCall API response dictionary
"""
self.endpoint = "https://api.openweathermap.org/data/2.5/onecall"
self.headers = {
"user-agent": self.name
}
self.geolocator = Nominatim(user_agent = self.name)
# Geocode once and reuse the result rather than issuing two identical lookups.
geolocation = self.geolocator.geocode(location)
self.latitude = geolocation.latitude
self.longitude = geolocation.longitude
self.params = {
"lat" : self.latitude,
"lon" : self.longitude,
"exclude" : exclude,
"appid" : self._weather_token
}
self.response = requests.request("POST", self.endpoint, params = self.params, headers = self.headers)
self.data = json.loads(self.response.text)
return self.data
def get_city_name(self, location):
"""Generate location name in `{City}, {Country}` format.
For example: London, United Kingdom
Args:
location (str): Target location
Returns:
str: Location name in `{City}, {Country}`
"""
# Example geolocation value
# Location(London, Greater London, England, United Kingdom, (51.5073219, -0.1276474, 0.0))
self.geolocator = Nominatim(user_agent = self.name)
self.geolocation = self.geolocator.geocode(location, language = "en-us")
self.city = self.geolocation[0].split(", ")[0]
self.country = self.geolocation[0].split(", ")[-1]
return f"{self.city}, {self.country}"
def get_current(self, location):
"""Get current weather for a given location
Args:
location (str): Target location
Returns:
dict: dict with the current weather data
"""
self.exclude = "minutely,hourly,daily",
self.data = self.get_weather(location, self.exclude)
self.icon = self.data["current"]["weather"][0]["icon"]
self.icon_url = f"http://openweathermap.org/img/wn/{self.icon}@2x.png"
# Celsius = Kelvin - 273.15
self.current_temp = self.data["current"]["temp"] - 273.15
self.feels_like = self.data["current"]["feels_like"] - 273.15
self.current_data = {
"location" : self.get_city_name(location),
"current_temp" : self.current_temp,
"feels_like" : self.feels_like,
"icon_url" : self.icon_url,
}
return self.current_data
@weather_plugin.command
@lightbulb.option("location", "Location for current weather", str, required = False)
@lightbulb.command("current", "Get current weather")
@lightbulb.implements(lightbulb.PrefixCommand, lightbulb.SlashCommand)
async def current_weather(ctx: lightbulb.Context) -> None:
"""Get current weather command
`/current [location]`
"""
weather = Weather()
location = weather.location
if ctx.options.location:
location = ctx.options.location
current_data = weather.get_current(location)
icon_url = current_data["icon_url"]
temp = round(current_data["current_temp"])
feels_like = round(current_data["feels_like"])
location = current_data["location"]
embed = (
hikari.Embed(
title = f"Current weather in {location}",
timestamp = datetime.now().astimezone(),
)
.set_footer(text=f"Your weather was brought to you by {weather.name}.")
.set_thumbnail(icon_url)
.add_field(
"Temperature",
f"{temp}°C",
inline = True,
)
.add_field(
"Feels like",
f"{feels_like}°C",
inline = True,
)
)
await ctx.respond(embed)
def load(bot: lightbulb.BotApp) -> None:
bot.add_plugin(weather_plugin)
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url(r'^$', views.home, name='welcome'),
url(r'^category/$', views.search_image, name='search_image'),
url(r'^location/(\d+)$', views.filter_by_location, name='location'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from music.models import Music
# Create your views here.
def index(request):
musiclist = Music.objects.all()
context = {'music': musiclist}
return render(request, 'music/index.htm', context)
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import logging
import os
import sys
from collections import defaultdict
import configargparse
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
import camsa
import camsa.utils.ragout.io as ragout_io
from camsa.utils.ragout.shared import filter_indels, filter_duplications
from camsa.utils.ragout.shared import filter_blocks_by_good_genomes, filter_blocks_by_bad_genomes, get_all_genomes_from_blocks
if __name__ == "__main__":
full_description = camsa.full_description_template.format(
names=camsa.CAMSA_AUTHORS,
affiliations=camsa.AFFILIATIONS,
dummy=" ",
tool="Computing coverage report for Ragout blocks.",
information="For more information refer to {docs}".format(docs=camsa.CAMSA_DOCS_URL),
contact=camsa.CONTACT)
full_description = "=" * 80 + "\n" + full_description + "=" * 80 + "\n"
parser = configargparse.ArgParser(description=full_description, formatter_class=configargparse.RawTextHelpFormatter,
default_config_files=[os.path.join(camsa.root_dir, "logging.ini")])
parser.add_argument("-c", "--config", is_config_file=True, help="Config file overwriting some of the default settings as well as any flag starting with \"--\".")
parser.add_argument("--version", action="version", version=camsa.VERSION)
parser.add_argument("ragout_coords", type=str, help="A path to ragout coords file")
parser.add_argument("--filter-indels", action="store_true", dest="filter_indels", default=False)
parser.add_argument("--no-fragment-stats", action="store_false", dest="fragment_stats", default=True)
parser.add_argument("--no-genome-stats", action="store_false", dest="genome_stats", default=True)
parser.add_argument("--filter-duplications", action="store_true", dest="filter_duplications", default=False)
parser.add_argument("--good-genomes", type=str, default="", help="A coma separated list of genome names, to be processed and conversed.\nDEFAULT: \"\" (i.e., all genomes are good)")
parser.add_argument("--bad-genomes", type=str, default="", help="A coma separated list of genome names, to be excluded from processing and conversion.\nDEFAULT: \"\" (i.e., no genomes are bad)")
parser.add_argument("-o", "--output", type=configargparse.FileType("wt"), default=sys.stdout)
parser.add_argument("--c-logging-level", dest="c_logging_level", default=logging.INFO, type=int,
choices=[logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL],
help="Logging level for the converter.\nDEFAULT: {info}".format(info=logging.INFO))
parser.add_argument("--c-logging-formatter-entry",
help="Format string for python logger.")
args = parser.parse_args()
start_time = datetime.datetime.now()
logger = logging.getLogger("CAMSA.utils.ragout_coords2fasta")
ch = logging.StreamHandler()
ch.setLevel(args.c_logging_level)
logger.setLevel(args.c_logging_level)
logger.addHandler(ch)
logger.info(full_description)
logger.info(parser.format_values())
ch.setFormatter(logging.Formatter(args.c_logging_formatter_entry))
logger.info("Starting the converting process")
sequences_by_ids, blocks_by_ids = ragout_io.read_from_file(path=args.ragout_coords, silent_fail=False, delimiter="\t")
all_genomes = get_all_genomes_from_blocks(blocks_as_ids=blocks_by_ids)
# Normalize both genome options to sets up front so the set arithmetic in
# the indel filter below works even when an option keeps its default "".
args.good_genomes = set(args.good_genomes.split(",")) if args.good_genomes != "" else set()
if args.good_genomes:
filter_blocks_by_good_genomes(blocks_by_ids=blocks_by_ids, good_genomes=args.good_genomes)
args.bad_genomes = set(args.bad_genomes.split(",")) if args.bad_genomes != "" else set()
if args.bad_genomes:
filter_blocks_by_bad_genomes(blocks_by_ids=blocks_by_ids, bad_genomes=args.bad_genomes)
if args.filter_indels:
filter_indels(blocks_by_ids=blocks_by_ids, all_genomes_as_set=(all_genomes if len(args.good_genomes) == 0 else args.good_genomes) - args.bad_genomes)
if args.filter_duplications:
filter_duplications(blocks_by_ids=blocks_by_ids)
all_filtered_genomes = get_all_genomes_from_blocks(blocks_as_ids=blocks_by_ids)
genomes = defaultdict(lambda: defaultdict(list))
for block_list in blocks_by_ids.values():
for block in block_list:
genomes[block.parent_seq.genome_name][block.parent_seq.ragout_id].append(block)
fragment_cov = {}
if args.fragment_stats:
for genome_name in genomes.keys():
for seq_id in genomes[genome_name]:
seq = sequences_by_ids[seq_id]
cumulative_blocks_length = sum(block.length for block in genomes[genome_name][seq_id])
fragment_cov[seq_id] = cumulative_blocks_length * 100.0 / seq.length
genome_cov = {}
if args.genome_stats:
for genome_name in genomes.keys():
total_genome_length = 0
total_blocks_length = 0
for seq_id in genomes[genome_name]:
seq = sequences_by_ids[seq_id]
total_genome_length += seq.length
total_blocks_length += sum(block.length for block in genomes[genome_name][seq_id])
genome_cov[genome_name] = total_blocks_length * 100.0 / total_genome_length
if args.genome_stats:
print("-" * 80, file=args.output)
for genome_name in sorted(genomes.keys()):
print("For genome \"{genome_name}\" {cov:.2f}% of its length is covered by filtered blocks".format(genome_name=genome_name, cov=genome_cov[genome_name]), file=args.output)
if args.fragment_stats:
for genome_name in sorted(genomes.keys()):
print("-"*80, file=args.output)
print("Detailed coverage stats for fragments in genome \"{genome_name}\"".format(genome_name=genome_name), file=args.output)
for seq_id in sorted(genomes[genome_name].keys()):
print("For fragment \"{fragment_name}\" {cov:.2f}% of its length is covered by filtered blocks".format(fragment_name=sequences_by_ids[seq_id].seq_name, cov=fragment_cov[seq_id]))
logger.info("All done!")
end_time = datetime.datetime.now()
logger.info("Elapsed time: {el_time}".format(el_time=str(end_time - start_time)))
|
nilq/baby-python
|
python
|
from requests.adapters import HTTPAdapter
from nivacloud_logging.log_utils import LogContext, generate_trace_id
class TracingAdapter(HTTPAdapter):
"""
Subclass of HTTPAdapter that:
1. Adds Trace-Id if it exists in LogContext.
2. Adds Span-Id if it exists in LogContext or auto-generates it otherwise.
Sample usage:
session = requests.Session()
session.mount('http://', TracingAdapter())
session.mount('https://', TracingAdapter())
r = session.get("https://httpbin.org/headers")
print(f"Trace-ID is {r.json()['headers'].get('Trace-Id')}")
"""
def add_headers(self, request, **kwargs):
super().add_headers(request, **kwargs)
incoming_trace_id = LogContext.getcontext("trace_id")
if incoming_trace_id:
request.headers["Trace-Id"] = incoming_trace_id
incoming_user_id = LogContext.getcontext("user_id")
if incoming_user_id:
request.headers["User-Id"] = incoming_user_id
request.headers["Span-Id"] = (
LogContext.getcontext("span_id") or generate_trace_id()
)
|
nilq/baby-python
|
python
|
"""
Util functions for dictionary
"""
__copyright__ = '2013, Room77, Inc.'
__author__ = 'Yu-chi Kuo, Kyle Konrad <kyle@room77.com>'
from collections import OrderedDict
from collections.abc import MutableMapping
def dict_key_filter(function, dictionary):
"""
Filter dictionary by its key.
Args:
function: takes key as argument and returns True if that item should be
included
dictionary: python dict to filter
"""
return {k: v for k, v in dictionary.items() if function(k)}
def dict_val_filter(function, dictionary):
"""
Filter dictionary by its value.
Args:
function: takes value as argument and returns True if that item should be
included
dictionary: python dict to filter
"""
return {k: v for k, v in dictionary.items() if function(v)}
def dict_filter(function, dictionary):
"""
Filter dictionary by its key and value.
Args:
function: takes k, v as argument and returns True if that item should be
included
dictionary: python dict to filter
"""
return {k: v for k, v in dictionary.items() if function(k, v)}
def dict_reverse(dictionary):
"""
Reverse a dictionary. If values are not unique, only one key will be kept; which one is unspecified.
Args:
dictionary (dict): dict to reverse
Returns:
reversed (dict): reversed dictionary
"""
return {v: k for k, v in dictionary.items()}
class LRUDict(MutableMapping):
"""
A dictionary of limited size where items are evicted in LRU-order
inspired by http://stackoverflow.com/a/2438926
"""
def __init__(self, size, *args, **kwargs):
self.size = size
self.dict = OrderedDict(*args, **kwargs)
while len(self) > self.size:
self.dict.popitem(last=False)
def __iter__(self):
return iter(self.dict)
def __len__(self):
return len(self.dict)
def __getitem__(self, key):
value = self.dict[key]
# A read also counts as a "use", so eviction is true LRU as documented above.
self.dict.move_to_end(key)
return value
def __setitem__(self, key, value):
if key not in self and len(self) == self.size:
self.dict.popitem(last=False)
if key in self: # need to delete and reinsert to maintain order
del self[key]
self.dict[key] = value
def __delitem__(self, key):
del self.dict[key]
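# A small usage sketch: with size 2, inserting a third key evicts the least
# recently used one.
#
#   cache = LRUDict(2)
#   cache["a"] = 1
#   cache["b"] = 2
#   _ = cache["a"]   # "a" becomes the most recently used key
#   cache["c"] = 3   # evicts "b"
#   assert sorted(cache) == ["a", "c"]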
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3.6
# coding=utf-8
'''
This reader reads the PSD vallex file
and adds each possible canonical vallex lemma
to the corresponding copying dictionary of a word and the aliases of the word
@author: Jie Cao(jiessie.cao@gmail.com)
@since: 2019-06-28
'''
import xml.etree.ElementTree as ET
from utility.psd_utils.PSDGraph import *
import re
from utility.constants import *
import logging
logger = logging.getLogger("mrp.psd")
def add_concept(lemmas_to_concept,le,con):
if not le in lemmas_to_concept:
lemmas_to_concept[le]= [con]
else:
lemmas_to_concept[le].append(con)
sense_reg=r"(f\d+.*)"
class VallexReader:
def parse(self):
"""
parse all the psd vallex frames
"""
# for every word key, there is a set of fram, every frame is a sense
self.frames = dict()
self.word_ids = dict()
self.non_sense_frames = dict()
self.frame_all_args = dict()
self.frame_oblig_args = dict()
self.frame_lemmas = set()
self.joints = set()
self.joints_map = {}
# for psd, only one file exists for the vallex lexicon
self.parse_file(self.frame_file_path)
def __init__(self, file_path=vallex_file_path):
self.frame_file_path = file_path
self.parse()
def parse_file(self,f):
tree = ET.parse(f)
root = tree.getroot()
for child in root:
if child.tag == "body":
# iterate every word
for wordChild in child:
self.add_lemma(wordChild)
@staticmethod
def extract_sense_with_wordid(word_id, frame_id):
"""
word id is the prefix of the frame_id
"""
if word_id in frame_id:
# strip the word-id prefix once, leaving only the sense suffix
return frame_id.replace(word_id, "", 1)
else:
logger.error("{} is not prefix of {}".format(word_id, frame_id))
# when cannot be splitted, we just use the frame_id
return frame_id
@staticmethod
def extract_word_and_sense(frame_id):
"""
without using the lexicon, split by string match
"""
splits = re.split(sense_reg, frame_id)
word_id = splits[0]
sense = splits[1]
return word_id, sense
def extract_sense_with_lemma(self,lemma, frame_id):
"""
extract the sense for a lemma; the word_id is looked up internally because callers hold the lemma, not the id
# we only support the connected lemma, replace space with "_"
"""
if lemma in self.word_ids:
word_id = self.word_ids[lemma]
sense = VallexReader.extract_sense_with_wordid(word_id, frame_id)
return sense
else:
logger.error("{} is not in our vallex lexicon, use whole frame_id as sense ={}".format(lemma, frame_id))
return frame_id
def get_frame_id(self, lemma, sense):
"""
given a lemma and sense, return the full frame id
"""
if lemma in self.word_ids and sense in self.frames[lemma]:
word_id = self.word_ids[lemma]
frame_id = word_id + sense
else:
# lemma is not in the dictionary
# try to find the most similar one
logger.error("{} is not vallex dict".format(lemma))
frame_id = "N/A"
return frame_id
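# Example (word ids follow the lexicon's "w<number>" scheme, cf. the
# "see w1255f4" note below): for a lemma whose word id is "w1255" and a
# sense "f4", get_frame_id returns "w1255f4"; unknown lemmas yield "N/A".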
def check_functor_in_oblig_args(self, frame_id, arg):
if frame_id in self.frame_oblig_args:
return arg in self.frame_oblig_args[frame_id]
else:
return False
def add_lemma(self,node):
"""
add cannonical amr lemma to possible set of words including for aliases of the words
adding the Frame into lemma mapping
"""
# heat_up is underscore for psd, 20088019
lemma = node.attrib["lemma"]
word_id = node.attrib["id"]
self.word_ids[lemma] = word_id
self.frame_lemmas.add(lemma)
self.frames.setdefault(lemma,[])
# frame id is attaching some suffix frame id after word_id, {word_id}
# we don't classify sense_id, just use a mapping here.
# POS can be ignored, most of them are V,
# 11 POS="A"
# 5 POS="M"
# 1 POS="N"
# 4337 POS="V"
splits = lemma.split("_")
if len(splits) > 1:
self.joints.add(" ".join(splits))
compounds = splits+["<MWE_END>"]
past = ""
for w in compounds:
self.joints_map.setdefault(past[:-1],[]).append(w)
past = past + w + "_"
# self.frames[lemma] = set()
for child in node:
if child.tag == "valency_frames":
for frame in child:
if frame.tag == "frame":
frame_id = frame.attrib["id"]
args = self.frame_oblig_args.setdefault(frame_id,[])
all_args = self.frame_all_args.setdefault(frame_id,[])
# we can use the whole thing as sense
x_word_id, sense = VallexReader.extract_word_and_sense(frame_id)
if x_word_id != word_id:
logger.error("{} != {}, extracted word_id from frameid is not equal to the original word_id".format(x_word_id, word_id))
add_concept(self.frames,lemma,sense)
for f_elements in frame:
if f_elements.tag == "frame_elements":
# get all of its fuctors
for elem in f_elements:
if elem.tag == "element":
functor = elem.attrib["functor"]
all_args.append(functor)
if "type" in elem.attrib and elem.attrib["type"] == "oblig":
args.append(functor)
elif elem.tag == "element_alternation":
# see w1255f4
for s_elem in elem:
if s_elem.tag == "element":
functor = s_elem.attrib["functor"]
all_args.append(functor)
if "type" in s_elem.attrib and s_elem.attrib["type"] == "oblig":
args.append(functor)
def get_frames(self):
return self.frames
g_vallex_reader = VallexReader()
def main():
with open(semi_folder_path+"/vallex_joint.txt", "w+") as fout:
for i in g_vallex_reader.joints:
fout.write("{}\n".format(i))
logger.info("len(self.frame_lemma)={}".format(len(f_r.frame_lemmas)))
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
load("@bazel_gazelle//:deps.bzl", "go_repository")
def nogo_deps():
go_repository(
name = "com_github_gostaticanalysis_analysisutil",
importpath = "github.com/gostaticanalysis/analysisutil",
sum = "h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=",
version = "v0.7.1",
)
go_repository(
name = "com_github_gostaticanalysis_comment",
importpath = "github.com/gostaticanalysis/comment",
sum = "h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=",
version = "v1.4.2",
)
go_repository(
name = "com_github_timakin_bodyclose",
importpath = "github.com/timakin/bodyclose",
sum = "h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro=",
version = "v0.0.0-20210704033933-f49887972144",
)
|
nilq/baby-python
|
python
|
from ._download import download
def airline_tweets(directory: str):
"""
Downloads a modified version of the 'Twitter US Airlines Sentiment'
dataset, in the given directory
"""
download(directory, "airline_tweets.csv",
"https://drive.google.com/file/d"
"/1Lu4iQucxVBncxeyCj_wFKGkq8Wz0-cuL/view?usp=sharing")
|
nilq/baby-python
|
python
|
from typing import Iterable
from stock_indicators._cslib import CsIndicator
from stock_indicators._cstypes import List as CsList
from stock_indicators.indicators.common.candles import CandleResult, CandleResults
from stock_indicators.indicators.common.quote import Quote
def get_doji(quotes: Iterable[Quote], max_price_change_percent: float = 0.1):
"""Get Doji calculated.
(preview)
Doji is a single candlestick pattern where open and
close price are virtually identical, representing market indecision.
Parameters:
`quotes` : Iterable[Quote]
Historical price quotes.
`max_price_change_percent` : float, defaults 0.1
Maximum absolute decimalized percent difference in open and close price.
Returns:
`CandleResults[CandleResult]`
CandleResults is a list of CandleResult objects that also provides useful helper methods.
See more:
- [Doji Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/Doji/#content)
- [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content)
"""
results = CsIndicator.GetDoji[Quote](CsList(Quote, quotes), max_price_change_percent)
return CandleResults(results, CandleResult)
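# A minimal usage sketch (assumes `quotes` already holds an iterable of Quote
# objects built from historical price data):
#
#   results = get_doji(quotes, max_price_change_percent=0.1)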
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 12:07:46 2019
@author: jamiesom
"""
from electricitylci.model_config import replace_egrid, use_primaryfuel_for_coal
from electricitylci.elementaryflows import map_emissions_to_fedelemflows
import pandas as pd
import numpy as np
from electricitylci.globals import output_dir
from datetime import datetime
from electricitylci.dqi import lookup_score_with_bound_key
from scipy.stats import t, norm
import ast
import logging
module_logger = logging.getLogger("alt_generation.py")
def aggregate_facility_flows(df):
"""Thus function aggregates flows from the same source (NEI, netl, etc.) within
a facility. The main problem this solves is that if several emissions
are mapped to a single federal elementary flow (CO2 biotic, CO2 land use change,
etc.) then those were showing up as separate emissions in the inventory
and artificially inflating the number of emissions for uncertainty
calculations.
Parameters
----------
df : dataframe
dataframe with facility-level emissions that might contain duplicate
emission species within the facility.
Returns
-------
dataframe
"""
emission_compartments = [
"emission/air",
"emission/water",
"emission/ground",
"emission/soil",
"air",
"water",
"soil",
"ground",
"waste",
]
groupby_cols = [
"FuelCategory",
"FacilityID",
"Electricity",
"FlowName",
"Source",
"Compartment_path",
"stage_code"
]
emissions = df["Compartment"].isin(emission_compartments)
df_emissions = df[emissions]
df_nonemissions = df[~emissions]
df_dupes = df_emissions.duplicated(subset=groupby_cols, keep=False)
df_red = df_emissions.drop(df_emissions[df_dupes].index)
group_db = (
df_emissions.loc[df_dupes, :]
.groupby(groupby_cols, as_index=False)["FlowAmount"]
.sum()
)
# group_db=df.loc[emissions,:].groupby(groupby_cols,as_index=False)['FlowAmount'].sum()
group_db_merge = group_db.merge(
right=df_emissions.drop_duplicates(subset=groupby_cols),
on=groupby_cols,
how="left",
suffixes=("", "_right"),
)
try:
delete_cols = ["FlowAmount_right"]
group_db_merge.drop(columns=delete_cols, inplace=True)
except KeyError:
pass
df = pd.concat(
[df_nonemissions, df_red, group_db_merge], ignore_index=True
)
return df
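# Illustration of the intent (hypothetical values): two emission rows that
# share FacilityID, FlowName, Source, Compartment_path and stage_code collapse
# into a single row whose FlowAmount is the sum of the originals.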
def _combine_sources(p_series, df, cols, source_limit=None):
"""
Take the list of sources from a groupby.apply and return a dataframe
that contains one column containing a list of the sources and another
that concatenates them into a string. This is all in an effort to find
another approach for summing electricity for all plants in an aggregation
that match the same data sources.
Parameters
----------
df: dataframe
Dataframe containing merged generation and emissions data - includes
a column for data source (i.e., eGRID, NEI, RCRAInfo...)
Returns
----------
dataframe
"""
module_logger.debug(
f"Combining sources for {str(df.loc[p_series.index[0],cols].values)}"
)
source_list = list(np.unique(p_series))
if source_limit:
if len(source_list) > source_limit:
# result = pd.DataFrame()
# result=dict({"source_list":float("nan"),"source_string":float("nan")})
# result["source_list"]=float("nan")
# result["source_string"]=float("nan")
result = [float("nan"), float("nan")]
return result
else:
# result = pd.DataFrame()
source_list.sort()
source_list_string = "_".join(source_list)
# result=dict({"source_list":source_list,"source_string":source_list_string})
result = [source_list, source_list_string]
# result["source_list"] = pd.DataFrame(data=[source_list]).values.tolist()
# result["source_string"] = source_list_string
return result
else:
source_list.sort()
source_list_string = "_".join(source_list)
result = [source_list, source_list_string]
return result
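# For example, a group whose Source values are {"eGRID", "NEI"} yields
# [["NEI", "eGRID"], "NEI_eGRID"]; the sort is case-sensitive, so "NEI"
# precedes "eGRID".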
def add_data_collection_score(db, elec_df, subregion="BA"):
"""
Adds the data collection score which is a function of how much of the
total electricity generated in a subregion is captured by the denominator
used in the final emission factor.
Parameters
----------
db : dataframe
Dataframe containing facility-level emissions as generated by
create_generation_process_df.
elec_df : dataframe
Dataframe containing the totals for various subregion/source
combinations. These are used as the denominators in the emissions
factors
subregion : str, optional
The level of subregion that the data will be aggregated to. Choices
are 'all', 'NERC', 'BA', 'US', by default 'BA'
"""
from electricitylci.dqi import data_collection_lower_bound_to_dqi
from electricitylci.aggregation_selector import subregion_col
region_agg = subregion_col(subregion)
fuel_agg = ["FuelCategory"]
if region_agg:
groupby_cols = region_agg + fuel_agg + ["Year"]
else:
groupby_cols = fuel_agg + ["Year"]
temp_df = db.merge(
right=elec_df,
left_on=groupby_cols + ["source_string"],
right_on=groupby_cols + ["source_string"],
how="left",
)
reduced_db = db.drop_duplicates(subset=groupby_cols + ["eGRID_ID"])
region_elec = reduced_db.groupby(groupby_cols, as_index=False)[
"Electricity"
].sum()
region_elec.rename(
columns={"Electricity": "region_fuel_electricity"}, inplace=True
)
temp_df = temp_df.merge(
right=region_elec,
left_on=groupby_cols,
right_on=groupby_cols,
how="left",
)
db["Percent_of_Gen_in_EF_Denominator"] = (
temp_df["electricity_sum"] / temp_df["region_fuel_electricity"]
)
db["DataCollection"] = db["Percent_of_Gen_in_EF_Denominator"].apply(
lambda x: lookup_score_with_bound_key(
x, data_collection_lower_bound_to_dqi
)
)
db = db.drop(columns="Percent_of_Gen_in_EF_Denominator")
return db
def calculate_electricity_by_source(db, subregion="BA"):
"""
This function calculates the electricity totals by region and source
using the same approach as the original generation.py with attempts made to
speed it up. That is each flow will have a source associated with it
(eGRID, NEI, TRI, RCRAInfo). To develop an emission factor, the FlowAmount
will need to be divided by electricity generation. This routine sums all
electricity generation for all source/subregion combinations. So if
a subregion aggregates FlowAmounts source from NEI and TRI then the
denominator will be all production from plants that reported into NEI or
TRI for that subregion.
Parameters
----------
db : dataframe
Dataframe containing facility-level emissions as generated by
create_generation_process_df.
subregion : str, optional
The level of subregion that the data will be aggregated to. Choices
are 'all', 'NERC', 'BA', 'US', by default 'BA'
"""
from electricitylci.aggregation_selector import subregion_col
all_sources='_'.join(sorted(list(db["Source"].unique())))
power_plant_criteria=db["stage_code"]=="Power plant"
db_powerplant=db.loc[power_plant_criteria,:]
db_nonpower=db.loc[~power_plant_criteria,:]
region_agg = subregion_col(subregion)
fuel_agg = ["FuelCategory"]
if region_agg:
groupby_cols = (
region_agg
+ fuel_agg
+ ["Year", "stage_code", "FlowName", "Compartment"]
)
elec_groupby_cols = region_agg + fuel_agg + ["Year"]
else:
groupby_cols = fuel_agg + [
"Year",
"stage_code",
"FlowName",
"Compartment",
]
elec_groupby_cols = fuel_agg + ["Year"]
combine_source_by_flow = lambda x: _combine_sources(
x, db, ["FlowName", "Compartment"], 1
)
combine_source_lambda = lambda x: _combine_sources(
x, db_multiple_sources, groupby_cols
)
# power_db = db.loc[db["stage_code"]=='Power plant',:]
# This is a pretty expensive process when we have to start looking at each
# flow generated in each compartment for each balancing authority area.
# To hopefully speed this up, we'll group by FlowName and Comparment and look
# and try to eliminate flows where all sources are single entities.
source_df = pd.DataFrame()
source_df = pd.DataFrame(
db_powerplant.groupby(["FlowName", "Compartment"])[["Source"]].apply(
combine_source_by_flow
),
columns=["source_list"],
)
source_df[["source_list", "source_string"]] = pd.DataFrame(
source_df["source_list"].values.tolist(), index=source_df.index
)
source_df.reset_index(inplace=True)
old_index = db_powerplant.index
db_powerplant = db_powerplant.merge(
right=source_df,
left_on=["FlowName", "Compartment"],
right_on=["FlowName", "Compartment"],
how="left",
)
db_powerplant.index=old_index
db_multiple_sources = db_powerplant.loc[db_powerplant["source_string"].isna(), :]
if len(db_multiple_sources) > 0:
source_df = pd.DataFrame(
db_multiple_sources.groupby(groupby_cols)[["Source"]].apply(
combine_source_lambda
),
columns=["source_list"],
)
source_df[["source_list", "source_string"]] = pd.DataFrame(
source_df["source_list"].values.tolist(), index=source_df.index
)
source_df.reset_index(inplace=True)
db_multiple_sources.drop(
columns=["source_list", "source_string"], inplace=True
)
old_index = db_multiple_sources.index
db_multiple_sources = db_multiple_sources.merge(
right=source_df,
left_on=groupby_cols,
right_on=groupby_cols,
how="left",
)
db_multiple_sources.index = old_index
# db[["source_string","source_list"]].fillna(db_multiple_sources[["source_string","source_list"]],inplace=True)
db_powerplant.loc[
db_powerplant["source_string"].isna(), ["source_string", "source_list"]
] = db_multiple_sources[["source_string", "source_list"]]
unique_source_lists = list(db_powerplant["source_string"].unique())
# unique_source_lists = [x for x in unique_source_lists if ((str(x) != "nan")&(str(x)!="netl"))]
unique_source_lists = [
x for x in unique_source_lists if ((str(x) != "nan"))
]
# One set of emissions passed into this routine may be life cycle emissions
# used as proxies for Canadian generation. In those cases the electricity
# generation will be equal to the Electricity already in the dataframe.
elec_sum_lists = list()
unique_source_lists = unique_source_lists+[all_sources]
for src in unique_source_lists:
module_logger.info(f"Calculating electricity for {src}")
# src_filter = db.apply(lambda x: x["Source"] in src, axis=1)
db["temp_src"] = src
src_filter = [
a in b
for a, b in zip(
db["Source"].values.tolist(), db["temp_src"].values.tolist()
)
]
# total_filter = ~fuelcat_all & src_filter
sub_db = db.loc[src_filter, :]
sub_db.drop_duplicates(subset=fuel_agg + ["eGRID_ID"], inplace=True)
sub_db_group = sub_db.groupby(elec_groupby_cols, as_index=False).agg(
{"Electricity": [np.sum, np.mean], "eGRID_ID": "count"}
)
sub_db_group.columns = elec_groupby_cols + [
"electricity_sum",
"electricity_mean",
"facility_count",
]
# zero_elec_filter = sub_db_group["electricity_sum"]==0
sub_db_group["source_string"] = src
elec_sum_lists.append(sub_db_group)
db_nonpower["source_string"]=all_sources
db_nonpower["source_list"]=[all_sources]*len(db_nonpower)
elec_sums = pd.concat(elec_sum_lists, ignore_index=True)
elec_sums.sort_values(by=elec_groupby_cols, inplace=True)
db=pd.concat([db_powerplant,db_nonpower])
return db, elec_sums
def create_generation_process_df():
"""
Reads emissions and generation data from different sources to provide
facility-level emissions. Most important inputs to this process come
from the model configuration file.
Parameters
----------
None
Returns
----------
dataframe
Dataframe includes all facility-level emissions
"""
from electricitylci.eia923_generation import build_generation_data
from electricitylci.egrid_filter import (
egrid_facilities_to_include,
emissions_and_waste_for_selected_egrid_facilities,
)
from electricitylci.generation import egrid_facilities_w_fuel_region
from electricitylci.generation import (
add_technological_correlation_score,
add_temporal_correlation_score,
)
import electricitylci.emissions_other_sources as em_other
import electricitylci.ampd_plant_emissions as ampd
from electricitylci.model_config import eia_gen_year
from electricitylci.combinator import ba_codes
COMPARTMENT_DICT = {
"emission/air": "air",
"emission/water": "water",
"emission/ground": "ground",
"input": "input",
"output": "output",
"waste": "waste",
"air": "air",
"water": "water",
"ground": "ground",
}
if replace_egrid:
generation_data = build_generation_data().drop_duplicates()
cems_df = ampd.generate_plant_emissions(eia_gen_year)
cems_df.drop(columns=["FlowUUID"], inplace=True)
emissions_and_waste_for_selected_egrid_facilities = em_other.integrate_replace_emissions(
cems_df, emissions_and_waste_for_selected_egrid_facilities
)
else:
generation_data = build_generation_data(
egrid_facilities_to_include=egrid_facilities_to_include
)
emissions_and_waste_for_selected_egrid_facilities.drop(
columns=["FacilityID"]
)
emissions_and_waste_for_selected_egrid_facilities[
"eGRID_ID"
] = emissions_and_waste_for_selected_egrid_facilities["eGRID_ID"].astype(
int
)
final_database = pd.merge(
left=emissions_and_waste_for_selected_egrid_facilities,
right=generation_data,
right_on=["FacilityID", "Year"],
left_on=["eGRID_ID", "Year"],
how="left",
)
egrid_facilities_w_fuel_region[
"FacilityID"
] = egrid_facilities_w_fuel_region["FacilityID"].astype(int)
final_database = pd.merge(
left=final_database,
right=egrid_facilities_w_fuel_region,
left_on="eGRID_ID",
right_on="FacilityID",
how="left",
suffixes=["", "_right"],
)
key_df = (
final_database[["eGRID_ID", "FuelCategory"]]
.dropna()
.drop_duplicates(subset="eGRID_ID")
.set_index("eGRID_ID")
)
final_database.loc[
final_database["FuelCategory"].isnull(), "FuelCategory"
] = final_database.loc[
final_database["FuelCategory"].isnull(), "eGRID_ID"
].map(
key_df["FuelCategory"]
)
if replace_egrid:
final_database["FuelCategory"].fillna(
final_database["FuelCategory_right"], inplace=True
)
final_database["Final_fuel_agg"] = final_database["FuelCategory"]
if use_primaryfuel_for_coal:
final_database.loc[
final_database["FuelCategory"] == "COAL", ["Final_fuel_agg"]
] = final_database.loc[
final_database["FuelCategory"] == "COAL", "Primary_Fuel"
]
try:
year_filter = final_database["Year_x"] == final_database["Year_y"]
final_database = final_database.loc[year_filter, :]
final_database.drop(columns="Year_y", inplace=True)
except KeyError:
pass
final_database.rename(columns={"Year_x": "Year"}, inplace=True)
final_database = map_emissions_to_fedelemflows(final_database)
dup_cols_check = [
"FacilityID",
"FuelCategory",
"FlowName",
"FlowAmount",
"Compartment",
]
final_database = final_database.loc[
:, ~final_database.columns.duplicated()
]
final_database = final_database.drop_duplicates(subset=dup_cols_check)
final_database.drop(
columns=["FuelCategory", "FacilityID_x", "FacilityID_y"], inplace=True
)
final_database.rename(
columns={
"Final_fuel_agg": "FuelCategory",
"TargetFlowUUID": "FlowUUID",
},
inplace=True,
)
final_database = add_temporal_correlation_score(final_database)
final_database = add_technological_correlation_score(final_database)
final_database["DataCollection"] = 5
final_database["GeographicalCorrelation"] = 1
final_database["eGRID_ID"] = final_database["eGRID_ID"].astype(int)
final_database.sort_values(
by=["eGRID_ID", "Compartment", "FlowName"], inplace=True
)
final_database["stage_code"] = "Power plant"
final_database["Compartment_path"] = final_database["Compartment"]
final_database["Compartment"] = final_database["Compartment_path"].map(
COMPARTMENT_DICT
)
final_database["EIA_Region"] = final_database["Balancing Authority Code"].map(
ba_codes["EIA_Region"]
)
final_database["FERC_Region"] = final_database["Balancing Authority Code"].map(
ba_codes["FERC_Region"]
)
return final_database
def aggregate_data(total_db, subregion="BA"):
"""
Aggregates facility-level emissions to the specified subregion and
calculates emission factors based on the total emission and total
electricity generation.
Parameters
----------
total_db : dataframe
Facility-level emissions as generated by
create_generation_process_df
subregion : str, optional
The level of subregion that the data will be aggregated to. Choices
are 'all', 'NERC', 'BA', 'US', by default 'BA'.
"""
from electricitylci.aggregation_selector import subregion_col
def geometric_mean(p_series, df, cols):
# I think I actually need to replace this with the function contained in
# process_exchange_aggregator_uncertainty.py. The approach to add 1 will
# also lead to some large errors when dealing with small numbers.
# Alternatively we can use scipy.stats.lognorm to fit a distribution
# and provide the parameters
if (len(p_series) > 3) & (p_series.quantile(0.5) > 0):
# result = gmean(p_series.to_numpy()+1)-1
module_logger.debug(
f"Calculating confidence interval for"
f"{df.loc[p_series.index[0],groupby_cols].values}"
)
module_logger.debug(f"{p_series.values}")
with np.errstate(all='raise'):
try:
data = p_series.to_numpy()
except (ArithmeticError, ValueError, FloatingPointError):
module_logger.debug("Problem with input data")
return None
try:
log_data = np.log(data)
except (ArithmeticError, ValueError, FloatingPointError):
module_logger.debug("Problem with log function")
return None
try:
mean = np.mean(log_data)
except (ArithmeticError, ValueError, FloatingPointError):
module_logger.debug("Problem with mean function")
return None
l = len(data)
try:
sd = np.std(log_data)
sd2 = sd ** 2
except (ArithmeticError, ValueError, FloatingPointError):
module_logger.debug("Problem with std function")
return None
try:
pi1, pi2 = t.interval(alpha=0.90, df=l - 2, loc=mean, scale=sd)
except (ArithmeticError, ValueError, FloatingPointError):
module_logger.debug("Problem with t function")
return None
try:
upper_interval = np.max(
[
mean
+ sd2 / 2
+ pi2 * np.sqrt(sd2 / l + sd2 ** 2 / (2 * (l - 1))),
mean
+ sd2 / 2
- pi2 * np.sqrt(sd2 / l + sd2 ** 2 / (2 * (l - 1))),
]
)
except Exception:
module_logger.debug("Problem with interval function")
return None
try:
result = (np.exp(mean), 0, np.exp(upper_interval))
except (ArithmeticError, ValueError, FloatingPointError):
module_logger.debug("Problem with result")
return None
if result is not None:
return result
else:
module_logger.debug(
f"Problem generating uncertainty parameters \n"
f"{df.loc[p_series.index[0],groupby_cols].values}\n"
f"{p_series.values}"
f"{p_series.values+1}"
)
return None
else:
return None
def calc_geom_std(df):
if df["uncertaintyLognormParams"] is None:
return None, None
if isinstance(df["uncertaintyLognormParams"], str):
params = ast.literal_eval(df["uncertaintyLognormParams"])
try:
length = len(df["uncertaintyLognormParams"])
except TypeError:
module_logger.info(
f"Error calculating length of uncertaintyLognormParams"
f"{df['uncertaintyLognormParams']}"
)
return None, None
if length != 3:
module_logger.info(
f"Error estimating standard deviation - length: {length}"
)
return None, None
try:
geomean = df["Emission_factor"]
geostd = np.exp(
(
np.log(df["uncertaintyLognormParams"][2])
- np.log(df["Emission_factor"])
)
/ norm.ppf(0.95)
)
except ArithmeticError:
module_logger.info("Error estimating standard deviation")
return None, None
# Identity checks against float NaN/inf never match; np.isfinite rejects
# inf, -inf and nan in a single test.
if not np.isfinite(geostd):
return None, None
if geostd * geomean > df["uncertaintyMax"]:
return None, None
return str(geomean), str(geostd)
region_agg = subregion_col(subregion)
fuel_agg = ["FuelCategory"]
if region_agg:
groupby_cols = (
region_agg
+ fuel_agg
+ ["stage_code", "FlowName", "Compartment", "FlowUUID"]
)
elec_df_groupby_cols = (
region_agg + fuel_agg + ["Year", "source_string"]
)
else:
groupby_cols = fuel_agg + [
"stage_code",
"FlowName",
"Compartment",
"FlowUUID",
]
elec_df_groupby_cols = fuel_agg + ["Year", "source_string"]
total_db["FlowUUID"] = total_db["FlowUUID"].fillna(value="dummy-uuid")
total_db = aggregate_facility_flows(total_db)
total_db, electricity_df = calculate_electricity_by_source(
total_db, subregion
)
total_db = add_data_collection_score(total_db, electricity_df, subregion)
total_db["facility_emission_factor"] = (
total_db["FlowAmount"] / total_db["Electricity"]
)
total_db.dropna(subset=["facility_emission_factor"], inplace=True)
def wtd_mean(pdser, total_db, cols):
try:
wts = total_db.loc[pdser.index, "Electricity"]
result = np.average(pdser, weights=wts)
except Exception:
module_logger.info(
f"Error calculating weighted mean for {pdser.name}-"
f"{total_db.loc[pdser.index[0],cols]}"
)
result = float("nan")
return result
wm = lambda x: wtd_mean(x, total_db, groupby_cols)
geo_mean = lambda x: geometric_mean(x, total_db, groupby_cols)
geo_mean.__name__ = "geo_mean"
print(
"Aggregating flow amounts, dqi information, and calculating uncertainty"
)
database_f3 = total_db.groupby(
groupby_cols + ["Year", "source_string"], as_index=False
).agg(
{
"FlowAmount": ["sum", "count"],
"TemporalCorrelation": wm,
"TechnologicalCorrelation": wm,
"GeographicalCorrelation": wm,
"DataCollection": wm,
"ReliabilityScore": wm,
"facility_emission_factor": ["min", "max", geo_mean],
}
)
database_f3.columns = groupby_cols + [
"Year",
"source_string",
"FlowAmount",
"FlowAmountCount",
"TemporalCorrelation",
"TechnologicalCorrelation",
"GeographicalCorrelation",
"DataCollection",
"ReliabilityScore",
"uncertaintyMin",
"uncertaintyMax",
"uncertaintyLognormParams",
]
criteria = database_f3["Compartment"] == "input"
database_f3.loc[criteria, "uncertaintyLognormParams"] = None
database_f3 = database_f3.merge(
right=electricity_df,
left_on=elec_df_groupby_cols,
right_on=elec_df_groupby_cols,
how="left",
)
canadian_criteria = database_f3["FuelCategory"] == "ALL"
if region_agg:
canada_db = pd.merge(
left=database_f3.loc[canadian_criteria, :],
right=total_db[groupby_cols + ["Electricity"]],
left_on=groupby_cols,
right_on=groupby_cols,
how="left",
).drop_duplicates(subset=groupby_cols)
else:
total_grouped = total_db.groupby(by=groupby_cols, as_index=False)[
"Electricity"
].sum()
canada_db = pd.merge(
left=database_f3.loc[canadian_criteria, :],
right=total_grouped,
left_on=groupby_cols,
right_on=groupby_cols,
how="left",
)
canada_db.index = database_f3.loc[canadian_criteria, :].index
database_f3.loc[
database_f3["FlowUUID"] == "dummy-uuid", "FlowUUID"
] = float("nan")
database_f3.loc[canada_db.index, "electricity_sum"] = canada_db[
"Electricity"
]
database_f3["Emission_factor"] = (
database_f3["FlowAmount"] / database_f3["electricity_sum"]
)
database_f3["GeomMean"], database_f3["GeomSD"] = zip(
*database_f3[
[
"Emission_factor",
"uncertaintyLognormParams",
"uncertaintyMin",
"uncertaintyMax",
]
].apply(calc_geom_std, axis=1)
)
database_f3.sort_values(by=groupby_cols, inplace=True)
return database_f3
def olcaschema_genprocess(database, upstream_dict={}, subregion="BA"):
"""Turns the give database containing generator facility emissions
into dictionaries that contain the required data for insertion into
an openLCA-compatible json-ld. Additionally, default providers
for fuel inputs are mapped, using the information contained in the dictionary
containing openLCA-formatted data for the fuels.
Parameters
----------
database : dataframe
Dataframe containing aggregated emissions to be turned into openLCA
unit processes
upstream_dict : dictionary, optional
Dictionary as created by upstream_dict.py, containing the openLCA
formatted data for all of the fuel inputs. This function will use the
names and UUIDs from the entries to assign them as default providers.
subregion : str, optional
The subregion level of the aggregated data, by default "BA". See
aggregation_selector.py for available subregions.
Returns
-------
dictionary: dictionary containing openLCA-formatted data
"""
from electricitylci.process_dictionary_writer import (
unit,
flow_table_creation,
ref_exchange_creator,
uncertainty_table_creation,
)
from electricitylci.aggregation_selector import subregion_col
region_agg = subregion_col(subregion)
fuel_agg = ["FuelCategory"]
if region_agg:
base_cols = region_agg + fuel_agg
else:
base_cols = fuel_agg
non_agg_cols = [
"stage_code",
"FlowName",
"FlowUUID",
"Compartment",
"Year",
"source_string",
"TemporalCorrelation",
"TechnologicalCorrelation",
"GeographicalCorrelation",
"DataCollection",
"ReliabilityScore",
"uncertaintyMin",
"uncertaintyMax",
"uncertaintyLognormParams",
"Emission_factor",
"GeomMean",
"GeomSD",
]
def turn_data_to_dict(data, upstream_dict):
module_logger.debug(
f"Turning flows from {data.name} into dictionaries"
)
cols_for_exchange_dict = [
"internalId",
"@type",
"avoidedProduct",
"flow",
"flowProperty",
"input",
"quantitativeReference",
"baseUncertainty",
"provider",
"amount",
"amountFormula",
"unit",
"pedigreeUncertainty",
"dqEntry",
"uncertainty",
"comment",
]
year = ",".join(data["Year"].astype(str).unique())
datasources = ",".join(data["source_string"].astype(str).unique())
data["Maximum"] = data["uncertaintyMax"]
data["Minimum"] = data["uncertaintyMin"]
data["uncertainty"] = ""
data["internalId"] = ""
data["@type"] = "Exchange"
data["avoidedProduct"] = False
data["flowProperty"] = ""
data["input"] = False
input_filter = (data["Compartment"] == "input") | (
data["Compartment"].str.find("resource") != -1
)
data.loc[input_filter, "input"] = True
data["baseUncertainty"] = ""
data["provider"] = ""
data["unit"] = ""
data["ElementaryFlowPrimeContext"] = data["Compartment"]
default_unit = unit("kg")
data["unit"] = [default_unit] * len(data)
data["FlowType"] = "ELEMENTARY_FLOW"
data["flow"] = ""
provider_filter = data["stage_code"].isin(upstream_dict.keys())
for index, row in data.loc[provider_filter, :].iterrows():
provider_dict = {
"name": upstream_dict[getattr(row, "stage_code")]["name"],
"categoryPath": upstream_dict[getattr(row, "stage_code")][
"category"
],
"processType": "UNIT_PROCESS",
"@id": upstream_dict[getattr(row, "stage_code")]["uuid"],
}
data.at[index, "provider"] = provider_dict
data.at[index, "unit"] = unit(
upstream_dict[getattr(row, "stage_code")]["q_reference_unit"]
)
data.at[index, "FlowType"] = "PRODUCT_FLOW"
for index, row in data.iterrows():
data.at[index, "uncertainty"] = uncertainty_table_creation(
data.loc[index:index, :]
)
data.at[index, "flow"] = flow_table_creation(
data.loc[index:index, :]
)
data["amount"] = data["Emission_factor"]
data["amountFormula"] = ""
data["quantitativeReference"] = False
data["dqEntry"] = (
"("
+ str(round(data["ReliabilityScore"].iloc[0], 1))
+ ";"
+ str(round(data["TemporalCorrelation"].iloc[0], 1))
+ ";"
+ str(round(data["GeographicalCorrelation"].iloc[0], 1))
+ ";"
+ str(round(data["TechnologicalCorrelation"].iloc[0], 1))
+ ";"
+ str(round(data["DataCollection"].iloc[0], 1))
+ ")"
)
data["pedigreeUncertainty"] = ""
data["comment"] = f"{datasources} - {year}"
data_for_dict = data[cols_for_exchange_dict]
data_for_dict = data_for_dict.append(
ref_exchange_creator(), ignore_index=True
)
data_dict = data_for_dict.to_dict("records")
return data_dict
database_groupby = database.groupby(by=base_cols)
process_df = pd.DataFrame(
database_groupby[non_agg_cols].apply(
turn_data_to_dict, (upstream_dict)
)
)
process_df.columns = ["exchanges"]
process_df.reset_index(inplace=True)
process_df["@type"] = "Process"
process_df["allocationFactors"] = ""
process_df["defaultAllocationMethod"] = ""
process_df["location"] = ""
process_df["parameters"] = ""
process_df["processDocumentation"] = ""
process_df["processType"] = "UNIT_PROCESS"
process_df["category"] = (
"22: Utilities/2211: Electric Power Generation, Transmission and Distribution/"
+ process_df[fuel_agg].values
)
if region_agg is None:
process_df["description"] = (
"Electricity from "
+ process_df[fuel_agg].values
+ " produced at generating facilities in the US"
)
process_df["name"] = (
"Electricity - " + process_df[fuel_agg].values + " - US"
)
else:
process_df["description"] = (
"Electricity from "
+ process_df[fuel_agg].values
+ " produced at generating facilities in the "
+ process_df[region_agg].values
+ " region"
)
process_df["name"] = (
"Electricity - "
+ process_df[fuel_agg].values
+ " - "
+ process_df[region_agg].values
)
process_cols = [
"@type",
"allocationFactors",
"defaultAllocationMethod",
"exchanges",
"location",
"parameters",
"processDocumentation",
"processType",
"name",
"category",
"description",
]
result = process_df[process_cols].to_dict("index")
return result
if __name__ == "__main__":
plant_emission_df = create_generation_process_df()
aggregated_emissions_df = aggregate_data(plant_emission_df, subregion="BA")
datetimestr = datetime.now().strftime("%Y%m%d_%H%M%S")
aggregated_emissions_df.to_csv(
f"{output_dir}/aggregated_emissions_{datetimestr}.csv"
)
plant_emission_df.to_csv(f"{output_dir}/plant_emissions_{datetimestr}.csv")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# encoding: utf-8
"""
DatabaseManager.py
This class handles saving the list of tweets and pruning it according to age.
"""
from ManagerBase import *
import sqlite3
import os
from typing import List
from GlobalSettings import *
from RSSItemTuple import *
import string
class DatabaseManager(ManagerBase):
"""
This class abstracts our file management.
pyTwittertoRSS keeps a list of tweet items that is converted into the RSS feed. This class
handles reading/writing the list as well as pruning the files based on age.
"""
# *********************************************************************************************
def __init__(self):
"""
Constructor to initialize DatabaseManager
"""
super().__init__()
# Set this here so we can use it later
self.printableChars = string.printable
# If the DB is not there, create it
if not os.path.exists(itemFilename):
self.logger.info("Creating the database file {}".format(itemFilename))
self.__CreateDatabaseFile()
# *********************************************************************************************
def __CreateDatabaseFile(self) -> bool:
"""
Create the initial empty sqlite3 database to store past tweets
:return: True if successful, False otherwise
"""
try:
sqlStr = 'CREATE TABLE rssitems (tweet_id integer PRIMARY KEY, tweet_url text, ' \
'user_name text, screen_name text, user_image_url text, tweet_text text, ' \
'found_urls text, created_at integer)'
# Create our connection object
tConnection = sqlite3.connect(itemFilename)
tCursor = tConnection.cursor()
# Create the items table
tCursor.execute(sqlStr)
# Commit changes and close
tConnection.commit()
tConnection.close()
self.logger.info("Successfully created database file {}".format(itemFilename))
except Exception as tExcept:
self.logger.critical("*** DatabaseManager(__CreateDatabase): Could not create the database file!")
self.logger.error(tExcept)
return False
return True
# *********************************************************************************************
def __EscapeSQLString(self, inString: str) -> str:
"""
Change special characters in the string so we can push them into SQLITE3
:param inString: String to fix
:return: escaped string
"""
if inString is None:
return ""
# Create a temp string by first removing everything not printable
tempString = ''.join(filter(lambda x: x in self.printableChars, inString))
return tempString.replace("'", "''")
# *********************************************************************************************
def GetHighestRead(self) -> int:
"""
Get the highest tweet ID out of the database
:return: Integer of the highest twitter ID
"""
try:
# Create our connections
tConnection = sqlite3.connect(itemFilename)
tCursor = tConnection.cursor()
tCursor.execute("SELECT MAX(tweet_id) from rssitems")
maxValue = tCursor.fetchone()
tConnection.close()
except Exception as tExcept:
self.logger.critical("*** DatabaseManager(GetHighestRead): Unable to find the highest ID read!")
self.logger.error(tExcept)
return -1
if maxValue[0] is None:
return -1
else:
return maxValue[0]
# *********************************************************************************************
def PurgeOldEntries(self) -> bool:
"""
Deletes entries older than purgeHours hours from the database
:return: True if successful, False otherwise
"""
try:
# Create our connections
tConnection = sqlite3.connect(itemFilename)
tCursor = tConnection.cursor()
# Create the query string and execute it
queryString = "DELETE FROM rssitems WHERE datetime(created_at, 'unixepoch') <= " \
"datetime('now', '-{} hours', 'UTC')".format(purgeHours)
tCursor.execute(queryString)
# Commit changes and close
tConnection.commit()
tConnection.close()
except Exception as tExcept:
self.logger.warning("*** DatabaseManager(PurgeOldEntries): An error occurred while purging old data items")
self.logger.error(tExcept)
return False
return True
# *********************************************************************************************
def ReadItems(self) -> List[RSSItemTuple]:
"""
Reads old items from the database after purging those past our minimum age
:return: list of RSSItemTuple entries, or an empty list on error
"""
itemList = list()
try:
# First purge our old entries
if not self.PurgeOldEntries():
return list()
# Create our connections
tConnection = sqlite3.connect(itemFilename)
tCursor = tConnection.cursor()
# Get the rows
tCursor.execute("SELECT * FROM rssitems ORDER BY created_at ASC")
rows = tCursor.fetchall()
# Loop through and enter into our list
for row in rows:
item = RSSItemTuple()
item.tweet_id = row[0]
item.tweet_url = row[1]
item.user_name = row[2]
item.screen_name = row[3]
item.user_url = row[4]
item.tweet_text = row[5]
item.found_urls = row[6]
item.created_at = datetime.datetime.fromtimestamp(row[7])
itemList.append(item)
# Close the connection
tConnection.close()
except Exception as tExcept:
self.logger.critical("*** DatabaseManager(ReadItems): Unable to read in the items!")
self.logger.error(tExcept)
return list()
return itemList
# *********************************************************************************************
def WriteNewItems(self, inItems: List[RSSItemTuple]) -> bool:
"""
Writes new items into the database
:return: True if successful, False otherwise
"""
try:
# Create our connections
tConnection = sqlite3.connect(itemFilename)
tCursor = tConnection.cursor()
for item in inItems:
# First fix our strings
user_name = self.__EscapeSQLString(item.user_name)
tweet_url = self.__EscapeSQLString(item.tweet_url)
screen_name = self.__EscapeSQLString(item.screen_name)
user_url = self.__EscapeSQLString(item.user_url)
tweet_text = self.__EscapeSQLString(item.tweet_text)
found_urls = self.__EscapeSQLString(item.found_urls)
queryString = \
"INSERT INTO rssitems (tweet_id, tweet_url, user_name, screen_name, user_image_url, tweet_text, " \
"found_urls, created_at) VALUES ({tweetid}, '{tweeturl}', '{username}', '{screenname}', " \
"'{userurl}', '{tweettext}', '{foundurls}', {createdat})".format(tweetid=item.tweet_id,
tweeturl=tweet_url,
username=user_name,
screenname=screen_name,
userurl=user_url,
tweettext=tweet_text,
foundurls=found_urls,
createdat=int(
item.created_at.timestamp()))
tCursor.execute(queryString)
tConnection.commit()
tConnection.close()
except Exception as tExcept:
self.logger.critical("*** DatabaseManager(WriteNewItems): Unable to write new items!")
self.logger.error(tExcept)
return False
return True
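# Note: sqlite3 also supports parameterized queries, which make the manual
# __EscapeSQLString step unnecessary. A minimal sketch of the same INSERT
# using '?' placeholders (illustration only, not part of the original class):
#
#   tCursor.execute(
#       "INSERT INTO rssitems (tweet_id, tweet_url, user_name, screen_name, "
#       "user_image_url, tweet_text, found_urls, created_at) "
#       "VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
#       (item.tweet_id, item.tweet_url, item.user_name, item.screen_name,
#        item.user_url, item.tweet_text, item.found_urls,
#        int(item.created_at.timestamp())))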
# *************************************************************************************************
if __name__ == "__main__":
foo = DatabaseManager()
bar = foo.GetHighestRead()
|
nilq/baby-python
|
python
|
import kiui
kiui.try_import('os', 'os', True)
print(os)
kiui.env(verbose=True)
print(globals())
kiui.env('torch', verbose=True)
print(globals())
kiui.env('notapack', verbose=True)
|
nilq/baby-python
|
python
|
import argparse
import os
import sys
import torch
from IPython import get_ipython
from utils.data import ManualSeedReproducible
from utils.dep_free import in_notebook
from utils.filesystems.gdrive.colab import ColabFilesystem, ColabFolder, ColabCapsule
from utils.filesystems.gdrive.remote import GDriveCapsule, GDriveFilesystem, GDriveFolder
from utils.filesystems.local import LocalFilesystem, LocalFolder, LocalCapsule
# Flag to run first test batches locally
from utils.plot import ensure_matplotlib_fonts_exist
##########################################
### Parse CLI Arguments ###
##########################################
parser = argparse.ArgumentParser(description='Trains GAN model in PyTorch.')
parser.add_argument('--device', type=str, default='cpu', choices=['cpu', 'cuda'],
help='execution device (\'cpu\', or \'cuda\')')
parser.add_argument('--log_level', type=str, default='debug', choices=['debug', 'info', 'warning', 'error', 'critical'],
help='default log level (\'debug\', \'info\', \'warning\', \'error\' or \'critical\')')
parser.add_argument('--chkpt_step', type=str, default='latest',
help='model checkpoint to be loaded (\'latest\' or str or int)')
parser.add_argument('--seed', type=int, default=42,
help='random generators seed value (default: 42)')
parser.add_argument('-use_refresh_token', action='store_true',
help='if set will use client_secrets.json to connect to Google Drive, else will ask for auth code')
parser.add_argument('--run_locally', action='store_true',
help='flag must be present to start local running (aka first pass run)')
# New GDrive root (e.g. "/Education/AUTH/COURSES/10th Semester - Thesis/ThesisGStorage")
parser.add_argument('--gdrive_which', type=str, default='auth',
help='Choose which Google Drive will be used as a storage devices (one of "personal", "auth")')
args = parser.parse_args()
##########################################
### Environment Initialization ###
##########################################
run_locally = True
if in_notebook() and not args.run_locally:
run_locally = False # local runs are performed via IDE runs (and thus the terminal)
os.environ['TRAIN_ENV'] = 'local' if run_locally else 'nonlocal'
# ID of Google Drive folder to be considered as project root
# - auth: the entire drive will be used for thesis storage (so no root change would be done)
# - personal: thesis storage is inside a single directory of my personal Google Drive --> this id must be provided
cloud_root = None if args.gdrive_which == 'auth' else '12IiDRSnj6r7Jd66Yxz3ZZTn9EFW-Qnqu'
# Check if running inside Colab or Kaggle
if 'google.colab' in sys.modules or 'google.colab' in str(get_ipython()) or 'COLAB_GPU' in os.environ:
exec_env = 'colab'
local_gdrive_root = '/content/drive/MyDrive'
run_locally = False
elif 'KAGGLE_KERNEL_RUN_TYPE' in os.environ:
exec_env = 'kaggle'
local_gdrive_root = '/kaggle/working/GoogleDrive'
run_locally = False
else:
exec_env = 'ssh'
local_gdrive_root = '/home/achariso/PycharmProjects/gans-thesis/.gdrive'
if not os.path.exists(local_gdrive_root):
run_locally = False
local_gdrive_root = '/workspace/GoogleDrive' # vast.ai
if not os.path.exists(local_gdrive_root):
local_gdrive_root = input('local_gdrive_root = ')
assert os.path.exists(local_gdrive_root), f'local_gdrive_root={local_gdrive_root} NOT FOUND'
os.environ['TRAIN_EXEC_ENV'] = exec_env
# Check if GPU is available
exec_device = torch.device('cuda:0' if 'cuda' == args.device and torch.cuda.is_available() else 'cpu')
os.environ['TRAIN_EXEC_DEV'] = str(exec_device)
# Get log level
log_level = args.log_level
os.environ['TRAIN_LOG_LEVEL'] = log_level
# Reproducibility
seed = ManualSeedReproducible.manual_seed(args.seed)
##########################################
### GDrive Filesystem Initialization ###
##########################################
# - define FilesystemFolder to interact with files/folders under the root folder on Google Drive
if exec_env == 'colab':
# Colab filesystem is a locally-mounted filesystem. Interacts with native OS calls.
fs = ColabFilesystem(ccapsule=ColabCapsule())
groot = ColabFolder.root(capsule_or_fs=fs)
elif run_locally:
# Local filesystem (basically one directory under given root). Interacts with native OS calls.
fs = LocalFilesystem(ccapsule=LocalCapsule(local_root=local_gdrive_root))
groot = LocalFolder.root(capsule_or_fs=fs)
else:
# Remote filesystem. Interacts via GoogleDrive API calls.
use_refresh_token = args.use_refresh_token
try:
use_refresh_token = use_refresh_token or False
except NameError:
use_refresh_token = run_locally
gcapsule = GDriveCapsule(local_gdrive_root=local_gdrive_root, use_http_cache=True, update_credentials=True,
use_refresh_token=use_refresh_token)
fs = GDriveFilesystem(gcapsule=gcapsule)
groot = GDriveFolder.root(capsule_or_fs=fs, update_cache=True, cloud_root=cloud_root)
# - define immediate sub-folders of root folder
# print(json.dumps(groot.subfolders, indent=4))
datasets_groot = groot.subfolder_by_name('Datasets')
models_groot = groot.subfolder_by_name('Models')
fonts_groot = groot.subfolder_by_name('Fonts')
# - ensure that system and matplotlib fonts directories exist and have the correct font files
rebuilt_fonts = ensure_matplotlib_fonts_exist(fonts_groot, force_rebuild=False)
if rebuilt_fonts and exec_env != 'ssh':
groot.fs.logger.critical('Fonts rebuilt! Terminating python process now.')
os.kill(os.getpid(), 9)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from cats.models import Cat,Breed
# Register your models here.
admin.site.register(Cat)
admin.site.register(Breed)
|
nilq/baby-python
|
python
|
if __name__ == '__main__':
n = int(input())
vars = input().split()
integer_list = map(int, vars)
print(hash(tuple(integer_list)))
|
nilq/baby-python
|
python
|
#
# Given an array nums of n integers and an integer target, find three integers in nums such that the sum is closest to target. Return the sum of the three integers. You may assume that each input would have exactly one solution.
#
# Example:
#
# Given array nums = [-1, 2, 1, -4], and target = 1.
#
# The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
class Solution(object):
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
length = len(nums)
closest, mindiff = 0, 2147483647
for i in range(length):
left, right = i + 1, length - 1
while left < right:
res = nums[left] + nums[right] + nums[i]
diff = abs(target - res)
if mindiff > diff:
closest = res
mindiff = diff
if res < target:
left += 1
elif res > target:
right -= 1
else:
return res
return closest
s = Solution()
s.threeSumClosest([-1, 2, 1, -4], 1)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from apprise import plugins
from helpers import AppriseURLTester
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
# Our Testing URLs
apprise_url_tests = (
('bark://', {
# No host specified
'instance': None,
}),
('bark://:@/', {
# just invalid all around
'instance': None,
}),
('bark://localhost', {
# No Device Key specified
'instance': plugins.NotifyBark,
# Expected notify() response False (because we won't be able
# to actually notify anything if no device_key was specified
'notify_response': False,
}),
('bark://192.168.0.6:8081/device_key', {
# Everything is okay
'instance': plugins.NotifyBark,
# Our expected url(privacy=True) startswith() response:
'privacy_url': 'bark://192.168.0.6:8081/',
}),
('bark://user@192.168.0.6:8081/device_key', {
# Everything is okay (test with user)
'instance': plugins.NotifyBark,
# Our expected url(privacy=True) startswith() response:
'privacy_url': 'bark://user@192.168.0.6:8081/',
}),
('bark://192.168.0.6:8081/device_key/?sound=invalid', {
# bad sound, but we go ahead anyway
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/device_key/?sound=alarm', {
# alarm.caf sound loaded
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/device_key/?sound=NOiR.cAf', {
# noir.caf sound loaded
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/device_key/?badge=100', {
# set badge
'instance': plugins.NotifyBark,
}),
('barks://192.168.0.6:8081/device_key/?badge=invalid', {
# set invalid badge
'instance': plugins.NotifyBark,
}),
('barks://192.168.0.6:8081/device_key/?badge=-12', {
# set invalid badge
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/device_key/?category=apprise', {
# set category
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/device_key/?image=no', {
# do not display image
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/device_key/?group=apprise', {
# set group
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/device_key/?level=invalid', {
# bad level, but we go ahead anyway
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/?to=device_key', {
# test use of to= argument
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/device_key/?click=http://localhost', {
# Our click link
'instance': plugins.NotifyBark,
}),
('bark://192.168.0.6:8081/device_key/?level=active', {
# active level
'instance': plugins.NotifyBark,
}),
('bark://user:pass@192.168.0.5:8086/device_key/device_key2/', {
# Everything is okay
'instance': plugins.NotifyBark,
# Our expected url(privacy=True) startswith() response:
'privacy_url': 'bark://user:****@192.168.0.5:8086/',
}),
('barks://192.168.0.7/device_key/', {
'instance': plugins.NotifyBark,
# throw a bizarre code forcing us to fail to look it up
'response': False,
'requests_response_code': 999,
# Our expected url(privacy=True) startswith() response:
'privacy_url': 'barks://192.168.0.7/device_key',
}),
('bark://192.168.0.7/device_key', {
'instance': plugins.NotifyBark,
# Throws a series of connection and transfer exceptions when this flag
# is set and tests that we gracefully handle them
'test_requests_exceptions': True,
}),
)
def test_plugin_bark_urls():
"""
NotifyBark() Apprise URLs
"""
# Run our general tests
AppriseURLTester(tests=apprise_url_tests).run_all()
|
nilq/baby-python
|
python
|
def post_order(node):
if node.left:
post_order(node.left)
if node.right:
post_order(node.right)
print(node.data)
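# Minimal sketch to make the traversal runnable; this Node class and the
# example tree are assumed for illustration, not part of the original snippet.
class Node:
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

if __name__ == "__main__":
    # post-order visits the left subtree, the right subtree, then the root: 1, 3, 2
    post_order(Node(2, Node(1), Node(3)))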
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""Integration test for traveling to the mast"""
import os
import sys
parent_dir = os.path.dirname(os.path.abspath(__file__))
gparent_dir = os.path.dirname(parent_dir)
ggparent_dir = os.path.dirname(gparent_dir)
gggparent_dir = os.path.dirname(ggparent_dir)
sys.path += [parent_dir, gparent_dir, ggparent_dir, gggparent_dir]
import logging
from flight_manager import FlightManager
from flight.state_settings import StateSettings
if __name__ == "__main__":
try:
state_settings: StateSettings = StateSettings()
state_settings.enable_early_laps(True)
state_settings.set_number_of_early_laps(1)
state_settings.enable_to_mast(True)
state_settings.enable_module_detection(False)
state_settings.set_run_title("Mast Travel Test")
state_settings.set_run_description("Test traveling to mast GPS after completing early laps")
flight_manager: FlightManager = FlightManager(state_settings)
flight_manager.main()
except Exception:
logging.exception("Unfixable error detected")
|
nilq/baby-python
|
python
|
"""
Question:
Distinct ways to climb an n-step staircase where
each time you can climb either 1 or 2 steps.
"""
"""
Solution 1:
The problem has a natural recursive structure:
the n'th stair can be reached from either the (n-1)'th stair or the (n-2)'th stair.
Let the total number of ways to reach the n'th stair be 'ways(n)'.
The value of 'ways(n)' can then be written as:
ways(n) = ways(n-1) + ways(n-2)
This is the Fibonacci recurrence; note, however, that ways(n) equals fibonacci(n+1).
ways(1) = fib(2) = 1
ways(2) = fib(3) = 2
ways(3) = fib(4) = 3
"""
def fibo(n:int) -> int:
return n if n<=1 else fibo(n-1)+fibo(n-2)
def ways(n:int) -> int:
fmt = "n needs to be positive integer, your input {}"
assert isinstance(n, int) and n > 0, fmt.format(n)
return fibo(n+1)
# print(ways(4))
"""
Solution 2:
This uses bottom to top approach , in tabular method ,
We use table to store the previous values in list.
"""
def climb_stairs(n: int) -> int:
"""
Args:
n: number of steps of staircase
Returns:
Distinct ways to climb a n step staircase
Raises:
AssertionError: n not positive integer
"""
fmt = "n needs to be positive integer, your input {}"
assert isinstance(n, int) and n > 0, fmt.format(n)
if n == 1:
return 1
dp = [0] * (n + 1)
dp[0], dp[1] = (1, 1)
for i in range(2, n + 1):
dp[i] = dp[i - 1] + dp[i - 2]
return dp[n]
# climb_stairs(3)
# 3
# climb_stairs(1)
# 1
# climb_stairs(-7)
# Traceback (most recent call last):
# ...
# AssertionError: n needs to be positive integer, your input -7
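# Quick equivalence check (added illustration, not in the original): both
# solutions should agree, confirming ways(n) == fib(n+1) == climb_stairs(n).
if __name__ == "__main__":
    for n in range(1, 10):
        assert ways(n) == climb_stairs(n)
    print("ways and climb_stairs agree for n in 1..9")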
|
nilq/baby-python
|
python
|
#!/bin/env python
import os
import logging
import pandas as pd
class DatasetMerger:
def __init__(self, workDir=None):
self.logger = logging.getLogger("DatasetMerger")
self.cwd = os.path.abspath(os.getcwd()) if not workDir else os.path.abspath(workDir)
#
self.dataframes = {
'JHU': os.path.join(self.cwd,'data_JHU.csv'),
'RKI': os.path.join(self.cwd,'data_RKI.csv')
}
self.dataframe = pd.DataFrame()
def formatData(self):
if not os.path.exists(self.dataframes['JHU']):
raise Exception("dataframe jhu does not exist")
if not os.path.exists(self.dataframes['RKI']):
raise Exception("dataframe rki does not exist")
jhu = pd.read_csv(self.dataframes['JHU'])
rki = pd.read_csv(self.dataframes['RKI'])
rki = rki.transpose()
rki = rki.rename(columns={0: "RKI_Cases", 1: "RKI_Deaths"})
rki = rki.drop(rki.index[0])
jhu = jhu.drop([1]) # drop 'Recovered' since no data for RKI
jhu = jhu.transpose()
jhu = jhu.rename(columns={0: "JHU_Cases", 2: "JHU_Deaths"})
jhu = jhu.drop(jhu.index[0:43]) # hacked...
# check for df row len (days entered), due to different update cycle of datasources
while (len(rki.index) != len(jhu.index)):
self.logger.info("DatasetMerger: Different data progression - drop newest entry that is advanced")
if len(rki.index) > len(jhu.index):
rki = rki.drop(rki.index[len(rki.index) - 1])
else:
jhu = jhu.drop(jhu.index[len(jhu.index) - 1])
jhu.index = rki.index
# better option(?): to cast indices and intersect
# x = pd.to_datetime(jhu.columns[2:])
# y = pd.to_datetime(df.columns[1:])
# xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
rki[["JHU_Cases", "JHU_Deaths"]] = jhu[["JHU_Cases", "JHU_Deaths"]] # add JHU columns
# calculate Delta rows
delta_1 = rki["RKI_Cases"] - rki["JHU_Cases"]
rki["Delta_Cases"] = delta_1
delta_2 = rki["RKI_Deaths"] - rki["JHU_Deaths"]
rki["Delta_Deaths"] = delta_2
self.dataframe = rki
def saveData(self, filePath):
self.logger.info("Save data: %s", filePath)
self.dataframe.to_csv(filePath, encoding='utf-8', index=True) # index true to keep dates
if __name__ == "__main__":
logging.basicConfig(format='[%(asctime)s:%(name)s:%(levelname)s]-> %(message)s', level=logging.DEBUG)
m = DatasetMerger()
m.formatData()
m.saveData(os.path.join(os.getcwd(), 'data_final.csv'))
|
nilq/baby-python
|
python
|
"""
WSGI config for my_hubu project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from os.path import join,dirname,abspath
PROJECT_DIR=dirname(dirname(abspath(__file__)))
import sys
sys.path.insert(0,PROJECT_DIR)
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_hubu.settings')
application = get_wsgi_application()
|
nilq/baby-python
|
python
|
from .util import get_groups
def students_processor(request):
absolute_url = "{}://{}:{}".format(request.scheme, request.META['SERVER_NAME'], request.META['SERVER_PORT'])
return {'ABSOLUTE_URL': absolute_url}
def groups_processors(request):
return {'GROUPS': get_groups(request)}
|
nilq/baby-python
|
python
|
from .PercentChangeTransformer import PercentChangeTransformer
from .ColumnDropperTransformer import ColumnDropperTransformer
from .DFFeatureUnion import DFFeatureUnion
from .SMATransformer import SMATransformer
from .EMATransformer import EMATransformer
from .MACDTransformer import MACDTransformer
from .GreaterThanTransformer import GreaterThanTransformer
from .ToBoolTransformer import ToBoolTransformer
from .DateTransformer import MonthTransformer,HourTransformer,DayTransformer
from .STD2xTransformer import STD2xTransformer
from .RSITransformer import RSITransformer
from .BollingerBandTransformer import BollingerBandTransform
from .BoolChangeTransformer import BoolChangeTransformer
from .ProductionPipe import CreatePipeline
|
nilq/baby-python
|
python
|
# project/server/models.py
import jwt
import datetime
from flask import current_app
from service.database import db, bcrypt
from uuid import uuid4
class Organisation(db.Model):
"""Organisation data"""
__tablename__ = "organisation"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String)
credit = db.Column(db.Integer()) # remaining simulation credit
tally = db.Column(db.Integer()) # tally of simulations run
users = db.relationship("User", back_populates="organisation", lazy="joined")
def __init__(self, name="", credit=0, tally=0):
self.name = name
self.credit = credit
self.tally = tally
@staticmethod
def decode_auth_token(auth_token):
"""
Validates the auth token
:param auth_token:
:return: integer|string
"""
try:
payload = jwt.decode(auth_token, current_app.config.get('SECRET_KEY'))
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
class User(db.Model):
"""User data"""
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
uuid = db.Column(db.String)
tally = db.Column(db.Integer())
credit = db.Column(db.Integer())
organisation_id = db.Column(db.Integer, db.ForeignKey('organisation.id'))
organisation = db.relationship("Organisation", back_populates="users")
def __init__(self, tally=0, credit=0, organisation=None):
self.uuid = str(uuid4())
self.tally = tally
self.credit = credit
self.organisation = organisation
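# Illustrative counterpart to decode_auth_token (a sketch, not part of the
# original service): encodes an id into the 'sub' claim that decode_auth_token
# reads back. The one-day expiry is an assumption.
def encode_auth_token(subject_id):
    payload = {
        'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),
        'iat': datetime.datetime.utcnow(),
        'sub': subject_id,
    }
    return jwt.encode(payload, current_app.config.get('SECRET_KEY'))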
|
nilq/baby-python
|
python
|
"""
Microsoft Archive parser
Author: Victor Stinner
Creation date: 2007-03-04
"""
MAX_NB_FILE = 100000
from hachoir_parser import Parser
from hachoir_core.field import FieldSet, String, UInt32, SubFile
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
class FileIndex(FieldSet):
static_size = 68*8
def createFields(self):
yield String(self, "filename", 56, truncate="\0", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize"))
yield textHandler(UInt32(self, "crc32"), hexadecimal)
yield UInt32(self, "offset")
def createDescription(self):
return "File %s (%s) at %s" % (
self["filename"].value, self["filesize"].display, self["offset"].value)
class MarFile(Parser):
MAGIC = "MARC"
PARSER_TAGS = {
"id": "mar",
"category": "archive",
"file_ext": ("mar",),
"min_size": 80*8, # At least one file index
"magic": ((MAGIC, 0),),
"description": "Microsoft Archive",
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != self.MAGIC:
return "Invalid magic"
if self["version"].value != 3:
return "Invalid version"
if not(1 <= self["nb_file"].value <= MAX_NB_FILE):
return "Invalid number of file"
return True
def createFields(self):
yield String(self, "magic", 4, "File signature (MARC)", charset="ASCII")
yield UInt32(self, "version")
yield UInt32(self, "nb_file")
files = []
for index in xrange(self["nb_file"].value):
item = FileIndex(self, "file[]")
yield item
if item["filesize"].value:
files.append(item)
files.sort(key=lambda item: item["offset"].value)
for index in files:
padding = self.seekByte(index["offset"].value)
if padding:
yield padding
size = index["filesize"].value
desc = "File %s" % index["filename"].value
yield SubFile(self, "data[]", size, desc, filename=index["filename"].value)
|
nilq/baby-python
|
python
|
import sys
import click
from moulinette import hwserializer, itemserializer, testserializer
from moulinette.homework.models import *
from moulinette.stats_and_logs.models import RequestLog
def startup():
value = click.prompt(
'Please select an action:\n'
'1. Create a homework assignment.\n'
'2. Edit a homework assignment.\n'
'3. Edit a homework item.\n'
'4. Deactivate a homework assignment.\n'
'5. Reactivate a homework assignment.\n'
'6. Delete a homework assignment.\n'
'7. List active homework assignments.\n'
'8. List ALL homework assignments.\n'
'9. Fix all tests with null timeout.\n'
'0. Exit.\n>> ', default=0, type=int, show_default=False)
click.echo('\n')
if value == 1:
create_hw()
elif value == 2:
edit_hw()
elif value == 3:
edit_item()
elif value == 4:
deactivate_hw()
elif value == 5:
activate_hw()
elif value == 6:
delete_hw()
elif value == 7:
list_active()
elif value == 8:
list_all()
elif value == 9:
fix_tests_timeout()
else:
exit()
def fix_tests_timeout():
tests = Test.query.all()
for test in tests:
if not test.timeout:
test.timeout = 10
db.session.add(test)
db.session.commit()
def activate_hw():
id = click.prompt('ID of the homework to activate', type=str)
realid = hwserializer.loads(id)
hw = Homework.query.get(realid)
if hw:
hw.activate()
db.session.commit()
click.echo('Activated homework: ' + hwserializer.dumps(hw.id))
else:
click.echo('No such homework: ' + id)
def create_hw():
name = click.prompt('Name of the assignment', type=str)
click.echo('Description: (Ctrl-D to finish):')
description = sys.stdin.read()
hw = Homework(name, description)
db.session.add(hw)
db.session.commit()
click.echo('Homework created with id: ' + hwserializer.dumps(hw.id))
additem = click.confirm('Do you wish to add an item to this homework?')
while additem:
add_item_to_homework(hw)
additem = click.confirm('Do you wish to add another item?')
def edit_hw():
id = click.prompt('ID of homework to edit: ', type=str)
hw = Homework.query.get(hwserializer.loads(id))
click.echo("Homework name: " + hw.name)
click.echo("Homework description: " + hw.description)
if click.confirm('Change name?', default=True):
name = click.prompt('New name: ', type=str)
hw.name = name
if click.confirm('Change description?', default=True):
click.echo('New description: (Ctrl-D to finish):')
description = sys.stdin.read()
hw.description = description
db.session.add(hw)
db.session.commit()
def edit_item():
active = Item.query.all()
click.echo('Items: (id - name)')
for item in active:
click.echo(itemserializer.dumps(item.id) + ' - ' + item.name)
click.echo('\n')
id = click.prompt('ID of item to edit: ', type=str)
item = Item.query.get(itemserializer.loads(id))
click.echo("Item name: " + item.name)
click.echo("Item description: " + item.description)
if click.confirm('Change name?', default=True):
name = click.prompt('New name: ', type=str)
item.name = name
if click.confirm('Change description?', default=True):
click.echo('New description: (Ctrl-D to finish):')
description = sys.stdin.read()
item.description = description
db.session.add(item)
db.session.commit()
def add_item_to_homework(hw):
name = click.prompt('Name of the homework item', type=str)
click.echo('Description: (Ctrl-D to finish):')
description = sys.stdin.read()
item = hw.add_item(name, description)
click.echo('Created item with id: ' + itemserializer.dumps(item.id))
addtest = click.confirm('Do you wish to add a test to this item?')
while addtest:
add_test_to_item(item)
addtest = click.confirm('Do you wish to add another test?')
def getTestInOut():
click.echo('Enter test input (Ctrl-D to finish):')
stdin = sys.stdin.read()
click.echo('Enter test output (Ctrl-D to finish):')
stdout = sys.stdin.read()
return stdin, stdout
def add_test_to_item(item):
stdin, stdout = '', ''
description = click.prompt('Description')
timeout = click.prompt('Timeout (in seconds)', type=int, default=10)
if click.confirm("Get input and output from files?", default=False):
while True:
infname = click.prompt('Path to input file')
outfname = click.prompt('Path to output file')
with open(infname, 'r') as infile, open(outfname, 'r') as outfile:
stdin = infile.read()
stdout = outfile.read()
click.echo('\nTest input:\n')
click.echo(stdin)
click.echo('\nTest output:\n')
click.echo(stdout)
if click.confirm('\nIs this correct?', default=True):
break
else:
while True:
stdin, stdout = getTestInOut()
click.echo('\nTest input:\n')
click.echo(stdin)
click.echo('\nTest output:\n')
click.echo(stdout)
if click.confirm('\nIs this correct?', default=True):
break
t = item.add_test(description, stdin, stdout, timeout)
click.echo('Created test with id: ' + testserializer.dumps(t.id))
def deactivate_hw():
id = click.prompt('ID of the homework to deactivate', type=str)
realid = hwserializer.loads(id)
hw = Homework.query.get(realid)
if hw:
hw.deactivate()
db.session.commit()
click.echo('Deactivated homework: ' + hwserializer.dumps(hw.id))
else:
click.echo('No such homework: ' + id)
def delete_hw():
id = click.prompt('ID of the homework to delete', type=str)
realid = hwserializer.loads(id)
hw = Homework.query.get(realid)
if hw:
if not click.confirm('Please confirm!', default=False):
return
for item in hw.items:
for test in item.tests:
subs = RequestLog.query.filter(RequestLog.test_id ==
test.id).all()
for sub in subs:
db.session.delete(sub)
db.session.delete(test)
db.session.delete(item)
db.session.delete(hw)
db.session.commit()
click.echo('Deleted homework: ' + hwserializer.dumps(hw.id))
else:
click.echo('No such homework: ' + id)
def list_active():
active = Homework.query.filter(Homework.active).all()
click.echo('Active assigments: (id - name)')
for hw in active:
click.echo(hwserializer.dumps(hw.id) + ' - ' + hw.name)
click.echo('\n')
def list_all():
active = Homework.query.all()
click.echo('Assigments: (id - name)')
for hw in active:
click.echo(hwserializer.dumps(hw.id) + ' - ' + hw.name)
click.echo('\n')
if __name__ == '__main__':
while True:
startup()
|
nilq/baby-python
|
python
|
# Higher order functions are functions that take other functions as parameter
# This function prints its parameter two times
def print2times(x):
print(x)
print(x)
def print3times(x):
print(x)
print(x)
print(x)
# This function calls the function it takes as parameter on each digit
def for_digits(f):
for i in range(0, 10):
f(i)
# The function can be passed as parameter like other variables
for_digits(print3times)
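# A lambda works the same way as a named function (added illustration):
for_digits(lambda x: print(x * x))  # prints the squares 0, 1, 4, ..., 81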
|
nilq/baby-python
|
python
|
import codecs
import jaconv
import etldr.jis0201
from etldr.etl_data_names import ETLDataNames
from etldr.etl_data_set_info import ETLDataSetInfo
class ETLCodes():
"""
A convenience class for using all codecs which are used in the ETL data set.
Warning:
The 'euc_co59.dat'-file from the ETL data set is required in the data set directory.
"""
def __init__(self, euc_co59_file_path : str) -> None:
super().__init__()
self.init_co59(euc_co59_file_path)
self.init_codes()
def init_co59(self, euc_co59_file_path : str):
"""
Initialize reading of "co59"-codes
"""
with codecs.open(euc_co59_file_path, 'r', 'euc-jp') as f:
co59t = f.read()
co59l = co59t.split()
self.conv = {}
for c in co59l:
ch = c.split(':')
co = ch[1].split(',')
co59c = (int(co[0]), int(co[1]))
self.conv[co59c] = ch[0]
def init_codes(self):
"""
Setup a dict which contains ETLDataSetInfo-instances with the necessary info about the data set types.
"""
# TYPE_M -> ETL 1, 6, 7 - works
self.code_M = ETLDataSetInfo("uint:16,bytes:2,uint:16,hex:8,hex:8,4*uint:8,uint:32,4*uint:16,4*uint:8,pad:32,bytes:2016,pad:32",
2052, (64, 63), 4, [3], self.decode_M_type_character)
# TYPE_K -> ETL 2
self.code_K = ETLDataSetInfo("uint:36, uint:6, pad:30, bits:36, bits:36, pad:24, bits:12, pad:180, bytes:2700",
2745, (60, 60), 6, [-2], self.decode_K_type_character)
# TYPE_C -> ETL 3, 4, 5
self.code_C = ETLDataSetInfo("uint:36,uint:36,hex:8,pad:28,hex:8,pad:28,bits:24,pad:12,15*uint:36,pad:1008,bytes:2736",
2952, (72, 76), 4, [2, 4], self.decode_C_type_character)
# TYPE_8B -> ETL 8B
self.code_8B = ETLDataSetInfo(">H 2s 4s 504s".replace(" ", ""),
512, (64, 63), 1, [1], self.decode_8B_type_character)
# TYPE_8G -> ETL 8G
self.code_8G = ETLDataSetInfo(">H 2s 8s I 4B 4H 2B 30x 8128s 11x".replace(" ", ""),
8199, (128, 127), 4, [1], self.decode_8G_type_character)
# TYPE_9B -> ETL 9B
self.code_9B = ETLDataSetInfo(">H 2s 4s 504s 64x".replace(" ", ""),
576, (64, 63), 1, [1], self.decode_9B_type_character)
# TYPE_9G -> ETL 9G
self.code_9G = ETLDataSetInfo(">H 2s 8s I 4B 4H 2B 34x 8128s 7x".replace(" ", ""),
8199, (128, 127), 4, [1], self.decode_9G_type_character)
def T56(self, c : int) -> str:
"""Decodes c into a string using the T56-code.
Args:
c : An integer which should be decoded using the T56-code.
Returns:
The decoded str.
"""
t56s = '0123456789[#@:>? ABCDEFGHI&.](< JKLMNOPQR-$*);\'|/STUVWXYZ ,%="!'
return t56s[c]
def co59_to_utf8(self, co59) -> str:
"""Decodes co59 to utf-8.
Args:
co59 : The string which should be decoded from co59 to utf-8.
Returns:
The decoded utf-8 string
"""
return self.conv[co59]
def decode_M_type_character(self, _bytes : bytes) -> str:
"""Decodes _bytes which encode the label from an entry which has the ETL-M type.
Args:
_bytes : The bytes object which should be decoded.
Returns:
The decoded label.
"""
jis = _bytes
# try to convert the bytes with jis 0201 encoding
try:
t = etldr.jis0201.JIS0201_map[jis.upper()]
t = chr(t)
# fallback to iso2022
except Exception:
return None
return t
def decode_K_type_character(self, _bytes : bytes) -> str:
"""Decodes _bytes which encode the label from an entry which has the ETL-K type.
Args:
_bytes : The bytes object which should be decoded.
Returns:
The decoded label.
"""
tup = tuple([b.uint for b in _bytes.cut(6)])
return self.co59_to_utf8(tup)
def decode_C_type_character(self, _bytes : bytes, char_code) -> str:
"""Decodes _bytes which encode the label from an entry which has the ETL-C type.
Args:
_bytes : The bytes object which should be decoded.
char_code : The T56 code of the entry.
Returns:
The decoded label.
"""
char_code = ''.join([ self.T56(b.uint) for b in char_code.cut(6) ])
char = bytes.fromhex(_bytes).decode('shift_jis')
if char_code[0] == 'H':
char = jaconv.kata2hira(jaconv.han2zen(char)).replace('ぃ', 'ゐ').replace('ぇ', 'ゑ')
elif char_code[0] == 'K':
char = jaconv.han2zen(char).replace('ィ', 'ヰ').replace('ェ', 'ヱ')
return char
def decode_8B_type_character(self, _bytes : bytes) -> str:
"""Decodes _bytes which encode the label from an entry which has the ETL-8B type.
Args:
_bytes : The bytes object which should be decoded.
Returns:
The decoded label.
"""
#print(_bytes, bytes.fromhex(_bytes), bytes.fromhex('1b2442' + _bytes + '1b2842'))
return bytes.fromhex('1b2442' + _bytes.hex() + '1b2842').decode('iso2022_jp')
def decode_8G_type_character(self, _bytes : bytes) -> str:
"""Decodes _bytes which encode the label from an entry which has the ETL-8G type.
Args:
_bytes : The bytes object which should be decoded.
Returns:
The decoded label.
"""
return bytes.fromhex('1b2442' + _bytes.hex() + '1b2842').decode('iso2022_jp')
def decode_9B_type_character(self, _bytes : bytes) -> str:
"""Decodes _bytes which encode the label from an entry which has the ETL-9B type.
Args:
_bytes : The bytes object which should be decoded.
Returns:
The decoded label.
"""
return bytes.fromhex('1b2442' + _bytes.hex() + '1b2842').decode('iso2022_jp')
def decode_9G_type_character(self, _bytes : bytes) -> str:
"""Decodes _bytes which encode the label from an entry which has the ETL-9G type.
Args:
_bytes : The bytes object which should be decoded.
Returns:
The decoded label.
"""
return bytes.fromhex('1b2442' + _bytes.hex() + '1b2842').decode('iso2022_jp')
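# Background note (illustration, not part of the class): the '1b2442' and
# '1b2842' hex prefixes above are the ISO-2022-JP escape sequences ESC $ B and
# ESC ( B, which switch into and out of the JIS X 0208 character set. For
# example, JIS code point 0x2422 decodes to the hiragana 'あ':
#
#   bytes.fromhex('1b2442' + '2422' + '1b2842').decode('iso2022_jp')  # 'あ'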
|
nilq/baby-python
|
python
|
"""make_one_annotation.py
Usage:
make_one_annotation.py <game_id> <anno_id> <dir-prefix> <pnr-prefix> <time-frame-radius> <raw_file>
Arguments:
<dir-prefix> the prefix prepended the directory that will be created to hold the videos
<pnr-prefix> the prefix for annotation filenames (e.g. 'raw')
<time-frame-radius> tfr, let annotated event be T_a, we extract frames [T_a-tfr, T_a+tfr]
<game_id> game file
<anno_id> annotation
<raw_file> location of annotation file
Example:
python make_one_annotation.py 0021500383 3 viz raw 50 rev0.pkl
"""
from pnr.annotation import annotation
from pnr import data
from pnr.vis.Event import Event, EventException
from copy import copy
import os
from docopt import docopt
import pandas as pd
def wrapper_render_one_anno(dir_prefix, gameid, anno_id):
print('Running Scripts::Make_One_Annotation:wrapper_render_one_anno')
### Load game
print ('Loading')
game_basename = gameid+'.pkl'
game_pkl = os.path.join(game_dir, game_basename)
with open(game_pkl,'rb') as f:
raw_data = pd.read_pickle(f)
game_str = "{visitor}@{home}, on {date}".format(
visitor=raw_data['events'][0]['visitor']['abbreviation'],
home=raw_data['events'][0]['home']['abbreviation'],
date=raw_data['gamedate']
)
print (game_str)
### Create a new directory for videos
vid_dir =os.path.join(game_dir, 'video') # base dir that holds all the videos
if not os.path.exists(vid_dir):
os.makedirs(vid_dir)
new_dir = os.path.join(vid_dir, '{prefix}-{game_id}'.format(
prefix=dir_prefix,
game_id=game_basename.split('.')[0]
))
previous_rendered_events = []
if not os.path.exists(new_dir):
os.makedirs(new_dir)
else: # already a directory exists, likely we've tried to do the same thing
print(new_dir)
print('Already exists, not rerunning events rendered and saved previously')
render_one_anno(
raw_data,
new_dir,
anno_id
)
def render_one_anno(raw_data, directory, anno_id):
"""
Input:
raw_data: the huge dictionary of a single game
"""
print('Running Scripts::Make_One_Annotation:render_one_anno')
N = len(raw_data['events'])
anno_id = int(anno_id)
pnr_annotations = annotation.read_annotation_from_raw(os.path.join(pnr_dir, 'roles/%s' % (arguments['<raw_file>'])), raw_data['gameid'])
annos = pnr_annotations[anno_id]
for ind, anno in enumerate(annos):
e = Event(raw_data['events'][anno_id], anno=anno)
## render
try:
e.sequence_around_t(anno, int(arguments['<time-frame-radius>']), pnr=True)
before = copy(e)
after = copy(e)
before.moments = before.moments[:int(arguments['<time-frame-radius>'])]
after.moments = after.moments[int(arguments['<time-frame-radius>']):]
before.show_static(os.path.join(directory, '%i-pnr-%i-before.pdf' %(anno_id, ind)), anno=anno)
after.show_static(os.path.join(directory, '%i-pnr-%i-after.pdf' % (anno_id, ind)), anno=anno)
except EventException as e:
print ('malformed sequence, skipping')
continue
if __name__ == '__main__':
print('Running Scripts::Make_One_Annotation:main')
game_dir = data.constant.game_dir
pnr_dir = os.path.join(game_dir, 'pnr-annotations')
arguments = docopt(__doc__, version='something 1.1.1')
print ("...Docopt... ")
print(arguments)
print ("............\n")
game_id = arguments['<game_id>']
anno_id = arguments['<anno_id>']
dir_prefix = arguments['<dir-prefix>']
wrapper_render_one_anno(dir_prefix, game_id, anno_id)
|
nilq/baby-python
|
python
|
"""
Copyright 2021 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing
import kasa
import rfc3339
from util import KasaDevice
from util.timezones import localize_time
async def handle_status(device: KasaDevice, *args, **kwargs) -> typing.Dict:
k = device.get_kasa()
if not isinstance(k, kasa.SmartPlug):
raise RuntimeError("Device is not a switch")
await device.get_kasa().update()
time = await k.get_time()
if time is not None:
tz = await k.get_timezone()
time = localize_time(time, tz['index'])
resp = {
"mac": k.mac,
"ip": k.host,
"sw_ver": k.hw_info["sw_ver"],
"hw_ver": k.hw_info["hw_ver"],
"hw_id": k.hw_info["hwId"],
"oem_id": k.hw_info["oemId"],
"model": k.model,
"rssi": k.rssi,
"location": k.location,
"led_enabled": k.led,
"time": rfc3339.format(time, utc=True)
}
if "fwId" in k.hw_info:
resp["fw_id"] = k.hw_info["fwId"]
return resp
|
nilq/baby-python
|
python
|
import Layers
import Wavelets
|
nilq/baby-python
|
python
|
from django.contrib import admin
from comments.models import Comment
class CommentAdmin(admin.ModelAdmin):
list_display = ('author', 'text', 'private', 'created_on', 'modified_on',)
search_fields = ('author', 'text',)
# class ToDoAdmin(admin.ModelAdmin):
# list_display = ('author', 'text', 'private', 'done', 'done_by', 'done_on', 'created_on', 'modified_on',)
# search_fields = ('author', 'text',)
admin.site.register(Comment, CommentAdmin)
# admin.site.register(ToDo, ToDoAdmin)
|
nilq/baby-python
|
python
|
import os
import pytest
import merlin.io
from merlin.datasets.advertising import get_criteo
from merlin.datasets.synthetic import generate_data
MAYBE_DATA_DIR = os.environ.get("INPUT_DATA_DIR", None)
def test_synthetic_criteo_data():
dataset = generate_data("criteo", 100)
assert isinstance(dataset, merlin.io.Dataset)
assert dataset.num_rows == 100
assert len(dataset.schema) == 40
@pytest.mark.skipif(
MAYBE_DATA_DIR is None,
reason="No data-dir available, pass it through env variable $INPUT_DATA_DIR",
)
def test_get_criteo(tmp_path):
data_path = os.path.join(MAYBE_DATA_DIR, "criteo")
train, valid = get_criteo(data_path, num_days=2)
assert isinstance(train, merlin.io.Dataset)
assert isinstance(valid, merlin.io.Dataset)
|
nilq/baby-python
|
python
|
__author__ = 'Sergei'
from model.contact import Contact
class ContactHelper:
def __init__(self, app):
self.app = app
def fill_contact_first_last(self, Contact):
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(Contact.first_n)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(Contact.last_n)
def create_first_last(self, Contact):
wd = self.app.wd
self.open_contact_page()
wd.find_element_by_link_text("add new").click()
self.fill_contact_first_last(Contact)
wd.find_element_by_name("submit").click()
self.contact_cache = None
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_contact_full(self, Contact):
wd = self.app.wd
self.change_field_value("firstname",Contact.first_n)
self.change_field_value("middlename",Contact.mid_n)
self.change_field_value("lastname",Contact.last_n)
self.change_field_value("nickname",Contact.nick_n)
self.change_field_value("company",Contact.company)
self.change_field_value("address",Contact.address)
self.change_field_value("home",Contact.home_ph)
self.change_field_value("mobile",Contact.cell_ph)
self.change_field_value("email",Contact.email)
def create_c(self,contacts):
wd = self.app.wd
self.open_contact_page()
wd.find_element_by_link_text("add new").click()
self.fill_contact_full(contacts)
wd.find_element_by_name("submit").click()
self.contact_cache = None
def select_contact_by_index(self,index):
wd = self.app.wd
# self.open_contact_page()
wd.find_elements_by_name("selected[]")[index].click()
wd.find_element_by_css_selector("img[alt=\"Edit\"]").click()
def open_contact_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("http://localhost/addressbook/")):
wd.get("http://localhost/addressbook/")
def contact_delete_by_index(self,index):
wd = self.app.wd
self.open_contact_page()
self.select_contact_by_index(index)
wd.find_element_by_name("update[value=\"Delete\"]").click()
# wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
# wd.switch_to_alert().accept()
self.contact_cache = None
def contact_modify_by_index(self,index,cont):
wd = self.app.wd
# self.open_contact_page()
self.select_contact_by_index(index)
# wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.fill_first_last_name(cont)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def fill_first_last_name(self, Contact):
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(Contact.first_n)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(Contact.last_n)
def modify_first_contact(self, cont, index):
wd = self.app.wd
self.open_contact_page()
self.select_contact_by_index(index)
# wd.find_element_by_css_selector("img[alt=\"Edit\"]")[index].click()
self.fill_first_last_name(cont)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def contact_delete(self):
self.contact_delete_by_index(0)
self.contact_cache = None
def contact_first_modify(self):
self.contact_modify_by_index(0)
self.contact_cache = None
def test_edit_contact(self, Contact):
wd = self.app.wd
self.open_contact_page()
wd.find_element_by_name("selected[]").click()
wd.find_element_by_css_selector("img[alt=\"Edit\"]").click()
self.fill_contact_full(Contact)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
self.contact_cache = None
def count(self):
wd = self.app.wd
return len(wd.find_elements_by_name("selected[]"))
def count_first(self):
wd = self.app.wd
self.open_contact_page()
# wd.find_elements_by_name('entry')
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_contact_page()
self.contact_cache = []
for element in wd.find_elements_by_name('entry'):
id = element.find_element_by_name("selected[]").get_attribute("value")
text = element.text
self.contact_cache.append(Contact( id=id, first_n=text))
return list(self.contact_cache)
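# Portability note (illustration, not part of the original helper): the
# find_element_by_* methods used above were removed in Selenium 4; the
# equivalent calls use By locators, e.g.:
#
#   from selenium.webdriver.common.by import By
#   wd.find_element(By.NAME, "firstname").send_keys(contact.first_n)
#   wd.find_element(By.XPATH, "//div[@id='content']/form[1]/input[22]").click()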
|
nilq/baby-python
|
python
|
"""
twtxt.models
~~~~~~~~~~~~
This module implements the main models used in twtxt.
:copyright: (c) 2016 by buckket.
:license: MIT, see LICENSE for more details.
"""
from datetime import datetime, timezone
import humanize
from dateutil.tz import tzlocal
class Tweet:
"""A :class:`Tweet` represents a single tweet.
:param str text: text of the tweet in raw format
:param ~datetime.datetime created_at: (optional) when the tweet was created, defaults to :meth:`~datetime.datetime.now` when no value is given
:param Source source: (optional) the :class:`Source` the tweet is from
"""
def __init__(self, text, created_at=None, source=None):
if text:
self.text = text
else:
raise ValueError("empty text")
if created_at is None:
created_at = datetime.now(tzlocal())
try:
self.created_at = created_at.replace(microsecond=0)
except AttributeError:
raise TypeError("created_at is of invalid type")
self.source = source
@staticmethod
def _is_valid_operand(other):
return (hasattr(other, "text") and
hasattr(other, "created_at"))
def __lt__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at < other.created_at
def __le__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at < other.created_at or (self.created_at == other.created_at and self.text == other.text)
def __gt__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at > other.created_at
def __ge__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at > other.created_at or (self.created_at == other.created_at and self.text == other.text)
def __eq__(self, other):
if not self._is_valid_operand(other):
return NotImplemented
return self.created_at == other.created_at and self.text == other.text
def __str__(self):
return "{created_at}\t{text}".format(created_at=self.created_at.isoformat(), text=self.text)
@property
def relative_datetime(self):
"""Return human-readable relative time string."""
now = datetime.now(timezone.utc)
tense = "from now" if self.created_at > now else "ago"
return "{0} {1}".format(humanize.naturaldelta(now - self.created_at), tense)
@property
def absolute_datetime(self):
"""Return human-readable absolute time string."""
return self.created_at.strftime("%a, %d %b %Y %H:%M:%S")
class Source:
"""A :class:`Source` represents a twtxt feed, remote as well as local.
:param str nick: nickname of twtxt user
:param str url: URL to remote twtxt file
:param str file: path to local twtxt file
"""
def __init__(self, nick, url=None, file=None):
self.nick = nick.lower()
self.url = url
self.file = file
|
nilq/baby-python
|
python
|
# %% [markdown]
## Accessing all parameters (generic)
# %%
def todos_params(*posicionais, **nomeados):
print(f'Positional: {posicionais}')
print(f'Named: {nomeados}\n')
todos_params(1,2,3) # 3 positional parameters and no named parameters
todos_params(1,2,3, nome='Victor', solteiro=True) # 3 positional parameters and 2 named parameters
todos_params(nome='Victor', idade=26, solteiro=True) # 3 named parameters and no positional parameters
todos_params([1,2,3], 'a', 'b', 'c', nome='Victor', solteiro=True) # 4 positional parameters and 2 named parameters
# todos_params(nome='Victor', solteiro=True, 1, 2, 3) # Error: the function expects the positional parameters first, followed by the named ones
|
nilq/baby-python
|
python
|
import numpy as np
from ivory.callbacks.results import concatenate
def test_libraries(runs):
for run in runs.values():
run.start("both")
for mode in ["val", "test"]:
outputs = []
for run in runs.values():
outputs.append(run.results[mode].output)
for output in outputs[1:]:
assert np.allclose(outputs[0], output)
def callback(index, output, target):
return index, 2 * output, target
gen = (run.results for run in runs.values())
results = concatenate(gen, reduction="mean", callback=callback)
assert np.allclose(2 * outputs[0], results.test.output)
|
nilq/baby-python
|
python
|
from sqlalchemy.orm import Session
from apps.crud.pusher import get_pushers_by_token, get_pushers_by_token_and_type
from apps.serializer.record import RecordSerializer
from apps.pusher import test_wechat, official_wechat, e_mail, android, wechat, qq
type_func_dict = {
1: test_wechat.send_msg,
2: official_wechat.send_msg,
3: e_mail.send_msg,
4: android.send_msg,
5: wechat.send_msg,
6: qq.send_msg,
}
def send_msg(session: Session, record: RecordSerializer):
if record.push_type is not None:
pusher = get_pushers_by_token_and_type(session=session, token=record.token, push_type=record.push_type)
type_func_dict[pusher.push_type](title=record.title, content=record.content, to_user=pusher.params1)
else:
pushers = get_pushers_by_token(session=session, token=record.token)
for p in pushers:
type_func_dict[p.push_type](title=record.title, content=record.content, to_user=p.params1)
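# Adding a new channel (illustrative sketch, not in the original): any callable
# with the send_msg(title, content, to_user) signature can be registered under
# an unused push_type id. 'telegram' here is a hypothetical module.
#
#   from apps.pusher import telegram
#   type_func_dict[7] = telegram.send_msg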
|
nilq/baby-python
|
python
|
from __future__ import print_function
import logging
import pandas as pd
import numpy as np
import scipy.stats as stats
from matplotlib.backends.backend_pdf import PdfPages
import os.path
from .storemanager import StoreManager
from .condition import Condition
from .constants import WILD_TYPE_VARIANT
from .sfmap import sfmap_plot
from .dataframe import singleton_dataframe
from .random_effects import rml_estimator
class Experiment(StoreManager):
"""
Class for coordinating multiple :py:class:`~.selection.Selection`
objects. Creating an
:py:class:`~experiment.Experiment` requires a valid *config* object,
usually from a ``.json`` configuration file.
"""
store_suffix = "exp"
treeview_class_name = "Experiment"
def __init__(self):
StoreManager.__init__(self)
self.conditions = list()
self._wt = None
self.logger = logging.getLogger("{}.{}".format(__name__, self.__class__))
@property
def wt(self):
if self.has_wt_sequence():
if self._wt is None:
self._wt = self.selection_list()[0].wt.duplicate(self.name)
return self._wt
else:
if self._wt is not None:
raise ValueError(
"Experiment should not contain wild type "
"sequence [{}]".format(self.name)
)
else:
return None
def configure(self, cfg, configure_children=True):
"""
Set up the :py:class:`~experiment.Experiment` using the *cfg* object,
usually from a ``.json`` configuration file.
"""
StoreManager.configure(self, cfg)
self.logger = logging.getLogger(
"{}.{} - {}".format(__name__, self.__class__.__name__, self.name)
)
if configure_children:
if "conditions" not in cfg:
raise KeyError(
"Missing required config value {} [{}]"
"".format("conditions", self.name)
)
for cnd_cfg in cfg["conditions"]:
cnd = Condition()
cnd.configure(cnd_cfg)
self.add_child(cnd)
selection_names = [x.name for x in self.selection_list()]
if len(set(selection_names)) != len(selection_names):
raise ValueError("Non-unique selection names [{}]" "".format(self.name))
def serialize(self):
"""
Format this object (and its children) as a config object suitable for
dumping to a config file.
"""
cfg = StoreManager.serialize(self)
cfg["conditions"] = [child.serialize() for child in self.children]
return cfg
def _children(self):
"""
Method bound to the ``children`` property. Returns a list of all
:py:class:`~condition.Condition` objects belonging to this object,
sorted by name.
"""
return sorted(self.conditions, key=lambda x: x.name)
def add_child(self, child):
"""
Add a selection.
"""
if child.name in self.child_names():
raise ValueError(
"Non-unique condition name '{}' [{}]" "".format(child.name, self.name)
)
child.parent = self
self.conditions.append(child)
def remove_child_id(self, tree_id):
"""
Remove the reference to a :py:class:`~condition.Condition` with
Treeview id *tree_id*.
"""
self.conditions = [x for x in self.conditions if x.treeview_id != tree_id]
def selection_list(self):
"""
Return the :py:class:`~selection.Selection` objects as a list.
"""
selections = list()
for cnd in self.children:
selections.extend(cnd.children)
return selections
def validate(self):
"""
Calls validate on all child Conditions. Also checks the wild type
sequence status.
"""
# check the wild type sequences
if self.has_wt_sequence():
for child in self.selection_list()[1:]:
if self.selection_list()[0].wt != child.wt:
self.logger.warning("Inconsistent wild type sequences")
break
for child in self.children:
child.validate()
def is_coding(self):
"""
        Return ``True`` if all :py:class:`~selection.Selection` objects in the
:py:class:`~experiment.Experiment` count protein-coding variants, else
``False``.
"""
return all(x.is_coding() for x in self.selection_list())
def has_wt_sequence(self):
"""
        Return ``True`` if all :py:class:`~selection.Selection` objects in the
:py:class:`~experiment.Experiment` have a wild type sequence, else
``False``.
"""
return all(x.has_wt_sequence() for x in self.selection_list())
def calculate(self):
"""
Calculate scores for all :py:class:`~selection.Selection` objects.
"""
if len(self.labels) == 0:
raise ValueError(
"No data present across all conditions [{}]" "".format(self.name)
)
for s in self.selection_list():
s.calculate()
self.combine_barcode_maps()
for label in self.labels:
self.calc_counts(label)
if self.scoring_method != "counts":
self.calc_shared_full(label)
self.calc_shared(label)
self.calc_scores(label)
if label != "barcodes":
self.calc_pvalues_wt(label)
def combine_barcode_maps(self):
"""
Combine all barcode maps for :py:class:`~selection.Selection` objects
into a single data frame and store it in ``'/main/barcodemap'``.
If multiple variants or IDs map to the same barcode, only the first one
will be present in the barcode map table.
The ``'/main/barcodemap'`` table is not created if no
:py:class:`~selection.Selection` has barcode map information.
"""
if self.check_store("/main/barcodemap"):
return
bcm = None
for sel in self.selection_list():
if "/main/barcodemap" in sel.store.keys():
if bcm is None:
bcm = sel.store["/main/barcodemap"]
else:
bcm = bcm.join(
sel.store["/main/barcodemap"], rsuffix=".drop", how="outer"
)
new = bcm.loc[pd.isnull(bcm)["value"]]
bcm.loc[new.index, "value"] = new["value.drop"]
bcm.drop("value.drop", axis="columns", inplace=True)
if bcm is not None:
bcm.sort_values("value", inplace=True)
self.store.put(
"/main/barcodemap", bcm, format="table", data_columns=bcm.columns
)
def calc_counts(self, label):
"""
Create a data frame of all counts in this Experiment. This data frame
is not used for any calculations, but is provided to facilitate
exploration of the data set.
"""
if self.check_store("/main/{}/counts".format(label)):
return
# create columns multi-index
# has to be lex-sorted for multi-slicing to work
self.logger.info("Creating column multi-index for counts ({})".format(label))
conditions_index = list()
selections_index = list()
values_index = list()
for cnd in self.children:
for sel in cnd.children:
conditions_index.extend([cnd.name] * len(sel.timepoints))
selections_index.extend([sel.name] * len(sel.timepoints))
values_index.extend(["c_{}".format(x) for x in sorted(sel.timepoints)])
columns = pd.MultiIndex.from_tuples(
zip(conditions_index, selections_index, values_index),
names=["condition", "selection", "timepoint"],
)
# create union index
self.logger.info("Creating row index for counts ({})".format(label))
combined = None
first = True
for s in self.selection_list():
if first:
combined = s.store.select(
"/main/{}/counts_unfiltered" "".format(label), "columns='index'"
).index
first = False
else:
combined = combined.join(
s.store.select(
"/main/{}/counts_unfiltered".format(label), "columns='index'"
).index,
how="outer",
)
# create and fill the data frames
self.logger.info(
"Populating Experiment data frame with counts ({})".format(label)
)
data = pd.DataFrame(index=combined, columns=columns)
for cnd in self.children:
for sel in cnd.children:
sel_data = sel.store.select(
"/main/{}/counts_unfiltered" "".format(label)
)
for tp in sel.timepoints:
                    # assign through a single .loc call; chained indexing via
                    # data.loc[:][...] writes to a temporary copy and is lost
                    data.loc[:, (cnd.name, sel.name, "c_{}".format(tp))] = sel_data[
                        "c_{}".format(tp)
                    ]
self.store.put("/main/{}/counts".format(label), data, format="table")
def calc_shared_full(self, label):
"""
Use joins to create a data frame containing all scores across all
Selections in the Experiment.
"""
if self.check_store("/main/{}/scores_shared_full".format(label)):
return
# create columns multi-index
# has to be lex-sorted for multi-slicing to work
self.logger.info("Creating column multi-index for scores ({})")
conditions_index = list()
selections_index = list()
values_index = list()
if self.scoring_method == "simple":
values_list = ["score"]
else:
values_list = ["score", "SE"]
for cnd in self.children:
for sel in cnd.children:
conditions_index.extend([cnd.name] * len(values_list))
selections_index.extend([sel.name] * len(values_list))
values_index.extend(sorted(values_list))
columns = pd.MultiIndex.from_tuples(
zip(conditions_index, selections_index, values_index),
names=["condition", "selection", "value"],
)
# create union index
self.logger.info("Creating row index for scores ({})".format(label))
combined = None
first = True
for s in self.selection_list():
if first:
combined = s.store.select(
"/main/{}/scores".format(label), "columns='index'"
).index
first = False
else:
combined = combined.join(
s.store.select(
"/main/{}/scores".format(label), "columns='index'"
).index,
how="outer",
)
# create and fill the data frames
self.logger.info(
"Populating Experiment data frame with scores ({})".format(label)
)
data = pd.DataFrame(index=combined, columns=columns)
for cnd in self.children:
for sel in cnd.children:
sel_data = sel.store.select("/main/{}/scores".format(label))
for v in values_list:
data.loc[:, (cnd.name, sel.name, v)] = sel_data[v]
self.store.put(
"/main/{}/scores_shared_full".format(label), data, format="table"
)
def calc_shared(self, label):
"""
Get the subset of scores that are shared across all Selections in each
Condition.
"""
if self.check_store("/main/{}/scores_shared".format(label)):
return
idx = pd.IndexSlice
self.logger.info(
"Identifying subset shared across all Selections ({})".format(label)
)
data = self.store.select("/main/{}/scores_shared_full".format(label))
# identify variants found in all selections in at least one condition
complete = np.full(len(data.index), False, dtype=bool)
for cnd in data.columns.levels[0]:
complete = np.logical_or(
complete, data.loc[:, idx[cnd, :, :]].notnull().all(axis="columns")
)
data = data.loc[complete]
self.store.put("/main/{}/scores_shared".format(label), data, format="table")
def calc_scores(self, label):
"""
Combine the scores and standard errors within each condition.
"""
if self.check_store("/main/{}/scores".format(label)):
return
self.logger.info("Calculating per-condition scores ({})".format(label))
# set up new data frame
shared_index = self.store.select(
"/main/{}/scores_shared" "".format(label), columns="index"
).index
columns = pd.MultiIndex.from_product(
[sorted(self.child_names()), sorted(["score", "SE", "epsilon"])],
names=["condition", "value"],
)
data = pd.DataFrame(np.nan, index=shared_index, columns=columns)
del shared_index
del columns
# set up local variables
idx = pd.IndexSlice
score_df = self.store.select("/main/{}/scores_shared".format(label))
if self.scoring_method == "simple":
# special case for simple ratios that have no SE
# calculates the average score
for cnd in score_df.columns.levels[0]:
data.loc[:, idx[cnd, "score"]] = score_df.loc[
:, idx[cnd, :, "score"]
].mean(axis=1)
else:
for cnd in score_df.columns.levels[0]:
y = np.array(score_df.loc[:, idx[cnd, :, "score"]].values).T
sigma2i = np.array(score_df.loc[:, idx[cnd, :, "SE"]].values ** 2).T
# single replicate of the condition
if y.shape[0] == 1:
data.loc[:, idx[cnd, "score"]] = y.ravel()
data.loc[:, idx[cnd, "SE"]] = np.sqrt(sigma2i).ravel()
data.loc[:, idx[cnd, "epsilon"]] = 0.0
# multiple replicates
else:
betaML, var_betaML, eps = rml_estimator(y, sigma2i)
data.loc[:, idx[cnd, "score"]] = betaML
data.loc[:, idx[cnd, "SE"]] = np.sqrt(var_betaML)
data.loc[:, idx[cnd, "epsilon"]] = eps
# special case for normalized wild type variant
if self.logr_method == "wt" and WILD_TYPE_VARIANT in data.index:
data.loc[WILD_TYPE_VARIANT, idx[:, "SE"]] = 0.0
data.loc[WILD_TYPE_VARIANT, idx[:, "score"]] = 0.0
data.loc[WILD_TYPE_VARIANT, idx[:, "epsilon"]] = 0.0
# store the data
self.store.put("/main/{}/scores".format(label), data, format="table")
def calc_pvalues_wt(self, label):
"""
Calculate uncorrected pvalue for each variant compared to wild type.
"""
if self.check_store("/main/{}/scores_pvalues_wt".format(label)):
return
idx = pd.IndexSlice
wt = self.store.select(
"/main/{}/scores".format(label), "index=WILD_TYPE_VARIANT"
)
if len(wt) == 0: # no wild type score
self.logger.info(
"Failed to find wild type score, skipping wild type p-value calculations"
)
return
data = self.store.select(
"/main/{}/scores".format(label), "index!=WILD_TYPE_VARIANT"
)
columns = pd.MultiIndex.from_product(
[sorted(self.child_names()), sorted(["z", "pvalue_raw"])],
names=["condition", "value"],
)
result_df = pd.DataFrame(index=data.index, columns=columns)
condition_labels = data.columns.levels[0]
for cnd in condition_labels:
result_df.loc[:, idx[cnd, "z"]] = np.absolute(
wt.loc[WILD_TYPE_VARIANT, idx[cnd, "score"]]
- data.loc[:, idx[cnd, "score"]]
) / np.sqrt(
wt.loc[WILD_TYPE_VARIANT, idx[cnd, "SE"]] ** 2
+ data.loc[:, idx[cnd, "SE"]] ** 2
)
result_df.loc[:, idx[cnd, "pvalue_raw"]] = 2 * stats.norm.sf(
result_df.loc[:, idx[cnd, "z"]]
)
self.store.put(
"/main/{}/scores_pvalues_wt".format(label), result_df, format="table"
)
def calc_pvalues_pairwise(self, label):
"""
Calculate pvalues for each variant in each pair of Conditions.
"""
if self.check_store("/main/{}/scores_pvalues".format(label)):
return
data = self.store["/main/{}/scores".format(label)]
cnd1_index = list()
cnd2_index = list()
values_index = list()
values_list = ["z", "pvalue_raw"]
condition_labels = data.columns.levels[0]
for i, cnd1 in enumerate(condition_labels):
for cnd2 in condition_labels[i + 1 :]:
cnd1_index.extend([cnd1] * len(values_list))
cnd2_index.extend([cnd2] * len(values_list))
values_index.extend(sorted(values_list))
columns = pd.MultiIndex.from_tuples(
zip(cnd1_index, cnd2_index, values_index),
names=["condition1", "condition2", "value"],
)
idx = pd.IndexSlice
result_df = pd.DataFrame(np.nan, index=data.index, columns=columns)
for i, cnd1 in enumerate(condition_labels):
for cnd2 in condition_labels[i + 1 :]:
result_df.loc[:, idx[cnd1, cnd2, "z"]] = np.absolute(
data.loc[:, idx[cnd1, "score"]] - data.loc[:, idx[cnd2, "score"]]
) / np.sqrt(
data.loc[:, idx[cnd1, "SE"]] ** 2
+ data.loc[:, idx[cnd2, "SE"]] ** 2
)
result_df.loc[:, idx[cnd1, cnd2, "pvalue_raw"]] = 2 * stats.norm.sf(
result_df.loc[:, idx[cnd1, cnd2, "z"]]
)
self.store.put(
"/main/{}/scores_pvalues".format(label), result_df, format="table"
)
def make_plots(self):
if self.plots_requested:
self.logger.info("Creating plots")
# sequence-function maps
if self.scoring_method != "counts":
if "synonymous" in self.labels:
pdf = PdfPages(
os.path.join(self.plot_dir, "sequence_function_map_aa.pdf")
)
for condition in self.children:
self.sfmap_wrapper(
condition=condition.name, pdf=pdf, coding=True
)
pdf.close()
if "variants" in self.labels:
pdf = PdfPages(
os.path.join(self.plot_dir, "sequence_function_map_nt.pdf")
)
for condition in self.children:
self.sfmap_wrapper(
condition=condition.name, pdf=pdf, coding=False
)
pdf.close()
for s in self.selection_list():
s.make_plots()
def write_tsv(self):
"""
Write each table from the store to its own tab-separated file.
Files are written to a ``tsv`` directory in the default output
location. File names are the HDF5 key with ``'_'`` substituted for
``'/'``.
"""
if self.tsv_requested:
self.logger.info("Generating tab-separated output files")
for k in self.store.keys():
self.write_table_tsv(k)
for s in self.selection_list():
s.write_tsv()
def sfmap_wrapper(self, condition, pdf, coding):
"""
Create a sequence function map for scores in *condition*.
Uses :py:func:`~sfmap.sfmap_plot` for the plotting.
"""
plot_options = self.get_root().plot_options
if coding:
label = "amino acid"
else:
label = "nucleotide"
self.logger.info(
"Creating sequence-function map ({}, {})".format(condition, label)
)
idx = pd.IndexSlice
if coding:
df_name = "/main/synonymous/scores"
else:
df_name = "/main/variants/scores"
if plot_options is not None:
data, wtseq = singleton_dataframe(
self.store[df_name][idx[condition, "score"]],
self.wt,
coding=coding,
aa_list=plot_options["aa_list"],
)
data_se, _ = singleton_dataframe(
self.store[df_name][idx[condition, "SE"]],
self.wt,
coding=coding,
aa_list=plot_options["aa_list"],
)
else:
data, wtseq = singleton_dataframe(
self.store[df_name][idx[condition, "score"]], self.wt, coding=coding
)
data_se, _ = singleton_dataframe(
self.store[df_name][idx[condition, "SE"]], self.wt, coding=coding
)
# format the title
if coding:
title = "Amino Acid"
else:
title = "Nucleotide"
if self.scoring_method in ("WLS", "OLS"):
title += " Sequence-Function Map\n{} ({} Slope)".format(
condition, self.scoring_method
)
elif self.scoring_method == "ratios":
title += " Sequence-Function Map\n{} ({})".format(
condition, "Enrich2 Ratio"
)
elif self.scoring_method == "simple":
title += " Sequence-Function Map\n{} ({})".format(
condition, "Simplified Ratio"
)
else:
raise ValueError("Invalid scoring method", self.name)
if plot_options is not None:
sfmap_plot(
df=data,
pdf=pdf,
style="scores",
df_se=data_se,
dimensions="tall",
wt=wtseq,
title=title,
aa_list=plot_options["aa_list"],
aa_label_groups=plot_options["aa_label_groups"],
)
else:
sfmap_plot(
df=data,
pdf=pdf,
style="scores",
df_se=data_se,
dimensions="tall",
wt=wtseq,
title=title,
)
def correlation_plot(self, pdf, label):
"""
Create a triangular heatmap showing the Pearson correlation coefficient
for each pairwise comparison of replicate scores.
"""
pass
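# Illustrative sketch of the per-condition combination performed in calc_scores,
# using the rml_estimator signature seen above (the numbers are invented):
#
# y = np.array([[0.9], [1.1], [1.0]])          # replicate scores, shape (replicates, variants)
# sigma2i = np.array([[0.04], [0.09], [0.01]]) # matching squared standard errors
# betaML, var_betaML, eps = rml_estimator(y, sigma2i)
# # betaML -> combined score, np.sqrt(var_betaML) -> combined SE, eps -> epsilon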
|
nilq/baby-python
|
python
|
import sys
import os
import glob
import shutil
import xml.etree.ElementTree as ET
if not os.path.exists("../results/"):
os.makedirs("../results/")
if os.path.exists("../results/detection/"):
shutil.rmtree("../results/detection/")
os.makedirs("../results/detection/")
# create VOC format files
xml_list = [f for f in os.listdir('../predictions') if f.endswith('xml')]
if len(xml_list) == 0:
print("Error: no .xml files found in predictions")
sys.exit()
for tmp_file in xml_list:
print(tmp_file)
with open(os.path.join('../results/detection', tmp_file.replace(".xml", ".txt")), "a") as new_f:
root = ET.parse(os.path.join('../predictions', tmp_file)).getroot()
for obj in root.findall('object'):
obj_name = obj.find('name').text.replace(' ', '_').rstrip().lower()
bndbox = obj.find('bndbox')
left = bndbox.find('xmin').text
top = bndbox.find('ymin').text
right = bndbox.find('xmax').text
bottom = bndbox.find('ymax').text
conf = obj.find('difficult').text
new_f.write("%s %s %s %s %s %s\n" % (obj_name, conf, left, top, right, bottom))
print("Conversion completed!")
|
nilq/baby-python
|
python
|
"""
Calculate the number of proteins per kingdom / phylum / genus / species per genera for the phages
"""
import os
import sys
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Calculate the kingdom / phylum / genus / species per genera for the phages")
parser.add_argument('-d', help='directory with phage flat files, one file per phage', required=True)
parser.add_argument('-i', help='file with id, taxid, taxonomy (just kingdom / phylum / genus / species). Output from blast_tax_to_genera.py', required=True)
parser.add_argument('-l', help='file with location in body (default: phage_host_location.txt)', default='phage_host_location.txt')
parser.add_argument('-b', help='Only print phages for which we have a body site associated with the host', action='store_true')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
bodysite={}
with open(args.l, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
bodysite[p[0]] = p[3]
genome = {} # this is a hash of proteins -> genomes
count = {}
proteins = {} # list of proteins in this genome
for f in os.listdir(args.d):
if args.v:
sys.stderr.write("Reading genome {}\n".format(f))
with open(os.path.join(args.d, f), 'r') as fin:
for l in fin:
p=l.strip().split("\t")
genome[p[5]] = p[0]
if p[0] not in proteins:
proteins[p[0]] = set()
proteins[p[0]].add(p[5])
count[p[5]] = [set(), set(), set(), set()]
seen = set()
with open(args.i, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
if p[2] not in ['Archaea', 'Bacteria']:
continue
seen.add(p[0])
            # hoist the length check out of the rank loop so the warning prints once
            if len(p) < 6:
                sys.stderr.write("Not enough elements in {}\n".format("|".join(p)))
                continue
            for i in range(4):
                count[p[0]][i].add(p[i + 2])
genomeavs = {}
for i in seen:
g = genome[i]
if g not in genomeavs:
genomeavs[g] = [[], [], [], []]
for j in range(4):
genomeavs[g][j].append(len(count[i][j]))
    for g in genomeavs:
        if args.b and g not in bodysite:
            # -b flag: only print phages whose host has a known body site
            continue
        sys.stdout.write(g)
        if g in bodysite:
            sys.stdout.write("\t{}".format(bodysite[g]))
        else:
            sys.stdout.write("\t-")
        sys.stdout.write("\t{}\t".format(len(proteins[g])))
        # average the per-protein counts for each rank; join() needs strings, and
        # genomeavs[g] holds lists of ints, so the original join raised a TypeError
        sys.stdout.write("\t".join("{:.2f}".format(1.0 * sum(c) / len(c)) for c in genomeavs[g]))
        sys.stdout.write("\n")
|
nilq/baby-python
|
python
|
from flask_restful import Resource, reqparse, request
from lib.objects.namespace import Namespace
from lib.objects.lock import Lock
class LockController(Resource):
# TODO Check access as separate method or decorator
# https://flask-restful.readthedocs.io/en/latest/extending.html#resource-method-decorators
parser = reqparse.RequestParser()
parser.add_argument(
"ttl", type=int, default=60, help="Time for lock to live without refreshes"
)
def __init__(self, storage):
self.storage = storage
def put(self, namespace_id: str, lock_id: str):
namespace = Namespace(storage=self.storage, id=namespace_id)
if not namespace.validate_id():
return {"message": "Wrong namespace"}, 400
if not namespace.read():
return {"message": "Namespace not found", "lock": None}, 404
token = request.headers.get("X-Getlock-Auth")
if token != namespace.token:
return {"message": "Provided wrong auth token"}, 403
args = self.parser.parse_args(strict=True)
lock = Lock(storage=self.storage, id=lock_id, namespace=namespace)
if not lock.validate_id():
return {"message": "Wrong lock", "lock": None}, 400
if not lock.read():
message = "Lock created"
lock._load(**args)
lock.create()
else:
message = "Lock updated"
lock._load_self()
lock._load(**args)
lock.update()
return {"message": message, "lock": lock._dump()}, 201
def get(self, namespace_id: str, lock_id: str):
namespace = Namespace(storage=self.storage, id=namespace_id)
if not namespace.validate_id():
return {"message": "Wrong namespace"}, 400
if not namespace.read():
return {"message": "Namespace not found", "lock": None}, 404
lock = Lock(storage=self.storage, id=lock_id, namespace=namespace)
if not lock.validate_id():
return {"message": "Wrong lock", "lock": None}, 400
if not lock.read():
return {"message": "Lock not found", "lock": None}, 404
lock._load_self()
if lock.expired:
return {"message": "Lock has expired", "lock": lock._dump()}, 410
return {"message": "Lock found", "lock": lock._dump()}, 200
def delete(self, namespace_id: str, lock_id: str):
namespace = Namespace(storage=self.storage, id=namespace_id)
if not namespace.validate_id():
return {"message": "Wrong namespace"}, 400
if not namespace.read():
return {"message": "Namespace not found", "lock": None}, 404
token = request.headers.get("X-Getlock-Auth")
if token != namespace.token:
return {"message": "Provided wrong auth token"}, 403
lock = Lock(storage=self.storage, id=lock_id, namespace=namespace)
if not lock.validate_id():
return {"message": "Wrong lock", "lock": None}, 400
if not lock.read():
return {"message": "Lock not found", "lock": None}, 404
lock.delete()
return {"message": "Lock removed", "lock": lock._dump()}, 200
|
nilq/baby-python
|
python
|
__author__ = "Polymathian"
__version__ = "0.3.0"
|
nilq/baby-python
|
python
|
# coding=utf-8
"""
The MIT License
Copyright (c) 2013 Mustafa İlhan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class Globals:
_1_DAY = 86400 # 24 * 60 * 60 seconds
_1_WEEK = 604800 # 7 * 24 * 60 * 60 seconds
_1_MONTH = 2592000 # 30 * 24 * 60 * 60 seconds
_10_MINUTES = 600 # seconds
DEFAULT_LIMIT = 15
MAX_REQUESTS = 15
REGIONS = [
1, 23424969
] # regions = [('tr', '23424969'), ('usa', '23424977'), ('world', '1')]
DUAL_LAYER_MEMCACHE_AND_IN_APP_MEMORY_CACHE = 0 # Cache in both memcache and cachepy by default
SINGLE_LAYER_MEMCACHE_ONLY = 1
SINGLE_LAYER_IN_APP_MEMORY_CACHE_ONLY = 2
|
nilq/baby-python
|
python
|
# This is automatically-generated code.
# Uses the jinja2 library for templating.
import cvxpy as cp
import numpy as np
import scipy as sp
# setup
problemID = "quantile_0"
prob = None
opt_val = None
# Variable declarations
# Generate data
np.random.seed(0)
m = 400
n = 10
k = 100
p = 1
sigma = 0.1
x = np.random.rand(m)*2*np.pi*p
y = np.sin(x) + sigma*np.sin(x)*np.random.randn(m)
alphas = np.linspace(1./(k+1), 1-1./(k+1), k)
# RBF features
mu_rbf = np.array([np.linspace(-1, 2*np.pi*p+1, n)])
mu_sig = (2*np.pi*p+2)/n
X = np.exp(-(mu_rbf.T - x).T**2/(2*mu_sig**2))
# Problem construction
Theta = cp.Variable(n,k)
def quantile_loss(alphas, Theta, X, y):
m, n = X.shape
k = len(alphas)
Y = np.tile(y.flatten(), (k, 1)).T
A = np.tile(alphas, (m, 1))
Z = X*Theta - Y
return cp.sum_entries(
cp.max_elemwise(
cp.mul_elemwise( -A, Z),
cp.mul_elemwise(1-A, Z)))
f = quantile_loss(alphas, Theta, X, y)
C = [X*(Theta[:,1:] - Theta[:,:-1]) >= 0]
prob = cp.Problem(cp.Minimize(f), C)
# Problem collection
# Single problem collection
problemDict = {
"problemID" : problemID,
"problem" : prob,
"opt_val" : opt_val
}
problems = [problemDict]
# For debugging individual problems:
if __name__ == "__main__":
def printResults(problemID = "", problem = None, opt_val = None):
print(problemID)
problem.solve()
print("\tstatus: {}".format(problem.status))
print("\toptimal value: {}".format(problem.value))
print("\ttrue optimal value: {}".format(opt_val))
printResults(**problems[0])
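# Note: the generated code above targets the pre-1.0 cvxpy API (Variable(n, k),
# sum_entries, max_elemwise, mul_elemwise). A rough cvxpy >= 1.0 equivalent of
# the loss, keeping the same shapes, would be:
#
# Theta = cp.Variable((n, k))
# Z = X @ Theta - Y
# f = cp.sum(cp.maximum(cp.multiply(-A, Z), cp.multiply(1 - A, Z)))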
|
nilq/baby-python
|
python
|
from starlette.config import Config
# Configuration from environment variables or '.env' file.
config = Config(".env")
DB_NAME = config("DB_NAME")
TEST_DB_NAME = config("TEST_DB_NAME")
DB_USER = config("DB_USER")
DB_PASSWORD = config("DB_PASSWORD")
DB_HOST = config("DB_HOST")
DB_PORT = config("DB_PORT")
SECRET_KEY = config("SECRET_KEY")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
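# Example '.env' file this module would read (all values are placeholders):
#
# DB_NAME=app
# TEST_DB_NAME=app_test
# DB_USER=postgres
# DB_PASSWORD=secret
# DB_HOST=localhost
# DB_PORT=5432
# SECRET_KEY=change-me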
|
nilq/baby-python
|
python
|
"""Migration for the Submitty system."""
import os
def up(config):
"""
Run up migration.
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
"""
os.system("apt install -qy python3-numpy")
os.system("apt install -qy python3-opencv")
os.system("apt-get update")
def down(config):
"""
Run down migration (rollback).
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
"""
pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from sqlalchemy import Column, String, Integer, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from src.model.base import Base
from src.model.EstacaoZona import EstacaoZona
class Zona(Base):
__tablename__ = 'Zona'
Zona_id = Column(Integer, primary_key=True)
Nome = Column(String)
Raio = Column(Float)
Latitude = Column(Float)
Longitude = Column(Float)
Estacoes = relationship('Estacao', secondary=EstacaoZona)
Tags = relationship('Tag')
def format(self):
return {
"Zona_id": f'{self.Zona_id}',
"Nome": self.Nome,
"Raio": f'{self.Raio}',
"Latitude": f'{self.Latitude}',
"Longitude": f'{self.Longitude}'
}
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
from playLA.Matrix import Matrix
from playLA.Vector import Vector
import math
if __name__ == "__main__":
points = [[0, 0], [0, 5], [3, 5], [3, 4], [1, 4],
[1, 3], [2, 3], [2, 2], [1, 2], [1, 0]]
x = [point[0] for point in points]
y = [point[1] for point in points]
plt.figure(figsize=(5, 5))
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.plot(x, y)
# plt.show()
P = Matrix(points)
# print(P)
# T = Matrix([[2, 0], [0, 1.5]]) # T: 2 * 2 P: 10 * 2
# T = Matrix([[1, 0], [0, -1]])
# T = Matrix([[-1, 0], [0, 1]])
# T = Matrix([[-1, 0], [0, -1]])
# T = Matrix([[1, 1], [0, 1]])
# T = Matrix([[1, 0], [1, 1]])
# T = Matrix([[1, 0.5], [1, 1]])
# T = Matrix([[1, 0.5], [1, 1]])
theta = math.pi / 3
T = Matrix([[math.cos(theta), math.sin(theta)],
[-math.sin(theta), math.cos(theta)]])
P2 = T.dot(P.T()) # P2: 2 * 10
# print(P2)
plt.plot([P2.col_vector(i)[0] for i in range(P2.col_num())],
[P2.col_vector(i)[1] for i in range(P2.col_num())])
plt.show()
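# Note on the transform: with T = [[cos t, sin t], [-sin t, cos t]] applied to
# column vectors via T.dot(P.T()), the figure is rotated clockwise by theta;
# swapping the signs of the sin terms gives the counter-clockwise rotation.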
|
nilq/baby-python
|
python
|
import ast
import json
import os
from base_automation import report
# ---------------------------- terminal ------------------------------------#
@report.utils.step('send terminal command: {command}')
def terminal_command(command):
try:
step_data(f"send command to terminal:\n{command}")
return os.system(command)
except Exception as e:
step_data(e)
# ---------------------------- environment ------------------------------------#
@report.utils.step("get environment items")
def get_environment_items():
return os.environ.items()
@report.utils.step("get environment variable: {key}")
def get_environment_variable(key):
return os.environ.get(key)
@report.utils.step("set environment variable: {key}, {value}")
def set_environment_variable(key, value):
os.environ.setdefault(key, value)
# ---------------------------- report data ------------------------------------#
@report.utils.step('{step_description}')
def step_data(step_description):
pass
@report.utils.step("assert validation - {step_description}")
def compare_data(first_condition, second_condition, step_description=None, positive_test=True):
if positive_test:
assert first_condition == second_condition
else:
assert first_condition != second_condition
# ---------------------------- files actions ------------------------------------#
@report.utils.step("dict to json")
def dict_to_json(string_content):
return json.dumps(str_to_dict(string_content))
@report.utils.step("str to dict")
def str_to_dict(string_content):
return ast.literal_eval(str(string_content))
@report.utils.step("load json")
def load_json(json_content):
return json.loads(json_content)
@report.utils.step("create temp json")
def create_temp_json(file_path, data):
    # context manager guarantees the file is closed even if write() raises
    with open(file_path, "w") as json_file:
        json_file.write(data)
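# Usage sketch (illustrative; assumes these helpers are imported into a test):
#
# set_environment_variable("TARGET_ENV", "staging")
# compare_data(get_environment_variable("TARGET_ENV"), "staging",
#              step_description="environment variable round-trips")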
|
nilq/baby-python
|
python
|
# PLUGIN MADE BY DANGEROUSJATT
# KEEP CREDIT
# MADE FOR HELLBOT
# BY TEAM HELLBOT
# NOW IN darkbot
import math
import mpmath  # provides csc, sec and cot, which the math module lacks
from darkbot.utils import admin_cmd, sudo_cmd, edit_or_reply
from userbot import CmdHelp
from userbot import bot as darkbot
@darkbot.on(admin_cmd(pattern="sin ?(.*)"))
@darkbot.on(sudo_cmd(pattern="sin ?(.*)", allow_sudo=True))
async def findsin(event):
input_str = int(event.pattern_match.group(1))
output = math.sin(input_str)
await event.edit(f"**Value of Sin** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="cos ?(.*)"))
@darkbot.on(sudo_cmd(pattern="cos ?(.*)", allow_sudo=True))
async def find_cos(event):
input_str = int(event.pattern_match.group(1))
output = math.cos(input_str)
await event.edit(f"**Value of Cos** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="tan ?(.*)"))
@darkbot.on(sudo_cmd(pattern="tan ?(.*)", allow_sudo=True))
async def find_tan(event):
input_str = int(event.pattern_match.group(1))
output = math.tan(input_str)
await event.edit(f"**Value of Tan** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="cosec ?(.*)"))
@darkbot.on(sudo_cmd(pattern="cosec ?(.*)", allow_sudo=True))
async def find_csc(event):
input_str = float(event.pattern_match.group(1))
output = mpmath.csc(input_str)
await event.edit(f"**Value of Cosec** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="sec ?(.*)"))
@darkbot.on(sudo_cmd(pattern="sec ?(.*)", allow_sudo=True))
async def find_sec(event):
input_str = float(event.pattern_match.group(1))
output = mpmath.sec(input_str)
await event.edit(f"**Value of Sec** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="cot ?(.*)"))
@darkbot.on(sudo_cmd(pattern="cot ?(.*)", allow_sudo=True))
async def find_cot(event):
input_str = float(event.pattern_match.group(1))
output = mpmath.cot(input_str)
await event.edit(f"**Value of Cot** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="square ?(.*)"))
@darkbot.on(sudo_cmd(pattern="square ?(.*)", allow_sudo=True))
async def square(event):
input_str = float(event.pattern_match.group(1))
output = input_str * input_str
await event.edit(f"**Square of** `{input_str}`\n== `{output}`")
@darkbot.on(admin_cmd(pattern="cube ?(.*)"))
@darkbot.on(sudo_cmd(pattern="cube ?(.*)", allow_sudo=True))
async def cube(event):
input_str = float(event.pattern_match.group(1)) # DANGEROUSJATT
output = input_str * input_str * input_str
await event.edit(f"**Cube of** `{input_str}`\n== `{output}`")
CmdHelp("maths").add_command(
"cube", "<query>", "Gives the cube of given number"
).add_command(
"square", "<query>", "Gives the square of given number"
).add_command(
"cot", "<query>", "Gives the cot of given query"
).add_command(
"sec", "<query>", "Gives the sec of given query"
).add_command(
"cosec", "<query>", "Gives the cosec of given query"
).add_command(
"tan", "<query>", "Gives the tan of given query"
).add_command(
"sin", "<query>", "Gives the sin of given query"
).add_command(
"cos", "<query>", "Gives the cos of given query"
).add()
|
nilq/baby-python
|
python
|
# Copyright 2021 cstsunfu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from typing import Dict, List, Set
from dlk.core.base_module import SimpleModule, BaseModuleConfig
from . import embedding_register, embedding_config_register
from dlk.core.modules import module_config_register, module_register
@embedding_config_register("pretrained_transformers")
class PretrainedTransformersConfig(BaseModuleConfig):
"""Config for PretrainedTransformers
Config Example1:
>>> {
>>> "module": {
>>> "_base": "roberta",
>>> },
>>> "config": {
>>> "pretrained_model_path": "*@*",
>>> "input_map": {
>>> "input_ids": "input_ids",
>>> "attention_mask": "attention_mask",
>>> "type_ids": "type_ids",
>>> },
>>> "output_map": {
>>> "embedding": "embedding",
>>> },
>>> "dropout": 0, //dropout rate
>>> "embedding_dim": "*@*",
>>> },
>>> "_link": {
>>> "config.pretrained_model_path": ["module.config.pretrained_model_path"],
>>> },
>>> "_name": "pretrained_transformers",
>>> }
Config Example2:
>>> for gather embedding
>>> {
>>> "module": {
>>> "_base": "roberta",
>>> },
>>> "config": {
>>> "pretrained_model_path": "*@*",
>>> "input_map": {
>>> "input_ids": "input_ids",
>>> "attention_mask": "subword_mask",
>>> "type_ids": "type_ids",
>>> "gather_index": "gather_index",
>>> },
>>> "output_map": {
>>> "embedding": "embedding",
>>> },
>>> "embedding_dim": "*@*",
>>> "dropout": 0, //dropout rate
>>> },
>>> "_link": {
>>> "config.pretrained_model_path": ["module.config.pretrained_model_path"],
>>> },
>>> "_name": "pretrained_transformers",
>>> }
"""
def __init__(self, config: Dict):
super(PretrainedTransformersConfig, self).__init__(config)
self.pretrained_transformers_config = config["module"]
self.post_check(config['config'], used=[
"pretrained_model_path",
"embedding_dim",
"output_map",
"input_map",
"dropout",
"return_logits",
])
@embedding_register("pretrained_transformers")
class PretrainedTransformers(SimpleModule):
"""Wrap the hugingface transformers
"""
def __init__(self, config: PretrainedTransformersConfig):
super(PretrainedTransformers, self).__init__(config)
self._provide_keys = {'embedding'}
self._required_keys = {'input_ids', 'attention_mask'}
self.config = config
self.pretrained_transformers = module_register.get(config.pretrained_transformers_config['_name'])(module_config_register.get(config.pretrained_transformers_config['_name'])(config.pretrained_transformers_config))
def init_weight(self, method):
"""init the weight of submodules by 'method'
Args:
method: init method
Returns:
None
"""
self.pretrained_transformers.init_weight(method)
def forward(self, inputs: Dict[str, torch.Tensor])->Dict[str, torch.Tensor]:
"""get the transformers output as embedding
Args:
inputs: one mini-batch inputs
Returns:
one mini-batch outputs
"""
input_ids = inputs[self.get_input_name('input_ids')] if "input_ids" in self.config._input_map else None
attention_mask = inputs[self.get_input_name('attention_mask')] if "attention_mask" in self.config._input_map else None
        type_ids = inputs[self.get_input_name('type_ids')] if "type_ids" in self.config._input_map else None
inputs_embeds = inputs[self.get_input_name('inputs_embeds')] if "inputs_embeds" in self.config._input_map else None
if (input_ids is None and inputs_embeds is None) or (input_ids is not None and inputs_embeds is not None):
            raise PermissionError("Exactly one of input_ids and inputs_embeds must be provided")
sequence_output, all_hidden_states, all_self_attentions = self.pretrained_transformers(
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": type_ids,
"inputs_embeds": inputs_embeds,
}
)
if 'gather_index' in self.config._input_map:
# gather_index.shape == bs*real_sent_len
gather_index = inputs[self.get_input_name("gather_index")]
g_bs, g_seq_len = gather_index.shape
bs, seq_len, hid_size = sequence_output.shape
assert g_bs == bs
assert g_seq_len <= seq_len
sequence_output = torch.gather(sequence_output[:, :, :], 1, gather_index.unsqueeze(-1).expand(bs, g_seq_len, hid_size))
inputs[self.get_output_name('embedding')] = sequence_output
if self._logits_gather.layer_map:
inputs.update(self._logits_gather(all_hidden_states))
return inputs
|
nilq/baby-python
|
python
|
import os
import os.path
from os.path import exists
import hashlib
import json
import uuid
import pprint
import unittest
from pathlib import Path
from collections import defaultdict
import settings
import pathlib
from cromulent import model, vocab, reader
from cromulent.model import factory
from pipeline.util import CromObjectMerger
from pipeline.projects.sales import SalesPipeline
from pipeline.projects.people import PeoplePipeline
from pipeline.projects.knoedler import KnoedlerPipeline
from pipeline.projects.aata import AATAPipeline
from pipeline.projects.sales.util import SalesTree
from pipeline.nodes.basic import Serializer, AddArchesModel
MODELS = {
'Bidding': 'model-bidding',
'Acquisition': 'model-acquisition',
'Activity': 'model-activity',
'SaleActivity': 'model-sale-activity',
'Event': 'model-event',
'Group': 'model-groups',
'HumanMadeObject': 'model-object',
'LinguisticObject': 'model-lo',
'Person': 'model-person',
'Place': 'model-place',
'ProvenanceEntry': 'model-activity',
'Production': 'model-production',
'Set': 'model-set',
'VisualItem': 'model-visual-item',
'Inventorying': 'model-inventorying'
}
class TestWriter():
'''
Deserialize the output of each resource and store in memory.
Merge data for multiple serializations of the same resource.
'''
def __init__(self):
self.output = {}
self.merger = CromObjectMerger()
super().__init__()
def __call__(self, data: dict, *args, **kwargs):
d = data['_OUTPUT']
dd = json.loads(d)
dr = data['_ARCHES_MODEL']
if dr not in self.output:
self.output[dr] = {}
uu = data.get('uuid')
if 'id' in dd:
uu = hashlib.sha256(dd['id'].encode('utf-8')).hexdigest()
elif not uu and 'uri' in data:
uu = hashlib.sha256(data['uri'].encode('utf-8')).hexdigest()
# print(f'*** No UUID in top-level resource. Using a hash of top-level URI: {uu}')
if not uu:
uu = str(uuid.uuid4())
# print(f'*** No UUID in top-level resource;')
# print(f'*** Using an assigned UUID filename for the content: {uu}')
fn = '%s.json' % uu
data = json.loads(d)
if fn in self.output[dr]:
r = reader.Reader()
model_object = r.read(d)
merger = self.merger
content = self.output[dr][fn]
try:
m = r.read(content)
if m == model_object:
self.output[dr][fn] = data
return
else:
merger.merge(m, model_object)
self.output[dr][fn] = json.loads(factory.toString(m, False))
return
except model.DataError:
print(f'Exception caught while merging data from {fn}:')
print(d)
print(content)
raise
else:
self.output[dr][fn] = data
def process_model(self, model):
data = {v['id']: v for v in model.values()}
return data
def process_output(self, output):
data = {k: self.process_model(v) for k, v in output.items()}
return data
def processed_output(self):
return self.process_output(self.output)
##########################################################################################
class SalesTestPipeline(SalesPipeline):
'''
Test Provenance pipeline subclass that allows using a custom Writer.
'''
def __init__(self, writer, input_path, catalogs, auction_events, contents, **kwargs):
self.uid_tag_prefix = 'tag:getty.edu,2019:digital:pipeline:TESTS:REPLACE-WITH-UUID#'
super().__init__(input_path, catalogs, auction_events, contents, **kwargs)
self.writer = writer
self.prev_post_sales_map = {}
def serializer_nodes_for_model(self, *args, model=None, **kwargs):
nodes = []
if model:
nodes.append(AddArchesModel(model=model))
nodes.append(Serializer(compact=False))
nodes.append(self.writer)
return nodes
def get_services(self):
services = super().get_services()
services.update({
'problematic_records': {},
'location_codes': {}
})
return services
def run(self, **options):
vocab.add_linked_art_boundary_check()
vocab.add_attribute_assignment_check()
services = self.get_services(**options)
super().run(services=services, **options)
post_map = services['post_sale_map']
self.generate_prev_post_sales_data(post_map)
def load_prev_post_sales_data(self):
return {}
def persist_prev_post_sales_data(self, post_sale_rewrite_map):
self.prev_post_sales_map = post_sale_rewrite_map
def load_sales_tree(self):
return SalesTree()
def persist_sales_tree(self, g):
self.sales_tree = g
class TestSalesPipelineOutput(unittest.TestCase):
'''
Parse test CSV data and run the Provenance pipeline with the in-memory TestWriter.
Then verify that the serializations in the TestWriter object are what was expected.
'''
def setUp(self):
settings.pipeline_common_service_files_path = os.environ.get('GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH', str(pathlib.Path('data/common')))
settings.pipeline_service_files_base_path = os.environ.get('GETTY_PIPELINE_SERVICE_FILES_PATH', str(pathlib.Path('data')))
# os.environ['GETTY_PIPELINE_SERVICE_FILES_PATH'] = str(pathlib.Path('data/sales'))
self.catalogs = {
'header_file': 'tests/data/sales/sales_catalogs_info_0.csv',
'files_pattern': 'tests/data/sales/empty.csv',
}
self.contents = {
'header_file': 'tests/data/sales/sales_contents_0.csv',
'files_pattern': 'tests/data/sales/empty.csv',
}
self.auction_events = {
'header_file': 'tests/data/sales/sales_descriptions_0.csv',
'files_pattern': 'tests/data/sales/empty.csv',
}
os.environ['QUIET'] = '1'
def tearDown(self):
pass
def run_pipeline(self, test_name):
input_path = os.getcwd()
catalogs = self.catalogs.copy()
events = self.auction_events.copy()
contents = self.contents.copy()
tests_path = Path(f'tests/data/sales/{test_name}')
catalog_files = list(tests_path.rglob('sales_catalogs_info*'))
event_files = list(tests_path.rglob('sales_descriptions*'))
content_files = list(tests_path.rglob('sales_contents*'))
if catalog_files:
if exists(str(tests_path / 'sales_catalogs_info_0.csv')):
catalogs['header_file'] = str(tests_path / 'sales_catalogs_info_0.csv')
catalogs['files_pattern'] = str(tests_path / 'sales_catalogs_info_[!0]*')
if event_files:
if exists(str(tests_path / 'sales_descriptions_0.csv')):
events['header_file'] = str(tests_path / 'sales_descriptions_0.csv')
events['files_pattern'] = str(tests_path / 'sales_descriptions_[!0]*')
if content_files:
if exists(str(tests_path / 'sales_contents_0.csv')):
contents['header_file'] = str(tests_path / 'sales_contents_0.csv')
contents['files_pattern'] = str(tests_path / 'sales_contents_[!0]*')
writer = TestWriter()
pipeline = SalesTestPipeline(
writer,
input_path,
catalogs=catalogs,
auction_events=events,
contents=contents,
models=MODELS,
limit=100,
debug=True
)
pipeline.run()
self.prev_post_sales_map = pipeline.prev_post_sales_map
return writer.processed_output()
##########################################################################################
class AATATestPipeline(AATAPipeline):
'''
Test Provenance pipeline subclass that allows using a custom Writer.
'''
def __init__(self, writer, input_path, *args, **kwargs):
self.uid_tag_prefix = 'tag:getty.edu,2019:digital:pipeline:TESTS:REPLACE-WITH-UUID#'
super().__init__(input_path, *args, **kwargs)
self.writer = writer
def serializer_nodes_for_model(self, *args, model=None, **kwargs):
nodes = []
if model:
nodes.append(AddArchesModel(model=model))
nodes.append(Serializer(compact=False))
nodes.append(self.writer)
return nodes
def get_services(self):
services = super().get_services()
# services.update({
# })
return services
def run(self, **options):
vocab.add_linked_art_boundary_check()
vocab.add_attribute_assignment_check()
services = self.get_services(**options)
super().run(services=services, **options)
class TestAATAPipelineOutput(unittest.TestCase):
'''
Parse test CSV data and run the Provenance pipeline with the in-memory TestWriter.
Then verify that the serializations in the TestWriter object are what was expected.
'''
def setUp(self):
settings.pipeline_common_service_files_path = os.environ.get('GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH', str(pathlib.Path('data/common')))
settings.pipeline_service_files_base_path = os.environ.get('GETTY_PIPELINE_SERVICE_FILES_PATH', str(pathlib.Path('data')))
# os.environ['GETTY_PIPELINE_SERVICE_FILES_PATH'] = str(pathlib.Path('data/aata'))
self.patterns = {
'abstracts_pattern': 'tests/data/aata/empty.xml',
'journals_pattern': 'tests/data/aata/empty.xml',
'series_pattern': 'tests/data/aata/empty.xml',
'people_pattern': 'tests/data/aata/empty.xml',
'corp_pattern': 'tests/data/aata/empty.xml',
'geog_pattern': 'tests/data/aata/empty.xml',
'subject_pattern': 'tests/data/aata/empty.xml',
'tal_pattern': 'tests/data/aata/empty.xml',
}
os.environ['QUIET'] = '1'
def tearDown(self):
pass
def run_pipeline(self, test_name):
input_path = os.getcwd()
tests_path = Path(f'tests/data/aata/{test_name}')
patterns = {
'abstracts_pattern': 'AATA_[0-9]*.xml',
'journals_pattern': 'AATA*Journal.xml',
'series_pattern': 'AATA*Series.xml',
'people_pattern': 'Auth_person.xml',
'corp_pattern': 'Auth_corp.xml',
'geog_pattern': 'Auth_geog.xml',
'subject_pattern': 'Auth_subject.xml',
'tal_pattern': 'Auth_TAL.xml'
}
kwargs = self.patterns.copy()
for k, pattern in patterns.items():
files = list(tests_path.rglob(pattern))
if files:
kwargs[k] = str(tests_path / pattern)
writer = TestWriter()
pipeline = AATATestPipeline(
writer,
input_path,
models=MODELS,
limit=100,
debug=True,
**kwargs,
)
pipeline.run()
return writer.processed_output()
def verify_content(self, data, **kwargs):
for k, expected in kwargs.items():
self.assertIn(k, data)
got = data.get(k)
if isinstance(got, list):
values = [g['content'] for g in got]
self.assertIn(expected, values)
else:
value = got['content']
self.assertEqual(value, expected)
def verify_property(self, data, property, **kwargs):
for k, expected in kwargs.items():
self.assertIn(k, data)
got = data.get(k)
if isinstance(got, list):
values = [g[property] for g in got]
self.assertIn(expected, values)
else:
value = got[property]
self.assertEqual(value, expected)
def get_classification_labels(self, data):
cl = data.get('classified_as', [])
for c in cl:
clabel = c['_label']
yield clabel
def get_typed_referrers(self, data):
return self.get_typed_content('referred_to_by', data)
def get_typed_identifiers(self, data):
return self.get_typed_content('identified_by', data)
def get_typed_content(self, prop, data):
identified_by = data.get(prop, [])
identifiers = defaultdict(set)
for i in identified_by:
content = i['content']
for clabel in self.get_classification_labels(i):
identifiers[clabel].add(content)
for k in identifiers.keys():
if len(identifiers[k]) == 1:
identifiers[k] = identifiers[k].pop()
return dict(identifiers)
def verify_place_hierarchy(self, places, place, expected_names):
while place:
expected = expected_names.pop(0)
self.verify_content(place, identified_by=expected)
place = place.get('part_of', [])
if place:
i = place[0]['id']
place = places.get(i)
self.assertEqual(len(expected_names), 0)
##########################################################################################
class KnoedlerTestPipeline(KnoedlerPipeline):
'''
Test Provenance pipeline subclass that allows using a custom Writer.
'''
def __init__(self, writer, input_path, data, **kwargs):
self.uid_tag_prefix = 'tag:getty.edu,2019:digital:pipeline:TESTS:REPLACE-WITH-UUID#'
super().__init__(input_path, data, **kwargs)
self.writer = writer
def serializer_nodes_for_model(self, *args, model=None, **kwargs):
nodes = []
if model:
nodes.append(AddArchesModel(model=model))
nodes.append(Serializer(compact=False))
nodes.append(self.writer)
return nodes
def get_services(self):
services = super().get_services()
services.update({
'problematic_records': {},
'location_codes': {},
})
return services
def run(self, **options):
vocab.conceptual_only_parts()
vocab.add_linked_art_boundary_check()
vocab.add_attribute_assignment_check()
services = self.get_services(**options)
super().run(services=services, **options)
class TestKnoedlerPipelineOutput(unittest.TestCase):
'''
Parse test CSV data and run the Provenance pipeline with the in-memory TestWriter.
Then verify that the serializations in the TestWriter object are what was expected.
'''
def setUp(self):
settings.pipeline_common_service_files_path = os.environ.get('GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH', str(pathlib.Path('data/common')))
settings.pipeline_service_files_base_path = os.environ.get('GETTY_PIPELINE_SERVICE_FILES_PATH', str(pathlib.Path('data')))
# os.environ['GETTY_PIPELINE_SERVICE_FILES_PATH'] = str(pathlib.Path('data/knoedler'))
# os.environ['GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH'] = 'data/common'
self.data = {
'header_file': 'tests/data/knoedler/knoedler_0.csv',
'files_pattern': 'knoedler.csv',
}
os.environ['QUIET'] = '1'
def tearDown(self):
pass
def run_pipeline(self, test_name):
input_path = os.getcwd()
data = self.data.copy()
tests_path = Path(f'tests/data/knoedler/{test_name}')
files = list(tests_path.rglob('knoedler_ar*'))
if files:
data['files_pattern'] = str(tests_path / 'knoedler_ar*')
writer = TestWriter()
pipeline = KnoedlerTestPipeline(
writer,
input_path,
data=data,
models=MODELS,
limit=100,
debug=True
)
pipeline.run()
return writer.processed_output()
##########################################################################################
class PeopleTestPipeline(PeoplePipeline):
'''
Test Provenance pipeline subclass that allows using a custom Writer.
'''
def __init__(self, writer, input_path, data, **kwargs):
self.uid_tag_prefix = 'tag:getty.edu,2019:digital:pipeline:TESTS:REPLACE-WITH-UUID#'
super().__init__(input_path, data, **kwargs)
self.writer = writer
def serializer_nodes_for_model(self, *args, model=None, **kwargs):
nodes = []
if model:
nodes.append(AddArchesModel(model=model))
nodes.append(Serializer(compact=False))
nodes.append(self.writer)
return nodes
def get_services(self):
services = super().get_services()
services.update({
'problematic_records': {},
'location_codes': {},
})
return services
def run(self, **options):
vocab.add_linked_art_boundary_check()
vocab.add_attribute_assignment_check()
services = self.get_services(**options)
super().run(services=services, **options)
class TestPeoplePipelineOutput(unittest.TestCase):
'''
Parse test CSV data and run the Provenance pipeline with the in-memory TestWriter.
Then verify that the serializations in the TestWriter object are what was expected.
'''
def setUp(self):
settings.pipeline_common_service_files_path = os.environ.get('GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH', str(pathlib.Path('data/common')))
settings.pipeline_service_files_base_path = os.environ.get('GETTY_PIPELINE_SERVICE_FILES_PATH', str(pathlib.Path('data')))
# os.environ['GETTY_PIPELINE_SERVICE_FILES_PATH'] = str(pathlib.Path('data/people'))
# os.environ['GETTY_PIPELINE_COMMON_SERVICE_FILES_PATH'] = 'data/common'
self.data = {
'header_file': 'tests/data/people/people_authority_0.csv',
'files_pattern': 'people_authority.csv',
}
os.environ['QUIET'] = '1'
def tearDown(self):
pass
def run_pipeline(self, test_name):
input_path = os.getcwd()
data = self.data.copy()
tests_path = Path(f'tests/data/people/{test_name}')
files = list(tests_path.rglob('people_authority_ar*'))
if files:
data['files_pattern'] = str(tests_path / 'people_authority_ar*')
writer = TestWriter()
pipeline = PeopleTestPipeline(
writer,
input_path,
data=data,
models=MODELS,
limit=100,
debug=True
)
pipeline.run()
return writer.processed_output()
##########################################################################################
def classified_identifiers(data, key='identified_by'):
classified_identifiers = {}
identifiers = [(i['content'], i.get('classified_as', [])) for i in data.get(key, [])]
for (content, classification) in identifiers:
if len(classification):
for cl in classification:
label = cl['_label']
classified_identifiers[label] = content
else:
classified_identifiers[None] = content
return classified_identifiers
def classified_identifier_sets(data, key='identified_by'):
classified_identifiers = defaultdict(set)
identifiers = [(i.get('content'), i.get('classified_as', [])) for i in data.get(key, [])]
for (content, classification) in identifiers:
if content:
if len(classification):
for cl in classification:
label = cl['_label']
classified_identifiers[label].add(content)
else:
classified_identifiers[None].add(content)
return classified_identifiers
def classification_sets(data, key='_label'):
classification_set = set()
classification = data.get('classified_as', [])
if len(classification):
for cl in classification:
label = cl[key]
classification_set.add(label)
return classification_set
def classification_tree(data, key='_label'):
tree = {}
classification = data.get('classified_as', [])
if len(classification):
for cl in classification:
label = cl[key]
tree[label] = classification_tree(cl, key=key)
return tree
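# Illustrative example of the identifier helpers above (data shape mirrors the
# serialized Linked Art JSON handled by these tests; values are invented):
#
# data = {"identified_by": [
#     {"content": "K-123", "classified_as": [{"_label": "STAR Identifier"}]},
#     {"content": "Untitled"},
# ]}
# classified_identifiers(data)  # -> {"STAR Identifier": "K-123", None: "Untitled"}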
|
nilq/baby-python
|
python
|
import logging
from datalad_lgpdextension.utils.dataframe import Dataframe
from datalad_lgpdextension.writers.dataframe import Dataframe as dfutils
from datalad_lgpdextension.utils.folder import Folder
from datalad_lgpdextension.runner.actions import Actions
from datalad_lgpdextension.utils.generate_config import GenerateConfig
lgr = logging.getLogger('datalad.lgpdextension.lgpd_extension.writers.dataframe')
class Main:
def __init__(self,filename=f"{Folder().getcurrent()}/_settings.json"):
self.filename = filename
def update_file(self,settings):
        default_field = "Added the '{{FIELD}} field'. YOU NEED TO CONFIGURE THE '{{FIELD}} FIELD' FROM SETTINGS JSON."
        msgs = ""
        if not settings.get("ofuscation", None):
            msg = default_field.replace("{{FIELD}}", "OFUSCATION")
            msgs += "\n" + msg
            lgr.info(msg)
            settings["ofuscation"] = GenerateConfig().addExampleOfuscation()
        if not settings.get("tokenization", None):
            msg = default_field.replace("{{FIELD}}", "TOKENIZATION")
            msgs += "\n" + msg
            lgr.info(msg)
            settings["tokenization"] = GenerateConfig().addExampleTokenization()
        if not settings.get("file", None):
            msg = default_field.replace("{{FIELD}}", "FILE")
            msgs += "\n" + msg
            lgr.info(msg)
            settings["file"] = GenerateConfig().addExampleFile()
        if not settings.get("columns", None):
            msg = default_field.replace("{{FIELD}}", "COLUMNS")
            msgs += "\n" + msg
            lgr.info(msg)
            settings["columns"] = GenerateConfig().addExampleColumn()
Folder(self.filename).save(settings)
if msgs != "":
raise Exception(msgs)
return settings
def run(self):
if not Folder(self.filename).exists():
settings = self.update_file(dict())
else:
fld = Folder(self.filename)
settings = self.update_file(fld.read())
dataframe = dfutils().read(settings)
for colname,value in settings["columns"].items():
if value.get("enable",None) == "true":
Actions(colname,settings,dataframe,self.filename).run(value["actions"])
return True
|
nilq/baby-python
|
python
|
class LinkedListNode:
def __init__(self, data):
self.data = data
self.next = None
class Stack:
def __init__(self):
self.num_elements = 0
self.head = None
def push(self, data):
new_node = LinkedListNode(data)
if self.head is None:
self.head = new_node
else:
new_node.next = self.head
self.head = new_node
self.num_elements += 1
def pop(self):
if self.is_empty():
return None
temp = self.head.data
self.head = self.head.next
self.num_elements -= 1
return temp
def top(self):
if self.head is None:
return None
return self.head.data
def size(self):
return self.num_elements
def is_empty(self):
return self.num_elements == 0
def evaluate_post_fix(input_list):
    stack = Stack()
    operators = ["*", "/", "-", "+"]
    for element in input_list:
        # print(stack)
        if element in operators:
            first = int(stack.pop())
            second = int(stack.pop())
            # print(first, second, element)
            # use == here: 'is' compares object identity, which is not
            # guaranteed to hold for equal string values
            if element == "+":
                stack.push(second + first)
            elif element == "/":
                stack.push(int(second / first))
            elif element == "*":
                stack.push(int(second * first))
            elif element == "-":
                stack.push(second - first)
        else:
            stack.push(element)
    # print(stack.head.data)
    return stack.head.data

print(evaluate_post_fix(["4", "13", "5", "/", "+"]))
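# Worked trace for ["4", "13", "5", "/", "+"]:
#   push 4, push 13, push 5
#   "/": first = 5, second = 13 -> int(13 / 5) = 2, push 2
#   "+": first = 2, second = 4  -> 4 + 2 = 6, push 6
# so the call above prints 6.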
|
nilq/baby-python
|
python
|
import numpy as np
def project(W, X, mu=None):
if mu is None:
return np.dot(X,W)
return np.dot(X - mu, W)
def reconstruct(W, Y, mu=None):
if mu is None:
return np.dot(Y,W.T)
return np.dot(Y, W.T) + mu
def pca(X, y, num_components=0):
[n,d] = X.shape
if (num_components <= 0) or (num_components>n):
num_components = n
mu = X.mean(axis=0)
X = X - mu
if n>d:
C = np.dot(X.T,X)
[eigenvalues,eigenvectors] = np.linalg.eigh(C)
else:
C = np.dot(X,X.T)
[eigenvalues,eigenvectors] = np.linalg.eigh(C)
eigenvectors = np.dot(X.T,eigenvectors)
        for i in range(n):
eigenvectors[:,i] = eigenvectors[:,i]/np.linalg.norm(eigenvectors[:,i])
# or simply perform an economy size decomposition
# eigenvectors, eigenvalues, variance = np.linalg.svd(X.T, full_matrices=False)
# sort eigenvectors descending by their eigenvalue
idx = np.argsort(-eigenvalues)
eigenvalues = eigenvalues[idx]
eigenvectors = eigenvectors[:,idx]
# select only num_components
eigenvalues = eigenvalues[0:num_components].copy()
eigenvectors = eigenvectors[:,0:num_components].copy()
return [eigenvalues, eigenvectors, mu]
def lda(X, y, num_components=0):
y = np.asarray(y)
[n,d] = X.shape
c = np.unique(y)
    if (num_components <= 0) or (num_components > (len(c)-1)):
num_components = (len(c)-1)
meanTotal = X.mean(axis=0)
Sw = np.zeros((d, d), dtype=np.float32)
Sb = np.zeros((d, d), dtype=np.float32)
for i in c:
Xi = X[np.where(y==i)[0],:]
meanClass = Xi.mean(axis=0)
Sw = Sw + np.dot((Xi-meanClass).T, (Xi-meanClass))
Sb = Sb + n * np.dot((meanClass - meanTotal).T, (meanClass - meanTotal))
eigenvalues, eigenvectors = np.linalg.eig(np.linalg.inv(Sw)*Sb)
idx = np.argsort(-eigenvalues.real)
eigenvalues, eigenvectors = eigenvalues[idx], eigenvectors[:,idx]
eigenvalues = np.array(eigenvalues[0:num_components].real, dtype=np.float32, copy=True)
eigenvectors = np.array(eigenvectors[0:,0:num_components].real, dtype=np.float32, copy=True)
return [eigenvalues, eigenvectors]
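# Fisherfaces: first reduce to (n - c) dimensions with PCA so the within-class
# scatter matrix becomes non-singular, then apply LDA in the PCA subspace; the
# two projections compose into a single set of basis vectors.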
def fisherfaces(X,y,num_components=0):
y = np.asarray(y)
[n,d] = X.shape
c = len(np.unique(y))
[eigenvalues_pca, eigenvectors_pca, mu_pca] = pca(X, y, (n-c))
[eigenvalues_lda, eigenvectors_lda] = lda(project(eigenvectors_pca, X, mu_pca), y, num_components)
eigenvectors = np.dot(eigenvectors_pca,eigenvectors_lda)
return [eigenvalues_lda, eigenvectors, mu_pca]
|
nilq/baby-python
|
python
|
import pytest
from copy import deepcopy
import mosdef_cassandra as mc
import unyt as u
from mosdef_cassandra.tests.base_test import BaseTest
from mosdef_cassandra.writers.inp_functions import generate_input
from mosdef_cassandra.writers.writers import write_mcfs
from mosdef_cassandra.utils.tempdir import *
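# Each test below renders a Cassandra input file with generate_input() and
# asserts on the exact text of one section; the fixtures build small one/two
# component and one/two box systems, plus a GCMC system with a fixed lattice.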
class TestInpFunctions(BaseTest):
@pytest.fixture
def onecomp_system(self, methane_oplsaa, box):
system = mc.System([box], [methane_oplsaa], mols_to_add=[[10]])
moveset = mc.MoveSet("nvt", [methane_oplsaa])
return system, moveset
@pytest.fixture
def twocomp_system(self, methane_oplsaa, butane_oplsaa, box):
system = mc.System(
[box], [methane_oplsaa, butane_oplsaa], mols_to_add=[[10, 100]]
)
moveset = mc.MoveSet("nvt", [methane_oplsaa, butane_oplsaa])
return system, moveset
@pytest.fixture
def twobox_system(self, methane_oplsaa, box):
system = mc.System(
[box, box], [methane_oplsaa], mols_to_add=[[10], [5]]
)
moveset = mc.MoveSet("gemc", [methane_oplsaa])
return system, moveset
@pytest.fixture
def twocomptwobox_system(self, methane_oplsaa, butane_oplsaa, box):
system = mc.System(
[box, box],
[methane_oplsaa, butane_oplsaa],
mols_to_add=[[10, 100], [1, 5]],
)
moveset = mc.MoveSet("gemc_npt", [methane_oplsaa, butane_oplsaa])
return system, moveset
@pytest.fixture
def gcmc_system(
self, methane_oplsaa, fixed_lattice_compound, fixed_lattice_trappe
):
box_list = [fixed_lattice_compound]
species_list = [fixed_lattice_trappe, methane_oplsaa]
system = mc.System(
box_list,
species_list,
mols_in_boxes=[[1, 0]],
mols_to_add=[[0, 10]],
)
moveset = mc.MoveSet("gcmc", species_list)
return system, moveset
def test_invalid_kwargs(self, onecomp_system):
(system, moveset) = onecomp_system
with pytest.raises(ValueError, match=r"Invalid input argument"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
random_arg=1,
)
def test_run_name(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
run_name="test name",
)
assert "# Run_Name\ntest-name.out" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
run_name="test_name",
)
assert "# Run_Name\ntest_name.out" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Run_Name\nnvt.out" in inp_data
with pytest.raises(TypeError, match=r"must be a string"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
run_name=1,
)
def test_sim_type(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Sim_Type\nnvt" in inp_data
with pytest.raises(ValueError, match=r"Unsupported sim_type"):
inp_data = mc.writers.inp_functions.get_sim_type("gccmc")
def test_nbr_species(self, onecomp_system, twocomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Nbr_Species\n1" in inp_data
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Nbr_Species\n2" in inp_data
def test_vdw_style(self, twocomp_system, twobox_system):
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# VDW_Style\nlj cut_tail 12.0" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
vdw_style="none",
)
assert "# VDW_Style\nnone\n" in inp_data
with pytest.raises(ValueError, match=r"Unsupported vdw_style"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
vdw_style="cutoff",
vdw_cutoff=12.0 * u.angstrom,
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut",
vdw_cutoff=15.0 * u.angstrom,
)
assert "# VDW_Style\nlj cut 15.0" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut_shift",
vdw_cutoff=15.0 * u.angstrom,
)
assert "# VDW_Style\nlj cut_shift 15.0" in inp_data
with pytest.raises(ValueError, match=r"Only one box"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
vdw_cutoff_box2=10.0 * u.angstrom,
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut_switch",
vdw_cutoff=[12.0 * u.angstrom, 15.0 * u.angstrom],
)
assert "# VDW_Style\nlj cut_switch 12.0 15.0" in inp_data
with pytest.raises(ValueError, match=r"requires an inner"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut_switch",
vdw_cutoff=12.0 * u.angstrom,
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# VDW_Style\nlj cut_tail 12.0\nlj cut_tail 12.0" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cut_switch",
vdw_cutoff_box1=[12.0 * u.angstrom, 15.0 * u.angstrom],
vdw_cutoff_box2=[11.0 * u.angstrom, 13.0 * u.angstrom],
)
assert (
"# VDW_Style\nlj cut_switch 12.0 15.0\nlj cut_switch 11.0 13.0"
in inp_data
)
with pytest.raises(ValueError, match=r"Unsupported cutoff style"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
cutoff_style="cutoff",
vdw_cutoff=12.0 * u.angstrom,
)
def test_charge_style(self, twocomp_system, twobox_system):
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Charge_Style\ncoul ewald 12.0 1e-05\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_style="cut",
)
assert "# Charge_Style\ncoul cut 12.0\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_style="dsf",
)
assert "# Charge_Style\ncoul dsf 12.0\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_style="dsf",
dsf_damping=0.2,
)
assert "# Charge_Style\ncoul dsf 12.0 0.2\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_style="none",
)
assert "# Charge_Style\nnone\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_cutoff=15.0 * u.angstrom,
ewald_accuracy=5e-6,
)
assert "# Charge_Style\ncoul ewald 15.0 5e-06\n" in inp_data
with pytest.raises(ValueError, match=r"Only one box"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_cutoff_box2=1.0 * u.angstrom,
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
charge_cutoff_box2=30.0 * u.angstrom,
ewald_accuracy=5e-6,
)
assert (
"# Charge_Style\ncoul ewald 12.0 5e-06\ncoul ewald 30.0 5e-06\n"
in inp_data
)
def test_mixing_rule(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Mixing_Rule\nlb\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
mixing_rule="geometric",
)
assert "# Mixing_Rule\ngeometric\n" in inp_data
mixing_dict = {"ls_138_s1 ls_140_s1": "1.0 1.0"}
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
mixing_rule="custom",
custom_mixing_dict=mixing_dict,
)
assert (
"# Mixing_Rule\ncustom\nls_138_s1 ls_140_s1 1.0 1.0\n" in inp_data
)
with pytest.raises(
ValueError, match=r"Custom mixing rule requested but"
):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
mixing_rule="custom",
)
with pytest.raises(ValueError, match=r"Unsupported mixing rule"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
mixing_rule="other",
)
def test_seeds(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Seed_Info\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
seeds=[1, 2],
)
assert "# Seed_Info\n1 2\n" in inp_data
with pytest.raises(TypeError, match=r"argument should be a list"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
seeds=100,
)
with pytest.raises(ValueError, match=r"must be integers"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
seeds=[100, -1],
)
def test_rcut_min(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Rcutoff_Low\n1.0\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
rcut_min=10.0 * u.angstrom,
)
assert "# Rcutoff_Low\n10.0\n" in inp_data
with pytest.raises(TypeError, match=r"unyt_array"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
rcut_min="hello",
)
def test_pair_energy(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
rcut_min=10.0 * u.angstrom,
)
assert "# Pair_Energy\ntrue\n" in inp_data
with pytest.raises(TypeError, match=r"be of type boolean"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pair_energy=1,
)
def test_max_molecules(self, twocomp_system, gcmc_system):
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# Molecule_Files\nspecies1.mcf 10\nspecies2.mcf 100" in inp_data
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
max_molecules=[100, 1000],
)
assert (
"# Molecule_Files\nspecies1.mcf 100\nspecies2.mcf 1000" in inp_data
)
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
assert (
"# Molecule_Files\nspecies1.mcf 1\nspecies2.mcf 2010\n" in inp_data
)
(system, moveset) = twocomp_system
with pytest.raises(TypeError, match=r"should be a list"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
max_molecules=100,
)
with pytest.raises(ValueError, match=r"Length of list specified"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
max_molecules=[100],
)
def test_boxes(self, onecomp_system, twobox_system, gcmc_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Box_Info\n1\ncubic\n50.0\n" in inp_data
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Box_Info\n2\ncubic\n50.0\n\ncubic\n50.0\n" in inp_data
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
assert "# Box_Info\n1\ncubic\n29.84\n" in inp_data
def test_temperature(self, onecomp_system, twobox_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=200.0 * u.K,
)
assert "# Temperature_Info\n200.0\n" in inp_data
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=200.0 * u.K,
)
assert "# Temperature_Info\n200.0\n200.0\n" in inp_data
with pytest.raises(ValueError, match=r"less than zero"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=-300.0 * u.K,
)
with pytest.raises(TypeError, match=r"unyt_array"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature="hi",
)
def test_pressure(self, twocomptwobox_system):
(system, moveset) = twocomptwobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=2.0 * u.bar,
)
assert "# Pressure_Info\n2.0\n2.0\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=2.0 * u.bar,
pressure_box2=10.0 * u.bar,
)
assert "# Pressure_Info\n2.0\n10.0\n" in inp_data
with pytest.raises(ValueError, match=r"Pressure must be specified"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
with pytest.raises(TypeError, match=r"unyt_array"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure="string",
)
def test_chempot(self, gcmc_system):
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
assert "# Chemical_Potential_Info\nnone 10.0 \n" in inp_data
with pytest.raises(
ValueError, match=r"Chemical potential information"
):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
with pytest.raises(TypeError, match=r"unyt_array"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", "string"],
)
def test_moveset_formatting(self, onecomp_system):
# Invalid keyword
with pytest.raises(
ValueError, match="Invalid probability info section"
):
fake_prob_dict = {"trans": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Translate
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"translate": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"translate": [0.1, 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"translate": [0.1, ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"translate": [0.1, [5.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Rotate
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"rotate": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"rotate": [0.1, 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"rotate": [0.1, ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"rotate": [0.1, [5.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Angle
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"angle": [14.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"angle": 14.0}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Dihedral
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"dihed": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"dihed": [0.1, 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"dihed": [0.1, ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"dihed": [0.1, [5.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Regrow
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"regrow": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"regrow": ["test", 0.1, 0.2]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating"):
fake_prob_dict = {"regrow": ["test", [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"regrow": [0.3, 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating"):
fake_prob_dict = {"regrow": [0.3, ["string"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"regrow": [0.3, [1.0]]}
# Vol
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"volume": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"volume": [0.1, 100.0, 0.2]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"volume": ["test", [100.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"volume": [0.1, 100.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"volume": [0.1, ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"volume": [0.1, [100.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Insertable
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"insert": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"insert": [0.1, True, True]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"insert": ["test", [True]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"insert": [0.1, True]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a boolean value"):
fake_prob_dict = {"insert": [0.1, [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"insert": [0.1, [True]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
# Swap
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": "test"}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": [0.1, [True], [0.5]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"swap": ["test", [True], [0.5], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": [0.1, True, [0.5], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a boolean value"):
fake_prob_dict = {"swap": [0.1, [1.0], [0.5], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": [0.1, [True], 0.5, [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"swap": [0.1, [True], ["test"], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="not formatted properly"):
fake_prob_dict = {"swap": [0.1, [True], [0.5], 1.0]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
with pytest.raises(TypeError, match="must be a floating point"):
fake_prob_dict = {"swap": [0.1, [True], [0.5], ["test"]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"swap": [0.1, [True], [0.5], [1.0]]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"swap": [0.1, [True], [0.5], None]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
fake_prob_dict = {"swap": [0.1, [True], None, None]}
inp_data = mc.writers.inp_functions.get_move_probability_info(
**fake_prob_dict
)
def test_moveset_onecomp(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.33\n2.0 \n" in inp_data
assert "# Prob_Rotation\n0.33\n30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.34\n1.0 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
moveset.prob_angle = 0.1
moveset.prob_translate = 0.3
moveset.prob_rotate = 0.3
moveset.prob_regrow = 0.3
moveset.max_translate[0][0] = 10.0 * u.angstrom
moveset.max_rotate[0][0] = 10.0 * u.degree
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.3\n10.0 \n" in inp_data
assert "# Prob_Rotation\n0.3\n10.0 \n" in inp_data
assert "# Prob_Angle\n0.1\n" in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.3\n1.0 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
def test_moveset_twocomp(self, twocomp_system):
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.33\n2.0 2.0 \n" in inp_data
assert "# Prob_Rotation\n0.33\n30.0 30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.34\n0.5 0.5 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
moveset.prob_angle = 0.1
moveset.prob_translate = 0.3
moveset.prob_rotate = 0.3
moveset.prob_regrow = 0.26
moveset.max_translate[0][0] = 10.0 * u.angstrom
moveset.max_rotate[0][0] = 10.0 * u.degree
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.3\n10.0 2.0 \n" in inp_data
assert "# Prob_Rotation\n0.3\n10.0 30.0 \n" in inp_data
assert "# Prob_Angle\n0.1\n" in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.26\n0.5 0.5 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
def test_moveset_twobox(self, twobox_system):
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.3\n2.0 \n2.0 \n" in inp_data
assert "# Prob_Rotation\n0.3\n30.0 \n30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.295\n1.0 \n" in inp_data
assert "# Prob_Volume\n0.005\n500.0\n" in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert (
"# Prob_Swap\n0.1\ncbmc \nprob_swap_species 1.0 \nprob_swap_from_box 0.5 0.5 \n"
in inp_data
)
assert "# Prob_Ring" not in inp_data
def test_moveset_twocomptwobox(self, twocomptwobox_system):
(system, moveset) = twocomptwobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=1.0 * u.bar,
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.3\n2.0 2.0 \n2.0 2.0 \n" in inp_data
assert "# Prob_Rotation\n0.3\n30.0 30.0 \n30.0 30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.295\n0.5 0.5 \n" in inp_data
assert "# Prob_Volume\n0.005\n500.0\n5000.0\n" in inp_data
assert "# Prob_Insertion" not in inp_data
assert "# Prob_Deletion" not in inp_data
assert (
"# Prob_Swap\n0.1\ncbmc cbmc \nprob_swap_species 0.5 0.5 \nprob_swap_from_box 0.5 0.5 \n"
in inp_data
)
assert "# Prob_Ring" not in inp_data
def test_moveset_gcmc(self, gcmc_system):
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 1.0 * (u.kJ / u.mol)],
)
assert "# Move_Probability_Info" in inp_data
assert "# Done_Probability_Info" in inp_data
assert "# Prob_Translation\n0.25\n0.0 2.0 \n" in inp_data
assert "# Prob_Rotation\n0.25\n0.0 30.0 \n" in inp_data
assert "# Prob_Angle" not in inp_data
assert "# Prob_Dihedral" not in inp_data
assert "# Prob_Regrowth\n0.3\n0.0 1.0 \n" in inp_data
assert "# Prob_Volume" not in inp_data
assert "# Prob_Insertion\n0.1\nnone cbmc" in inp_data
assert "# Prob_Deletion\n0.1\n" in inp_data
assert "# Prob_Swap" not in inp_data
assert "# Prob_Ring" not in inp_data
def test_start_type(
self,
onecomp_system,
twocomp_system,
twobox_system,
twocomptwobox_system,
gcmc_system,
):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Start_Type\nmake_config 10\n" in inp_data
(system, moveset) = twocomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Start_Type\nmake_config 10 100\n" in inp_data
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Start_Type\nmake_config 10\nmake_config 5\n" in inp_data
(system, moveset) = twocomptwobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=1.0 * u.bar,
)
assert (
"# Start_Type\nmake_config 10 100\nmake_config 1 5\n" in inp_data
)
(system, moveset) = gcmc_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 1.0 * (u.kJ / u.mol)],
)
assert "# Start_Type\nadd_to_config 1 0 box1.in.xyz 0 10\n" in inp_data
# HACK to test read config
system_copy = deepcopy(system)
system_copy._mols_to_add = [[0, 0], [0, 0]]
inp_data = generate_input(
system=system_copy,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 1.0 * (u.kJ / u.mol)],
)
assert "# Start_Type\nread_config 1 0 box1.in.xyz\n" in inp_data
def test_run_type(self, onecomp_system, twobox_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Run_Type\nequilibration 1000 \n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="production",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Run_Type\nproduction 1000 \n" in inp_data
with pytest.raises(ValueError, match=r"Invalid run type"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="pro",
run_length=500,
temperature=300.0 * u.K,
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Run_Type\nequilibration 1000 100\n" in inp_data
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
thermal_stat_freq=100,
vol_stat_freq=50,
)
assert "# Run_Type\nequilibration 100 50\n" in inp_data
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
thermal_stat_freq=10.2,
vol_stat_freq=50,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
thermal_stat_freq=10,
vol_stat_freq=1.2,
)
def test_length_info(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# Simulation_Length_Info\nunits steps\nprop_freq 500\ncoord_freq 5000\nrun 500"
in inp_data
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
steps_per_sweep=10,
units="sweeps",
)
assert (
"# Simulation_Length_Info\nunits sweeps\nprop_freq 500\ncoord_freq 5000\nrun 500\nsteps_per_sweep 10\n"
in inp_data
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
block_avg_freq=10,
)
assert (
"# Simulation_Length_Info\nunits steps\nprop_freq 500\ncoord_freq 5000\nrun 500\nblock_averages 10\n"
in inp_data
)
with pytest.raises(ValueError, match=r"Invalid units"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
units="stweeps",
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
prop_freq=1.2,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
coord_freq=1.2,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=5.2,
temperature=300.0 * u.K,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
block_avg_freq=10.2,
)
with pytest.raises(ValueError, match=r"must be an integer"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
steps_per_sweep=10.2,
)
def test_property_info(self, onecomp_system, twobox_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# Property_Info 1\nenergy_total\nenergy_intra\nenergy_inter\nenthalpy\npressure\nvolume\nnmols\nmass_density\n"
in inp_data
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# Property_Info 1\nenergy_total\nenergy_intra\nenergy_inter\nenthalpy\npressure\nvolume\nnmols\nmass_density\n\n# Property_Info 2\nenergy_total\nenergy_intra\nenergy_inter\nenthalpy\npressure\nvolume\nnmols\nmass_density\n"
in inp_data
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
properties=["energy_total", "enthalpy", "density"],
)
assert (
"# Property_Info 1\nenergy_total\nenthalpy\ndensity\n\n# Property_Info 2\nenergy_total\nenthalpy\ndensity\n"
in inp_data
)
with pytest.raises(ValueError, match=r"Invalid property"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
properties=["temperature"],
)
def test_fragment_files(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert "# Fragment_Files\n" in inp_data
def test_verbose_log(self, onecomp_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
verbose_log=True,
)
assert "# Verbose_Logfile\ntrue\n" in inp_data
with pytest.raises(TypeError, match=r"Verbosity must be"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
verbose_log="true",
)
def test_cbmc_info(self, onecomp_system, twobox_system):
(system, moveset) = onecomp_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# CBMC_Info\nkappa_ins 10\nkappa_dih 10\nrcut_cbmc 6.0\n"
in inp_data
)
(system, moveset) = twobox_system
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
assert (
"# CBMC_Info\nkappa_ins 10\nkappa_dih 10\nrcut_cbmc 6.0 6.0\n"
in inp_data
)
(system, moveset) = onecomp_system
moveset.cbmc_rcut = [0.45 * u.nm]
moveset.cbmc_n_insert = 2
moveset.cbmc_n_dihed = 5
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
)
print(inp_data)
assert (
"# CBMC_Info\nkappa_ins 2\nkappa_dih 5\nrcut_cbmc 4.5\n"
in inp_data
)
@pytest.mark.parametrize(
"typ,value",
[
("slitpore", 1.0 * u.angstrom),
("cylinder", 1.0 * u.angstrom),
("sphere", 1.0 * u.angstrom),
("interface", [1.0 * u.angstrom, 2.0 * u.angstrom]),
],
)
def test_write_restricted_gcmc(self, gcmc_system, typ, value):
(system, moveset) = gcmc_system
moveset.add_restricted_insertions(
system.species_topologies, [[None, typ]], [[None, value]]
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
if typ == "interface":
assert (
"\nrestricted_insertion {} {:0.1f} {:0.1f}\n".format(
typ, value[0].to_value(), value[1].to_value()
)
in inp_data
)
else:
assert (
"\nrestricted_insertion {} {:0.1f}\n".format(
typ, value.to_value()
)
in inp_data
)
@pytest.mark.parametrize(
"typ,value",
[
("slitpore", 30 * u.angstrom),
("cylinder", 30 * u.angstrom),
("sphere", 30 * u.angstrom),
("interface", [30 * u.angstrom, 50 * u.angstrom]),
],
)
def test_fail_restricted_gcmc(self, gcmc_system, typ, value):
(system, moveset) = gcmc_system
moveset.add_restricted_insertions(
system.species_topologies, [[None, typ]], [[None, value]]
)
with pytest.raises(ValueError, match=r"Restricted insertion"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
chemical_potentials=["none", 10.0 * (u.kJ / u.mol)],
)
@pytest.mark.parametrize(
"typ,value",
[
("slitpore", 10.0 * u.angstrom),
("cylinder", 10.0 * u.angstrom),
("sphere", 10.0 * u.angstrom),
("interface", [10.0 * u.angstrom, 20.0 * u.angstrom]),
],
)
def test_write_restricted_gemc_npt(self, twocomptwobox_system, typ, value):
(system, moveset) = twocomptwobox_system
moveset.add_restricted_insertions(
system.species_topologies,
[[None, None], [None, typ]],
[[None, None], [None, value]],
)
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=1 * u.bar,
)
if typ == "interface":
assert (
"\nrestricted_insertion {} {:0.1f} {:0.1f}\n".format(
typ, value[0].to_value(), value[1].to_value()
)
in inp_data
)
else:
assert (
"\nrestricted_insertion {} {:0.1f}\n".format(
typ, value.to_value()
)
in inp_data
)
@pytest.mark.parametrize(
"typ,value",
[
("slitpore", 60 * u.angstrom),
("cylinder", 60 * u.angstrom),
("sphere", 60 * u.angstrom),
("interface", [10 * u.angstrom, 70 * u.angstrom]),
],
)
def test_fail_restricted_gemc_npt(self, twocomptwobox_system, typ, value):
(system, moveset) = twocomptwobox_system
moveset.add_restricted_insertions(
system.species_topologies,
[[None, None], [None, typ]],
[[None, None], [None, value]],
)
with pytest.raises(ValueError, match=r"Restricted insertion"):
inp_data = generate_input(
system=system,
moveset=moveset,
run_type="equilibration",
run_length=500,
temperature=300.0 * u.K,
pressure=1 * u.bar,
)
@pytest.mark.parametrize(
"angle_style", [["fixed"], ["harmonic"], "fixed", "harmonic"]
)
def test_onecomp_angle_style(self, onecomp_system, angle_style):
with temporary_directory() as tmp_dir:
with temporary_cd(tmp_dir):
(system, moveset) = onecomp_system
write_mcfs(system, angle_style=angle_style)
@pytest.mark.parametrize("angle_style", ["fixed", "harmonic"])
def test_twocomp_angle_style(self, twocomp_system, angle_style):
with temporary_directory() as tmp_dir:
with temporary_cd(tmp_dir):
(system, moveset) = twocomp_system
write_mcfs(system, angle_style=[angle_style, angle_style])
def test_angle_style_error(self, onecomp_system):
(system, moveset) = onecomp_system
with pytest.raises(ValueError, match="Invalid"):
write_mcfs(system, angle_style=["charmm"])
|
nilq/baby-python
|
python
|
from typing import List

class Solution:
    def largestPerimeter(self, A: List[int]) -> int:
        # Sort ascending, then scan adjacent triples from the largest side down:
        # the first triple satisfying the triangle inequality gives the largest
        # perimeter. The fallback belongs after the loop; an `else: return 0`
        # bound to the `if` would wrongly bail out on the first failing triple.
        A.sort()
        for i in range(len(A)-1, 1, -1):
            if A[i-2] + A[i-1] > A[i]:
                return A[i-2] + A[i-1] + A[i]
        return 0
|
nilq/baby-python
|
python
|
class Permissions(object):
# ccpo permissions
VIEW_AUDIT_LOG = "view_audit_log"
VIEW_CCPO_USER = "view_ccpo_user"
CREATE_CCPO_USER = "create_ccpo_user"
EDIT_CCPO_USER = "edit_ccpo_user"
DELETE_CCPO_USER = "delete_ccpo_user"
# base portfolio perms
VIEW_PORTFOLIO = "view_portfolio"
# application management
VIEW_APPLICATION = "view_application"
EDIT_APPLICATION = "edit_application"
CREATE_APPLICATION = "create_application"
DELETE_APPLICATION = "delete_application"
VIEW_APPLICATION_MEMBER = "view_application_member"
EDIT_APPLICATION_MEMBER = "edit_application_member"
DELETE_APPLICATION_MEMBER = "delete_application_member"
CREATE_APPLICATION_MEMBER = "create_application_member"
VIEW_ENVIRONMENT = "view_environment"
EDIT_ENVIRONMENT = "edit_environment"
CREATE_ENVIRONMENT = "create_environment"
DELETE_ENVIRONMENT = "delete_environment"
ASSIGN_ENVIRONMENT_MEMBER = "assign_environment_member"
VIEW_APPLICATION_ACTIVITY_LOG = "view_application_activity_log"
# funding
VIEW_PORTFOLIO_FUNDING = "view_portfolio_funding" # TO summary page
CREATE_TASK_ORDER = "create_task_order" # create a new TO
VIEW_TASK_ORDER_DETAILS = "view_task_order_details" # individual TO page
EDIT_TASK_ORDER_DETAILS = (
"edit_task_order_details" # edit TO that has not been finalized
)
# reporting
VIEW_PORTFOLIO_REPORTS = "view_portfolio_reports"
# portfolio admin
VIEW_PORTFOLIO_ADMIN = "view_portfolio_admin"
VIEW_PORTFOLIO_NAME = "view_portfolio_name"
EDIT_PORTFOLIO_NAME = "edit_portfolio_name"
VIEW_PORTFOLIO_USERS = "view_portfolio_users"
EDIT_PORTFOLIO_USERS = "edit_portfolio_users"
CREATE_PORTFOLIO_USERS = "create_portfolio_users"
VIEW_PORTFOLIO_ACTIVITY_LOG = "view_portfolio_activity_log"
VIEW_PORTFOLIO_POC = "view_portfolio_poc"
# portfolio POC
EDIT_PORTFOLIO_POC = "edit_portfolio_poc"
ARCHIVE_PORTFOLIO = "archive_portfolio"
|
nilq/baby-python
|
python
|