| content (string, lengths 0–1.05M) | origin (2 classes) | type (2 classes) |
|---|---|---|
import json
import pandas as pd
import urllib3
import re
http = urllib3.PoolManager()
votd = json.loads(http.request('GET',"https://public.tableau.com/api/gallery?page=0&count=10000&galleryType=viz-of-the-day&language=any").data)
df = pd.json_normalize(votd['items'], max_level=0)
# initialise dataframes
workbook_df =[]
attributions_df = []
for i in df.index:
print(i)
workbook_url = 'https://public.tableau.com/profile/api/single_workbook/' + votd['items'][i]['workbookRepoUrl']
workbook = json.loads(http.request('GET',workbook_url).data)
workbook = pd.json_normalize(workbook)
if 'error.message' in workbook.columns:
source_url = df['sourceUrl'][i]
retry = re.search('/views/(.+?)/', source_url)
if retry is not None:
retry = retry.group(0)[7:-1]
workbook_url = 'https://public.tableau.com/profile/api/single_workbook/' + retry
workbook = json.loads(http.request('GET',workbook_url).data)
workbook = pd.json_normalize(workbook)
workbook['workbookRepoUrl'] = votd['items'][i]['workbookRepoUrl']
if 'error.message' not in workbook.columns:
attributions = pd.json_normalize(workbook['attributions'][0])
attributions['workbookRepoUrl'] = votd['items'][i]['workbookRepoUrl']
workbook_df.append(workbook)
attributions_df.append(attributions)
# see pd.concat documentation for more info
workbook_df = pd.concat(workbook_df)
attributions_df = pd.concat(attributions_df)
# join VOTD with workbook and attributions dataframes
df = pd.merge(df,workbook_df, on='workbookRepoUrl',how='left')
df = pd.merge(df,attributions_df, on='workbookRepoUrl',how='left')
# remove columns that have been json_normalized to additional columns
del df['workbook']
del df['attributions']
# if there are error messages remove them
if 'error.message' in df.columns:
del df['error.message']
del df['error.id']
# convert lists to comma separated strings
df['types'] = [','.join(map(str, l)) for l in df['types']]
df['topics'] = [','.join(map(str, l)) for l in df['topics']]
df['badges'] = [','.join(map(str, l)) for l in df['badges']]
# rename attribution columns
df.rename(columns={'authorProfileName_y':'attributed_authorProfileName'}, inplace=True)
df.rename(columns={'workbookName':'attributed_workbookName'}, inplace=True)
df.rename(columns={'authorDisplayName':'attributed_authorDisplayName'}, inplace=True)
df.rename(columns={'workbookViewName':'attributed_workbookViewName'}, inplace=True)
# rename conflicts between gallery and workbook data
df.rename(columns={'authorProfileName_x':'authorProfileName'}, inplace=True)
df.rename(columns={'title_x':'gallery_title'}, inplace=True)
df.rename(columns={'description_x':'gallery_description'}, inplace=True)
df.rename(columns={'title_y':'viz_title'}, inplace=True)
df.rename(columns={'description_y':'viz_description'}, inplace=True)
df = df.drop_duplicates()
# Save locally
#df.to_csv('data/tableau_public_votd.csv', index=False)
print(df)
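# Illustrative check (standalone) of the retry extraction above: for a gallery
# sourceUrl of the assumed shape, the regex recovers the workbook repo name
# expected by the single_workbook endpoint. The URL below is hypothetical.
example_url = 'https://public.tableau.com/views/MyViz/Dashboard1'
m = re.search('/views/(.+?)/', example_url)
assert m.group(0)[7:-1] == 'MyViz'  # [7:-1] strips the '/views/' prefix and trailing '/'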
|
nilq/baby-python
|
python
|
from .cachable_functions import Cachable
from .params import CachableParam
|
nilq/baby-python
|
python
|
from flask_server_files.models.defect import DefectModel
d1 = DefectModel.new_defect()
|
nilq/baby-python
|
python
|
import json
from twitter_helper import TwitterHelper
with open('config.json') as f:
data = json.load(f)
username = "@CoolDude32149"
th = TwitterHelper(data, username)
message = "Thank you for your complaint"
th.stream_tweet()
|
nilq/baby-python
|
python
|
import pytest
data = [
(pytest.lazy_fixture("a_base_model_object"), {"id": "1", "name": "default_name"}),
({1, 2, 3}, [1, 2, 3]),
]
@pytest.mark.parametrize("obj, expected", data)
def test_base_model_enhanced_encoder(obj, expected):
from fractal.contrib.fastapi.utils.json_encoder import BaseModelEnhancedEncoder
assert BaseModelEnhancedEncoder().default(obj) == expected
|
nilq/baby-python
|
python
|
# launcher.py
from math import radians, degrees, cos, sin
from graphics import *
from shotTracker import ShotTracker
class Launcher:
def __init__(self, win):
# Draw the base shot of the launcher
base = Circle(Point(0, 0), 3)
base.setFill("red")
base.setOutline("red")
base.draw(win)
# Save the window and create initial angle and velocity
self.win = win
self.angle = radians(45.0)
self.vel = 40.0
# Create initial "dummy" arrow (needed by redraw)
self.arrow = Line(Point(0, 0), Point(0, 0)).draw(win)
# Replace it with the correct arrow
self.redraw()
def adjAngle(self, amt):
"""Change launch angle by amt degrees"""
self.angle = self.angle + radians(amt)
self.redraw()
def adjVel(self, amt):
"""Change launch velocity by amt"""
self.vel = self.vel + amt
self.redraw()
def redraw(self):
"""Redraw the arrow to show current angle and velocity"""
self.arrow.undraw()
pt2 = Point(self.vel * cos(self.angle), \
self.vel*sin(self.angle))
self.arrow = Line(Point(0, 0), pt2).draw(self.win)
self.arrow.setWidth(3)
def fire(self):
return ShotTracker(self.win, degrees(self.angle), self.vel, 0.0)
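# Minimal usage sketch (assumes Zelle's graphics module and a ShotTracker
# implementation are available, as imported above):
#
#     win = GraphWin("Projectile Animation", 640, 480)
#     win.setCoords(-10, -10, 210, 155)
#     launcher = Launcher(win)
#     launcher.adjAngle(5)    # raise the launch angle by 5 degrees
#     launcher.adjVel(-5)     # reduce the launch velocity by 5
#     shot = launcher.fire()  # returns a ShotTracker for the current settings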
|
nilq/baby-python
|
python
|
from uqcsbot import bot, Command
from uqcsbot.utils.command_utils import loading_status
from typing import Dict, List
from collections import defaultdict
from random import shuffle, choice
@bot.on_command("emojify")
@loading_status
def handle_emojify(command: Command):
'''
`!emojify text` - converts text to emoji.
'''
master: Dict[str, List[str]] = defaultdict(lambda: [":grey_question:"])
# letters
master['A'] = [":adobe:", ":airbnb:", ":amazon:", ":anarchism:",
":arch:", ":atlassian:", ":office_access:",
choice([":card-ace-clubs:", ":card-ace-diamonds:",
":card-ace-hearts:", ":card-ace-spades:"])]
master['B'] = [":bhinking:", ":bitcoin:", ":blutes:"]
master['C'] = [":c:", ":clang:", ":cplusplus:", ":copyright:", ":clipchamp:"]
master['D'] = [":d:", ":disney:"]
master['E'] = [":ecorp:", ":emacs:", ":erlang:", ":ie10:", ":thonk_slow:", ":edge:",
":expedia_group:"]
master['F'] = [":f:", ":facebook:"]
master['G'] = [":g+:", ":google:", ":nintendo_gamecube:", ":gatsbyjs:"]
master['H'] = [":hackerrank:", ":homejoy:"]
master['I'] = [":information_source:"]
master['J'] = [":hook:", choice([":card-jack-clubs:", ":card-jack-diamonds:",
":card-jack-hearts:", ":card-jack-spades:"])]
master['K'] = [":kickstarter:", ":kotlin:",
choice([":card-king-clubs:", ":card-king-diamonds:",
":card-king-hearts:", ":card-king-spades:"])]
master['L'] = [":l:", ":lime:", ":l_plate:"]
master['M'] = [":gmail:", ":maccas:", ":mcgrathnicol:", ":melange_mining:", ":mtg:", ":mxnet:"]
master['N'] = [":nano:", ":neovim:", ":netscape_navigator:",
":nginx:", ":nintendo_64:", ":office_onenote:"]
master['O'] = [":office_outlook:", ":oracle:", ":o_:", ":tetris_o:", ":ubuntu:"]
master['P'] = [":auspost:", ":office_powerpoint:", ":office_publisher:",
":pinterest:", ":paypal:", ":producthunt:"]
master['Q'] = [":quora:", ":quantium:", choice([":card-queen-clubs:", ":card-queen-diamonds:",
":card-queen-hearts:", ":card-queen-spades:"])]
master['R'] = [":r-project:", ":rust:", ":redroom:", ":registered:"]
master['S'] = [":s:", ":skedulo:", ":stanford:", ":stripe_s:", ":sublime:", ":tetris_s:"]
master['T'] = [":tanda:", choice([":telstra:", ":telstra-pink:"]),
":tesla:", ":tetris_t:", ":torchwood:", ":tumblr:"]
master['U'] = [":uber:", ":uqu:", ":the_horns:"]
master['V'] = [":vim:", ":vue:", ":vuetify:", ":v:"]
master['W'] = [":office_word:", ":washio:", ":wesfarmers:", ":westpac:",
":weyland_consortium:", ":wikipedia_w:", ":woolworths:"]
master['X'] = [":atlassian_old:", ":aginicx:", ":sonarr:", ":x-files:", ":xbox:",
":x:", ":flag-scotland:", ":office_excel:"]
master['Y'] = [":hackernews:"]
master['Z'] = [":tetris_z:"]
# numbers
master['0'] = [":chrome:", ":suncorp:", ":disney_zero:", ":firefox:",
":mars:", choice([":dvd:", ":cd:"])]
master['1'] = [":techone:", ":testtube:", ":thonk_ping:", ":first_place_medal:"]
master['2'] = [":second_place_medal:", choice([":card-2-clubs:", ":card-2-diamonds:",
":card-2-hearts:", ":card-2-spades:"])]
master['3'] = [":css:", ":third_place_medal:", choice([":card-3-clubs:", ":card-3-diamonds:",
":card-3-hearts:", ":card-3-spades:"])]
master['4'] = [choice([":card-4-clubs:", ":card-4-diamonds:",
":card-4-hearts:"]), ":card-4-spades:"]
master['5'] = [":html:", choice([":card-5-clubs:", ":card-5-diamonds:",
":card-5-hearts:", ":card-5-spades:"])]
master['6'] = [choice([":card-6-clubs:", ":card-6-diamonds:",
":card-6-hearts:", ":card-6-spades:"])]
master['7'] = [choice([":card-7-clubs:", ":card-7-diamonds:",
":card-7-hearts:", ":card-7-spades:"])]
master['8'] = [":8ball:", choice([":card-8-clubs:", ":card-8-diamonds:",
":card-8-hearts:", ":card-8-spades:"])]
master['9'] = [choice([":card-9-clubs:", ":card-9-diamonds:",
":card-9-hearts:", ":card-9-spades:"])]
# whitespace
master[' '] = [":whitespace:"]
master['\n'] = ["\n"]
# other ascii characters (sorted by ascii value)
master['!'] = [":exclamation:"]
master['"'] = [choice([":ldquo:", ":rdquo:"]), ":pig_nose:"]
master['#'] = [":slack_old:", ":csharp:"]
master['$'] = [":thonk_money:", ":moneybag:"]
# '&amp;' converts to '&'
master['&'] = [":ampersand:", ":dnd:"]
master['*'] = [":day:", ":nab:", ":youtried:", ":msn_star:", ":rune_prayer:", ":wolfram:"]
master['+'] = [":tf2_medic:", ":flag-ch:", ":flag-england:"]
master['-'] = [":no_entry:"]
master['.'] = [":black_small_square:"]
master['/'] = [":slash:"]
# '&gt;' converts to '>'
master['>'] = [":accenture:", ":implying:", ":plex:", ":powershell:"]
master['?'] = [":question:"]
master['@'] = [":whip:"]
master['^'] = [":this:", ":typographical_carrot:", ":arrow_up:"]
master['~'] = [":wavy_dash:"]
# slack/uqcsbot convert the following to other symbols
# greek letters
# 'Α' converts to 'A'
master['Α'] = [":alpha:"]
# 'Β' converts to 'B'
master['Β'] = [":beta:"]
# 'Λ' converts to 'L'
master['Λ'] = [":halflife:", ":haskell:", ":lambda:", ":racket:"]
# 'Π' converts to 'P'
master['Π'] = [":pi:"]
# 'Σ' converts to 'S'
master['Σ'] = [":polymathian:"]
# other symbols (sorted by unicode value)
# '…' converts to '...'
master['…'] = [":lastpass:"]
# '€' converts to 'EUR'
master['€'] = [":martian_euro:"]
# '√' converts to '[?]'
master['√'] = [":sqrt:"]
# '∞' converts to '[?]'
master['∞'] = [":arduino:", ":visualstudio:"]
# '∴' converts to '[?]'
master['∴'] = [":julia:"]
text = ""
if command.has_arg():
text = command.arg.upper()
# revert HTML conversions
text = text.replace("&gt;", ">")
text = text.replace("&lt;", "<")
text = text.replace("&amp;", "&")
lexicon = {}
for character in set(text+'…'):
full, part = divmod((text+'…').count(character), len(master[character]))
shuffle(master[character])
lexicon[character] = full * master[character] + master[character][:part]
shuffle(lexicon[character])
ellipsis = lexicon['…'].pop()
response = ""
for character in text:
emoji = lexicon[character].pop()
if len(response + emoji + ellipsis) > 4000:
response += ellipsis
break
response += emoji
bot.post_message(command.channel_id, response)
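# Standalone sketch (not part of the bot) of the distribution trick used in
# handle_emojify above: a character occurring `count` times with `n` available
# emoji gets each emoji `count // n` times, plus one extra use for `count % n`
# of them, so the variants are spread as evenly as possible.
def _build_pool_sketch(count, variants):
    full, part = divmod(count, len(variants))
    pool = full * variants + variants[:part]
    shuffle(pool)  # `shuffle` is already imported at the top of this module
    return pool
# e.g. _build_pool_sketch(7, [":a:", ":b:", ":c:"]) uses each emoji 2 or 3 times.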
|
nilq/baby-python
|
python
|
"""
@brief
@file Various function to help investigate an error.
"""
import traceback
from io import StringIO
class ErrorOnPurpose(Exception):
"""
raise to get the call stack
"""
pass
def get_call_stack():
"""
Returns a string showing the call stack
when this function is called.
.. exref::
:title: Display the call stack
.. runpython::
:showcode:
from pyquickhelper.pycode import get_call_stack
print(get_call_stack())
"""
s = StringIO()
traceback.print_stack(file=s)
return s.getvalue()
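# Minimal usage sketch: called from nested functions, the returned string ends
# with the calling frames (function names below are illustrative):
#
#     def _inner():
#         return get_call_stack()
#
#     def _outer():
#         return _inner()
#
#     print(_outer())  # last frames show _outer -> _inner -> get_call_stack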
|
nilq/baby-python
|
python
|
import argparse
import subprocess
from typing import Tuple
from data_copy import copy_pgdata_cow, destroy_exploratory_data_cow
from pgnp_docker import start_exploration_docker, shutdown_exploratory_docker, setup_docker_env
from sql import checkpoint, execute_sql, \
wait_for_pg_ready
from util import ZFS_DOCKER_VOLUME_POOL, REPLICA_VOLUME_POOL, REPLICA_PORT, EXPLORATION_PORT, \
EXPLORATION_CONTAINER_NAME, \
DOCKER_VOLUME_DIR, execute_sys_command
def main():
"""
The exploratory daemon is responsible for creating a copy of replica instances, to be used for model training.
To set up a machine to use the exploratory daemon you must perform the following steps:
1. Install ZFS on one of the disks
2. Set up a ZFS pool on the disk
3. Start a postgres instance that stores pgdata/ in the ZFS pool
"""
aparser = argparse.ArgumentParser(description="Exploratory Daemon")
# postgres args
aparser.add_argument("--postgres-replica-port", help="Port that replica instance is running on",
default=REPLICA_PORT)
aparser.add_argument("--postgres-exploratory-port", help="Port that exploratory instance will run on",
default=EXPLORATION_PORT)
# ZFS args
aparser.add_argument("--zfs-volume-pool", help="ZFS pool name for docker volume directory",
default=ZFS_DOCKER_VOLUME_POOL)
aparser.add_argument("--zfs-replica-pool-name", help="Relative name of ZFS pool used for the replica volume",
default=REPLICA_VOLUME_POOL)
# Docker args
aparser.add_argument("--docker-volume-directory", help="directory path of the docker volume directory",
default=DOCKER_VOLUME_DIR)
args = vars(aparser.parse_args())
run_daemon(args["postgres_replica_port"], args["postgres_exploratory_port"], args["zfs_volume_pool"],
args["zfs_replica_pool_name"], args["docker_volume_directory"])
def run_daemon(replica_port: int, exploratory_port: int, zfs_volume_pool: str, zfs_replica_pool: str,
docker_volume_dir: str):
"""
Run exploratory daemon
Parameters
----------
replica_port
port that replica instance is reachable from
exploratory_port
port that exploratory instance will be reachable from
zfs_volume_pool
name of zfs pool used to store docker volumes
zfs_replica_pool
relative name of zfs pool used to store postgres replica data
docker_volume_dir
directory path that docker uses for volumes
"""
setup_docker_env(docker_volume_dir)
destroy_exploratory_data_cow(zfs_volume_pool, zfs_replica_pool)
# Make sure that container doesn't reuse machine's IP address
execute_sys_command("sudo docker network create --driver=bridge --subnet 172.19.253.0/30 tombstone")
exploratory_docker_proc, valid = spin_up_exploratory_instance(replica_port, exploratory_port, zfs_volume_pool,
zfs_replica_pool, docker_volume_dir)
if valid:
print(execute_sql("CREATE TABLE foo(a int);", EXPLORATION_PORT))
print(execute_sql("INSERT INTO foo VALUES (42), (666);", EXPLORATION_PORT))
print(execute_sql("SELECT * FROM foo;", EXPLORATION_PORT))
else:
print("Failed to start exploratory instance")
spin_down_exploratory_instance(exploratory_docker_proc, zfs_volume_pool, zfs_replica_pool, docker_volume_dir)
def spin_up_exploratory_instance(replica_port: int, exploratory_port: int, zfs_volume_pool: str, zfs_replica_pool: str,
docker_volume_dir: str) -> Tuple[subprocess.Popen, bool]:
"""
Start exploratory instance
Parameters
----------
replica_port
port that replica instance is reachable from
exploratory_port
port that exploratory instance will be reachable from
zfs_volume_pool
name of zfs pool used to store docker volumes
zfs_replica_pool
relative name of zfs pool used to store postgres replica data
docker_volume_dir
directory path that docker uses for volumes
Returns
-------
exploratory_instance
docker process that is running exploratory instance
valid
True if the container started successfully, False otherwise
"""
print("Taking checkpoint in replica")
# LOOK HERE: Consider removing this. Checkpointing has limited benefits for data staleness and can have a huge performance cost.
checkpoint(replica_port)
print("Checkpoint complete")
print("Copying replica data")
copy_pgdata_cow(zfs_volume_pool, zfs_replica_pool)
print("Replica data copied")
print("Starting exploratory instance")
exploratory_docker_proc = start_exploration_docker(docker_volume_dir)
valid = wait_for_pg_ready(EXPLORATION_CONTAINER_NAME, exploratory_port, exploratory_docker_proc)
print("Exploratory instance started")
return exploratory_docker_proc, valid
def spin_down_exploratory_instance(exploratory_docker_proc: subprocess.Popen, zfs_volume_pool: str,
zfs_replica_pool: str, docker_volume_dir: str):
"""
Stop and destroy exploratory instance
Parameters
----------
exploratory_docker_proc
docker process that is running exploratory instance
zfs_volume_pool
name of zfs pool used to store docker volumes
zfs_replica_pool
relative name of zfs pool used to store postgres replica data
docker_volume_dir
directory path that docker uses for volumes
"""
print("Shutting down exploratory instance")
shutdown_exploratory_docker(exploratory_docker_proc, docker_volume_dir)
destroy_exploratory_data_cow(zfs_volume_pool, zfs_replica_pool)
print("Exploratory instance shut down")
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# Licensed under CC BY-NC-SA 4.0 (Attribution-NonCommercial-ShareAlike 4.0 International) (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
#
# The code is released for academic research use only. For commercial use, please contact Huawei Technologies Co., Ltd.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
from . import activation as activation
from .distance import DistanceMap
from .local_correlation.correlation import FunctionCorrelation, FunctionCorrelationTranspose
from .plot_corr import plot_local_gocor_weights
from . import fourdim as fourdim
class LocalCorrInitializerZeros(nn.Module):
"""Local GOCor initializer module.
Initializes the Local GOCor filter with a zero tensor.
args:
filter_size: spatial kernel size of filter
"""
def __init__(self, filter_size=1):
super().__init__()
assert filter_size == 1
self.filter_size = filter_size
def forward(self, feat):
"""Initialize filter.
args:
feat: input features (sequences, feat_dim, H, W)
output:
weights: initial filters (sequences, feat_dim, H, W)
"""
weights = torch.zeros_like(feat)
return weights
class LocalCorrSimpleInitializer(nn.Module):
"""Local GOCor initializer module.
Initializes the Local GOCor filter through a simple norm operation
args:
filter_size: spatial kernel size of filter
"""
def __init__(self, filter_size=1):
super().__init__()
assert filter_size == 1
self.filter_size = filter_size
self.scaling = nn.Parameter(torch.ones(1))
def forward(self, feat):
"""Initialize filter.
args:
feat: input features (sequences, feat_dim, H, W)
output:
weights: initial filters (sequences, feat_dim, H, W)
"""
weights = feat / ((feat*feat).mean(dim=1, keepdim=True) + 1e-6)
weights = self.scaling * weights
return weights
class LocalCorrContextAwareInitializer(nn.Module):
"""Local GOCor initializer module.
Initializes the Local GOCor filter ContextAwareInitializer.
It assumes that the filter at a particular pixel location, correlated with the features at the same location,
should be equal to 1 (here the value 1 is learnt as target_fg_value), while correlated with features
at other locations it should be zero (here the value 0 is learnt as target_bg). The other feature locations are
approximated by the mean of the features, called background_vector.
The filter at a particular location should thus be a linear combination of the feature at this location (foreground)
and the background features (the average of all features).
This corresponds to non-ideal cases, where the scalar product between the filter and the background feature is not
necessarily equal to 0.
args:
filter_size: spatial kernel size of filter
init_fg: initial value for scalar product between filter and features at the same location (=1)
init_bg: initial value for scalar product between filter and background features (=0)
"""
def __init__(self, filter_size=1, init_fg=1.0, init_bg=0.0):
super().__init__()
self.filter_size = filter_size
self.target_fg = nn.Parameter(init_fg * torch.ones(1))
self.target_bg = nn.Parameter(init_bg * torch.ones(1))
def forward(self, feat):
"""Initialize filter.
args:
feat: input features (sequences, feat_dim, H, W)
output:
weights: initial filters (sequences, feat_dim, H, W)
"""
d = feat.size(1)
bg_weights = feat.mean(dim=2, keepdim=True) # averages over all features
ff = (feat * feat).sum(dim=1, keepdim=True)
bb = (bg_weights * bg_weights).sum(dim=1, keepdim=True)
fb = (feat * bg_weights).sum(dim=1, keepdim=True)
den = (ff*bb - fb*fb).clamp(1e-6)
fg_scale = self.target_fg * bb - self.target_bg * fb
bg_scale = self.target_fg * fb - self.target_bg * ff
weights = d * (fg_scale * feat - bg_scale * bg_weights) / (den + 1e-6)
return weights
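# One way to read the closed-form solve above (a sketch, not from the paper):
# writing the filter as w = alpha * feat + beta * bg_weights and requiring
# <w, feat> = target_fg and <w, bg_weights> = target_bg gives the 2x2 system
#     ff * alpha + fb * beta = target_fg
#     fb * alpha + bb * beta = target_bg
# whose Cramer solution is alpha = (target_fg*bb - target_bg*fb) / den and
# beta = -(target_fg*fb - target_bg*ff) / den with den = ff*bb - fb*fb, i.e.
# exactly fg_scale and -bg_scale above (up to the dimensional factor d).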
class LocalCorrFlexibleContextAwareInitializer(nn.Module):
"""Local GOCor initializer module.
Initializes the Local GOCor with a Flexible-ContextAwareInitializer.
It assumes that the filter at a particular pixel location, correlated with the features at the same location,
should be equal to 1 (here the value 1 is a vector, learnt as target_fg_value), while correlated with features
at other locations it should be zero (here the value 0 is a vector, learnt as target_bg). The other feature locations
are approximated by the mean of the features, called background_vector.
The filter at a particular location should thus be a linear combination of the feature at this location (foreground)
and the background features (the average of all features).
This corresponds to non-ideal cases, where the scalar product between the filter and the background feature is not
necessarily equal to 0.
args:
filter_size: spatial kernel size of filter
number_feat: dimensionality of input features
init_fg: initial value for scalar product between filter and features at the same location (=1)
init_bg: initial value for scalar product between filter and background features (=0)
"""
def __init__(self, filter_size=1, number_feat=512, init_fg=1.0, init_bg=0.0):
super().__init__()
self.filter_size = filter_size
self.target_fg = nn.Parameter(init_fg * torch.ones(number_feat))
self.target_bg = nn.Parameter(init_bg * torch.ones(number_feat))
def forward(self, feat):
"""Initialize filter.
args:
feat: input features (sequences, feat_dim, H, W)
output:
weights: initial filters (sequences, feat_dim, H, W)
"""
d = feat.size(1)
bg_weights = feat.mean(dim=2, keepdim=True) # averages over all features
ff = (feat * feat).sum(dim=1, keepdim=True)
bb = (bg_weights * bg_weights).sum(dim=1, keepdim=True)
fb = (feat * bg_weights).sum(dim=1, keepdim=True)
den = (ff*bb - fb*fb).clamp(1e-6)
fg_scale = self.target_fg.view(d, 1, 1) * bb - self.target_bg.view(d, 1, 1) * fb
bg_scale = self.target_fg.view(d, 1, 1) * fb - self.target_bg.view(d, 1, 1) * ff
weights = d * (fg_scale * feat - bg_scale * bg_weights) / (den + 1e-6)
return weights
class LocalGOCorrOpt(nn.Module):
"""Local GOCor optimizer module.
Optimizes the LocalGOCor filter map on the reference image.
args:
num_iter: number of iteration recursions to run in the optimizer
init_step_length: initial step length factor
init_filter_reg: initialization of the filter regularization parameter
init_gauss_sigma: standard deviation for the correlation volume label in the reference image
min_filter_reg: epsilon to avoid division by zero
"""
def __init__(self, num_iter=3, init_step_length=1.0, init_filter_reg=1e-2,
min_filter_reg=1e-5, num_dist_bins=10, bin_displacement=0.5, init_gauss_sigma=1.0,
v_minus_act='sigmoid', v_minus_init_factor=4.0, search_size=9,
apply_query_loss=False, reg_kernel_size=3, reg_inter_dim=1, reg_output_dim=1):
super().__init__()
assert search_size == 9 # fixed to 9 currently, we are working on making a general version
self.num_iter = num_iter
self.min_filter_reg = min_filter_reg
self.search_size = search_size
self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))
self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))
self.distance_map = DistanceMap(num_dist_bins, bin_displacement)
# for the query loss L_q
# not used in final version, because too computationally expensive
self.apply_query_loss = apply_query_loss
if self.apply_query_loss:
# the 4d conv applied on the correlation filter with query
self.reg_layer = fourdim.SeparableConv4d(kernel_size=reg_kernel_size, inter_dim=reg_inter_dim,
output_dim=reg_output_dim,
bias=False, permute_back_output=False)
self.reg_layer.weight1.data.normal_(0, 1e-3)
self.reg_layer.weight2.data.normal_(0, 1e-3)
# for the reference loss L_r
# Distance coordinates
d = torch.arange(num_dist_bins, dtype=torch.float32).view(1,-1,1,1) * bin_displacement
# initialize the label map predictor y'_theta
if init_gauss_sigma == 0:
init_gauss = torch.zeros_like(d)
init_gauss[0, 0, 0, 0] = 1
else:
init_gauss = torch.exp(-1/2 * (d / init_gauss_sigma)**2)
self.init_gauss = init_gauss
self.label_map_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)
self.label_map_predictor.weight.data = init_gauss - init_gauss.min()
# initialize the weight v_plus predictor, here called spatial_weight_predictor
self.spatial_weight_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)
self.spatial_weight_predictor.weight.data.fill_(1.0)
# initialize the weights m predictor m_theta, here called target_mask_predictor
# the weights m at then used to compute the weights v_minus, as v_minus = m * v_plus
self.num_bins = num_dist_bins
init_v_minus = [nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)]
init_w = v_minus_init_factor * torch.tanh(2.0 - d)
self.v_minus_act = v_minus_act
if v_minus_act == 'sigmoid':
init_v_minus.append(nn.Sigmoid())
elif v_minus_act == 'linear':
init_w = torch.sigmoid(init_w)
else:
raise ValueError('Unknown activation')
self.target_mask_predictor = nn.Sequential(*init_v_minus)
self.target_mask_predictor[0].weight.data = init_w
self.init_target_mask_predictor = init_w.clone() # for plotting
# initialize activation function sigma (to apply to the correlation score between the filter map and the ref)
self.score_activation = activation.LeakyReluPar()
self.score_activation_deriv = activation.LeakyReluParDeriv()
def _plot_weights(self, save_dir):
plot_local_gocor_weights(save_dir, self.init_gauss, self.label_map_predictor, self.init_target_mask_predictor,
self.target_mask_predictor, self.v_minus_act, self.num_bins,
self.spatial_weight_predictor)
def forward(self, filter_map, reference_feat, query_feat=None, num_iter=None, compute_losses=False):
"""
Apply optimization loop on the initialized filter map
args:
filter_map: initial filters, shape is (b, feat_dim, H, W)
reference_feat: features from the reference image, shape is (b, feat_dim, H, W)
query_feat: features from the query image, shape is (b, feat_dim, H, W)
num_iter: number of iteration, to overwrite num_iter given in init parameters
compute_losses: compute intermediate losses
output:
filters and losses
"""
if num_iter is None:
num_iter = self.num_iter
num_sequences = reference_feat.shape[0]
num_filters = reference_feat.shape[-2] * reference_feat.shape[-1]
feat_sz = (reference_feat.shape[-2], reference_feat.shape[-1])
feat_dim = reference_feat.shape[-3]
# Compute distance map
dist_map_sz = (self.search_size, self.search_size)
center = torch.Tensor([dist_map_sz[0] // 2, dist_map_sz[1] // 2]).to(reference_feat.device)
dist_map = self.distance_map(center, dist_map_sz)
# Compute target map, weights v_plus and weight_m (used in v_minus), used for reference loss
target_map = self.label_map_predictor(dist_map).reshape(1, -1, 1, 1)
v_plus = self.spatial_weight_predictor(dist_map).reshape(1, -1, 1, 1)
weight_m = self.target_mask_predictor(dist_map).reshape(1, -1, 1, 1)
# compute regularizer term
step_length = torch.exp(self.log_step_length)
reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)/(feat_dim**2)
losses = {'train': [], 'train_reference_loss': [], 'train_reg': [], 'train_query_loss': []}
for i in range(num_iter):
# I. Computing gradient of reference loss with respect to the filter map
# Computing the cost volume between the filter map and the reference features
scores_filter_w_ref = FunctionCorrelation(filter_map, reference_feat)
# Computing Reference Frame Objective L_R and corresponding gradient with respect to the filter map
# Applying sigma function on the score:
act_scores_filter_w_ref = v_plus * self.score_activation(scores_filter_w_ref, weight_m)
grad_act_scores_by_filter = v_plus * self.score_activation_deriv(scores_filter_w_ref, weight_m)
loss_ref_residuals = act_scores_filter_w_ref - v_plus * target_map
mapped_residuals = grad_act_scores_by_filter * loss_ref_residuals
# Computing the gradient of the reference loss with respect to the filter map
filter_grad_loss_ref = FunctionCorrelationTranspose(mapped_residuals, reference_feat)
# Computing the gradient of the regularization term with respect to the filter map
filter_grad_reg = reg_weight * filter_map
filter_grad = filter_grad_reg + filter_grad_loss_ref
if compute_losses:
# compute corresponding loss
loss_ref = 0.5 * (loss_ref_residuals**2).sum()/num_sequences
loss_reg = 0.5 / reg_weight.item() * (filter_grad_reg ** 2).sum() / num_sequences
# II. Computing Query Frame Objective L_q and corresponding gradient with respect to the filter map
loss_query = 0
if self.apply_query_loss:
# Computing the cost volume between the filter map and the query features
# dimension (b, search_size*search_size, H, W)
scores_filter_w_query = FunctionCorrelation(filter_map, query_feat)
# Applying the 4D kernel on the cost volume,
loss_query_residuals = self.reg_layer(scores_filter_w_query.reshape(-1, self.search_size,
self.search_size, *feat_sz))
# output shape is (b, H, W, output_dim, search_size, search_size)
# Computing the gradient of the query loss with respect to the filter map
# apply transpose convolution, returns to b, search_size, search_size, H, W
reg_tp_res = self.reg_layer(loss_query_residuals, transpose=True).reshape(scores_filter_w_query.shape)
filter_grad_loss_query = FunctionCorrelationTranspose(reg_tp_res, query_feat)
filter_grad += filter_grad_loss_query
if compute_losses:
# calculate the corresponding loss:
loss_query = 0.5 * (loss_query_residuals ** 2).sum() / num_sequences
# III. Calculating alpha denominator
# 1. Reference loss (L_r)
# Computing the cost volume between the gradient of the loss with respect to the filter map with
# the reference features in scores_filter_grad_w_ref
scores_filter_grad_w_ref = FunctionCorrelation(filter_grad, reference_feat)
scores_filter_grad_w_ref = grad_act_scores_by_filter * scores_filter_grad_w_ref
if self.apply_query_loss:
alpha_den = (scores_filter_grad_w_ref * scores_filter_grad_w_ref).view(num_sequences, -1).sum(dim=1)
# shape is b
else:
alpha_den = (scores_filter_grad_w_ref * scores_filter_grad_w_ref).sum(dim=1, keepdim=True)
# shape is b, spa**2, H, W
# 2. Query Loss (L_q)
if self.apply_query_loss:
# Hessian parts for regularization
scores_filter_grad_w_query = FunctionCorrelation(filter_grad, query_feat)
alpha_den_loss_query_residual = self.reg_layer(scores_filter_grad_w_query.reshape(-1,
self.search_size,
self.search_size,
*feat_sz))
alpha_den += (alpha_den_loss_query_residual * alpha_den_loss_query_residual)\
.view(num_sequences, -1).sum(dim=1)
# IV. Compute step length alpha
if self.apply_query_loss:
alpha_num = (filter_grad * filter_grad).view(num_sequences, -1).sum(dim=1)
else:
alpha_num = (filter_grad * filter_grad).sum(dim=1, keepdim=True)
alpha_den = (alpha_den + reg_weight * alpha_num).clamp(1e-8)
alpha = alpha_num / alpha_den
# V. Update filter map
if self.apply_query_loss:
filter_map = filter_map - (step_length * alpha.view(num_sequences, 1, 1, 1)) * filter_grad
else:
filter_map = filter_map - (step_length * alpha) * filter_grad
if compute_losses:
losses['train_reference_loss'].append(loss_ref)
losses['train_reg'].append(loss_reg)
losses['train_query_loss'].append(loss_query)
losses['train'].append(losses['train_reference_loss'][-1] + losses['train_reg'][-1] +
losses['train_query_loss'][-1])
if compute_losses:
print('LocalGOCor: train reference loss is {}'.format(losses['train_reference_loss']))
print('LocalGOCor: train query loss is {}'.format(losses['train_query_loss']))
print('LocalGOCor: train reg is {}\n'.format(losses['train_reg']))
return filter_map, losses
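# Note on the step length (a reading of the code above, not from the paper):
# alpha = ||g||^2 / (||J g||^2 + reg * ||g||^2) is the exact line-search
# minimizer of the quadratic least-squares model along the gradient direction g,
# computed per sequence when the query loss is active and per spatial position
# otherwise, then scaled by the learnt step_length.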
class LocalGOCor(nn.Module):
"""The main LocalGOCor module for computing the local correlation volume.
For now, only supports local search radius of 4.
args:
filter_initializer: initializer network
filter_optimizer: optimizer network
"""
def __init__(self, filter_initializer, filter_optimizer):
super(LocalGOCor, self).__init__()
self.filter_initializer = filter_initializer
self.filter_optimizer = filter_optimizer
def forward(self, reference_feat, query_feat, **kwargs):
"""
Computes the local GOCor correspondence volume between inputted reference and query feature maps.
args:
reference_feat: reference feature with shape (b, feat_dim, H, W)
query_feat: query feature with shape (b, feat_dim, H2, W2)
output:
scores: local correspondence volume between the optimized filter map (instead of the reference features in the
feature correlation layer) and the query feature map.
"""
# initializes the filter map
filter = self.filter_initializer(reference_feat)
# optimizes the filter map
filter, losses = self.filter_optimizer(filter, reference_feat, query_feat=query_feat, **kwargs)
# compute the local cost volume between optimized filter map and query features
scores = FunctionCorrelation(filter, query_feat)
return scores
######## Example ########
#
# initializer = LocalCorrSimpleInitializer()
#
# optimizer = LocalGOCorrOpt(num_iter=optim_iter, init_step_length=optim_init_step, init_filter_reg=optim_init_reg,
# num_dist_bins=num_dist_bins, bin_displacement=bin_displacement,
# v_minus_act=v_minus_act, v_minus_init_factor=v_minus_init_factor, search_size=search_size,
# apply_query_loss=False, reg_kernel_size=1, reg_inter_dim=1, reg_output_dim=1)
# corr_module = LocalGOCor(filter_initializer=initializer, filter_optimizer=optimizer)
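# Hypothetical usage sketch (shapes assumed, dimensions illustrative):
#
#     reference_feat = torch.rand(2, 512, 32, 32)
#     query_feat = torch.rand(2, 512, 32, 32)
#     scores = corr_module(reference_feat, query_feat)
#     # scores: local correlation volume of shape (2, search_size**2, 32, 32)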
|
nilq/baby-python
|
python
|
from .wd_containers import _ParameterContainer
import os
import sys
# below snippet is taken from subprocess32 manual
if os.name == 'posix' and sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
class _WDIO:
def __init__(self, container, wd_path, wd_binary_name):
self.parameters = container
self._input = ""
self._cwd = wd_path
self._type = ""
self._wd_binary_name = wd_binary_name
# TODO implement error checking for common input errors
self.warning = ""
self.error = ""
self.has_warning = False
self.has_error = False
self.process = None
def set_working_directory(self, path):
self._cwd = path
def _get_input_path(self):
return os.path.join(self._cwd, self._type + "in.active")
def _get_output_path(self):
return os.path.join(self._cwd, self._type + "out.active")
def save(self):
with open(self._get_input_path(), "w") as output:
output.write(self._input)
return self
def run(self):
cmd = os.path.join(self._cwd, self._wd_binary_name)
if os.path.isfile(cmd):
self.process = subprocess.Popen(cmd, cwd=self._cwd)
self.process.wait()
self.process = None
return self
else:
raise IOError("Cannot find WD binary:\n" + cmd)
@staticmethod
def _format_eccentricity(ipt):
ipt = float(ipt.get())
if ipt >= 1.0 or ipt < 0.0:
raise ValueError("Invalid eccentricity value: " + repr(ipt))
else:
output = "{:6.5f}".format(ipt)
return output[1:]
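# e.g. for a parameter holding 0.12345, "{:6.5f}" renders "0.12345" and the
# leading "0" is dropped, yielding ".12345" to fit WD's fixed-width field
# (field width assumed from the format above).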
def _format_spots(self):
def _format_spot(spt):
return spt["xlat"].format(9, 5, "F") + \
spt["xlong"].format(9, 5, "F") + \
spt["radsp"].format(9, 5, "F") + \
spt["temsp"].format(9, 5, "F") + \
spt["tstart"].format(14, 5, "F") + \
spt["tmax1"].format(14, 5, "F") + \
spt["tmax2"].format(14, 5, "F") + \
spt["tfinal"].format(14, 5, "F") + "\n"
star1_spot_lines = ""
for spot in self.parameters.star1_spots:
star1_spot_lines = star1_spot_lines + _format_spot(spot)
star2_spot_lines = ""
for spot in self.parameters.star2_spots:
star2_spot_lines = star2_spot_lines + _format_spot(spot)
return star1_spot_lines, star2_spot_lines
@staticmethod
def _slice_with_splitmap(line, splitmap, string=False):
if splitmap[0] != 0:
splitmap.insert(0, 0)
splitted_line = []
i = 0
while i < len(splitmap) - 1:
value = line[splitmap[i]:splitmap[i + 1]]
value = value.rstrip(" ")
value = value.strip(" ")
splitted_line.append(_WDIO._tidy_value(value, string=string))
i = i + 1
return splitted_line
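# Illustrative example: _slice_with_splitmap("ab cde", [2, 6]) pads the splitmap
# to [0, 2, 6], slices the line at [0:2] and [2:6], strips spaces and tidies,
# returning ["ab", "cde"] (both stay strings because they do not parse as floats).
# Note that the method mutates the passed-in splitmap when it lacks a leading 0.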
@staticmethod
def _tidy_value(value, string=False):
if string:
return value
else:
if "*" in value:
return float("nan")
else:
try:
return float(value.replace("D", "e"))
except ValueError:
return value
@staticmethod
def _tidy_table(table):
if len(table) == 0:
return []
columns = [[] for _ in table[0]]
for line in table:
for index, data in enumerate(line):
columns[index].append(data)
return columns
@staticmethod
def _read_table(source, header, offset=1, occurence=1, splitmap=None, tidy=True, string=False):
table = []
flag = False
start = 0
occured = 0
with open(source, "r") as src:
for line in src:
if header in line:
occured = occured + 1
if occured == occurence:
flag = True
if flag is True:
if start < offset:
start = start + 1
else:
if not line.strip():
break
else:
if splitmap is not None:
table.append(_WDIO._slice_with_splitmap(line, splitmap, string=string))
else:
table.append([_WDIO._tidy_value(x, string=string) for x in line.split()])
if tidy:
return _WDIO._tidy_table(table)
else:
return table
@staticmethod
def _read_all_tables(source, header, offset=1, splitmap=None, tidy=True, string=False):
with open(source, "r") as src:
splitted_source = src.read().split(header)
if len(splitted_source) == 1:
return []
splitted_source.pop(0) # we do not care about prior data
tables = []
for segment in splitted_source:
splitted_segment = segment.split("\n")
current_offset = 0
while offset > current_offset:
splitted_segment.pop(0)
current_offset = current_offset + 1
table = []
for line in splitted_segment:
if not line.split():
break
else:
if splitmap is not None:
table.append(_WDIO._slice_with_splitmap(line, splitmap, string=string))
else:
table.append([_WDIO._tidy_value(x, string=string) for x in line.split()])
if tidy:
tables.append(_WDIO._tidy_table(table))
else:
tables.append(table)
return tables
def check_container_type(self):
expectation = None
if self._type == "lc":
expectation = "LC"
elif self._type == "dc":
expectation = "DC"
if self.parameters.name != expectation:
raise TypeError("Expected container: " + expectation + "\n"
"Found container: " + self.parameters.name)
def __str__(self):
return self._input
class LCIO(_WDIO):
def __init__(self, container, wd_path=os.getcwd(), lc_binary_name="LC"):
_WDIO.__init__(self, container, wd_path=wd_path, wd_binary_name=lc_binary_name)
self._type = "lc"
self.check_container_type()
def _fill_input(self, mpage, ktstep=0):
self.parameters.check_values()
line1 = str(mpage) + " " + \
self.parameters["nref"].format(1, 0, "") + " " + \
self.parameters["mref"].format(1, 0, "") + " " + \
self.parameters["ifsmv1"].format(1, 0, "") + " " + \
self.parameters["ifsmv2"].format(1, 0, "") + " " + \
self.parameters["icor1"].format(1, 0, "") + " " + \
self.parameters["icor2"].format(1, 0, "") + " " + \
self.parameters["if3b"].format(1, 0, "") + " " + \
self.parameters["ld1"].format(2, 0, "", signed=True) + " " + \
self.parameters["ld2"].format(2, 0, "", signed=True) + " " + \
self.parameters["kspev"].format(1, 0, "") + " " + \
self.parameters["kspot"].format(1, 0, "") + " " + \
self.parameters["nomax"].format(1, 0, "") + " " + \
self.parameters["ifcgs"].format(1, 0, "") + " " + \
((" " * (6 - len(str(ktstep)))) + str(ktstep)) + "\n"
line2 = self.parameters["jdphs"].format(1, 0, "") + \
self.parameters["hjd0"].format(15, 6, "F") + \
self.parameters["pzero"].format(17, 10, "D") + \
self.parameters["dpdt"].format(14, 6, "D") + \
self.parameters["pshift"].format(10, 4, "D") + \
self.parameters["delph"].format(8, 5, "F") + \
self.parameters["nga"].format(3, 0, "") + \
self.parameters["stdev"].format(11, 4, "D") + \
self.parameters["noise"].format(2, 0, "") + \
self.parameters["seed"].format(11, 0, "F") + "\n"
line3 = self.parameters["hjdst"].format(14, 6, "F") + \
self.parameters["hjdsp"].format(15, 6, "F") + \
self.parameters["hjdin"].format(13, 6, "F") + \
self.parameters["phstrt"].format(12, 6, "F") + \
self.parameters["phstop"].format(12, 6, "F") + \
self.parameters["phin"].format(12, 6, "F") + \
self.parameters["phn"].format(12, 6, "F") + \
self.parameters["phobs"].format(10, 4, "F") + \
self.parameters["lsp"].format(2, 0, "") + \
self.parameters["tobs"].format(8, 4, "F") + "\n"
line4 = self.parameters["mode"].format(2, 0, "") + \
self.parameters["ipb"].format(2, 0, "") + \
self.parameters["ifat1"].format(2, 0, "") + \
self.parameters["ifat2"].format(2, 0, "") + \
self.parameters["n1"].format(4, 0, "") + \
self.parameters["n2"].format(4, 0, "") + \
self.parameters["perr"].format(13, 6, "F") + \
self.parameters["dperdt"].format(14, 6, "D") + \
self.parameters["the"].format(8, 5, "F") + \
self.parameters["vunit"].format(8, 2, "F") + "\n"
line5 = self._format_eccentricity(self.parameters["e"]) + \
self.parameters["a"].format(13, 6, "D") + \
self.parameters["f1"].format(10, 4, "F") + \
self.parameters["f2"].format(10, 4, "F") + \
self.parameters["vga"].format(10, 4, "F") + \
self.parameters["xincl"].format(9, 3, "F") + \
self.parameters["gr1"].format(7, 3, "F") + \
self.parameters["gr2"].format(7, 3, "F") + \
self.parameters["abunin"].format(7, 2, "F") + \
self.parameters["fspot1"].format(10, 4, "F") + \
self.parameters["fspot2"].format(10, 4, "F") + "\n"
tavh_n = _ParameterContainer.Parameter("tavh_n", float, self.parameters["tavh"].get() / 10000.0)
tavc_n = _ParameterContainer.Parameter("tavc_n", float, self.parameters["tavc"].get() / 10000.0)
line6 = tavh_n.format(7, 4, "F") + " " + \
tavc_n.format(7, 4, "F") + \
self.parameters["alb1"].format(7, 3, "F") + \
self.parameters["alb2"].format(7, 3, "F") + \
self.parameters["phsv"].format(13, 6, "D") + \
self.parameters["pcsv"].format(13, 6, "D") + \
self.parameters["rm"].format(13, 6, "D") + \
self.parameters["xbol1"].format(7, 3, "F") + \
self.parameters["xbol2"].format(7, 3, "F") + \
self.parameters["ybol1"].format(7, 3, "F") + \
self.parameters["ybol2"].format(7, 3, "F") + \
self.parameters["dpclog"].format(8, 5, "F") + "\n"
line7 = self.parameters["a3b"].format(12, 6, "D") + \
self.parameters["p3b"].format(14, 7, "D") + \
self.parameters["xincl3b"].format(11, 5, "F") + \
self.parameters["e3b"].format(9, 6, "F") + \
self.parameters["perr3b"].format(10, 7, "F") + \
self.parameters["tc3b"].format(17, 8, "F") + "\n"
line8 = self.parameters.synthetic_curve["iband"].format(3, 0, "") + \
self.parameters.synthetic_curve["hla"].format(13, 7, "D") + \
self.parameters.synthetic_curve["cla"].format(13, 7, "D") + \
self.parameters.synthetic_curve["x1a"].format(7, 3, "F") + \
self.parameters.synthetic_curve["x2a"].format(7, 3, "F") + \
self.parameters.synthetic_curve["y1a"].format(7, 3, "F") + \
self.parameters.synthetic_curve["y2a"].format(7, 3, "F") + \
self.parameters.synthetic_curve["el3a"].format(12, 4, "D") + \
self.parameters.synthetic_curve["opsfa"].format(11, 4, "D") + \
self.parameters.synthetic_curve["zero"].format(8, 3, "F") + \
self.parameters.synthetic_curve["factor"].format(8, 4, "F") + \
self.parameters.synthetic_curve["wla"].format(10, 6, "F") + \
self.parameters.synthetic_curve["aextinc"].format(8, 4, "F") + \
self.parameters.synthetic_curve["calib"].format(12, 5, "D") + "\n"
star1_line_profiles = ""
star2_line_profiles = ""
if mpage == 3:
star1_line_profiles = self.parameters["binwm1"].format(11, 5, "D") + \
self.parameters["sc1"].format(9, 4, "F") + \
self.parameters["sl1"].format(9, 2, "F") + \
self.parameters["nf1"].format(3, 0, "") + "\n"
for line in self.parameters.star1_lines:
star1_line_profiles = star1_line_profiles + \
line["wll"].format(9, 6, "F") + \
line["ewid"].format(12, 5, "D") + \
line["depth"].format(10, 5, "F") + \
line["kks"].format(5, 0, "") + "\n"
star1_line_profiles = star1_line_profiles + "-1.\n"
star2_line_profiles = self.parameters["binwm2"].format(11, 5, "D") + \
self.parameters["sc2"].format(9, 4, "F") + \
self.parameters["sl2"].format(9, 2, "F") + \
self.parameters["nf2"].format(3, 0, "") + "\n"
for line in self.parameters.star2_lines:
star2_line_profiles = star2_line_profiles + \
line["wll"].format(9, 6, "F") + \
line["ewid"].format(12, 5, "D") + \
line["depth"].format(10, 5, "F") + \
line["kks"].format(5, 0, "") + "\n"
star2_line_profiles = star2_line_profiles + "-1.\n"
star1_spots, star2_spots = self._format_spots()
eclipse_data = ""
if mpage == 6 and ktstep == 0:
if len(self.parameters.data["eclipse_times"]) == 0:
raise ValueError("Eclipse times must be provided for mpage: 6, ktstep: 0")
jd_formatter = _ParameterContainer.Parameter("jd", float)
type_formatter = _ParameterContainer.Parameter("type", int)
jd_list, type_list = self.parameters.data["eclipse_times"]
for data in zip(jd_list, type_list):
jd_formatter.set(data[0])
type_formatter.set(data[1])
eclipse_data = eclipse_data + jd_formatter.format(14, 5, "F") + type_formatter.format(6, 0, "") + "\n"
eclipse_data = eclipse_data + "-10000.\n"
self._input = line1 + line2 + line3 + line4 + line5 + line6 + line7 + line8 + \
star1_line_profiles + star2_line_profiles + \
star1_spots + \
"300.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000\n" + \
star2_spots + \
"300.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000\n" + \
"150.\n" + \
eclipse_data + \
"9"
return self
def fill_for_synthetic_light_curve(self):
return self._fill_input(1)
def fill_for_synthetic_velocity_curve(self):
return self._fill_input(2)
def fill_for_spectral_lines(self):
return self._fill_input(3)
def fill_for_component_dimensions(self):
return self._fill_input(4)
def fill_for_star_positions(self):
return self._fill_input(5)
def fill_for_etv(self):
return self._fill_input(6)
def fill_for_conjunction(self, ktstep):
return self._fill_input(6, ktstep=ktstep)
def read_synthetic_light_curve(self):
lc = self._read_table(self._get_output_path(),
" JD Phase light 1 light 2")
return lc
def read_cgs_synthetic_light_curve(self):
lc = self._read_table(self._get_output_path(),
" JD Phase cgs1 cgs2 cgstot")
return lc
def read_synthetic_velocity_curve(self):
vc = self._read_table(self._get_output_path(),
" JD Phase V Rad 1")
return vc
def read_spectral_lines(self):
star1_spec_lines = self._read_all_tables(self._get_output_path(),
" star 1\n",
offset=2)
star2_spec_lines = self._read_all_tables(self._get_output_path(),
" star 2\n",
offset=2)
return star1_spec_lines, star2_spec_lines
def read_component_dimensions(self):
dimensions = self._read_table(self._get_output_path(),
" JD Phase r1pol r1pt")
return dimensions
def read_star_positions(self):
positions = self._read_all_tables(self._get_output_path(),
" Y Sky Coordinate Z Sky Coordinate\n")
return positions
def read_etv(self):
etv = self._read_table(self._get_output_path(),
"eclipse timing type wt.",
offset=2)
return etv
def read_conjunction(self):
conjunction = self._read_table(self._get_output_path(),
"conj. time type wt.",
offset=2)
return conjunction
def read_abs_params(self):
abs_params = self._read_table(self._get_output_path(),
" Star M/Msun (Mean Radius)/Rsun M Bol Log g (cgs)")
teffs = self._read_table(self._get_output_path(),
" T1 T2 Alb 1 Alb 2")
sma = self._read_table(self._get_output_path(),
" ecc s-m axis F1 F2 Vgam")
lds = self._read_table(self._get_output_path(),
"band x1 x2 y1 y2")
lums = self._read_table(self._get_output_path(),
"band L1 L2 x1 x2 y1 y2")
return abs_params, teffs, sma, lds, lums
def read_K1_2_params(self):
par_set_1 = self._read_table(self._get_output_path(),
"JDPHS J.D. zero P zero dPdt Ph. shift")
par_set_2 = self._read_table(self._get_output_path(),
" ecc s-m axis F1 F2 Vgam Incl")
par_set_3 = self._read_table(self._get_output_path(),
" T1 T2 Alb 1 Alb 2 Pot 1 Pot 2 M2/M1")
p, e, a, i, q = float(par_set_1[2][0]), float(par_set_2[0][0]), float(par_set_2[1][0]), \
float(par_set_2[5][0]), float(par_set_3[6][0])
return p, e, a, i, q
class DCIO(_WDIO):
def __init__(self, container, wd_path=os.getcwd(), dc_binary_name="DC"):
_WDIO.__init__(self, container, wd_path=wd_path, wd_binary_name=dc_binary_name)
self._type = "dc"
self.check_container_type()
def fill_for_solution(self):
def _format_keeps(keep):
block1 = " " + keep["spot_a_lat"].format(1, 0, "") + \
keep["spot_a_long"].format(1, 0, "") + \
keep["spot_a_rad"].format(1, 0, "") + \
keep["spot_a_tempf"].format(1, 0, "") + " "
block2 = keep["spot_b_lat"].format(1, 0, "") + \
keep["spot_b_long"].format(1, 0, "") + \
keep["spot_b_rad"].format(1, 0, "") + \
keep["spot_b_tempf"].format(1, 0, "") + " "
block3 = keep["a"].format(1, 0, "") + \
keep["e"].format(1, 0, "") + \
keep["perr"].format(1, 0, "") + \
keep["f1"].format(1, 0, "") + \
keep["f2"].format(1, 0, "") + \
keep["pshift"].format(1, 0, "") + \
keep["vga"].format(1, 0, "") + " "
block4 = keep["xincl"].format(1, 0, "") + \
keep["g1"].format(1, 0, "") + \
keep["g2"].format(1, 0, "") + \
keep["tavh"].format(1, 0, "") + \
keep["tavc"].format(1, 0, "") + " "
block5 = keep["alb1"].format(1, 0, "") + \
keep["alb2"].format(1, 0, "") + \
keep["phsv"].format(1, 0, "") + \
keep["pcsv"].format(1, 0, "") + \
keep["rm"].format(1, 0, "") + " "
block6 = keep["hjd0"].format(1, 0, "") + \
keep["pzero"].format(1, 0, "") + \
keep["dpdt"].format(1, 0, "") + \
keep["dperdt"].format(1, 0, "") + \
keep["a3b"].format(1, 0, "") + " "
block7 = keep["p3b"].format(1, 0, "") + \
keep["xincl3b"].format(1, 0, "") + \
keep["e3b"].format(1, 0, "") + \
keep["perr3b"].format(1, 0, "") + \
keep["t03b"].format(1, 0, "") + " "
block8 = "11111 " # unused block
block9 = keep["dpclog"].format(1, 0, "") + \
keep["desextinc"].format(1, 0, "") + \
keep["spot_a_tstart"].format(1, 0, "") + \
keep["spot_a_tmax1"].format(1, 0, "") + \
keep["spot_a_tmax2"].format(1, 0, "") + " "
block10 = keep["spot_a_tend"].format(1, 0, "") + \
keep["spot_b_tstart"].format(1, 0, "") + \
keep["spot_b_tmax1"].format(1, 0, "") + \
keep["spot_b_tmax2"].format(1, 0, "") + \
keep["spot_b_tend"].format(1, 0, "") + " "
block11 = "11111 " # unused block
block12 = keep["hla"].format(1, 0, "") + \
keep["cla"].format(1, 0, "") + \
keep["x1a"].format(1, 0, "") + \
keep["x2a"].format(1, 0, "") + \
keep["el3a"].format(1, 0, "") + " "
block13 = keep["niter"].format(2, 0, "") + \
keep["xlamda"].format(10, 3, "D") + \
keep["vlr"].format(6, 3, "F") + "\n"
return block1 + block2 + block3 + block4 + block5 + \
block6 + block7 + block8 + block9 + block10 + \
block11 + block12 + block13
def _format_lc_vc_data(x, y, w):
data_line = ""
time_formatter = _ParameterContainer.Parameter("time", float)
observation_formatter = _ParameterContainer.Parameter("obs", float)
weight_formatter = _ParameterContainer.Parameter("weight", float)
for xyw in zip(x, y, w):
time_formatter.set(xyw[0])
observation_formatter.set(xyw[1])
weight_formatter.set(xyw[2])
data_line = data_line + \
time_formatter.format(14, 5, "D") + \
observation_formatter.format(11, 6, "D") + \
weight_formatter.format(8, 3, "D") + "\n"
return data_line + " -10001.00000\n"
def _format_velocity_curve(vc):
if vc is None:
return "", ""
else:
vc_info_line = vc["iband"].format(3, 0, "") + \
vc["hla"].format(13, 6, "D") + \
vc["cla"].format(13, 6, "D") + \
vc["x1a"].format(7, 3, "F") + \
vc["x2a"].format(7, 3, "F") + \
vc["y1a"].format(7, 3, "F") + \
vc["y2a"].format(7, 3, "F") + \
vc["opsfa"].format(10, 3, "D") + \
vc["sigma"].format(12, 5, "D") + \
vc["sphas1"].format(8, 5, "F") + \
vc["sphas2"].format(8, 5, "F") + \
vc["sphas3"].format(8, 5, "F") + \
vc["sphas4"].format(8, 5, "F") + \
vc["wla"].format(10, 6, "F") + \
vc["ksd"].format(2, 0, "") + "\n"
x, y, w = vc.data["velocity_data"]
vc_data_line = _format_lc_vc_data(x, y, w)
return vc_info_line, vc_data_line
def _format_light_curve(lc):
if lc is None:
return "", "", ""
else:
lc_info_line = lc["iband"].format(3, 0, "") + \
lc["hla"].format(13, 6, "D") + \
lc["cla"].format(13, 6, "D") + \
lc["x1a"].format(7, 3, "F") + \
lc["x2a"].format(7, 3, "F") + \
lc["y1a"].format(7, 3, "F") + \
lc["y2a"].format(7, 3, "F") + \
lc["el3a"].format(12, 4, "D") + \
lc["opsfa"].format(10, 3, "D") + \
lc["noise"].format(2, 0, "") + \
lc["sigma"].format(12, 5, "D") + \
lc["sphas1"].format(8, 5, "F") + \
lc["sphas2"].format(8, 5, "F") + \
lc["sphas3"].format(8, 5, "F") + \
lc["sphas4"].format(8, 5, "F") + \
lc["ksd"].format(2, 0, "") + "\n"
lc_extra_line = lc["wla"].format(9, 6, "F") + \
lc["aextinc"].format(8, 4, "F") + \
lc["xunit"].format(11, 4, "D") + \
lc["calib"].format(12, 5, "D") + "\n"
x, y, w = lc.data["light_data"]
lc_data_line = _format_lc_vc_data(x, y, w)
return lc_info_line, lc_extra_line, lc_data_line
# all del's use same formatting
del_width = 7
del_precision = 4
del_exponent = "d"
del1 = " " + self.parameters.dels["spot_a_lat"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["spot_a_long"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["spot_a_rad"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["spot_a_tempf"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["spot_b_lat"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["spot_b_long"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["spot_b_rad"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["spot_b_tempf"].format(del_width, del_precision, del_exponent) + "\n"
del2 = " " + self.parameters.dels["a"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["e"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["perr"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["f1"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["f2"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["pshift"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["xincl"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["g1"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["g2"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["tavh"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["tavc"].format(del_width, del_precision, del_exponent) + " " + "\n"
del3 = " " + self.parameters.dels["alb1"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["alb2"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["phsv"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["pcsv"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["rm"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["hla"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["cla"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["x1a"].format(del_width, del_precision, del_exponent) + " " + \
self.parameters.dels["x2a"].format(del_width, del_precision, del_exponent) + "\n"
keeps = _format_keeps(self.parameters.keeps)
line5 = self.parameters["kspa"].format(3, 0, "") + \
self.parameters["nspa"].format(3, 0, "") + \
self.parameters["kspb"].format(3, 0, "") + \
self.parameters["nspb"].format(3, 0, "") + "\n"
line6 = self.parameters["ifvc1"].format(1, 0, "") + " " + \
self.parameters["ifvc2"].format(1, 0, "") + " " + \
self.parameters["nlc"].format(2, 0, "") + \
self.parameters["iftime"].format(2, 0, "") + \
self.parameters["ko"].format(2, 0, "") + \
self.parameters["kdisk"].format(2, 0, "") + \
self.parameters["isym"].format(2, 0, "") + \
self.parameters["nppl"].format(2, 0, "") + \
self.parameters["ifder"].format(2, 0, "") + \
self.parameters["iflcin"].format(2, 0, "") + \
self.parameters["ifoc"].format(2, 0, "") + "\n"
line7 = self.parameters["nref"].format(1, 0, "") + " " + \
self.parameters["mref"].format(1, 0, "") + " " + \
self.parameters["ifsmv1"].format(1, 0, "") + " " + \
self.parameters["ifsmv2"].format(1, 0, "") + " " + \
self.parameters["icor1"].format(1, 0, "") + " " + \
self.parameters["icor2"].format(1, 0, "") + " " + \
self.parameters["if3b"].format(1, 0, "") + " " + \
self.parameters["ld1"].format(2, 0, "", signed=True) + " " + \
self.parameters["ld2"].format(2, 0, "", signed=True) + " " + \
self.parameters["kspev"].format(1, 0, "") + " " + \
self.parameters["kspot"].format(1, 0, "") + " " + \
self.parameters["nomax"].format(1, 0, "") + " " + \
self.parameters["ifcgs"].format(1, 0, "") + " " + \
self.parameters["maglite"].format(1, 0, "") + " " + \
self.parameters["linkext"].format(1, 0, "") + " " + \
self.parameters["desextinc"].format(7, 4, "F") + "\n"
line8 = self.parameters["jdphs"].format(1, 0, "") + \
self.parameters["hjd0"].format(15, 6, "F") + \
self.parameters["pzero"].format(17, 10, "D") + \
self.parameters["dpdt"].format(14, 6, "D") + \
self.parameters["pshift"].format(10, 4, "D") + \
self.parameters["delph"].format(8, 5, "F") + \
self.parameters["nga"].format(3, 0, "") + "\n"
line9 = self.parameters["mode"].format(2, 0, "") + \
self.parameters["ipb"].format(2, 0, "") + \
self.parameters["ifat1"].format(2, 0, "") + \
self.parameters["ifat2"].format(2, 0, "") + \
self.parameters["n1"].format(4, 0, "") + \
self.parameters["n2"].format(4, 0, "") + \
self.parameters["n1l"].format(4, 0, "") + \
self.parameters["n2l"].format(4, 0, "") + \
self.parameters["perr"].format(13, 6, "F") + \
self.parameters["dperdt"].format(13, 5, "D") + \
self.parameters["the"].format(8, 5, "F") + \
self.parameters["vunit"].format(9, 3, "F") + "\n"
line10 = self._format_eccentricity(self.parameters["e"]) + \
self.parameters["a"].format(13, 6, "D") + \
self.parameters["f1"].format(10, 4, "F") + \
self.parameters["f2"].format(10, 4, "F") + \
self.parameters["vga"].format(10, 4, "F") + \
self.parameters["xincl"].format(9, 3, "F") + \
self.parameters["gr1"].format(7, 3, "F") + \
self.parameters["gr2"].format(7, 3, "F") + \
self.parameters["abunin"].format(7, 2, "F") + \
self.parameters["fspot1"].format(10, 4, "F") + \
self.parameters["fspot2"].format(10, 4, "F") + "\n"
tavh_n = _ParameterContainer.Parameter("tavh_n", float, self.parameters["tavh"].get() / 10000.0)
tavc_n = _ParameterContainer.Parameter("tavc_n", float, self.parameters["tavc"].get() / 10000.0)
line11 = tavh_n.format(7, 4, "F") + \
tavc_n.format(8, 4, "F") + \
self.parameters["alb1"].format(7, 3, "F") + \
self.parameters["alb2"].format(7, 3, "F") + \
self.parameters["phsv"].format(13, 6, "D") + \
self.parameters["pcsv"].format(13, 6, "D") + \
self.parameters["rm"].format(13, 6, "D") + \
self.parameters["xbol1"].format(7, 3, "F") + \
self.parameters["xbol2"].format(7, 3, "F") + \
self.parameters["ybol1"].format(7, 3, "F") + \
self.parameters["ybol2"].format(7, 3, "F") + \
self.parameters["dpclog"].format(9, 5, "F") + "\n"
line12 = self.parameters["a3b"].format(12, 6, "D") + \
self.parameters["p3b"].format(14, 7, "D") + \
self.parameters["xincl3b"].format(11, 5, "F") + \
self.parameters["e3b"].format(9, 6, "F") + \
self.parameters["perr3b"].format(10, 7, "F") + \
self.parameters["tc3b"].format(17, 8, "F") + "\n"
star1_spots, star2_spots = self._format_spots()
vc1_dependent_line, vc1_data = _format_velocity_curve(self.parameters.velocity_curves[0])
vc2_dependent_line, vc2_data = _format_velocity_curve(self.parameters.velocity_curves[1])
lc_dependent_lines = ""
lc_extra_dependent_lines = ""
lc_data = ""
for lc_container in self.parameters.light_curves:
info, extra, data = _format_light_curve(lc_container)
lc_dependent_lines = lc_dependent_lines + info
lc_extra_dependent_lines = lc_extra_dependent_lines + extra
lc_data = lc_data + data
eclipse_line = ""
eclipse_data = ""
if self.parameters.eclipse_timings is not None:
eclipse_line = (" " * 82) + \
self.parameters.eclipse_timings["sigma"].format(10,8,"F") + \
(" " * 34) + \
self.parameters.eclipse_timings["ksd"].format(1,1,"") + "\n"
hjd_formatter = _ParameterContainer.Parameter("hjd", float)
type_formatter = _ParameterContainer.Parameter("type", int)
weights_formatter = _ParameterContainer.Parameter("weights", float)
x, y, z = self.parameters.eclipse_timings.data["eclipse_data"][0], \
self.parameters.eclipse_timings.data["eclipse_data"][1], \
self.parameters.eclipse_timings.data["eclipse_data"][2]
for xyz in zip(x,y,z):
hjd_formatter.set(xyz[0])
type_formatter.set(xyz[1])
weights_formatter.set(xyz[2])
eclipse_data = eclipse_data + \
hjd_formatter.format(14, 5, "D") + \
type_formatter.format(6, 0, "") + \
weights_formatter.format(13, 3, "D") + "\n"
eclipse_data = eclipse_data + " -10001.00000\n"
subset_line = ""
for subset in self.parameters.subsets:
subset_line = subset_line + _format_keeps(subset)
self._input = del1 + del2 + del3 + keeps + \
line5 + line6 + line7 + line8 + line9 + line10 + line11 + line12 + \
vc1_dependent_line + vc2_dependent_line + lc_dependent_lines + \
eclipse_line + lc_extra_dependent_lines + \
star1_spots + "300.00000\n" + star2_spots + "300.00000\n150.\n" + \
vc1_data + vc2_data + lc_data + eclipse_data + subset_line + " 2\n"
return self
def read_results(self, force_tidy_output=False):
results = self._read_table(self._get_output_path(),
"Input-Output in F Format",
offset=3,
splitmap=[5, 9, 28, 46, 65, 83],
occurence=self.parameters.keeps["niter"].get(),
tidy=force_tidy_output)
return results
def read_solution_stats(self):
stats = self._read_table(self._get_output_path(),
" Mean residual for input values",
occurence=self.parameters.keeps["niter"].get())
return stats
def read_component_dimensions(self):
s1_dimensions = self._read_table(self._get_output_path(),
" 1 pole",
offset=0,
splitmap=[3, 10, 24, 38, 52, 66])
s2_dimensions = self._read_table(self._get_output_path(),
" 2 pole",
offset=0,
splitmap=[3, 10, 24, 38, 52, 66])
return [s1_dimensions, s2_dimensions]
def read_unweighted_observations(self, split_by_observation=False):
results = self.read_results()
column_limit = 20
base_columns = 4
if self.parameters["jdphs"].get() == 1:
column_limit = 23
base_columns = 5
current_columns = len(results[0]) + base_columns
if current_columns > column_limit:
oc_table = self._read_table(self._get_output_path(), "Unweighted Observational Equations", offset=3,
tidy=False)
table = []
idx = 0
max_idx = len(oc_table)
while idx < max_idx:
table.append(oc_table[idx] + oc_table[idx + 1])
idx = idx + 2
oc_table = self._tidy_table(table)
else:
oc_table = self._read_table(self._get_output_path(), "Unweighted Observational Equations", offset=3)
if split_by_observation:
obs_table = []
split_table = []
limit = 0
if self.parameters.velocity_curves[0] is not None:
vc1_len = len(self.parameters.velocity_curves[0].data["velocity_data"][0])
split_table.append([limit, limit + vc1_len])
limit = limit + vc1_len #+ 1
if self.parameters.velocity_curves[1] is not None:
vc2_len = len(self.parameters.velocity_curves[1].data["velocity_data"][0])
split_table.append([limit, limit + vc2_len])
limit = limit + vc2_len #+ 1
for lc in self.parameters.light_curves:
lc_len = len(lc.data["light_data"][0])
split_table.append([limit, limit + lc_len])
limit = limit + lc_len #+ 1
for split in split_table:
temp_table = []
for column in oc_table:
temp_table.append(column[split[0]:split[1]])
obs_table.append(temp_table)
return obs_table
else:
return oc_table
def update_from_results(self):
# TODO implement this
raise NotImplementedError
|
nilq/baby-python
|
python
|
from django.test import TestCase
from .models import Location, Tag
import datetime as dt
# Test case for locations
class LocationTestClass(TestCase):
def setUp(self):
self.location = Location(location='Nairobi')
def test_instance(self):
self.assertTrue(isinstance(self.location, Location))
def test_save_method(self):
self.location.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations) > 0)
def test_delete_method(self):
self.location.save_location()
locations = Location.objects.all()
self.location.delete_location()
locations = Location.objects.all()
self.assertTrue(len(locations) == 0)
# Test case for tags
class TagTestClass(TestCase):
def setUp(self):
self.tag = Tag(tag='vacay')
def test_tag_instance(self):
self.assertTrue(isinstance(self.tag, Tag))
def test_save_tag_method(self):
self.tag.save_tag()
tag_object = Tag.objects.all()
self.assertTrue(len(tag_object) > 0)
def test_delete_tag_method(self):
self.tag.save_tag()
tag_object = Tag.objects.all()
self.tag.delete_tag()
tag_object = Tag.objects.all()
self.assertTrue(len(tag_object) == 0)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
Manage Dell DRAC.
.. versionadded:: 2015.8.2
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import re
# Import Salt libs
from salt.exceptions import CommandExecutionError
import salt.utils.path
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext.six.moves import map
log = logging.getLogger(__name__)
__proxyenabled__ = ['fx2']
try:
run_all = __salt__['cmd.run_all']
except (NameError, KeyError):
import salt.modules.cmdmod
__salt__ = {
'cmd.run_all': salt.modules.cmdmod.run_all
}
def __virtual__():
if salt.utils.path.which('racadm'):
return True
return (False, 'The drac execution module cannot be loaded: racadm binary not in path.')
def __parse_drac(output):
'''
Parse Dell DRAC output
'''
drac = {}
section = ''
for i in output.splitlines():
if i.strip().endswith(':') and '=' not in i:
section = i[0:-1]
drac[section] = {}
if len(i.rstrip()) > 0 and '=' in i:
if section in drac:
drac[section].update(dict(
[[prop.strip() for prop in i.split('=')]]
))
else:
section = i.strip()
if section not in drac and section:
drac[section] = {}
return drac
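# A hedged sketch of what __parse_drac returns for typical racadm
# "Section:" / "key = value" output (the sample values are hypothetical):
#
#   >>> __parse_drac('System Information:\nHost Name = web01\nOS Name = ESXi')
#   {'System Information': {'Host Name': 'web01', 'OS Name': 'ESXi'}}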
def __execute_cmd(command, host=None,
admin_username=None, admin_password=None,
module=None):
'''
Execute rac commands
'''
if module:
# -a takes 'server' or 'switch' to represent all servers
# or all switches in a chassis. Allow
# user to say 'module=ALL_SERVER' or 'module=ALL_SWITCH'
if module.startswith('ALL_'):
modswitch = '-a '\
+ module[module.index('_') + 1:len(module)].lower()
else:
modswitch = '-m {0}'.format(module)
else:
modswitch = ''
if not host:
# This is a local call
cmd = __salt__['cmd.run_all']('racadm {0} {1}'.format(command,
modswitch))
else:
cmd = __salt__['cmd.run_all'](
'racadm -r {0} -u {1} -p {2} {3} {4}'.format(host,
admin_username,
admin_password,
command,
modswitch),
output_loglevel='quiet')
if cmd['retcode'] != 0:
log.warning('racadm returned an exit code of %s', cmd['retcode'])
return False
return True
def __execute_ret(command, host=None,
admin_username=None, admin_password=None,
module=None):
'''
Execute rac commands
'''
if module:
if module == 'ALL':
modswitch = '-a '
else:
modswitch = '-m {0}'.format(module)
else:
modswitch = ''
if not host:
# This is a local call
cmd = __salt__['cmd.run_all']('racadm {0} {1}'.format(command,
modswitch))
else:
cmd = __salt__['cmd.run_all'](
'racadm -r {0} -u {1} -p {2} {3} {4}'.format(host,
admin_username,
admin_password,
command,
modswitch),
output_loglevel='quiet')
if cmd['retcode'] != 0:
log.warning('racadm returned an exit code of %s', cmd['retcode'])
else:
fmtlines = []
for l in cmd['stdout'].splitlines():
if l.startswith('Security Alert'):
continue
if l.startswith('RAC1168:'):
break
if l.startswith('RAC1169:'):
break
if l.startswith('Continuing execution'):
continue
if len(l.strip()) == 0:
continue
fmtlines.append(l)
cmd['stdout'] = '\n'.join(fmtlines)
return cmd
def get_dns_dracname(host=None,
admin_username=None, admin_password=None):
ret = __execute_ret('get iDRAC.NIC.DNSRacName', host=host,
admin_username=admin_username,
admin_password=admin_password)
parsed = __parse_drac(ret['stdout'])
return parsed
def set_dns_dracname(name,
host=None,
admin_username=None,
admin_password=None):
ret = __execute_ret('set iDRAC.NIC.DNSRacName {0}'.format(name),
host=host,
admin_username=admin_username,
admin_password=admin_password)
return ret
def system_info(host=None,
admin_username=None, admin_password=None,
module=None):
'''
Return System information
CLI Example:
.. code-block:: bash
salt dell dracr.system_info
'''
cmd = __execute_ret('getsysinfo', host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module)
if cmd['retcode'] != 0:
log.warning('racadm returned an exit code of %s', cmd['retcode'])
return cmd
return __parse_drac(cmd['stdout'])
def set_niccfg(ip=None, netmask=None, gateway=None, dhcp=False,
host=None,
admin_username=None,
admin_password=None,
module=None):
cmdstr = 'setniccfg '
if dhcp:
cmdstr += '-d '
else:
cmdstr += '-s ' + ip + ' ' + netmask + ' ' + gateway
return __execute_cmd(cmdstr, host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module)
def set_nicvlan(vlan=None,
host=None,
admin_username=None,
admin_password=None,
module=None):
cmdstr = 'setniccfg -v '
if vlan:
cmdstr += vlan
ret = __execute_cmd(cmdstr, host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module)
return ret
def network_info(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Return Network Configuration
CLI Example:
.. code-block:: bash
salt dell dracr.network_info
'''
inv = inventory(host=host, admin_username=admin_username,
admin_password=admin_password)
if inv is None:
cmd = {}
cmd['retcode'] = -1
cmd['stdout'] = 'Problem getting switch inventory'
return cmd
if module not in inv.get('switch') and module not in inv.get('server'):
cmd = {}
cmd['retcode'] = -1
cmd['stdout'] = 'No module {0} found.'.format(module)
return cmd
cmd = __execute_ret('getniccfg', host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module)
if cmd['retcode'] != 0:
log.warning('racadm returned an exit code of %s', cmd['retcode'])
cmd['stdout'] = 'Network:\n' + 'Device = ' + module + '\n' + \
cmd['stdout']
return __parse_drac(cmd['stdout'])
def nameservers(ns,
host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Configure the nameservers on the DRAC
CLI Example:
.. code-block:: bash
salt dell dracr.nameservers [NAMESERVERS]
salt dell dracr.nameservers ns1.example.com ns2.example.com
admin_username=root admin_password=calvin module=server-1
host=192.168.1.1
'''
if len(ns) > 2:
log.warning('racadm only supports two nameservers')
return False
for i in range(1, len(ns) + 1):
if not __execute_cmd('config -g cfgLanNetworking -o '
'cfgDNSServer{0} {1}'.format(i, ns[i - 1]),
host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module):
return False
return True
def syslog(server, enable=True, host=None,
admin_username=None, admin_password=None, module=None):
'''
Configure remote syslog logging. By default syslog is automatically
enabled when a server is specified. To disable syslog, specify the
server followed by False.
CLI Example:
.. code-block:: bash
salt dell dracr.syslog [SYSLOG IP] [ENABLE/DISABLE]
salt dell dracr.syslog 0.0.0.0 False
'''
if enable and __execute_cmd('config -g cfgRemoteHosts -o '
'cfgRhostsSyslogEnable 1',
host=host,
admin_username=admin_username,
admin_password=admin_password,
module=None):
return __execute_cmd('config -g cfgRemoteHosts -o '
'cfgRhostsSyslogServer1 {0}'.format(server),
host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module)
return __execute_cmd('config -g cfgRemoteHosts -o cfgRhostsSyslogEnable 0',
host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module)
def email_alerts(action,
host=None,
admin_username=None,
admin_password=None):
'''
Enable/Disable email alerts
CLI Example:
.. code-block:: bash
salt dell dracr.email_alerts True
salt dell dracr.email_alerts False
'''
if action:
return __execute_cmd('config -g cfgEmailAlert -o '
'cfgEmailAlertEnable -i 1 1', host=host,
admin_username=admin_username,
admin_password=admin_password)
else:
return __execute_cmd('config -g cfgEmailAlert -o '
'cfgEmailAlertEnable -i 1 0')
def list_users(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
List all DRAC users
CLI Example:
.. code-block:: bash
salt dell dracr.list_users
'''
users = {}
_username = ''
for idx in range(1, 17):
cmd = __execute_ret('getconfig -g '
'cfgUserAdmin -i {0}'.format(idx),
host=host, admin_username=admin_username,
admin_password=admin_password)
if cmd['retcode'] != 0:
log.warning('racadm returned an exit code of %s', cmd['retcode'])
for user in cmd['stdout'].splitlines():
if not user.startswith('cfg'):
continue
(key, val) = user.split('=')
if key.startswith('cfgUserAdminUserName'):
_username = val.strip()
if val:
users[_username] = {'index': idx}
else:
break
else:
if len(_username) > 0:
users[_username].update({key: val})
return users
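# A hedged sketch of the structure list_users() returns; the user name,
# slot index and values below are hypothetical:
#
#   {'root': {'index': 1,
#             'cfgUserAdminPrivilege': '0x000001ff',
#             'cfgUserAdminEnable': '1'}}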
def delete_user(username,
uid=None,
host=None,
admin_username=None,
admin_password=None):
'''
Delete a user
CLI Example:
.. code-block:: bash
salt dell dracr.delete_user [USERNAME] [UID - optional]
salt dell dracr.delete_user diana 4
'''
if uid is None:
user = list_users()
uid = user[username]['index']
if uid:
return __execute_cmd('config -g cfgUserAdmin -o '
'cfgUserAdminUserName -i {0} ""'.format(uid),
host=host, admin_username=admin_username,
admin_password=admin_password)
else:
log.warning('User \'%s\' does not exist', username)
return False
def change_password(username, password, uid=None, host=None,
admin_username=None, admin_password=None,
module=None):
'''
Change user's password
CLI Example:
.. code-block:: bash
salt dell dracr.change_password [USERNAME] [PASSWORD] uid=[OPTIONAL]
host=<remote DRAC> admin_username=<DRAC user>
admin_password=<DRAC PW>
salt dell dracr.change_password diana secret
Note that if only a username is specified then this module will look up
details for all 16 possible DRAC users. This is time consuming, but might
be necessary if one is not sure which user slot contains the one you want.
Many late-model Dell chassis have 'root' as UID 1, so if you can depend
on that then setting the password is much quicker.
Raises an error if the supplied password is greater than 20 chars.
'''
if len(password) > 20:
raise CommandExecutionError('Supplied password should be 20 characters or less')
if uid is None:
user = list_users(host=host, admin_username=admin_username,
admin_password=admin_password, module=module)
uid = user[username]['index']
if uid:
return __execute_cmd('config -g cfgUserAdmin -o '
'cfgUserAdminPassword -i {0} {1}'
.format(uid, password),
host=host, admin_username=admin_username,
admin_password=admin_password, module=module)
else:
log.warning('racadm: user \'%s\' does not exist', username)
return False
def deploy_password(username, password, host=None, admin_username=None,
admin_password=None, module=None):
'''
Change the QuickDeploy password, used for switches as well
CLI Example:
.. code-block:: bash
salt dell dracr.deploy_password [USERNAME] [PASSWORD]
host=<remote DRAC> admin_username=<DRAC user>
admin_password=<DRAC PW>
salt dell dracr.deploy_password diana secret
Note that if only a username is specified then this module will look up
details for all 16 possible DRAC users. This is time consuming, but might
be necessary if one is not sure which user slot contains the one you want.
Many late-model Dell chassis have 'root' as UID 1, so if you can depend
on that then setting the password is much quicker.
'''
return __execute_cmd('deploy -u {0} -p {1}'.format(
username, password), host=host, admin_username=admin_username,
admin_password=admin_password, module=module
)
def deploy_snmp(snmp, host=None, admin_username=None,
admin_password=None, module=None):
'''
Change the QuickDeploy SNMP community string, used for switches as well
CLI Example:
.. code-block:: bash
salt dell dracr.deploy_snmp SNMP_STRING
host=<remote DRAC or CMC> admin_username=<DRAC user>
admin_password=<DRAC PW>
salt dell dracr.deploy_snmp public
'''
return __execute_cmd('deploy -v SNMPv2 {0} ro'.format(snmp),
host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module)
def create_user(username, password, permissions,
users=None, host=None,
admin_username=None, admin_password=None):
'''
Create user accounts
CLI Example:
.. code-block:: bash
salt dell dracr.create_user [USERNAME] [PASSWORD] [PRIVILEGES]
salt dell dracr.create_user diana secret login,test_alerts,clear_logs
DRAC Privileges
* login : Login to iDRAC
* drac : Configure iDRAC
* user_management : Configure Users
* clear_logs : Clear Logs
* server_control_commands : Execute Server Control Commands
* console_redirection : Access Console Redirection
* virtual_media : Access Virtual Media
* test_alerts : Test Alerts
* debug_commands : Execute Debug Commands
'''
_uids = set()
if users is None:
users = list_users()
if username in users:
log.warning('racadm: user \'%s\' already exists', username)
return False
for idx in six.iterkeys(users):
_uids.add(users[idx]['index'])
uid = sorted(list(set(range(2, 12)) - _uids), reverse=True).pop()
# Create user account first
if not __execute_cmd('config -g cfgUserAdmin -o '
'cfgUserAdminUserName -i {0} {1}'
.format(uid, username),
host=host, admin_username=admin_username,
admin_password=admin_password):
delete_user(username, uid)
return False
# Configure users permissions
if not set_permissions(username, permissions, uid):
log.warning('unable to set user permissions')
delete_user(username, uid)
return False
# Configure users password
if not change_password(username, password, uid):
log.warning('unable to set user password')
delete_user(username, uid)
return False
# Enable users admin
if not __execute_cmd('config -g cfgUserAdmin -o '
'cfgUserAdminEnable -i {0} 1'.format(uid)):
delete_user(username, uid)
return False
return True
def set_permissions(username, permissions,
uid=None, host=None,
admin_username=None, admin_password=None):
'''
Configure users permissions
CLI Example:
.. code-block:: bash
salt dell dracr.set_permissions [USERNAME] [PRIVILEGES]
[USER INDEX - optional]
salt dell dracr.set_permissions diana login,test_alerts,clear_logs 4
DRAC Privileges
* login : Login to iDRAC
* drac : Configure iDRAC
* user_management : Configure Users
* clear_logs : Clear Logs
* server_control_commands : Execute Server Control Commands
* console_redirection : Access Console Redirection
* virtual_media : Access Virtual Media
* test_alerts : Test Alerts
* debug_commands : Execute Debug Commands
'''
privileges = {'login': '0x0000001',
'drac': '0x0000002',
'user_management': '0x0000004',
'clear_logs': '0x0000008',
'server_control_commands': '0x0000010',
'console_redirection': '0x0000020',
'virtual_media': '0x0000040',
'test_alerts': '0x0000080',
'debug_commands': '0x0000100'}
permission = 0
# When users don't provide a user ID we need to search for this
if uid is None:
user = list_users()
uid = user[username]['index']
# Generate privilege bit mask
for i in permissions.split(','):
perm = i.strip()
if perm in privileges:
permission += int(privileges[perm], 16)
return __execute_cmd('config -g cfgUserAdmin -o '
'cfgUserAdminPrivilege -i {0} 0x{1:08X}'
.format(uid, permission),
host=host, admin_username=admin_username,
admin_password=admin_password)
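# Worked example of the privilege bit mask using the table above:
# 'login' (0x0000001) + 'clear_logs' (0x0000008) = 0x00000009, so
# set_permissions('diana', 'login,clear_logs', uid=4) runs
# 'config -g cfgUserAdmin -o cfgUserAdminPrivilege -i 4 0x00000009'.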
def set_snmp(community, host=None,
admin_username=None, admin_password=None):
'''
Configure CMC or individual iDRAC SNMP community string.
Use ``deploy_snmp`` for configuring chassis switch SNMP.
CLI Example:
.. code-block:: bash
salt dell dracr.set_snmp [COMMUNITY]
salt dell dracr.set_snmp public
'''
return __execute_cmd('config -g cfgOobSnmp -o '
'cfgOobSnmpAgentCommunity {0}'.format(community),
host=host, admin_username=admin_username,
admin_password=admin_password)
def set_network(ip, netmask, gateway, host=None,
admin_username=None, admin_password=None):
'''
Configure Network on the CMC or individual iDRAC.
Use ``set_niccfg`` for blade and switch addresses.
CLI Example:
.. code-block:: bash
salt dell dracr.set_network [DRAC IP] [NETMASK] [GATEWAY]
salt dell dracr.set_network 192.168.0.2 255.255.255.0 192.168.0.1
admin_username=root admin_password=calvin host=192.168.1.1
'''
return __execute_cmd('setniccfg -s {0} {1} {2}'.format(ip, netmask, gateway),
host=host, admin_username=admin_username,
admin_password=admin_password)
def server_power(status, host=None,
admin_username=None,
admin_password=None,
module=None):
'''
status
One of 'powerup', 'powerdown', 'powercycle', 'hardreset',
'graceshutdown'
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to reboot on the chassis such as a blade. If not provided,
the chassis will be rebooted.
CLI Example:
.. code-block:: bash
salt dell dracr.server_reboot
salt dell dracr.server_reboot module=server-1
'''
return __execute_cmd('serveraction {0}'.format(status),
host=host, admin_username=admin_username,
admin_password=admin_password, module=module)
def server_reboot(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Issues a power-cycle operation on the managed server. This action is
similar to pressing the power button on the system's front panel to
power down and then power up the system.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to reboot on the chassis such as a blade. If not provided,
the chassis will be rebooted.
CLI Example:
.. code-block:: bash
salt dell dracr.server_reboot
salt dell dracr.server_reboot module=server-1
'''
return __execute_cmd('serveraction powercycle',
host=host, admin_username=admin_username,
admin_password=admin_password, module=module)
def server_poweroff(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Powers down the managed server.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to power off on the chassis such as a blade.
If not provided, the chassis will be powered off.
CLI Example:
.. code-block:: bash
salt dell dracr.server_poweroff
salt dell dracr.server_poweroff module=server-1
'''
return __execute_cmd('serveraction powerdown',
host=host, admin_username=admin_username,
admin_password=admin_password, module=module)
def server_poweron(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Powers up the managed server.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to power on located on the chassis such as a blade. If
not provided, the chassis will be powered on.
CLI Example:
.. code-block:: bash
salt dell dracr.server_poweron
salt dell dracr.server_poweron module=server-1
'''
return __execute_cmd('serveraction powerup',
host=host, admin_username=admin_username,
admin_password=admin_password, module=module)
def server_hardreset(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Performs a reset (reboot) operation on the managed server.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to hard reset on the chassis such as a blade. If
not provided, the chassis will be reset.
CLI Example:
.. code-block:: bash
salt dell dracr.server_hardreset
salt dell dracr.server_hardreset module=server-1
'''
return __execute_cmd('serveraction hardreset',
host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module)
def server_powerstatus(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Return the power status for the passed module
CLI Example:
.. code-block:: bash
salt dell dracr.server_powerstatus
'''
ret = __execute_ret('serveraction powerstatus',
host=host, admin_username=admin_username,
admin_password=admin_password,
module=module)
result = {'retcode': 0}
if ret['stdout'] == 'ON':
result['status'] = True
result['comment'] = 'Power is on'
if ret['stdout'] == 'OFF':
result['status'] = False
result['comment'] = 'Power is off'
if ret['stdout'].startswith('ERROR'):
result['status'] = False
result['comment'] = ret['stdout']
return result
def server_pxe(host=None,
admin_username=None,
admin_password=None):
'''
Configure the server to perform a one-off PXE boot
CLI Example:
.. code-block:: bash
salt dell dracr.server_pxe
'''
if __execute_cmd('config -g cfgServerInfo -o cfgServerFirstBootDevice PXE',
host=host, admin_username=admin_username,
admin_password=admin_password):
if __execute_cmd('config -g cfgServerInfo -o cfgServerBootOnce 1',
host=host, admin_username=admin_username,
admin_password=admin_password):
return server_reboot(host=host, admin_username=admin_username,
admin_password=admin_password)
else:
log.warning('failed to set boot order')
return False
log.warning('failed to configure PXE boot')
return False
def list_slotnames(host=None,
admin_username=None,
admin_password=None):
'''
List the names of all slots in the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt-call --local dracr.list_slotnames host=111.222.333.444
admin_username=root admin_password=secret
'''
slotraw = __execute_ret('getslotname',
host=host, admin_username=admin_username,
admin_password=admin_password)
if slotraw['retcode'] != 0:
return slotraw
slots = {}
stripheader = True
for l in slotraw['stdout'].splitlines():
if l.startswith('<'):
stripheader = False
continue
if stripheader:
continue
fields = l.split()
slots[fields[0]] = {}
slots[fields[0]]['slot'] = fields[0]
if len(fields) > 1:
slots[fields[0]]['slotname'] = fields[1]
else:
slots[fields[0]]['slotname'] = ''
if len(fields) > 2:
slots[fields[0]]['hostname'] = fields[2]
else:
slots[fields[0]]['hostname'] = ''
return slots
def get_slotname(slot, host=None, admin_username=None, admin_password=None):
'''
Get the name of a slot number in the chassis.
slot
The number of the slot for which to obtain the name.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt-call --local dracr.get_slotname 0 host=111.222.333.444
admin_username=root admin_password=secret
'''
slots = list_slotnames(host=host, admin_username=admin_username,
admin_password=admin_password)
# The keys for this dictionary are strings, not integers, so convert the
# argument to a string
slot = six.text_type(slot)
return slots[slot]['slotname']
def set_slotname(slot, name, host=None,
admin_username=None, admin_password=None):
'''
Set the name of a slot in a chassis.
slot
The slot number to change.
name
The name to set. Can only be 15 characters long.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_slotname 2 my-slotname host=111.222.333.444
admin_username=root admin_password=secret
'''
return __execute_cmd('config -g cfgServerInfo -o cfgServerName -i {0} {1}'.format(slot, name),
host=host, admin_username=admin_username,
admin_password=admin_password)
def set_chassis_name(name,
host=None,
admin_username=None,
admin_password=None):
'''
Set the name of the chassis.
name
The name to be set on the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_chassis_name my-chassis host=111.222.333.444
admin_username=root admin_password=secret
'''
return __execute_cmd('setsysinfo -c chassisname {0}'.format(name),
host=host, admin_username=admin_username,
admin_password=admin_password)
def get_chassis_name(host=None, admin_username=None, admin_password=None):
'''
Get the name of a chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.get_chassis_name host=111.222.333.444
admin_username=root admin_password=secret
'''
return bare_rac_cmd('getchassisname', host=host,
admin_username=admin_username,
admin_password=admin_password)
def inventory(host=None, admin_username=None, admin_password=None):
def mapit(x, y):
return {x: y}
fields = {}
fields['server'] = ['name', 'idrac_version', 'blade_type', 'gen',
'updateable']
fields['switch'] = ['name', 'model_name', 'hw_version', 'fw_version']
fields['cmc'] = ['name', 'cmc_version', 'updateable']
fields['chassis'] = ['name', 'fw_version', 'fqdd']
rawinv = __execute_ret('getversion', host=host,
admin_username=admin_username,
admin_password=admin_password)
if rawinv['retcode'] != 0:
return rawinv
in_server = False
in_switch = False
in_cmc = False
in_chassis = False
ret = {}
ret['server'] = {}
ret['switch'] = {}
ret['cmc'] = {}
ret['chassis'] = {}
for l in rawinv['stdout'].splitlines():
if l.startswith('<Server>'):
in_server = True
in_switch = False
in_cmc = False
in_chassis = False
continue
if l.startswith('<Switch>'):
in_server = False
in_switch = True
in_cmc = False
in_chassis = False
continue
if l.startswith('<CMC>'):
in_server = False
in_switch = False
in_cmc = True
in_chassis = False
continue
if l.startswith('<Chassis Infrastructure>'):
in_server = False
in_switch = False
in_cmc = False
in_chassis = True
continue
if len(l) < 1:
continue
line = re.split(' +', l.strip())
if in_server:
ret['server'][line[0]] = dict(
(k, v) for d in map(mapit, fields['server'], line) for (k, v)
in d.items())
if in_switch:
ret['switch'][line[0]] = dict(
(k, v) for d in map(mapit, fields['switch'], line) for (k, v)
in d.items())
if in_cmc:
ret['cmc'][line[0]] = dict(
(k, v) for d in map(mapit, fields['cmc'], line) for (k, v) in
d.items())
if in_chassis:
ret['chassis'][line[0]] = dict(
(k, v) for d in map(mapit, fields['chassis'], line) for k, v in
d.items())
return ret
def set_chassis_location(location,
host=None,
admin_username=None,
admin_password=None):
'''
Set the location of the chassis.
location
The name of the location to be set on the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_chassis_location location-name host=111.222.333.444
admin_username=root admin_password=secret
'''
return __execute_cmd('setsysinfo -c chassislocation {0}'.format(location),
host=host, admin_username=admin_username,
admin_password=admin_password)
def get_chassis_location(host=None,
admin_username=None,
admin_password=None):
'''
Get the location of the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.get_chassis_location host=111.222.333.444
admin_username=root admin_password=secret
'''
return system_info(host=host,
admin_username=admin_username,
admin_password=admin_password)['Chassis Information']['Chassis Location']
def set_chassis_datacenter(location,
host=None,
admin_username=None,
admin_password=None):
'''
Set the datacenter of the chassis.
location
The name of the datacenter to be set on the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_chassis_datacenter datacenter-name host=111.222.333.444
admin_username=root admin_password=secret
'''
return set_general('cfgLocation', 'cfgLocationDatacenter', location,
host=host, admin_username=admin_username,
admin_password=admin_password)
def get_chassis_datacenter(host=None,
admin_username=None,
admin_password=None):
'''
Get the datacenter of the chassis.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.get_chassis_datacenter host=111.222.333.444
admin_username=root admin_password=secret
'''
return get_general('cfgLocation', 'cfgLocationDatacenter', host=host,
admin_username=admin_username, admin_password=admin_password)
def set_general(cfg_sec, cfg_var, val, host=None,
admin_username=None, admin_password=None):
return __execute_cmd('config -g {0} -o {1} {2}'.format(cfg_sec,
cfg_var, val),
host=host,
admin_username=admin_username,
admin_password=admin_password)
def get_general(cfg_sec, cfg_var, host=None,
admin_username=None, admin_password=None):
ret = __execute_ret('getconfig -g {0} -o {1}'.format(cfg_sec, cfg_var),
host=host,
admin_username=admin_username,
admin_password=admin_password)
if ret['retcode'] == 0:
return ret['stdout']
else:
return ret
def idrac_general(blade_name, command, idrac_password=None,
host=None,
admin_username=None, admin_password=None):
'''
Run a generic racadm command against a particular
blade in a chassis. Blades are usually named things like
'server-1', 'server-2', etc. If the iDRAC has a different
password than the CMC, then you can pass it with the
idrac_password kwarg.
:param blade_name: Name of the blade to run the command on
:param command: Command like to pass to racadm
:param idrac_password: Password for the iDRAC if different from the CMC
:param host: Chassis hostname
:param admin_username: CMC username
:param admin_password: CMC password
:return: stdout if the retcode is 0, otherwise a standard cmd.run_all dictionary
CLI Example:
.. code-block:: bash
salt fx2 chassis.cmd idrac_general server-1 'get BIOS.SysProfileSettings'
'''
module_network = network_info(host, admin_username,
admin_password, blade_name)
if idrac_password is not None:
password = idrac_password
else:
password = admin_password
idrac_ip = module_network['Network']['IP Address']
ret = __execute_ret(command, host=idrac_ip,
admin_username='root',
admin_password=password)
if ret['retcode'] == 0:
return ret['stdout']
else:
return ret
def _update_firmware(cmd,
host=None,
admin_username=None,
admin_password=None):
if not admin_username:
admin_username = __pillar__['proxy']['admin_username']
if not admin_password:
admin_password = __pillar__['proxy']['admin_password']
ret = __execute_ret(cmd,
host=host,
admin_username=admin_username,
admin_password=admin_password)
if ret['retcode'] == 0:
return ret['stdout']
else:
return ret
def bare_rac_cmd(cmd, host=None,
admin_username=None, admin_password=None):
ret = __execute_ret('{0}'.format(cmd),
host=host,
admin_username=admin_username,
admin_password=admin_password)
if ret['retcode'] == 0:
return ret['stdout']
else:
return ret
def update_firmware(filename,
host=None,
admin_username=None,
admin_password=None):
'''
Updates firmware using local firmware file
.. code-block:: bash
salt dell dracr.update_firmware firmware.exe
This executes the following command on your FX2
(using username and password stored in the pillar data)
.. code-block:: bash
racadm update -f firmware.exe -u user -p pass
'''
if os.path.exists(filename):
return _update_firmware('update -f {0}'.format(filename),
host=host,
admin_username=admin_username,
admin_password=admin_password)
else:
raise CommandExecutionError('Unable to find firmware file {0}'
.format(filename))
def update_firmware_nfs_or_cifs(filename, share,
host=None,
admin_username=None,
admin_password=None):
'''
Executes the following for CIFS
(using username and password stored in the pillar data)
.. code-block:: bash
racadm update -f <updatefile> -u user -p pass -l //IP-Address/share
Or for NFS
(using username and password stored in the pillar data)
.. code-block:: bash
racadm update -f <updatefile> -u user -p pass -l IP-address:/share
Salt command for CIFS:
.. code-block:: bash
salt dell dracr.update_firmware_nfs_or_cifs \
firmware.exe //IP-Address/share
Salt command for NFS:
.. code-block:: bash
salt dell dracr.update_firmware_nfs_or_cifs \
firmware.exe IP-address:/share
'''
if os.path.exists(filename):
return _update_firmware('update -f {0} -l {1}'.format(filename, share),
host=host,
admin_username=admin_username,
admin_password=admin_password)
else:
raise CommandExecutionError('Unable to find firmware file {0}'
.format(filename))
# def get_idrac_nic()
|
nilq/baby-python
|
python
|
from bs4 import BeautifulSoup as bs
import os
import re
import ntpath
class GetEngine(object):
"""
This class contains the methods needed to get the files,
to help make the pdf file.
The class contains the following methods:
get_html() --- Which gets the html file names.
get_pdf() --- Which gets the pdf file names.
get_css() --- Which gets the css file names.
get_images() --- Which gets the image file names.
To create an instance of this object, pass in the name of the directory
that stores all the extracted files from the epub file.
"""
def __init__(self, directory):
self.html_files = []
self.css_files = []
self.image_files = []
self.directory = directory
self.files = []
self.pdf_files = []
def get_html(self):
for file in self.files:
if file.endswith(".xhtml") or file.endswith(".html"):
self.html_files.append(file)
def get_pdf(self):
for file in self.html_files:
self.pdf_files.append("{}.pdf".format(self.html_files.index(file)))
def get_css(self):
for file in self.files:
if file.endswith(".css"):
self.css_files.append(file)
def get_images(self):
for file in self.files:
if file.endswith((".png", ".jpg", ".gif")):
self.image_files.append(file)
def get_all(self):
file = None
directory_paths = []
for root, dirs, files in os.walk(self.directory):
# This traverses the directory passed in as an argument and
# returns the current directory, the sub-directories and all the files
directory_paths.append(root)
if file:
continue
for each in files:
if each.endswith(".opf"):
file = os.path.join(root, each)
continue
if not file:
return
with open(file, "r") as opf_file:
xml_content = opf_file.read()
xml_tree = bs(xml_content, features="xml")
file_names = xml_tree.package.manifest.findAll('item')
# Gets the name of all the documents in order
# from the opf file, then saves the file name with its path
# The file path in the opf file can't be relied upon
# Hence, the need to extract file name and get its path
for file in file_names:
file_path_match = re.match(r'.+\.[a-zA-Z]+', file.get('href', ''))
if not file_path_match:
continue
file_name = ntpath.basename(file_path_match.group())
for path in directory_paths:
filepath = path + '/' + file_name
if os.path.exists(filepath):
self.files.append(filepath)
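# A minimal usage sketch, assuming the epub has already been unpacked into
# the (hypothetical) directory "extracted_epub":
#
#   engine = GetEngine("extracted_epub")
#   engine.get_all()      # resolve file paths via the .opf manifest
#   engine.get_html()     # populate engine.html_files from engine.files
#   engine.get_pdf()      # derive numbered pdf names from the html list
#   engine.get_css()      # populate engine.css_files
#   engine.get_images()   # populate engine.image_files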
|
nilq/baby-python
|
python
|
# # @package version.py
# @brief Argos version finder
import os
import core # Argos core
# # Attempts to determine the version of Argos (delegates to core; the old
# # .VERSION file lookup is kept below for reference)
def get_version():
return core.get_argos_version()
# Read the .VERSION file
# #join = os.path.join
# #dirname = os.path.dirname
# #abspath = os.path.abspath
# #version_file = join(dirname(abspath(__file__)), '../../.VERSION')
# #try:
# # with open(version_file) as vf:
# # verstr = vf.readline().strip()
# # return verstr
# #except IOError as ex:
# # return 'unknown'
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
import scipy.ndimage
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23
from tools import *
from ml import *
import argparse
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--mode', '-mode', help="Mode : train or predict", type=str)
parser.add_argument('--a', '-algorithm', help="algorithm/model name", type=str)
parser.add_argument('--i', '-image', help="licence plate to read", type=str)
parser.add_argument('--model', '-model', help="Model file path", type=str)
parser.add_argument('--d', '-dataset', help="dataset folder path", type=str)
args = parser.parse_args()
if args.mode == "train":
# Load Data
data, labels = load_dataset(args.d)
# Train ML models
mlp(data, labels, "mlp.pkl")
knn(data, labels, "knn.pkl")
elif args.mode == "predict":
# Load image
img = cv2.imread(args.i, 1)
# Apply image segmentation and extract digits
digits = histogram_of_pixel_projection(img)
# Load ML model
clf = joblib.load(args.model)
# List of predicted classes
prediction = []
for i in range(len(digits)):
# Get digit
digit = digits[i]
# Make the image squared
squared_digit = square(digit)
# Resize the image
resized_digit = cv2.resize(squared_digit, (20, 20), interpolation=cv2.INTER_AREA)
# Convert to one dim vector
one_vector_digit = np.array(resized_digit).ravel()
# Predict digit class
resultat = clf.predict([one_vector_digit])
# Append to total predictions
prediction.append(resultat[0])
print(prediction)
else:
print(" Error mode argument !")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module declares the different meanings that the Orbit 6 components can take
and their conversions
"""
from numpy import cos, arccos, sin, arcsin, arctan2, sqrt, arctanh, sinh, cosh
import numpy as np
from ..errors import UnknownFormError
from ..utils.node import Node
class Form(Node):
"""Base class for orbital form definition
"""
alt = {
"theta": "θ",
"phi": "φ",
"raan": "Ω",
"Omega": "Ω",
"omega": "ω",
"nu": "ν",
"theta_dot": "θ_dot",
"phi_dot": "φ_dot",
"aol": "u",
"H": "E", # The hyperbolic anomaly is available under the eccentric anomaly
}
def __init__(self, name, param_names):
super().__init__(name)
self.param_names = param_names
def __str__(self): # pragma: no cover
return self.name
def __call__(self, orbit, new_form):
"""Gives the result of the transformation without in-place modifications
Args:
orbit (Orbit):
new_form (str or Form):
Returns:
Coord
"""
if isinstance(new_form, Form):
new_form = new_form.name
coord = orbit.copy()
if new_form != orbit.form.name:
for a, b in self.steps(new_form):
coord = getattr(
self, "_{}_to_{}".format(a.name.lower(), b.name.lower())
)(coord, orbit.frame.center)
return coord
@classmethod
def _cartesian_to_keplerian(cls, coord, center):
"""Conversion from cartesian (position and velocity) to keplerian
The keplerian form is
* a : semi-major axis
* e : eccentricity
* i : inclination
* Ω : right-ascension of ascending node
* ω : Argument of perigee
* ν : True anomaly
"""
r, v = coord[:3], coord[3:]
h = np.cross(r, v) # angular momentum vector
h_norm = np.linalg.norm(h)
r_norm = np.linalg.norm(r)
v_norm = np.linalg.norm(v)
K = v_norm ** 2 / 2 - center.µ / r_norm # specific energy
a = -center.µ / (2 * K) # semi-major axis
e = sqrt(1 - h_norm ** 2 / (a * center.µ)) # eccentricity
p = a * (1 - e ** 2) # semi parameter
i = arccos(h[2] / h_norm) # inclination
Ω = arctan2(h[0], -h[1]) % (2 * np.pi) # right ascension of the ascending node
ω_ν = arctan2(r[2] / sin(i), r[0] * cos(Ω) + r[1] * sin(Ω))
ν = arctan2(sqrt(p / center.µ) * np.dot(v, r), p - r_norm) % (2 * np.pi)
ω = (ω_ν - ν) % (2 * np.pi) # argument of the perigee
return np.array([a, e, i, Ω, ω, ν], dtype=float)
@classmethod
def _keplerian_to_cartesian(cls, coord, center):
"""Conversion from Keplerian to Cartesian coordinates
"""
a, e, i, Ω, ω, ν = coord
p = a * (1 - e ** 2)
r = p / (1 + e * cos(ν))
h = sqrt(center.µ * p)
x = r * (cos(Ω) * cos(ω + ν) - sin(Ω) * sin(ω + ν) * cos(i))
y = r * (sin(Ω) * cos(ω + ν) + cos(Ω) * sin(ω + ν) * cos(i))
z = r * sin(i) * sin(ω + ν)
vx = x * h * e / (r * p) * sin(ν) - h / r * (
cos(Ω) * sin(ω + ν) + sin(Ω) * cos(ω + ν) * cos(i)
)
vy = y * h * e / (r * p) * sin(ν) - h / r * (
sin(Ω) * sin(ω + ν) - cos(Ω) * cos(ω + ν) * cos(i)
)
vz = z * h * e / (r * p) * sin(ν) + h / r * sin(i) * cos(ω + ν)
return np.array([x, y, z, vx, vy, vz], dtype=float)
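# Note: _cartesian_to_keplerian and _keplerian_to_cartesian are inverses;
# a round trip should recover the original six elements up to numerical
# precision and 2π angle wrapping.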
@classmethod
def _keplerian_to_keplerian_eccentric(cls, coord, center):
"""Conversion from Keplerian to Keplerian Eccentric
"""
a, e, i, Ω, ω, ν = coord
if e < 1:
# Elliptic case
cos_E = (e + cos(ν)) / (1 + e * cos(ν))
sin_E = (sin(ν) * sqrt(1 - e ** 2)) / (1 + e * cos(ν))
E = arctan2(sin_E, cos_E) % (2 * np.pi)
else:
# Hyperbolic case, E usually marked as H
cosh_E = (e + cos(ν)) / (1 + e * cos(ν))
sinh_E = (sin(ν) * sqrt(e ** 2 - 1)) / (1 + e * cos(ν))
E = arctanh(sinh_E / cosh_E)
return np.array([a, e, i, Ω, ω, E], dtype=float)
@classmethod
def _keplerian_eccentric_to_keplerian_mean(cls, coord, center):
"""Conversion from Keplerian Eccentric to Keplerian Mean
"""
a, e, i, Ω, ω, E = coord
if e < 1:
M = E - e * sin(E)
else:
# Hyperbolic case, E usually marked as H
M = e * sinh(E) - E
return np.array([a, e, i, Ω, ω, M], dtype=float)
@classmethod
def _keplerian_mean_to_keplerian_eccentric(cls, coord, center):
"""Conversion from Mean Keplerian to Keplerian Eccentric
"""
a, e, i, Ω, ω, M = coord
E = cls.M2E(e, M)
return np.array([a, e, i, Ω, ω, E], dtype=float)
@classmethod
def _keplerian_eccentric_to_keplerian(cls, coord, center):
"""Conversion from Mean Keplerian to True Keplerian
"""
a, e, i, Ω, ω, E = coord
if e < 1:
cos_ν = (cos(E) - e) / (1 - e * cos(E))
sin_ν = (sin(E) * sqrt(1 - e ** 2)) / (1 - e * cos(E))
else:
# Hyperbolic case, E usually marked as H
cos_ν = (cosh(E) - e) / (1 - e * cosh(E))
sin_ν = -(sinh(E) * sqrt(e ** 2 - 1)) / (1 - e * cosh(E))
ν = arctan2(sin_ν, cos_ν) % (np.pi * 2)
return np.array([a, e, i, Ω, ω, ν], dtype=float)
@classmethod
def M2E(cls, e, M):
"""Conversion from Mean Anomaly to Eccentric anomaly,
or Hyperbolic anomaly.
from Vallado
"""
tol = 1e-8
if e < 1:
# Ellipse
if -np.pi < M < 0 or M > np.pi:
E = M - e
else:
E = M + e
def next_E(E, e, M):
return E + (M - E + e * sin(E)) / (1 - e * cos(E))
E1 = next_E(E, e, M)
while abs(E1 - E) >= tol:
E = E1
E1 = next_E(E, e, M)
return E1
else:
# Hyperbolic
if e < 1.6:
if -np.pi < M < 0 or M > np.pi:
H = M - e
else:
H = M + e
else:
if e < 3.6 and abs(M) > np.pi:
H = M - np.sign(M) * e
else:
H = M / (e - 1)
def next_H(H, e, M):
return H + (M - e * sinh(H) + H) / (e * cosh(H) - 1)
H1 = next_H(H, e, M)
while abs(H1 - H) >= tol:
H = H1
H1 = next_H(H, e, M)
return H1
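# M2E solves Kepler's equation M = E - e*sin(E) (elliptic case) or
# M = e*sinh(H) - H (hyperbolic case) with Newton's iteration, e.g.
# E_{n+1} = E_n + (M - E_n + e*sin(E_n)) / (1 - e*cos(E_n)),
# stopping once successive iterates differ by less than 1e-8 rad.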
@classmethod
def _e_e_sin_e(cls, e, E):
x = (1 - e) * sin(E)
term = float(E)
d = 0
x0 = np.nan
while x != x0:
d += 2
term *= -(E ** 2) / (d * (d + 1))
x0 = x
x = x - term
return x
@classmethod
def _keplerian_circular_to_keplerian(cls, coord, center):
"""Conversion from Keplerian near-circular elements to Mean Keplerian
"""
a, ex, ey, i, Ω, u = coord
e = sqrt(ex ** 2 + ey ** 2)
ω = arctan2(ey / e, ex / e)
ν = u - ω
return np.array([a, e, i, Ω, ω, ν], dtype=float)
@classmethod
def _keplerian_to_keplerian_circular(cls, coord, center):
"""Conversion from Mean Keplerian to Keplerian near-circular elements
"""
a, e, i, Ω, ω, ν = coord
ex = e * cos(ω)
ey = e * sin(ω)
u = (ω + ν) % (np.pi * 2)
return np.array([a, ex, ey, i, Ω, u], dtype=float)
@classmethod
def _tle_to_keplerian_mean(cls, coord, center):
"""Conversion from the TLE standard format to the Mean Keplerian
see :py:class:`Tle` for more information.
"""
i, Ω, e, ω, M, n = coord
a = (center.µ / n ** 2) ** (1 / 3)
return np.array([a, e, i, Ω, ω, M], dtype=float)
@classmethod
def _keplerian_mean_to_tle(cls, coord, center):
"""Mean Keplerian to TLE format conversion
"""
a, e, i, Ω, ω, M = coord
n = sqrt(center.µ / a ** 3)
return np.array([i, Ω, e, ω, M, n], dtype=float)
@classmethod
def _cartesian_to_spherical(cls, coord, center):
"""Cartesian to Spherical conversion
.. warning:: The spherical form is equatorial, not zenithal
"""
x, y, z, vx, vy, vz = coord
r = np.linalg.norm(coord[:3])
phi = arcsin(z / r)
theta = arctan2(y, x)
r_dot = (x * vx + y * vy + z * vz) / r
phi_dot = (vz * (x ** 2 + y ** 2) - z * (x * vx + y * vy)) / (
r ** 2 * sqrt(x ** 2 + y ** 2)
)
theta_dot = (x * vy - y * vx) / (x ** 2 + y ** 2)
return np.array([r, theta, phi, r_dot, theta_dot, phi_dot], dtype=float)
@classmethod
def _spherical_to_cartesian(cls, coord, center):
"""Spherical to cartesian conversion
"""
r, theta, phi, r_dot, theta_dot, phi_dot = coord
x = r * cos(phi) * cos(theta)
y = r * cos(phi) * sin(theta)
z = r * sin(phi)
vx = r_dot * x / r - y * theta_dot - z * phi_dot * cos(theta)
vy = r_dot * y / r + x * theta_dot - z * phi_dot * sin(theta)
vz = r_dot * z / r + r * phi_dot * cos(phi)
return np.array([x, y, z, vx, vy, vz], dtype=float)
TLE = Form("tle", ["i", "Ω", "e", "ω", "M", "n"])
"""TLE special form
* i : inclination
* Ω : right-ascension of ascending node
* e : eccentricity
* ω : argument of perigee
* M : mean anomaly
* n : mean motion
see :py:class:`~beyond.orbits.tle.Tle` for details
"""
KEPL_C = Form("keplerian_circular", ["a", "ex", "ey", "i", "Ω", "u"])
"""Special case for near-circular orbits
* a : semi-major axis
* ex : e * cos(ω)
* ey : e * sin(ω)
* i : inclination
* Ω : right-ascension of ascending node
* u : argument of latitude (ω + ν)
"""
KEPL_E = Form("keplerian_eccentric", ["a", "e", "i", "Ω", "ω", "E"])
"""Same as Keplerian, but replaces True anomaly with
`Eccentric anomaly <https://en.wikipedia.org/wiki/Eccentric_anomaly>`__
"""
KEPL_M = Form("keplerian_mean", ["a", "e", "i", "Ω", "ω", "M"])
"""Same as Keplerian, but replaces True anomaly with
`Mean anomaly <https://en.wikipedia.org/wiki/Mean_anomaly>`__
"""
KEPL = Form("keplerian", ["a", "e", "i", "Ω", "ω", "ν"])
"""The keplerian form is
* a : semi-major axis
* e : eccentricity
* i : inclination
* Ω : right-ascension of ascending node
* ω : Argument of perigee
* ν : True anomaly
see `wikipedia <https://en.wikipedia.org/wiki/Orbital_elements>`__ for details
"""
SPHE = Form("spherical", ["r", "θ", "φ", "r_dot", "θ_dot", "φ_dot"])
"""Spherical form
* r : radial distance / altitude
* θ : azimuth / longitude
* φ : elevation / latitude
* r_dot : first derivative of radial distance / altitude
* θ_dot : first derivative of azimuth / longitude
* φ_dot : first derivative of elevation / latitude
"""
CART = Form("cartesian", ["x", "y", "z", "vx", "vy", "vz"])
"""Cartesian form"""
SPHE + CART + KEPL + KEPL_E + KEPL_M + TLE
KEPL + KEPL_C
_cache = {
"tle": TLE,
"keplerian_circular": KEPL_C,
"keplerian_mean": KEPL_M,
"keplerian_eccentric": KEPL_E,
"keplerian": KEPL,
"spherical": SPHE,
"cartesian": CART,
}
def get_form(form): # pragma: no cover
if form.lower() not in _cache:
raise UnknownFormError(form)
return _cache[form.lower()]
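# A minimal usage sketch of the form registry (the names are the keys
# registered in _cache above):
#
#   >>> get_form("keplerian").param_names
#   ['a', 'e', 'i', 'Ω', 'ω', 'ν']
#   >>> get_form("unknown")   # raises UnknownFormError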
|
nilq/baby-python
|
python
|
"""
Tasks related to `oms` project.
Import as:
import oms.oms_lib_tasks as oomlitas
"""
import logging
import os
from invoke import task
import helpers.hdbg as hdbg
import helpers.hgit as hgit
import helpers.lib_tasks as hlibtask
_LOG = logging.getLogger(__name__)
# TODO(gp): This was branched from im/im_lib_tasks.py. We should factor out the
# common part CMTask #496.
def get_db_env_path(stage: str) -> str:
"""
Get path to db env file that contains db connection parameters.
:param stage: development stage, i.e. `local`, `dev` and `prod`
"""
hdbg.dassert_in(stage, "local dev prod".split())
# Get `env` files dir.
env_dir = "oms/devops/env"
# Get the file name depending on the stage.
env_file_name = f"{stage}.oms_db_config.env"
# Get file path.
amp_path = hgit.get_amp_abs_path()
env_file_path = os.path.join(amp_path, env_dir, env_file_name)
hdbg.dassert_file_exists(env_file_path)
return env_file_path
# TODO(gp): This should be used also from the unit tests?
def _get_docker_compose_path() -> str:
"""
Return the absolute path to the docker-compose file for this component.
E.g., `oms/devops/compose/docker-compose.yml`.
"""
# Get `amp` path.
amp_path = hgit.get_amp_abs_path()
# Get `docker-compose` file path.
# TODO(gp): Factor out this dir.
docker_compose_dir = "oms/devops/compose"
compose_file_name = "docker-compose.yml"
docker_compose_path = os.path.join(
amp_path, docker_compose_dir, compose_file_name
)
# Get absolute version of a file path.
docker_compose_abs_path = os.path.abspath(docker_compose_path)
# Verify that the file exists.
hdbg.dassert_file_exists(docker_compose_abs_path)
return docker_compose_abs_path
# #############################################################################
def _get_docker_cmd(stage: str, docker_cmd: str) -> str:
"""
Construct the `docker-compose` command to run a script inside this
Docker component.
E.g, to run the `.../devops/set_schema_im_db.py`:
```
docker-compose \
--file devops/compose/docker-compose.yml \
--env-file devops/env/local.oms_db_config.env \
run --rm oms_postgres \
.../devops/set_schema_im_db.py
```
:param stage: development stage, i.e. `local`, `dev` and `prod`
:param docker_cmd: command to execute inside docker
"""
cmd = ["docker-compose"]
# Add `docker-compose` file path.
docker_compose_file_path = _get_docker_compose_path()
cmd.append(f"--file {docker_compose_file_path}")
# Add `env file` path.
env_file = get_db_env_path(stage)
cmd.append(f"--env-file {env_file}")
# Add `run`.
service_name = "oms_postgres"
cmd.append(f"run --rm {service_name}")
cmd.append(docker_cmd)
# Convert the list to a multiline command.
multiline_docker_cmd = hlibtask._to_multi_line_cmd(cmd)
return multiline_docker_cmd # type: ignore[no-any-return]
@task
def oms_docker_cmd(ctx, stage, cmd): # type: ignore
"""
Execute the command `cmd` inside a container attached to the `im app`.
:param stage: development stage, i.e. `local`, `dev` and `prod`
:param cmd: command to execute
"""
hdbg.dassert_ne(cmd, "")
# Get docker cmd.
docker_cmd = _get_docker_cmd(stage, cmd)
# Execute the command.
hlibtask._run(ctx, docker_cmd, pty=True)
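# Example invocation (hypothetical command; invoke exposes the task name
# with dashes):
#
#   invoke oms-docker-cmd --stage local --cmd "psql --version"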
# #############################################################################
def _get_docker_up_cmd(stage: str, detach: bool) -> str:
"""
Construct the command to bring up the `oms` service.
E.g.,
```
docker-compose \
--file devops/compose/docker-compose.yml \
--env-file devops/env/local.oms_db_config.env \
up \
oms_postgres
```
:param stage: development stage, i.e. `local`, `dev` and `prod`
:param detach: run containers in the background
"""
cmd = ["docker-compose"]
# Add `docker-compose` file path.
docker_compose_file_path = _get_docker_compose_path()
cmd.append(f"--file {docker_compose_file_path}")
# Add `env file` path.
env_file = get_db_env_path(stage)
cmd.append(f"--env-file {env_file}")
    # Add `up` command.
cmd.append("up")
if detach:
# Enable detached mode.
cmd.append("-d")
service = "oms_postgres"
cmd.append(service)
cmd = hlibtask._to_multi_line_cmd(cmd)
return cmd # type: ignore[no-any-return]
@task
def oms_docker_up(ctx, stage, detach=False): # type: ignore
"""
Start oms container with Postgres inside.
:param ctx: `context` object
:param stage: development stage, i.e. `local`, `dev` and `prod`
:param detach: run containers in the background
"""
    # Get docker up command.
    docker_up_cmd = _get_docker_up_cmd(stage, detach)
    # Execute the command.
    hlibtask._run(ctx, docker_up_cmd, pty=True)
# #############################################################################
def _get_docker_down_cmd(stage: str, volumes_remove: bool) -> str:
"""
Construct the command to shut down the `oms` service.
E.g.,
```
docker-compose \
--file devops/compose/docker-compose.yml \
--env-file devops/env/local.oms_db_config.env \
down \
-v
```
:param stage: development stage, i.e. `local`, `dev` and `prod`
:param volumes_remove: whether to remove attached volumes or not
"""
cmd = ["docker-compose"]
# Add `docker-compose` file path.
docker_compose_file_path = _get_docker_compose_path()
cmd.append(f"--file {docker_compose_file_path}")
# Add `env file` path.
env_file = get_db_env_path(stage)
cmd.append(f"--env-file {env_file}")
# Add `down` command.
cmd.append("down")
if volumes_remove:
# Use the '-v' option to remove attached volumes.
_LOG.warning(
"Removing the attached volumes resetting the state of the DB"
)
cmd.append("-v")
cmd = hlibtask._to_multi_line_cmd(cmd)
return cmd # type: ignore[no-any-return]
@task
def oms_docker_down(ctx, stage, volumes_remove=False): # type: ignore
"""
Bring down the `oms` service.
    By default volumes are not removed; to also remove them, run
    `invoke oms_docker_down --volumes-remove`.
:param stage: development stage, i.e. `local`, `dev` and `prod`
:param volumes_remove: whether to remove attached volumes or not
:param ctx: `context` object
"""
# Get docker down command.
cmd = _get_docker_down_cmd(stage, volumes_remove)
# Execute the command.
hlibtask._run(ctx, cmd, pty=True)
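# Example CLI usage (a sketch: task and flag names follow the definitions
# above; the `local` stage assumes `oms/devops/env/local.oms_db_config.env`
# exists):
#
#   invoke oms_docker_up local --detach
#   invoke oms_docker_cmd local "psql --version"
#   invoke oms_docker_down local --volumes-remove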
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (c) Megvii, Inc. and its affiliates. All Rights Reserved
import re
import setuptools
import sys
TORCH_AVAILABLE = True
try:
import torch
from torch.utils import cpp_extension
except ImportError:
TORCH_AVAILABLE = False
print("[WARNING] Unable to import torch, pre-compiling ops will be disabled.")
def get_package_dir():
pkg_dir = {
"yolox.tools": "tools",
"yolox.exp.default": "exps/default",
}
return pkg_dir
def get_install_requirements():
with open("requirements.txt", "r", encoding="utf-8") as f:
reqs = [x.strip() for x in f.read().splitlines()]
reqs = [x for x in reqs if not x.startswith("#")]
return reqs
def get_yolox_version():
with open("yolox/__init__.py", "r") as f:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(), re.MULTILINE
).group(1)
return version
def get_long_description():
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
return long_description
def get_ext_modules():
ext_module = []
if sys.platform != "win32": # pre-compile ops on linux
assert TORCH_AVAILABLE, "torch is required for pre-compiling ops, please install it first."
# if any other op is added, please also add it here
from yolox.layers import FastCOCOEvalOp
ext_module.append(FastCOCOEvalOp().build_op())
return ext_module
def get_cmd_class():
cmdclass = {}
if TORCH_AVAILABLE:
cmdclass["build_ext"] = cpp_extension.BuildExtension
return cmdclass
setuptools.setup(
name="yolox",
version=get_yolox_version(),
author="megvii basedet team",
url="https://github.com/Megvii-BaseDetection/YOLOX",
package_dir=get_package_dir(),
packages=setuptools.find_packages(exclude=("tests", "tools")) + list(get_package_dir().keys()),
python_requires=">=3.6",
install_requires=get_install_requirements(),
setup_requires=["wheel"], # avoid building error when pip is not updated
long_description=get_long_description(),
long_description_content_type="text/markdown",
include_package_data=True, # include files in MANIFEST.in
ext_modules=get_ext_modules(),
cmdclass=get_cmd_class(),
classifiers=[
"Programming Language :: Python :: 3", "Operating System :: OS Independent",
"License :: OSI Approved :: Apache Software License",
],
project_urls={
"Documentation": "https://yolox.readthedocs.io",
"Source": "https://github.com/Megvii-BaseDetection/YOLOX",
"Tracker": "https://github.com/Megvii-BaseDetection/YOLOX/issues",
},
)
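# A typical local install sketch (assumes torch is importable so the
# FastCOCOEval extension above can be pre-compiled on non-Windows hosts):
#
#   pip install -v -e .
#   python -c "import yolox; print(yolox.__version__)"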
|
nilq/baby-python
|
python
|
from tkinter import *
from math import *
class test():
def __init__(self):
self.a=dict(name="",usn="",q1="",q2="",q3="",q4="",t1="",t2="",ass="")
self.resulttable=Tk()
self.resulttable.geometry("1500x1500")
self.resulttable.config()
self.ent=Frame(self.resulttable)
self.ent.grid()
self.res1=Frame(self.resulttable)
self.res1.grid()
self.execute()
self.key=1
self.res2=Frame(self.resulttable)
self.res2.grid()
self.entab()
def execute(self):
ht=2
wt=8
Label(self.res1,text=" Subjects ",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 15").grid(row=1,column=1)
Label(self.res1,text=" Quize 1 ",justify=LEFT,relief="solid",bd=2,font="Times 15",width=wt,height=ht).grid(row=1,column=2)
Label(self.res1,text=" Quize 2 ",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 15").grid(row=1,column=3)
Label(self.res1,text=" Quize 3 ",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 15").grid(row=1,column=4)
Label(self.res1,text=" Quize 4 ",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 15").grid(row=1,column=5)
Label(self.res1,text=" Test 1 ",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 15").grid(row=1,column=6)
Label(self.res1,text=" Test 2 ",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 15").grid(row=1,column=7)
Label(self.res1,text=" Assgt ",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 15").grid(row=1,column=8)
Label(self.res1,text="Credits",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 15",bg="yellow").grid(row=1,column=9)
Label(self.res1,text="Total",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 15",bg="green",fg="white").grid(row=1,column=10)
print("EXECUTE success")
def alldestroys(self):
self.resulttable.destroy()
self.errorwin.destroy()
def result(self):
q=50/17
wt=9
ht=2
if(self.a['name'].get()==""):
print("Exit this")
self.errorwin=Tk()
self.errorwin.geometry("350x50")
self.errorwin.title("ERROR")
Label(self.errorwin,text="Sorry!\n No data Added. Press OK to exit").pack()
Button(self.errorwin,text=" OK ",bg="red",fg="white",command=self.alldestroys ).pack()
self.errorwin.mainloop()
else:
print(self.key)
Label(self.res2,text=self.a['name'].get(),bg="blue",fg="white",justify=LEFT,width=wt,relief="solid",bd=2,height=ht,font="Times 13").grid(row=self.key,column=1)
Label(self.res2,text=ceil((int(self.a['q1'].get()))/5),width=wt,height=ht,justify=LEFT,relief="solid",bd=2,font="Times 13").grid(row=self.key,column=2)
Label(self.res2,text=ceil((int(self.a['q2'].get()))/5),justify=LEFT,width=wt,height=ht,relief="solid",bd=2,font="Times 13").grid(row=self.key,column=3)
Label(self.res2,text=ceil((int(self.a['q3'].get()))/5),justify=LEFT,width=wt,height=ht,relief="solid",bd=2,font="Times 13").grid(row=self.key,column=4)
Label(self.res2,text=ceil((int(self.a['q4'].get()))/5),justify=LEFT,width=wt,height=ht,relief="solid",bd=2,font="Times 13").grid(row=self.key,column=5)
Label(self.res2,text=ceil((int(self.a['t1'].get()))/q),justify=LEFT,width=wt,height=ht,relief="solid",bd=2,font="Times 13").grid(row=self.key,column=6)
Label(self.res2,text=ceil((int(self.a['t2'].get()))/q),justify=LEFT,relief="solid",bd=2,width=wt,height=ht,font="Times 13").grid(row=self.key,column=7)
Label(self.res2,text=ceil((int(self.a['ass'].get()))),relief="solid",bd=2,justify=LEFT,width=wt,height=ht,font="Times 13").grid(row=self.key,column=8)
t=ceil((int(self.a['ass'].get())))+ceil((int(self.a['t2'].get()))/q)+ceil((int(self.a['t1'].get()))/q)+ceil((int(self.a['q1'].get()))/5)+ceil((int(self.a['q2'].get()))/5)+ceil((int(self.a['q3'].get()))/5)+ceil((int(self.a['q4'].get()))/5)
Label(self.res2,text=self.a['usn'].get(),font="Times 13",justify=LEFT,relief="solid",bd=2,width=wt,height=ht,bg="yellow").grid(row=self.key,column=9)
Label(self.res2,text=t,justify=LEFT,font="Times 13",relief="solid",bd=2,width=wt,height=ht,bg="green",fg="white").grid(row=self.key,column=10)
self.key=self.key+1
print("result success")
print(self.a['name'].get())
def entab(self):
i=1
j=1
self.a['name']=StringVar()
label=Label(self.ent,text="Subject")
entry=Entry(self.ent,textvariable=self.a['name'])
label.grid(row=i,column=j)
entry.grid(row=i,column=j+1)
i=i+1
self.a['usn']=StringVar()
label=Label(self.ent,text="Credits of Subjects")
entry=Entry(self.ent,textvariable=self.a['usn'])
label.grid(row=i,column=j)
entry.grid(row=i,column=j+1)
i=i+1
self.a['q1']=StringVar()
label=Label(self.ent,text="Quize 1")
entry=Entry(self.ent,textvariable=self.a['q1'])
label.grid(row=i,column=j)
entry.grid(row=i,column=j+1)
i=i+1
self.a['q2']=StringVar()
label=Label(self.ent,text="Quize 2")
entry=Entry(self.ent,textvariable=self.a['q2'])
label.grid(row=i,column=j)
entry.grid(row=i,column=j+1)
i=i+1
self.a['q3']=StringVar()
label=Label(self.ent,text="Quize 3")
entry=Entry(self.ent,textvariable=self.a['q3'])
label.grid(row=i,column=j)
entry.grid(row=i,column=j+1)
i=i+1
self.a['q4']=StringVar()
label=Label(self.ent,text="Quize 4")
entry=Entry(self.ent,textvariable=self.a['q4'])
label.grid(row=i,column=j)
entry.grid(row=i,column=j+1)
i=i+1
self.a['t1']=StringVar()
label=Label(self.ent,text="Test 1")
entry=Entry(self.ent,textvariable=self.a['t1'])
label.grid(row=i,column=j)
entry.grid(row=i,column=j+1)
i=i+1
self.a['t2']=StringVar()
label=Label(self.ent,text="Test 2")
entry=Entry(self.ent,textvariable=self.a['t2'])
label.grid(row=i,column=j)
entry.grid(row=i,column=j+1)
i=i+1
self.a['ass']=StringVar()
label=Label(self.ent,text="Assignment")
entry=Entry(self.ent,textvariable=self.a['ass'])
label.grid(row=i,column=j)
entry.grid(row=i,column=j+1)
i=i+1
Label(self.ent,text="").grid()
Label(self.ent,text="").grid()
Label(self.ent,text="").grid()
Button(self.ent,text=" Add ",bg="green",fg="white",command=self.result).grid(row=i+3,column=j+3)
Button(self.ent, text=" Exit ", bg="red", fg="white", command=self.resulttable.destroy).grid(row=i + 3, column=j + 5)
self.resulttable.mainloop()
test()
|
nilq/baby-python
|
python
|
import json
from typing import Any, Dict, List, Optional, Set, Tuple
from google.cloud import ndb
from backend.common.consts.media_type import MediaType
from backend.common.models.media import Media
from backend.common.models.team import Team
from backend.tasks_io.datafeeds.parsers.json.parser_paginated_json import (
ParserPaginatedJSON,
)
class FMSAPITeamAvatarParser(ParserPaginatedJSON[Tuple[List[Media], Set[ndb.Key]]]):
def __init__(self, year: int):
self.year = year
def parse(
self, response: Dict[str, Any]
) -> Tuple[Optional[Tuple[List[Media], Set[ndb.Key]]], bool]:
current_page = response["pageCurrent"]
total_pages = response["pageTotal"]
avatars: List[Media] = []
media_keys_to_delete: Set[ndb.Key] = set()
for teamData in response["teams"]:
team_number = teamData["teamNumber"]
foreign_key = "avatar_{}_frc{}".format(self.year, team_number)
media_key = ndb.Key(
Media, Media.render_key_name(MediaType.AVATAR, foreign_key)
)
encoded_avatar = teamData["encodedAvatar"]
if not encoded_avatar:
media_keys_to_delete.add(media_key)
continue
avatars.append(
Media(
key=media_key,
details_json=json.dumps({"base64Image": encoded_avatar}),
foreign_key=foreign_key,
media_type_enum=MediaType.AVATAR,
references=[ndb.Key(Team, "frc{}".format(team_number))],
year=self.year,
)
)
return (
(avatars, media_keys_to_delete)
if avatars or media_keys_to_delete
else None,
(current_page < total_pages),
)
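# A minimal usage sketch (comments only: building ndb keys requires an
# active google.cloud.ndb client context, so this is illustrative):
#
#   parser = FMSAPITeamAvatarParser(year=2024)
#   result, more_pages = parser.parse(api_response)
#   # result is None or an (avatars, media_keys_to_delete) tuple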
|
nilq/baby-python
|
python
|
"""\
Pyconstruct provides metrics and losses to be used with most of the structured
output problems out there.
"""
from .losses import *
__all__ = losses.__all__
|
nilq/baby-python
|
python
|
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_euler.python.euler_ops import base
from tf_euler.python.euler_ops import type_ops
_sample_neighbor = base._LIB_OP.sample_neighbor
_get_top_k_neighbor = base._LIB_OP.get_top_k_neighbor
_sample_fanout = base._LIB_OP.sample_fanout
_sample_neighbor_layerwise_with_adj = \
base._LIB_OP.sample_neighbor_layerwise_with_adj
_sample_fanout_with_feature = base._LIB_OP.sample_fanout_with_feature
def sparse_get_adj(nodes, nb_nodes, edge_types, n=-1, m=-1):
edge_types = type_ops.get_edge_type_id(edge_types)
res = base._LIB_OP.sparse_get_adj(nodes, nb_nodes, edge_types, n, m)
return tf.SparseTensor(*res[:3])
def sample_neighbor(nodes, edge_types, count, default_node=-1, condition=''):
edge_types = type_ops.get_edge_type_id(edge_types)
return _sample_neighbor(nodes, edge_types, count, default_node, condition)
def get_top_k_neighbor(nodes, edge_types, k, default_node=-1, condition=''):
edge_types = type_ops.get_edge_type_id(edge_types)
return _get_top_k_neighbor(nodes, edge_types, k, default_node, condition)
def sample_fanout_with_feature(nodes, edge_types, count, default_node,
dense_feature_names, dense_dimensions,
sparse_feature_names, sparse_default_values):
edge_types = type_ops.get_edge_type_id(edge_types)
res = _sample_fanout_with_feature(
tf.reshape(nodes, [-1]), edge_types, count,
default_node=default_node,
sparse_feature_names=sparse_feature_names,
sparse_default_values=sparse_default_values,
dense_feature_names=dense_feature_names,
dense_dimensions=dense_dimensions,
N=len(count),
ND=(len(count) + 1) * len(dense_feature_names),
NS=(len(count) + 1) * len(sparse_feature_names))
neighbors = [tf.reshape(nodes, [-1])]
neighbors.extend([tf.reshape(i, [-1]) for i in res[0]])
weights = res[1]
types = res[2]
dense_features = res[3]
sparse_features = [tf.SparseTensor(*sp) for sp in zip(*res[4:7])]
return neighbors, weights, types, dense_features, sparse_features
def sample_neighbor_layerwise(nodes, edge_types, count,
default_node=-1, weight_func=''):
edge_types = type_ops.get_edge_type_id(edge_types)
res = _sample_neighbor_layerwise_with_adj(nodes, edge_types, count,
weight_func, default_node)
return res[0], tf.SparseTensor(*res[1:4])
def get_full_neighbor(nodes, edge_types, condition=''):
"""
Args:
nodes: A `Tensor` of `int64`.
edge_types: A 1-D `Tensor` of int32. Specify edge types to filter
outgoing edges.
Return:
      A tuple of `SparseTensor` (neighbors, weights, types).
neighbors: A `SparseTensor` of `int64`.
weights: A `SparseTensor` of `float`.
types: A `SparseTensor` of `int32`
"""
edge_types = type_ops.get_edge_type_id(edge_types)
sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types, condition)
return tf.SparseTensor(*sp_returns[:3]), \
tf.SparseTensor(*sp_returns[3:6]), \
tf.SparseTensor(*sp_returns[6:])
def get_sorted_full_neighbor(nodes, edge_types, condition=''):
"""
Args:
nodes: A `Tensor` of `int64`.
edge_types: A 1-D `Tensor` of int32. Specify edge types to filter
outgoing edges.
Return:
      A tuple of `SparseTensor` (neighbors, weights, types).
neighbors: A `SparseTensor` of `int64`.
weights: A `SparseTensor` of `float`.
types: A `SparseTensor` of `int32`
"""
edge_types = type_ops.get_edge_type_id(edge_types)
sp_returns = base._LIB_OP.get_sorted_full_neighbor(nodes,
edge_types,
condition)
return tf.SparseTensor(*sp_returns[:3]),\
tf.SparseTensor(*sp_returns[3:6]),\
tf.SparseTensor(*sp_returns[6:])
def sample_fanout(nodes, edge_types, counts, default_node=-1):
"""
Sample multi-hop neighbors of nodes according to weight in graph.
Args:
nodes: A 1-D `Tensor` of `int64`.
edge_types: A list of 1-D `Tensor` of int32. Specify edge types to filter
outgoing edges in each hop.
counts: A list of `int`. Specify the number of sampling for each node in
each hop.
default_node: A `int`. Specify the node id to fill when there is no
neighbor for specific nodes.
Return:
      A tuple of lists: (samples, weights, types)
samples: A list of `Tensor` of `int64`, with the same length as
`edge_types` and `counts`, with shapes `[num_nodes]`,
`[num_nodes * count1]`, `[num_nodes * count1 * count2]`, ...
weights: A list of `Tensor` of `float`, with shapes
`[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
types: A list of `Tensor` of `int32`, with shapes
`[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
"""
edge_types = [type_ops.get_edge_type_id(edge_type)
for edge_type in edge_types]
neighbors_list = [tf.reshape(nodes, [-1])]
weights_list = []
type_list = []
neighbors, weights, types = _sample_fanout(
neighbors_list[-1],
edge_types, counts,
default_node=default_node,
N=len(counts))
neighbors_list.extend([tf.reshape(n, [-1]) for n in neighbors])
weights_list.extend([tf.reshape(w, [-1]) for w in weights])
type_list.extend([tf.reshape(t, [-1]) for t in types])
return neighbors_list, weights_list, type_list
def sample_fanout_layerwise_each_node(nodes, edge_types, counts,
default_node=-1):
'''
sample fanout layerwise for each node
'''
edge_types = [type_ops.get_edge_type_id(edge_type)
for edge_type in edge_types]
neighbors_list = [tf.reshape(nodes, [-1])]
adj_list = []
for hop_edge_types, count in zip(edge_types, counts):
if (len(neighbors_list) == 1):
neighbors, _, _ = sample_neighbor(neighbors_list[-1],
hop_edge_types,
count,
default_node=default_node)
neighbors_list.append(tf.reshape(neighbors, [-1]))
else:
neighbors, adj = sample_neighbor_layerwise(
tf.reshape(neighbors_list[-1], [-1, last_count]),
hop_edge_types,
count,
default_node=default_node)
neighbors_list.append(tf.reshape(neighbors, [-1]))
adj_list.append(adj)
last_count = count
return neighbors_list, adj_list
def sample_fanout_layerwise(nodes, edge_types, counts,
default_node=-1, weight_func=''):
edge_types = [type_ops.get_edge_type_id(edge_type)
for edge_type in edge_types]
neighbors_list = [tf.reshape(nodes, [-1])]
adj_list = []
last_count = tf.size(nodes)
for hop_edge_types, count in zip(edge_types, counts):
neighbors, adj = sample_neighbor_layerwise(
tf.reshape(neighbors_list[-1], [-1, last_count]),
hop_edge_types,
count,
default_node=default_node,
weight_func=weight_func)
neighbors_list.append(tf.reshape(neighbors, [-1]))
adj_list.append(adj)
last_count = count
return neighbors_list, adj_list
def get_multi_hop_neighbor(nodes, edge_types):
"""
Get multi-hop neighbors with adjacent matrix.
Args:
nodes: A 1-D `tf.Tensor` of `int64`.
edge_types: A list of 1-D `tf.Tensor` of `int32`. Specify edge types to
filter outgoing edges in each hop.
Return:
      A tuple of lists: (nodes, adjacents)
        nodes: A list of N + 1 `tf.Tensor` of `int64`, N is the number of
          hops. Specify node set of each hop, including the root.
        adjacents: A list of N `tf.SparseTensor` of `int64`. Specify adjacent
          matrix between hops.
"""
edge_types = [type_ops.get_edge_type_id(edge_type)
for edge_type in edge_types]
nodes = tf.reshape(nodes, [-1])
nodes_list = [nodes]
adj_list = []
for hop_edge_types in edge_types:
neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
next_values = weight.values
next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
next_shape = tf.cast(next_shape, tf.int64)
next_adj = tf.SparseTensor(next_indices, next_values, next_shape)
next_adj = tf.sparse_reorder(next_adj)
nodes_list.append(next_nodes)
adj_list.append(next_adj)
nodes = next_nodes
return nodes_list, adj_list
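# A minimal usage sketch (comments only: these ops require the Euler graph
# backend to be initialized before the session runs, and the edge-type ids
# below are assumed, not taken from a real graph):
#
#   nodes = tf.constant([1, 2, 3], dtype=tf.int64)
#   neighbors, weights, types = sample_fanout(
#       nodes, edge_types=[[0], [0]], counts=[5, 3])
#   # neighbors[0] holds the roots, neighbors[1] 5 samples per root,
#   # neighbors[2] 3 samples per first-hop neighbor (15 per root)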
|
nilq/baby-python
|
python
|
from gym_tak.tak.board import Presets, Board
from gym_tak.tak.piece import Colors, Types
from gym_tak.tak.player import Player
class TakGame:
def __init__(self, preset: Presets, player1: str, player2: str) -> None:
super().__init__()
self.preset = preset
self.board = Board(preset)
self.player1 = Player(player1, self, Colors.BLACK)
self.player2 = Player(player2, self, Colors.WHITE)
self.winner = None
self.next_player = self.player1
self.active = True
self.turn = 1
def can_move(self, player: Player, column_from: int, row_from: int, column_to: int, row_to: int, pieces: int) -> bool:
return self.active and player is self.next_player and self.board.can_move(player.hand.color, column_from, row_from,
column_to, row_to, pieces)
def move(self, player: Player, column_from: int, row_from: int, column_to: int, row_to: int, pieces: int) -> None:
print(player.name + " moving from column " + str(column_from) + " row " + str(row_from) + " to column " + str(column_to) + " row " + str(row_to))
assert self.can_move(player, column_from, row_from, column_to, row_to, pieces)
self.board.move(column_from, row_from, column_to, row_to, pieces)
self.next_player = self.get_other_player(self.next_player)
self.turn += 1
def can_place(self, player: Player, column: int, row: int, type_: Types) -> bool:
return self.active and player is self.next_player and player.hand.has(type_) and self.board.rows[row, column, 0] == 0
def place(self, player: Player, column: int, row: int, type_: Types) -> None:
print(player.name + " placing in column " + str(column) + " row " + str(row))
assert self.can_place(player, column, row, type_)
piece = player.hand.take_piece(type_)
self.board.place(piece, column, row)
        # hand the turn to the other player, as in move()
        self.next_player = self.get_other_player(self.next_player)
self.turn += 1
def get_player(self, color: Colors) -> Player:
if color is Colors.BLACK:
return self.player1
elif color is Colors.WHITE:
return self.player2
else:
raise ValueError('Unrecognized color %s' % color)
def get_other_player(self, player: Player) -> Player:
if player is self.player1:
return self.player2
elif player is self.player2:
return self.player1
else:
raise ValueError('Player %s is not in this game' % player.name)
def surrender(self, player: Player) -> None:
self.active = False
self.winner = self.get_other_player(player)
def reset(self) -> None:
self.board.reset()
self.player1.reset()
self.player2.reset()
self.winner = None
self.next_player = self.player1
self.active = True
self.turn = 1
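# A minimal usage sketch (comments only; the preset and piece-type members
# below are assumed names, not verified against gym_tak):
#
#   game = TakGame(Presets.STANDARD, 'alice', 'bob')
#   if game.can_place(game.next_player, column=0, row=0, type_=Types.FLAT):
#       game.place(game.next_player, 0, 0, type_=Types.FLAT)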
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from os import path
__cdir__ = path.dirname(__file__)
__fabfile__ = path.join(__cdir__, 'commands.py')
|
nilq/baby-python
|
python
|
from i3pystatus import Module
class Text(Module):
"""
Display static, colored text.
"""
settings = (
"text",
("color", "HTML color code #RRGGBB"),
)
required = ("text",)
color = None
def init(self):
self.output = {
"full_text": self.text
}
if self.color:
self.output["color"] = self.color
|
nilq/baby-python
|
python
|
import sqlalchemy as sa
from aiopg.sa import create_engine
from datetime import datetime
from sqlalchemy.dialects.postgresql import UUID
async def init_pg(app):
settings = app['settings']['db']
engine = await create_engine(
**settings
)
app['db'] = engine
# async with app['db'].acquire() as conn:
# await conn.execute(sa.schema.CreateTable(users_tbl))
# await conn.execute(sa.schema.CreateTable(oauth_providers_tbl))
# await conn.execute(sa.schema.DropTable(messages_tbl))
# await conn.execute(sa.schema.CreateTable(messages_tbl))
async def close_pg(app):
app['db'].close()
await app['db'].wait_closed()
async def create_user(app, values):
async with app['db'].acquire() as conn:
result = await conn.execute(
users_tbl.insert()
.values(**values)
)
async def update_user(app, old_token, values):
async with app['db'].acquire() as conn:
result = await conn.execute(
users_tbl.update()
            .where(users_tbl.c.oauth_token == old_token)
.returning(*users_tbl.c)
.values(**values)
)
return (await result.fetchone())
async def complete_auth(app, token):
async with app['db'].acquire() as conn:
result = await conn.execute(
users_tbl.delete()
.where(users_tbl.c.oauth_token == token)
)
async def get_user(app, username):
async with app['db'].acquire() as conn:
result = await conn.execute(
users_tbl.select()
.where(users_tbl.c.username == username)
)
return (await result.fetchone())
async def get_user_by_token(app, token):
async with app['db'].acquire() as conn:
result = await conn.execute(
users_tbl.select()
.where(users_tbl.c.oauth_token == token)
)
return (await result.fetchone())
async def create_oauth_provider(app, values):
async with app['db'].acquire() as conn:
result = await conn.execute(
oauth_providers_tbl.insert()
.values(**values)
)
async def get_oauth_provider(app, name):
async with app['db'].acquire() as conn:
result = await conn.execute(
oauth_providers_tbl.select()
.where(oauth_providers_tbl.c.name == name)
)
return (await result.fetchone())
async def create_message(app, values):
async with app['db'].acquire() as conn:
result = await conn.execute(
messages_tbl.insert()
.values(**values)
)
async def get_message(app, uuid, user):
async with app['db'].acquire() as conn:
result = await conn.execute(
messages_tbl.select()
.where(messages_tbl.c.uuid == uuid)
.where(messages_tbl.c.user == user)
)
return (await result.fetchone())
async def list_messages(app, user):
async with app['db'].acquire() as conn:
result = await conn.execute(
messages_tbl.select()
.where(messages_tbl.c.user == user)
)
return (await result.fetchall())
meta = sa.MetaData()
oauth_providers_tbl = sa.Table(
'oauth_providers_tbl', meta,
sa.Column('uuid', UUID, nullable=False, primary_key=True),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('app_key', sa.String(100), nullable=False),
sa.Column('app_secret', sa.String(100), nullable=False),
sa.UniqueConstraint('name')
)
users_tbl = sa.Table(
'users_tbl', meta,
sa.Column('uuid', UUID, nullable=False, primary_key=True),
sa.Column('username', sa.String(50)),
sa.Column('oauth_token', sa.String(100), nullable=False),
sa.Column('oauth_token_secret', sa.String(100), nullable=False),
sa.Column('fullname', sa.String(200)),
sa.Column(
'created', sa.TIMESTAMP, server_default=sa.func.now(), nullable=False
),
sa.Column(
'edited', sa.TIMESTAMP, server_default=sa.func.now(),
onupdate=sa.func.now(), nullable=False
)
)
messages_tbl = sa.Table(
'messages_tbl', meta,
sa.Column('uuid', UUID, nullable=False, primary_key=True),
sa.Column('user', UUID, sa.ForeignKey('users_tbl.uuid'), nullable=False),
sa.Column('private_key', sa.String(8196), nullable=False),
sa.Column('ciphertext', sa.String, nullable=False),
sa.Column('expires', sa.TIMESTAMP, nullable=False)
)
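# A minimal wiring sketch (assumes an aiohttp Application whose
# app['settings']['db'] carries aiopg connection kwargs):
#
#   from aiohttp import web
#   app = web.Application()
#   app['settings'] = {'db': {'database': 'mydb', 'user': 'me', 'password': '...'}}
#   app.on_startup.append(init_pg)
#   app.on_cleanup.append(close_pg)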
|
nilq/baby-python
|
python
|
"""Return the euclidean distance beetween the given dictionaries."""
from .minkowsky import minkowsky
from typing import Dict
def euclidean(a: Dict, b: Dict)->float:
"""Return the euclidean distance beetween the given dictionaries.
Parameters
----------------------------
a: Dict,
First dictionary to consider.
b: Dict,
Second dictionary to consider.
Returns
----------------------------
Return the euclidean distance beetween the given dictionaries.
"""
return minkowsky(a, b, 2)
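# A minimal usage sketch (assumes `minkowsky` computes the p-norm distance
# over the dictionaries' keys, treating a missing key as zero):
if __name__ == "__main__":
    # 3-4-5 right triangle: expected distance is 5.0
    print(euclidean({"x": 1.0, "y": 2.0}, {"x": 4.0, "y": 6.0}))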
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.13 on 2021-08-19 10:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('projects', '0123_reportcolumn_preview_only'),
]
operations = [
migrations.CreateModel(
name='ReportFilter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='name')),
('identifier', models.CharField(max_length=255, verbose_name='identifier')),
('type', models.CharField(choices=[('exact', 'exact value'), ('multiple', 'multiple choice'), ('range', 'value range'), ('set', 'value is set'), ('not_set', 'value is not set')], max_length=8, verbose_name='filter type')),
('attributes_as_choices', models.BooleanField(default=False, verbose_name='use attributes as choices')),
('attributes', models.ManyToManyField(to='projects.Attribute', verbose_name='target attributes')),
('reports', models.ManyToManyField(related_name='filters', to='projects.Report', verbose_name='usable with reports')),
],
),
migrations.CreateModel(
name='ReportFilterAttributeChoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='name')),
('identifier', models.CharField(max_length=255, verbose_name='identifier')),
('value', models.CharField(max_length=255, verbose_name='search value, values or value range')),
('attribute', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Attribute', verbose_name='attribute')),
('report_filter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attribute_choices', to='projects.ReportFilter', verbose_name='filter')),
],
),
]
|
nilq/baby-python
|
python
|
import torch
from torch import nn
from torch.nn import functional as F
import math
class NoisyLinear(nn.Module):
def __init__(self, in_features, out_features, std_init=0.5):
super(NoisyLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.FloatTensor(out_features, in_features))
self.weight_sigma = nn.Parameter(torch.FloatTensor(out_features, in_features))
self.register_buffer('weight_epsilon', torch.FloatTensor(out_features, in_features))
self.bias_mu = nn.Parameter(torch.FloatTensor(out_features))
self.bias_sigma = nn.Parameter(torch.FloatTensor(out_features))
self.register_buffer('bias_epsilon', torch.FloatTensor(out_features))
self.reset_parameters()
self.reset_noise()
def forward(self, x):
if self.training:
weight = self.weight_mu + self.weight_sigma.mul(self.weight_epsilon.to(self.weight_sigma.device))
bias = self.bias_mu + self.bias_sigma.mul(self.bias_epsilon.to(self.bias_sigma.device))
else:
weight = self.weight_mu
bias = self.bias_mu
return F.linear(x, weight, bias)
def reset_parameters(self):
mu_range = 1 / math.sqrt(self.weight_mu.size(1))
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.weight_sigma.size(1)))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.bias_sigma.size(0)))
def reset_noise(self):
epsilon_in = self._scale_noise(self.in_features)
epsilon_out = self._scale_noise(self.out_features)
self.weight_epsilon = epsilon_out.ger(epsilon_in)
self.bias_epsilon = self._scale_noise(self.out_features)
def _scale_noise(self, size):
x = torch.randn(size)
x = x.sign().mul(x.abs().sqrt())
return x
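# A minimal smoke test (a sketch): NoisyLinear behaves like nn.Linear, but in
# train mode the weights are perturbed by the learnable sigma-scaled noise.
if __name__ == "__main__":
    layer = NoisyLinear(4, 2)
    x = torch.randn(3, 4)
    layer.train()
    y_noisy = layer(x)   # uses mu + sigma * epsilon
    layer.eval()
    y_mean = layer(x)    # uses mu only
    layer.reset_noise()  # draws fresh factorised noise
    print(y_noisy.shape, y_mean.shape)  # torch.Size([3, 2]) twice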
|
nilq/baby-python
|
python
|
from django import template
import mistune
register = template.Library()
@register.filter
def markdownify(text):
    # escape=True makes the renderer escape raw HTML instead of passing it through
renderer = mistune.Renderer(escape=True, hard_wrap=True)
markdown = mistune.Markdown(renderer=renderer)
return markdown(text)
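# Usage sketch in a Django template (assumes this module sits in an app's
# `templatetags` package; the load name below is the module filename and is
# hypothetical):
#
#   {% load markdown_tags %}
#   {{ post.body|markdownify }}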
|
nilq/baby-python
|
python
|
import sys
import os
from dotenv import load_dotenv
# see https://github.com/mytliulei/boundless/blob/master/python/%E6%89%93%E5%8C%85exe/pyinstaller.md
if getattr(sys, 'frozen', False):
BASE_DIR = os.path.dirname(sys.executable)
else:
    # directory containing this file
#BASE_DIR = os.path.abspath(os.path.dirname(__file__))
    # parent of the directory containing this file
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
    # directory of the runtime environment (current working directory)
#BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# the .env file may contain Chinese; it is loaded here with explicit UTF-8 encoding
load_dotenv(os.path.join('.', '.env'), encoding='utf-8')
class Config(object):
    ENV = os.environ.get('ENV') or 'PRODUCTION'
LOGURU_LEVEL = os.environ.get('LOGURU_LEVEL') or 'INFO'
LOGURU_LOGFILE = os.environ.get('LOGURU_LOGFILE') or 'logfile.log'
#AUTO_SEND = os.environ.get('AUTO_SEND', 'false').lower() in ['true', 'on', '1']
AUTO_SEND = os.environ.get('AUTO_SEND', 'true').lower() not in ['false', 'off', '0']
|
nilq/baby-python
|
python
|
import fbuild.config.c as c
# ------------------------------------------------------------------------------
class extensions(c.Test):
builtin_expect = c.function_test('long', 'long', 'long',
name='__builtin_expect',
test='int main() { if(__builtin_expect(1,1)); return 0; }')
@c.cacheproperty
def named_registers_x86(self):
return self.builder.check_run('''
#include <stdio.h>
register void *sp __asm__ ("esp");
int main() {
printf("Sp = %p\\n", sp);
return 0;
}
''', 'checking for named x86 registers')
@c.cacheproperty
def named_registers_x86_64(self):
return self.builder.check_run('''
#include <stdio.h>
register void *sp __asm__ ("rsp");
int main() {
printf("Sp = %p\\n", sp);
return 0;
}
''', 'checking for named x86_64 registers')
@c.cacheproperty
def computed_gotos(self):
return self.builder.check_run('''
int main(int argc, char** argv) {
void *label = &&label2;
goto *label;
label1:
return 1;
label2:
return 0;
}
''', 'checking for computed gotos')
@c.cacheproperty
def asm_labels(self):
return self.builder.check_run('''
int main(int argc, char** argv) {
void *label = &&label2;
__asm__(".global fred");
__asm__("fred:");
__asm__(""::"g"(&&label1));
goto *label;
label1:
return 1;
label2:
return 0;
}
''', 'checking for asm labels')
class getopt_h(c.Test):
header = c.header_test('getopt.h')
getopt = c.function_test('int', 'int', 'char**', 'char*', test='''
#include <getopt.h>
int main(int argc, char** argv) {
int ch, ret = 0;
while ((ch = getopt(argc, argv, "f")) != -1) {
switch (ch) {
case 'f':
break;
default:
ret = 1;
}
}
return ret;
}
''')
getopt_long = c.function_test('int', 'int', 'char**', 'char*', 'struct option*', 'int',
test='''
#include <getopt.h>
static struct option longopts[] = {
{ "foo", no_argument, NULL, 'f' }
};
int main(int argc, char** argv) {
int ch, ret = 0;
while ((ch = getopt_long(argc, argv, "b", longopts, NULL)) != -1) {
switch (ch) {
case 'b':
case 'f':
break;
default:
ret = 1;
}
}
return ret;
}
''')
getopt_long_only = c.function_test('int', 'int', 'char**', 'char*', 'struct option*', 'int',
test='''
#include <getopt.h>
static struct option longopts[] = {
{ "foo", no_argument, NULL, 'f' }
};
int main(int argc, char** argv) {
int ch, ret = 0;
while ((ch = getopt_long_only(argc, argv, "b", longopts, NULL)) != -1) {
switch (ch) {
case 'b':
case 'f':
break;
default:
ret = 1;
}
}
return ret;
}
''')
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from .models import Brew
class BrewSerializer(serializers.ModelSerializer):
class Meta:
model = Brew
fields = ("started_brewing", "outages")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from flask import Flask, make_response, request, render_template
app = Flask(__name__)
# entry point for our users
# renders a template that asks for their name
# index.html points to /setcookie
@app.route("/index")
@app.route("/")
def index():
return render_template("index.html")
# set the cookie and send it back to the user
@app.route("/setcookie", methods = ["POST", "GET"])
def setcookie():
if request.method == "POST":
user = request.form["nm"]
# Note that cookies are set on response objects.
# Since you normally just return strings
# Flask will convert them into response objects for you
resp = make_response(render_template("readcookie.html"))
# add a cookie to our response object
#cookievar #value
resp.set_cookie("userID", user)
# return our response object includes our cookie
return resp
# check the user's cookie for their name
@app.route("/getcookie")
def getcookie():
    # default to an empty string so a missing cookie does not raise a TypeError
    name = request.cookies.get("userID", "")
    return '<h1>welcome '+name+'</h1>'
if __name__ == "__main__":
app.run(port=5006)
|
nilq/baby-python
|
python
|
from __future__ import annotations
import functools
import os
import traceback
from enum import Enum
from typing import Callable
from typing import TypeVar
from CCAgT_utils.constants import FILENAME_SEP
from CCAgT_utils.constants import STRUCTURE
R = TypeVar('R')
def basename(filename: str, with_extension: bool = False) -> str:
"""From a full filename get the basename with or not with the
extension.
Parameters
----------
filename : str
A full filename
with_extension : bool, optional
Flag to return the basename with extension, if True return
the basename with the file extension, else will return just the
basename, by default False
Returns
-------
str
The basename of the <filename> with or not the file extension
"""
bn = os.path.basename(filename)
if with_extension:
return bn
else:
return os.path.splitext(bn)[0]
def get_traceback(f: Callable[..., R]) -> Callable[..., R]:
"""Decorator for print an error that occurs inside of some process
Parameters
----------
f : Callable
The function that will be decorated, need to be a function called
by a worker.
Returns
-------
Callable
The return of the function if all runs fine
Raises
------
e
Will capture the exception from the process using the `traceback`
print.
"""
@functools.wraps(f)
def wrapper(*args: object, **kwargs: object) -> R:
try:
return f(*args, **kwargs)
except Exception as e:
print('Caught exception in worker thread:')
traceback.print_exc()
raise e
return wrapper
class FILENAME_ITEM(Enum):
    slide = 0
    tile_id = 1
    x_position_raw = 2
    y_position_raw = 3
def items_from_filename(filename: str) -> list[str]:
"""From a full filename get the itens/infos at the basename
Parameters
----------
filename : str
A full filename to an image or mask of CCAgT dataset
Returns
-------
list
A list with the 4 information that have at the basename
"""
bn = basename(filename)
items = bn.split(FILENAME_SEP)
return items
def slide_from_filename(filename: str) -> str:
"""Based on a filename get the slide ID information
Parameters
----------
filename : str
A full filename to an image or mask of CCAgT dataset
Returns
-------
str
The slide ID of the filename
"""
return items_from_filename(filename)[FILENAME_ITEM.slide.value]
def find_files(
dir_path: str,
extension: str | tuple[str, ...],
look_recursive: bool = False,
selection: set[str] = set(),
) -> dict[str, str]:
"""Find all files into at the path and subdirectories
Parameters
----------
dir_path : str
Path of the base directory to look
extension : str | tuple[str]
Extension of the dessired files
Returns
-------
dict[str, str]
A dict with the filename as key and the relative path for the
file
"""
if look_recursive:
files = {
file: os.path.join(path, file) for path, _, files in os.walk(dir_path) for file in files
if file.endswith(extension) and (len(selection) == 0 or file in selection)
}
else:
files = {
file: os.path.join(dir_path, file) for file in os.listdir(dir_path)
if file.endswith(extension) and (len(selection) == 0 or file in selection)
}
return files
def create_structure(dir_path: str, slides: set[str]) -> None:
dir_images = os.path.join(dir_path, STRUCTURE['i'])
dir_masks = os.path.join(dir_path, STRUCTURE['m'])
for slide in slides:
os.makedirs(os.path.join(dir_images, slide), exist_ok=True)
os.makedirs(os.path.join(dir_masks, slide), exist_ok=True)
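# A minimal usage sketch (hypothetical directory):
if __name__ == '__main__':
    pngs = find_files('./data', '.png', look_recursive=True)
    print(f'found {len(pngs)} png files')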
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
European Biotechnology pipelines
Scrapy pipelines docs: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
'''
import datetime
import re
import scrapy
from event.items import EventItem, ResponseItem
from common.util import xpath_class, lot2dol, flatten, lmap
class EuropeanBiotechnologyEventPipeline(object):
def process_item(self, item: ResponseItem, spider):
def parse_date(datestring):
if datestring is None:
return None
ds = datestring.replace('-', '').strip()
return datetime.datetime.strptime(ds, '%d.%m.%Y')
def parse_description(desc):
            # The following regexes catch all info even when multiple contact infos are given, like the following
# 'Phone: +49-89-949-203-81, Fax: +49-89-949-203-89, eMail: info@analytica.de'
contact_infos = re.findall(
r'(?:eMail|Phone|Fax):\s*.*?(?=,|\n|$)', desc)
# 'Info: Green Power Conferences, Robert Wilson'
contact_names = re.findall(
r'(?<=Info:\s).*?(?=\n|$|eMail|Phone|Fax)', desc)
if len(contact_names) > 0:
                # Get the part that precedes the contact info
description = desc.split(contact_names[0])[0]
else:
description = desc
contact_details = lmap(parse_contact_info, contact_infos)
contact_details.extend(
flatten(
lmap(parse_contact_names, contact_names)
)
)
contacts = lot2dol(contact_details)
return description, contacts
def parse_contact_info(info):
contact_type, contact_detail = [
re.sub(r'\s*', '', s.lower())
for s in info.split(':')
]
return contact_type, contact_detail
def parse_contact_names(info):
contact_names = [s.strip() for s in info.split(',')]
try:
organizer = contact_names.pop(0)
except IndexError:
organizer = ''
return [
('organizer', organizer),
*[('name', n) for n in contact_names]
]
def parse_location(loc):
# if there are parentheses, they hold the code of the country
# 'Basel (CH)'
if '(' in loc:
city, country = map(
str.strip,
filter(None, re.split(r'\((?:.*?)\)', loc))
)
else:
city = loc
country = None
return city, country
res = scrapy.Selector(text=item['body'])
name = res.xpath(
f"//div[{xpath_class(['ce-inner-headline'])}]//span/text()").get()
desc = res.xpath(
f"normalize-space(string(//div[{xpath_class(['ce-inner-text'])}]/p))").get()
start = res.xpath(
f"//span[{xpath_class(['event-date'])} and position()=1]/text()").get()
end = res.xpath(
f"//span[{xpath_class(['event-date'])} and position()=2]/text()").get()
event_url = res.xpath(
f"//div[{xpath_class(['ce-inner-url'])}]/a/@href").get()
city = res.xpath(
f"//span[{xpath_class(['event-location'])}]/text()").get('')
description, contacts = parse_description(desc)
emails = ' '.join(contacts.get('email', []))
phones = ' '.join(contacts.get('phone', []))
names = ' '.join(contacts.get('name', []))
organizer = ' '.join(contacts.get('organizer', []))
city, country = parse_location(city)
event = EventItem()
event['name'] = name
event['event_url'] = event_url
event['description'] = description
# event['focus'] = scrapy.Field()
# event['event_type'] = scrapy.Field()
event['start'] = parse_date(start)
event['end'] = parse_date(end)
# event['length_in_days'] = scrapy.Field()
event['country'] = country
# event['state'] = scrapy.Field()
event['city'] = city
# event['venue'] = scrapy.Field()
# event['price'] = scrapy.Field()
# event['currency'] = scrapy.Field()
# event['stand'] = scrapy.Field()
# event['abstract'] = scrapy.Field()
# event['talk'] = scrapy.Field()
# event['ticket_deadline'] = scrapy.Field()
# event['stand_deadline'] = scrapy.Field()
# event['talk_deadline'] = scrapy.Field()
event['contact_name'] = names
event['contact_email'] = emails
event['contact_phone'] = phones
event['organizer'] = organizer
# event['organizer_url'] = scrapy.Field()
# event['newsletter'] = scrapy.Field()
# event['twitter'] = scrapy.Field()
# event['facebook'] = scrapy.Field()
# event['linkedin'] = scrapy.Field()
# event['instagram'] = scrapy.Field()
# event['hashtags'] = scrapy.Field()
# event['relevant_to_bio'] = scrapy.Field()
# event['relevant_to_ind_bio'] = scrapy.Field()
# event['ignore'] = scrapy.Field()
# event['notes'] = scrapy.Field()
# event['source'] = scrapy.Field()
# event['id'] = scrapy.Field()
return event
|
nilq/baby-python
|
python
|
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.GemRequirement import GemRequirement
@linter(executable='sqlint', use_stdin=True, output_format='regex',
output_regex=r'.+:(?P<line>\d+):(?P<column>\d+):'
r'(?P<severity>ERROR|WARNING) (?P<message>(?:\s*.+)*)')
class SQLintBear:
"""
Check the given SQL files for syntax errors or warnings.
This bear supports ANSI syntax. Check out
<https://github.com/purcell/sqlint> for more detailed information.
"""
LANGUAGES = {'SQL'}
REQUIREMENTS = {GemRequirement('sqlint')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax'}
@staticmethod
def create_arguments(filename, file, config_file):
return ()
|
nilq/baby-python
|
python
|
# Test
# acc_des = 'This is a test account.2'
# acc_username = '2'
# acc_password = '2'
# secret_msg = 'Hello :)'
# enc_acc_dess = enc.encrypt_data(
# 'b2001bccdcb7ea5556526cb70e58206996c3039282dd62e2ddc4a1d55be6c1d6',
# data=acc_des)
# enc_username = enc.encrypt_data(
# 'b2001bccdcb7ea5556526cb70e58206996c3039282dd62e2ddc4a1d55be6c1d6',
# data=acc_username)
# enc_acc_password = enc.encrypt_data(
# 'b2001bccdcb7ea5556526cb70e58206996c3039282dd62e2ddc4a1d55be6c1d6',
# data=acc_password)
# # Test putting encrypted data to the database
# try:
# db.insert(
# password_vault_tab,
# {'uid': '123123', 'acc_description': enc_acc_dess,
# 'acc_username': enc_username, 'acc_password': enc_acc_password})
# except psycopg2.Error as e:
# print(e, end='')
# VERY DANGEROUS, DELETE EVERYTHING WITH THE SAME UID
# db.delete_row(password_vault_tab, condition='uid=\'{}\''.format('123123'))
# print('{}\n{}\n{}'.format(secret_msg, encrypted_msg, decrypted_msg))
# salt = enc.generate_pin_salt()
|
nilq/baby-python
|
python
|
# r"""
# For training model.
# Consist of some Trainers.
# """
#
# import argparse
# import torch.nn as nn
#
# from pathlib import Path
# from torch.optim import SGD
# from torch.cuda.amp import GradScaler
# from torch.optim.lr_scheduler import StepLR
# from torchvision.transforms import transforms
# from torchvision.datasets import MNIST
#
# from utils.log import add_log_file
# from metaclass.metatrainer import MetaTrainClassify
# from utils.general import timer, load_all_yaml, save_all_yaml, init_seed, select_one_device
#
# from val_classify import ValClassify
#
# from mine.SmartNet.smartnet import SmartNet
#
# r"""Set Global Constant for file save and load"""
# ROOT = Path.cwd() # **/visual-framework root directory
#
#
# class TrainClassify(MetaTrainClassify):
# def __init__(self, args):
# super(TrainClassify, self).__init__(args)
#
# # Get path_dict
# self.path_dict = self.set_save_path(('hyp', 'hyp.yaml'),
# ('args', 'args.yaml'),
# ('logger', 'logger.log'),
# ('writer', 'tensorboard'),
# ('last', 'weights/last.pt'),
# ('best', 'weights/best.pt'),
# ('datasets', 'datasets.yaml'))
#
# # Add FileHandler for logger
# add_log_file(self.path_dict['logger'])
#
# # Set tensorboard writer
# self.writer = self.set_tensorboard_writer(self.path_dict['writer'])
#
# # Set one device
# self.device = select_one_device(self.device) # requires model, images, labels .to(self.device)
# self.cuda = (self.device != 'cpu')
#
# # Load hyp yaml
# self.hyp = load_all_yaml(self.hyp)
#
# # Initialize or auto seed manual and save in self.hyp
# self.hyp['seed'] = init_seed(self.hyp['seed'])
#
# # Get datasets path dict
# self.datasets = load_all_yaml(self.datasets)
#
# # Save yaml dict
# save_all_yaml((vars(args), self.path_dict['args']),
# (self.hyp, self.path_dict['hyp']),
# (self.datasets, self.path_dict['datasets']))
# args = self.release()
#
# # Load checkpoint
# self.checkpoint = self.load_checkpoint(self.weights)
#
# # Initialize or load model
# self.model = self.load_model(SmartNet(self.inc, self.datasets['nc'], self.image_size, self.channels,
# invalid=0.01, num_add=5, add_cut_percentage=0.9,
# act='relu', device=self.device), load=self._load_model)
#
# # Unfreeze model
# self.unfreeze_model()
#
# # Freeze layers of model
# self.freeze_layers(self.freeze_names)
#
# # Set parameter groups list to for the optimizer
# self.param_groups = self.set_param_groups((('weight', nn.Parameter, {'weight_decay': self.hyp['weight_decay']}),
# ))
#
# # Initialize and load optimizer
# self.optimizer = self.load_optimizer(SGD(self.param_groups,
# lr=self.hyp['lr0'], momentum=self.hyp['momentum'], nesterov=True),
# load=self._load_optimizer)
# self.param_groups = self.release()
#
# # Initialize and load lr_scheduler
# self.lr_scheduler = self.load_lr_scheduler(StepLR(self.optimizer, 20), load=self._load_lr_scheduler)
#
# # Initialize and load GradScaler
# self.scaler = self.load_gradscaler(GradScaler(enabled=self.cuda), load=self._load_gradscaler)
#
# # Initialize or load start_epoch
# self.start_epoch = self.load_start_epoch(load=self._load_start_epoch)
#
# # Initialize or load best_fitness
# self.best_fitness = self.load_best_fitness(load=self._load_best_fitness)
#
# # Empty self.checkpoint when load finished
# self.checkpoint = self.release()
#
# # Get dataloader for training testing
# transform = transforms.Compose([transforms.ToTensor()])
#
# self.train_dataloader = self.set_dataloader(MNIST(self.datasets['path'], self.datasets['train'], transform),
# shuffle=self.shuffle)
#
# if self.datasets['test'] is not None:
# self.val_dataloader = self.set_dataloader(MNIST(self.datasets['path'], self.datasets['val'], transform))
# self.test_dataloader = self.set_dataloader(MNIST(self.datasets['path'], self.datasets['test'], transform))
# else:
# self.val_dataloader = self.set_dataloader(MNIST(self.datasets['path'], self.datasets['val'], transform))
# self.test_dataloader = None
#
# # Get loss function
# self.loss_fn = nn.CrossEntropyLoss()
#
# # Set val class
# self.val_class = ValClassify
#
#
# def parse_args_classify(known: bool = False):
# parser = argparse.ArgumentParser()
# parser.add_argument('--tensorboard', type=bool, default=True, help='')
# parser.add_argument('--visual_image', type=bool, default=False,
# help='whether make images visual in tensorboard')
# parser.add_argument('--visual_graph', type=bool, default=False,
# help='whether make model graph visual in tensorboard')
# parser.add_argument('--weights', type=str, default='', help='')
# parser.add_argument('--freeze_names', type=list, default=[],
# help='name of freezing layers in model')
# parser.add_argument('--device', type=str, default='0', help='cpu or cuda:0 or 0')
# parser.add_argument('--epochs', type=int, default=100, help='epochs for training')
# parser.add_argument('--batch_size', type=int, default=64, help='')
# parser.add_argument('--workers', type=int, default=0, help='')
# parser.add_argument('--shuffle', type=bool, default=True, help='')
# parser.add_argument('--pin_memory', type=bool, default=False, help='')
# parser.add_argument('--datasets', type=str, default=str(ROOT / 'mine/data/datasets/classification/MNIST.yaml'),
# help='')
# parser.add_argument('--save_name', type=str, default='exp', help='')
# parser.add_argument('--save_path', type=str, default=str(ROOT / 'runs/train/classify'), help='')
# parser.add_argument('--hyp', type=str, default=str(ROOT / 'data/hyp/hyp_classify_train.yaml'), help='')
#
# parser.add_argument('--inc', type=int, default=1, help='')
# parser.add_argument('--image_size', type=int, default=28, help='')
# parser.add_argument('--channels', type=list, default=[512, 256, 128, 64], help='')
# parser.add_argument('--load_model', type=str, default=None, help='')
# parser.add_argument('--load_optimizer', type=bool, default=False, help='')
# parser.add_argument('--load_lr_scheduler', type=bool, default=False, help='')
# parser.add_argument('--load_gradscaler', type=bool, default=False, help='')
# parser.add_argument('--load_start_epoch', type=str, default=None, help='')
# parser.add_argument('--load_best_fitness', type=bool, default=False, help='')
# namespace = parser.parse_known_args()[0] if known else parser.parse_args()
# return namespace
#
#
# @timer
# def train_classify():
# arguments = parse_args_classify()
# trainer = TrainClassify(arguments)
# trainer.train()
#
#
# if __name__ == '__main__':
# train_classify()
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fixtures
from keystone import auth
from keystone.common import dependency
from keystone.common.kvs import core as kvs_core
from keystone.server import common
class BackendLoader(fixtures.Fixture):
"""Initialize each manager and assigns them to an attribute."""
def __init__(self, testcase):
super(BackendLoader, self).__init__()
self._testcase = testcase
def setUp(self):
super(BackendLoader, self).setUp()
# TODO(blk-u): Shouldn't need to clear the registry here, but some
# tests call load_backends multiple times. These should be fixed to
# only call load_backends once.
dependency.reset()
# TODO(morganfainberg): Shouldn't need to clear the registry here, but
# some tests call load_backends multiple times. Since it is not
# possible to re-configure a backend, we need to clear the list. This
# should eventually be removed once testing has been cleaned up.
kvs_core.KEY_VALUE_STORE_REGISTRY.clear()
self.clear_auth_plugin_registry()
drivers, _unused = common.setup_backends()
for manager_name, manager in drivers.items():
setattr(self._testcase, manager_name, manager)
self.addCleanup(self._testcase.cleanup_instance(*list(drivers.keys())))
del self._testcase # break circular reference
def clear_auth_plugin_registry(self):
auth.core.AUTH_METHODS.clear()
auth.core.AUTH_PLUGINS_LOADED = False
|
nilq/baby-python
|
python
|
""" Utility functions """
import os
from collections import namedtuple
def process_args(args, mode):
"""
    save arguments into a named tuple, as all scripts have the same arguments template
    :param args: argument list as passed from the command line
    :type args: list
    :param mode: default extension for the output file when none is given
    :type mode: str
    """
if len(args) > 4:
raise ValueError("Wrong number of arguments")
# if an output filename is given, we want it
if len(args) == 4:
output_file = os.path.basename(args[3])
else:
output_file = os.path.basename(args[2]) + "." + mode
# now save remaining args
xml_mode = "xml." + args[1]
data_file = args[2]
Args = namedtuple('Args', 'xml_mode input_file output_file')
return Args(xml_mode, data_file, output_file)
def read_records(file):
"""
:param file: file to read containing record
:return: dict with record names as keys
"""
recs = {}
for line in open(file):
line = line.split(':')
# extract record name
rec = line[0].strip()
# build list of fields
recs[rec] = []
fields = [f.strip() for f in line[1].split(',')]
recs[rec].append(fields)
return recs
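# A minimal usage sketch for process_args (hypothetical argv):
if __name__ == "__main__":
    parsed = process_args(["prog", "foo", "data.txt"], mode="csv")
    print(parsed.xml_mode, parsed.input_file, parsed.output_file)
    # -> xml.foo data.txt data.txt.csv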
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
class Solution:
def reOrderArray(self, array):
# write code here
i = 0
length = len(array)
while(i<length):
while(i<length and array[i]%2!=0): # 找到偶数
i += 1
j = i + 1
while(j < length and array[j]%2==0 ): # 找到奇数
j += 1
if j < length:
tmp = array[j]
while(j>i): # i到j-1 元素后移一位
array[j] = array[j-1]
j -= 1
array[i] = tmp
i += 1
return array
if __name__ == "__main__":
array = [1, 2, 3, 4, 5]
solution = Solution()
result = solution.reOrderArray(array)
print(result)
|
nilq/baby-python
|
python
|
class GSP:
def __init__(self):
self.start = []
self.goal = []
self.stack = []
self.actions = ['Stack','UnStack','Pick','Put']
self.predicate = ['On','OnTable']
self.prereq = ['Clear','Holding','ArmEmpty']
def accept(self):
self.start = input("Enter Start state : ").split("^")
self.goal = input("Enter Goal state : ").split("^")
def contains(self,l1,l2,x):
if x in l2:
return True
else:
return False
def break_compound(self,l1):
for i in l1:
self.stack.append(i)
def process(self):
self.accept()
        self.stack.append(self.goal)
        while len(self.stack) != 0:
            # Break a compound clause into its parts on the stack,
            # popping it first so the same compound is not revisited
            if len(self.stack[-1]) > 1:
                self.break_compound(self.stack.pop())
|
nilq/baby-python
|
python
|
from flask import render_template, request, jsonify
from datetime import datetime
from hw_todo.utils import get_canvas_tasks
from hw_todo.tests import app
db_canvas = {"Tasks": []}
db = db_canvas
@app.route('/docs')
def get_docs():
print('sending docs')
return render_template('swaggerui.html')
@app.route('/', methods=['POST', 'GET'])
def index():
"""
(GET, POST)
GET -> Homepage, returns list of tasks
POST -> Add a new task to the database
"""
if request.method == 'POST':
if 'assignment' not in request.form or 'due_date' not in request.form or 'course' not in request.form:
return jsonify(({'error': 'assignment, due_date and course required as form data'})), 400
assignment = request.form['assignment']
due_date = datetime.strptime(request.form['due_date'], '%Y-%m-%dT%H:%M')
course = request.form['course']
try:
# database.session.add(new_task)
# database.session.commit()
db["Tasks"].append({"assignment": assignment,
"due_date": due_date,
"course": course})
return db
except Exception as e:
print(e)
return 'There was an issue adding your task'
else:
# tasks = Todo.query.order_by(Todo.due_date).all() # Orders by due date
# completedTasks = len(list(filter(lambda x: x.completed, tasks)))
# pendingTasks = len(tasks) - completedTasks
tasks = db["Tasks"]
completedTasks = 0
pendingTasks = 0
        # tasks added manually may lack the Completed/Pending flags, so use .get()
        for task in tasks:
            if task.get("Completed"):
                completedTasks += 1
            if task.get("Pending"):
                pendingTasks += 1
        return {"tasks": tasks, "completedTasks": completedTasks, "pendingTasks": pendingTasks}
@app.route('/update/<int:id>', methods=['POST'])
def update(id):
"""
(POST)
Updates any field of the given assignment
"""
existing_tasks = db_canvas["Tasks"]
task_to_update = {}
for x in range(len(db_canvas["Tasks"])):
if existing_tasks[x]["Canvas ID"] == id:
task_to_update = existing_tasks[x]
if task_to_update == {}:
return {"ERROR": "ID Not Found"}
try:
task_to_update["Assignment"] = request.form['assignment']
task_to_update["Due Date"] = datetime.strptime(request.form['due_date'], '%Y-%m-%dT%H:%M')
task_to_update["Course"] = request.form['course']
except Exception as e:
print(e)
return {"ERROR": "MISSING INFORMATION"}
try:
# database.session.commit()
return db_canvas
except Exception as e:
print(e)
return 'There was an issue updating your task'
@app.route('/<int:id>', methods=['DELETE'])
def delete(id):
"""
(DELETE)
Deletes the given assignment
"""
existing_tasks = db_canvas["Tasks"]
    task_location = None
task_to_delete = {}
for x in range(len(db_canvas["Tasks"])):
if existing_tasks[x]["Canvas ID"] == id:
task_location = x
task_to_delete = existing_tasks[x]
if task_to_delete == {}:
return {"ERROR": "ID Not Found"}
try:
db_canvas["Tasks"].pop(task_location)
return db_canvas
    except Exception:
        return 'There was a problem deleting that task'
def check_if_exists(canvas_id):
"""
Helper Method
Checks if a given canvas assignment already exists in the database
:return: Boolean (True if exists in database, False if not)
"""
existing_tasks = db_canvas["Tasks"]
for x in range(len(db_canvas["Tasks"])):
if existing_tasks[x]["Canvas ID"] == canvas_id:
return True
return False
@app.route('/canvas')
def canvas():
"""
(GET)
Updates the database with all new assignments from Canvas LMS
"""
tasks = get_canvas_tasks()
for task in tasks:
if not check_if_exists(task['canvas_id']):
try:
# new_task = Todo(assignment=task['assignment'], due_date=task['due_date'], course=task['course'],
# canvas_id=task['canvas_id'])
db_canvas["Tasks"].append({
"Assignment": task['assignment'],
"Due Date": task['due_date'],
"Course": task['course'],
"Canvas ID": task['canvas_id'],
"Completed": False,
"Pending": False
})
except Exception as e:
print(e)
return 'There was an issue pulling your tasks from canvas'
return db_canvas
@app.route('/complete/<int:id>', methods=['PUT'])
def complete(id):
"""
    (PUT)
Updates the completed field of the given assignment to either True or False
"""
existing_tasks = db_canvas["Tasks"]
task_to_complete = {}
for x in range(len(db_canvas["Tasks"])):
if existing_tasks[x]["Canvas ID"] == id:
task_to_complete = existing_tasks[x]
if task_to_complete == {}:
print("HIT")
return {"ERROR": "ID Not Found"}
try:
task_to_complete["Completed"] = not task_to_complete["Completed"]
# database.session.commit()
return db_canvas, 200
except Exception as e:
print(e)
return 'There was a problem completing that task'
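# Client-side usage sketch (assumptions: the app is served on http://localhost:5000
# and the "requests" package is available; paths are taken from the routes above):
#   import requests
#   requests.post('http://localhost:5000/', data={
#       'assignment': 'HW1', 'due_date': '2024-01-31T23:59', 'course': 'CS101'})
#   requests.get('http://localhost:5000/canvas')       # pull new tasks from Canvas
#   requests.put('http://localhost:5000/complete/1')   # toggle completion of Canvas ID 1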
|
nilq/baby-python
|
python
|
"""
File: pylinex/basis/EffectiveRank.py
Author: Keith Tauscher
Date: 17 Oct 2017
Description: File containing function which, given a training set of curves and
a corresponding noise level, determines the effective rank of the
training set, which is the number of modes to fit within the error
(see the docstring for effective_training_set_rank for details on
what that can mean).
"""
import numpy as np
from .TrainedBasis import TrainedBasis
def effective_training_set_rank(training_set, noise_level,\
mean_translation=False, method='abs', number_of_modes_to_consider=None,\
use_min_noise_level=False, level=1., suppress_runtime_error=False):
"""
Finds the number of modes which are needed to fit the given training set to
the given noise level.
training_set: 2D numpy.ndarray of shape (ncurves, nchannels)
noise_level: 1D numpy.ndarray of shape (nchannels,)
mean_translation: if True (default False), the mean of the training set is
subtracted before taking SVD.
method: if 'rms', RMS of normalized bias (bias/error) must be less than
level for all curves for rank to be returned
if 'abs', normalized bias (bias/error) must be less than level for
all curves and all channels
number_of_modes_to_consider: if int, maximum number of modes to compute.
Should be much larger than the
expected rank. If it is not larger
than the rank, this will throw a
RuntimeError.
if None, exhaustive search is performed by
internally setting
number_of_modes_to_consider as the
minimum of ncurves and nchannels
use_min_noise_level: if True, minimum of noise level used for every channel
otherwise, noise level's changes with different data
channels are accounted for
level: multiple of the noise level to consider
suppress_runtime_error: if True, if no considered rank satisfies constraint
defined by the arguments to this function,
number_of_modes_to_consider is returned
if False, if no considered rank satisfies
constraint defined by the arguments to
this function, a RuntimeError is raised.
This is the default behavior.
returns: integer number of modes necessary to fit every curve in the
training set to within noise_level
"""
    if number_of_modes_to_consider is None:
number_of_modes_to_consider = np.min(training_set.shape)
svd_basis = TrainedBasis(training_set, number_of_modes_to_consider,\
error=noise_level, mean_translation=mean_translation)
level2 = (level ** 2)
for rank in range(1, number_of_modes_to_consider + 1):
importance_weighted_basis =\
svd_basis.basis[:rank].T * svd_basis.importances[np.newaxis,:rank]
fit = np.dot(importance_weighted_basis,\
svd_basis.training_set_space_singular_vectors[:rank]).T
if mean_translation:
fit = fit + np.mean(training_set, axis=0)[np.newaxis,:]
if use_min_noise_level:
normalized_bias = (fit - training_set) / np.min(noise_level)
else:
normalized_bias = (fit - training_set) / noise_level[np.newaxis,:]
if method.lower() == 'rms':
mean_squared_normalized_bias =\
np.mean(np.power(normalized_bias, 2), axis=1)
if np.all(mean_squared_normalized_bias < level2):
return rank
elif method.lower() == 'abs':
            if np.all(np.abs(normalized_bias) < level):
return rank
else:
raise ValueError("method not recognized. Must be 'rms' or 'abs'.")
if suppress_runtime_error:
return number_of_modes_to_consider
else:
raise RuntimeError("The rank of the given training set was larger " +\
"than the number of modes considered.")
|
nilq/baby-python
|
python
|
from app import db,create_app
from flask_script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app.models import Blogpost
app=create_app('development')
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('server', Server)
manager.add_command('db', MigrateCommand)
@manager.command
def test():
'''Run the unit tests'''
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
    return dict(app=app, db=db, Blogpost=Blogpost)
if __name__ == '__main__':
manager.run()
|
nilq/baby-python
|
python
|
import os
from torch_geometric.data import InMemoryDataset, DataLoader, Batch
from torch_geometric import data as DATA
from torch.utils.data.dataloader import default_collate
import torch
import numpy as np
import time
# initialize the dataset
class DTADataset(InMemoryDataset):
def __init__(self, root='/tmp', dataset='davis',
xd=None, y=None, transform=None,
pre_transform=None, smile_graph=None, target_key=None, target_graph=None):
super(DTADataset, self).__init__(root, transform, pre_transform)
self.dataset = dataset
self.process(xd, target_key, y, smile_graph, target_graph)
@property
def raw_file_names(self):
pass
# return ['some_file_1', 'some_file_2', ...]
@property
def processed_file_names(self):
return [self.dataset + '_data_mol.pt', self.dataset + '_data_pro.pt']
def download(self):
# Download to `self.raw_dir`.
pass
def _download(self):
pass
def _process(self):
if not os.path.exists(self.processed_dir):
os.makedirs(self.processed_dir)
def process(self, xd, target_key, y, smile_graph, target_graph):
assert (len(xd) == len(target_key) and len(xd) == len(y)), 'The three lists must be the same length!'
data_list_mol = []
data_list_pro = []
data_list_pro_len = []
data_list_pro_cm = []
data_len = len(xd)
for i in range(data_len):
smiles = xd[i]
tar_key = target_key[i]
labels = y[i]
            # look up the precomputed molecular graph for this SMILES string
c_size, features, edge_index = smile_graph[smiles]
target_features, target_size, concatMap= target_graph[tar_key]
GCNData_mol = DATA.Data(x=torch.Tensor(features),
edge_index=torch.LongTensor(edge_index).transpose(1, 0),
y=torch.FloatTensor([labels]))
GCNData_mol.__setitem__('c_size', torch.LongTensor([c_size]))
data_list_mol.append(GCNData_mol)
data_list_pro.append(target_features)
data_list_pro_len.append(target_size)
data_list_pro_cm.append(concatMap)
self.data_mol = data_list_mol
self.data_pro = data_list_pro
self.data_pro_len = data_list_pro_len
        self.data_pro_cm = data_list_pro_cm
def __len__(self):
return len(self.data_mol)
def __getitem__(self, idx):
        return self.data_mol[idx], self.data_pro[idx], self.data_pro_len[idx], self.data_pro_cm[idx]
# training function at each epoch
def train(model, device, train_loader, optimizer, epoch, writer, TRAIN_BATCH_SIZE):
print('Training on {} samples...'.format(len(train_loader.dataset)))
model.train()
LOG_INTERVAL = 10
train_loss = []
loss_fn = torch.nn.MSELoss()
since = time.time()
for batch_idx, data in enumerate(train_loader):
data_mol = data[0].to(device)
data_pro = data[1].to(device)
data_pro_len = data[2].to(device)
data_pro_cm = data[3].to(device)
optimizer.zero_grad()
output = model(data_mol, data_pro, data_pro_len, data_pro_cm)
loss = loss_fn(output, data_mol.y.view(-1, 1).float().to(device))
loss.backward()
optimizer.step()
if batch_idx % LOG_INTERVAL == 0:
print('Train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch,
batch_idx * TRAIN_BATCH_SIZE,
len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item()))
train_loss.append(loss.item())
epoch_train_loss = np.average(train_loss)
writer.add_scalar('Train/Loss', epoch_train_loss, epoch)
end = time.time()
print("Epoch Time:%.3f" % (end - since))
# predict
def predicting(model, device, loader):
model.eval()
total_preds = torch.Tensor()
total_labels = torch.Tensor()
print('Make prediction for {} samples...'.format(len(loader.dataset)))
with torch.no_grad():
for data in loader:
data_mol = data[0].to(device)
data_pro = data[1].to(device)
data_pro_len = data[2].to(device)
data_pro_cm = data[3].to(device)
output = model(data_mol, data_pro, data_pro_len, data_pro_cm)
total_preds = torch.cat((total_preds, output.cpu()), 0)
total_labels = torch.cat((total_labels, data_mol.y.view(-1, 1).cpu()), 0)
return total_labels.numpy().flatten(), total_preds.numpy().flatten()
# prepare the protein and drug pairs
def collate(data_list):
batchA = Batch.from_data_list([data[0] for data in data_list])
batchB = default_collate([data[1] for data in data_list])
batchC = default_collate([data[2] for data in data_list])
batchD = default_collate([data[3] for data in data_list])
return batchA, batchB, batchC, batchD
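# Usage sketch (illustrative; dataset construction is assumed to happen in the
# surrounding project): the custom collate keeps the molecule graphs as a
# torch_geometric Batch while default-collating the protein-side tensors.
#   from torch.utils.data import DataLoader
#   train_loader = DataLoader(train_dataset, batch_size=512,
#                             shuffle=True, collate_fn=collate)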
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
def main():
h, w = map(int, input().split())
s = [list(input()) for _ in range(h)]
ans = 0
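    # count 2x2 blocks that contain an odd number of '#' cells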
for i in range(h - 1):
for j in range(w - 1):
count = 0
for ni, nj in [(i, j), (i + 1, j), (i, j + 1), (i + 1, j + 1)]:
if s[ni][nj] == "#":
count += 1
if count % 2 == 1:
ans += 1
print(ans)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import time
from typing import List, Dict, Any, Tuple
_measurements = {}
_formats = {}
_default_format = 'Duration of "{name_range}": {humanized_duration}'
def set_format(format: str) -> None:
if not isinstance(format, str):
raise TypeError('Format should be of type "str"')
global _default_format
_default_format = format
def _humanize_duration(duration: float) -> str:
days = int(duration // (24 * 3600))
duration = duration % (24 * 3600)
hours = int(duration // 3600)
duration %= 3600
minutes = int(duration // 60)
duration %= 60
seconds = round(duration, 2)
parts_a = []
parts_b = []
if days == 1:
parts_a.append('1 day')
elif days > 1:
parts_a.append(f'{days} days')
if hours == 1:
parts_a.append('1 hour')
elif hours > 1:
        parts_a.append(f'{hours} hours')
if minutes == 1:
parts_a.append('1 minute')
elif minutes > 1:
parts_a.append(f'{minutes} minutes')
if seconds == 1:
parts_b.append('1 second')
else:
parts_b.append(f'{seconds} seconds')
if len(parts_a) > 0:
parts_a = [', '.join(parts_a)]
string = ' and '.join(parts_a + parts_b)
return string
def _calculate_average_for_time_points(time_points: List[float]) -> float:
average = 0.0
if len(time_points) > 1:
for idx in range(1, len(time_points)):
duration = time_points[idx] - time_points[idx - 1]
average += duration
average = average / (len(time_points) - 1)
return average
class Measurement():
def __init__(self, name: str) -> None:
self.name = name
self._compare_to_index = -2
def _calculate_idx_a_b(self) -> Tuple[float, float]:
if self._compare_to_index < 0:
idx_a = len(self.time_points) + self._compare_to_index
else:
idx_a = self._compare_to_index
idx_a = min(len(self.time_points) - 1, idx_a)
idx_a = max(idx_a, 0)
idx_b = len(self.time_points) - 1
return (idx_a, idx_b)
@property
def time_points(self) -> List[float]:
return _measurements[self.name]
@property
def duration(self) -> float:
idx_a, idx_b = self._calculate_idx_a_b()
return self.time_points[idx_b] - self.time_points[idx_a]
def __getitem__(self, idx: int) -> 'Measurement':
if not isinstance(idx, int):
raise TypeError(f'{idx} should be of type "int"')
measurement = Measurement(self.name)
measurement._compare_to_index = idx
return measurement
def __call__(self, format=None, **kwargs: Dict[str, Any]) -> 'Measurement':
print(self.to_string(format=format, **kwargs))
return self
def __repr__(self) -> str:
a = self.time_points[0]
b = self.time_points[-1]
return f'<{self.name}: {a}->{b}>'
def to_string(self, format: str = None, **kwargs: Dict[str, Any]) -> str:
if format is None:
if self.name in _formats.keys():
format = _formats[self.name]
else:
format = _default_format
idx_a, idx_b = self._calculate_idx_a_b()
# a = self.time_points[idx_a]
# b = self.time_points[idx_b]
# duration = b - a
hduration = _humanize_duration(self.duration)
string = format \
.replace('{name}', self.name) \
.replace(
'{name_range}',
f'{self.name}[{idx_a}]->{self.name}[{idx_b}]') \
.replace('{duration}', str(self.duration)) \
.replace('{humanized_duration}', hduration) \
.replace('{hduration}', hduration) \
.replace('{idx_a}', str(idx_a)) \
.replace('{idx_b}', str(idx_b))
for key, value in kwargs.items():
string = string.replace(f'{{{key}}}', str(value))
return string
def __str__(self) -> str:
return self.to_string()
def set_format(self, format: str = None) -> 'Measurement':
if format is None:
if self.name in _formats.keys():
del _formats[self.name]
else:
_formats[self.name] = format
return self
def squeeze(self) -> 'Measurement':
global _measurements
time_points = _measurements[self.name]
if len(time_points) > 2:
time_points = [
time_points[0],
time_points[-1]
]
_measurements[self.name] = time_points
return self
def summary(self) -> 'Measurement':
from rich.console import Console
from rich.table import Table
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("Measurement", style="dim")
table.add_column("Points count", justify="right")
table.add_column("Average duration", justify="right")
table.add_column("First point", justify="right")
table.add_column("Last point", justify="right")
table.add_row(
self.name,
str(len(self.time_points)),
_humanize_duration(
_calculate_average_for_time_points(self.time_points)),
str(self.time_points[0]),
str(self.time_points[-1])
)
console.print(table)
return self
def __getattr__(attr: str):
if attr not in _measurements.keys():
_measurements[attr] = []
_measurements[attr].append(time.perf_counter())
return Measurement(attr)
def delete(measurement: str) -> None:
if measurement in _measurements.keys():
del _measurements[measurement]
if measurement in _formats.keys():
del _formats[measurement]
def clear() -> None:
global _measurements
global _formats
global _default_format
_measurements = {}
_formats = {}
_default_format = 'Duration of "{name_range}": {humanized_duration}'
def summary() -> None:
from rich.console import Console
from rich.table import Table
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("Measurement", style="dim")
table.add_column("Points count", justify="right")
table.add_column("Average duration", justify="right")
table.add_column("First point", justify="right")
table.add_column("Last point", justify="right")
for measurement, time_points in _measurements.items():
table.add_row(
measurement,
str(len(time_points)),
_humanize_duration(
_calculate_average_for_time_points(time_points)),
str(time_points[0]),
str(time_points[-1])
)
console.print(table)
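# Usage sketch: the module-level __getattr__ means that merely accessing an
# attribute on this module records a time point under that name. The
# self-reference below only exists so the demo runs from within this file;
# normally you would import the module and access attributes on it directly.
if __name__ == '__main__':
    import sys
    measure = sys.modules[__name__]
    measure.demo        # records time point 0 for "demo"
    time.sleep(0.25)
    measure.demo()      # records point 1 and prints the measured duration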
|
nilq/baby-python
|
python
|
from PyQt5.QtCore import QObject, pyqtSignal
class Model(QObject):
amount_changed = pyqtSignal(int)
even_odd_changed = pyqtSignal(str)
enable_reset_changed = pyqtSignal(bool)
users_changed = pyqtSignal(list)
@property
def users(self):
return self._users
def add_user(self, value):
self._users.append(value)
self.users_changed.emit(self._users)
def delete_user(self, value):
del self._users[value]
self.users_changed.emit(self._users)
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
self.amount_changed.emit(value)
@property
def even_odd(self):
return self._even_odd
@even_odd.setter
def even_odd(self, value):
self._even_odd = value
self.even_odd_changed.emit(value)
@property
def enable_reset(self):
return self._enable_reset
@enable_reset.setter
def enable_reset(self, value):
self._enable_reset = value
self.enable_reset_changed.emit(value)
def __init__(self):
super().__init__()
self._amount = 0
self._even_odd = ''
self._enable_reset = False
self._users = ["hans"]
|
nilq/baby-python
|
python
|
import sys
def hello(who):
    print 'Hello, %s!' % who
if __name__ == '__main__':
    hello(sys.argv[1] if len(sys.argv) >= 2 else 'World')
|
nilq/baby-python
|
python
|
import argparse
import sys
import time
import unittest
import warnings
import emoji
from lib.const import CSPM_RUNNING_K8S_MASTER_CHECK_LOG, CSPM_RUNNING_K8S_WORKER_CHECK_LOG, CSPM_START_LOG
from lib.cspm.api import wait_for_compliance_event, wait_for_finding
from lib.cspm.finding import (
is_expected_k8s_master_node_finding,
is_expected_k8s_worker_node_finding,
parse_output_and_extract_findings,
)
from lib.kubernetes import KubernetesHelper
from lib.log import wait_agent_log
from lib.stepper import Step
class TestE2EKubernetes(unittest.TestCase):
namespace = "default"
in_cluster = False
def setUp(self):
warnings.simplefilter("ignore", category=ResourceWarning)
warnings.simplefilter("ignore", category=UserWarning)
warnings.simplefilter("ignore", category=DeprecationWarning)
self.kubernetes_helper = KubernetesHelper(namespace=self.namespace, in_cluster=self.in_cluster)
self.resource_id = "k8s-e2e-tests-control-plane_kubernetes_*_node"
def test_k8s(self):
print("")
agent_name = "security-agent"
with Step(msg="select pod", emoji=":man_running:"):
self.kubernetes_helper.select_pod_name("app=datadog-agent")
with Step(msg="check agent start", emoji=":man_running:"):
wait_agent_log(agent_name, self.kubernetes_helper, CSPM_START_LOG)
with Step(msg="check agent event", emoji=":check_mark_button:"):
output = self.kubernetes_helper.exec_command(
agent_name, ["security-agent", "compliance", "check", "--report"]
)
findings = parse_output_and_extract_findings(
output,
[CSPM_RUNNING_K8S_MASTER_CHECK_LOG, CSPM_RUNNING_K8S_WORKER_CHECK_LOG],
)
self.finding = None
for f in findings:
if is_expected_k8s_master_node_finding(f) or is_expected_k8s_worker_node_finding(f):
self.finding = f
if self.finding is None:
raise LookupError(
f"{agent_name} | {CSPM_RUNNING_K8S_MASTER_CHECK_LOG} | {CSPM_RUNNING_K8S_WORKER_CHECK_LOG}"
)
with Step(msg="wait for intake (~1m)", emoji=":alarm_clock:"):
time.sleep(1 * 60)
with Step(msg="check app compliance event", emoji=":SOON_arrow:"):
wait_for_compliance_event(f"resource_id:{self.resource_id}")
with Step(msg="wait for finding generation (~1m)", emoji=":alarm_clock:"):
time.sleep(1 * 60)
with Step(msg="check app finding", emoji=":chart_increasing_with_yen:"):
wait_for_finding(f"@resource_type:kubernetes_*_node @resource:{self.resource_id}")
print(emoji.emojize(":heart_on_fire:"), flush=True)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--namespace", default="default")
parser.add_argument("--in-cluster", action="store_true")
parser.add_argument("unittest_args", nargs="*")
args = parser.parse_args()
    # set up some test-specific class attributes
TestE2EKubernetes.namespace = args.namespace
TestE2EKubernetes.in_cluster = args.in_cluster
unit_argv = [sys.argv[0]] + args.unittest_args
unittest.main(argv=unit_argv)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
#
# Copyright 2021- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
import os
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn import metrics
from hkmeans import HKMeans
from clustering_utils import fetch_20ng, save_report_and_heatmap
# This example compares Scikit-Learn's Lloyd's K-Means to Hartigan's K-Means
# delivered in this distribution. We will use the 20 News Groups dataset as a
# benchmark (about 19K docs, 20 clusters).
# step 0 - create an output directory if it does not exist
output_path = os.path.join("output", "ex1")
if not os.path.exists(output_path):
os.makedirs(output_path)
# step 1 - read the dataset
texts, gold_labels_array, n_clusters, topics, n_samples = fetch_20ng('all')
print("Clustering dataset contains %d texts from %d topics" % (n_samples, n_clusters))
# The following settings are meant for comparison purposes and should be adjusted
# based on the real-world use-case.
# The default for Lloyd's K-Means in sklearn is n_init=10, max_iter=300;
# For Hartigan's K-Means it is enough to use max_iter=15.
# Here we use max_iter=15 for both to be able to compare run-time
# We set kmeans algorithm to 'full' to apply lloyd's k-means
n_init = 10
max_iter = 15
setups = [
("Scikit-Learn Lloyd's K-Means", lambda: KMeans(n_clusters=n_clusters, n_init=n_init,
max_iter=max_iter, algorithm='full')),
("Hartigan's K-Means", lambda: HKMeans(n_clusters=n_clusters, n_init=n_init,
max_iter=max_iter))
]
# step 2 - represent the clustering data using tf-idf vectors over the 10k most
# frequent unigrams in the dataset, excluding stop words. Note that if you wish
# to apply some text pre-processing like stemming - that's the place to do that.
print("Vectorization starts...", end=' ')
vectorization_start_t = time()
vectorizer = TfidfVectorizer(max_features=10000, stop_words='english')
vectors = vectorizer.fit_transform(texts)
vectorization_end_t = time()
print("ended in %.3f secs." % (vectorization_end_t - vectorization_start_t))
print("Clustering settings: n_init=%d, max_iter=%d:" % (n_init, max_iter))
for algorithm_name, factory in setups:
print("Running with %s:" % algorithm_name)
# step 3 - cluster the data
print("\tClustering starts...", end=' ')
clustering_start_t = time()
algorithm = factory()
algorithm.fit(vectors)
clustering_end_t = time()
print("ended in %.3f secs." % (clustering_end_t - clustering_start_t))
predictions_array = algorithm.labels_
# measure the clustering quality
homogeneity = metrics.homogeneity_score(gold_labels_array, predictions_array)
completeness = metrics.completeness_score(gold_labels_array, predictions_array)
v_measure = metrics.v_measure_score(gold_labels_array, predictions_array)
ami = metrics.adjusted_mutual_info_score(gold_labels_array, predictions_array)
ari = metrics.adjusted_rand_score(gold_labels_array, predictions_array)
print("\tClustering measures: AMI: %.3f, ARI: %.3f" % (ami, ari))
save_report_and_heatmap(gold_labels_array, predictions_array, topics,
algorithm, algorithm_name, output_path,
ami, ari, homogeneity, completeness, v_measure,
n_samples, vectorization_end_t-vectorization_start_t,
clustering_end_t-clustering_start_t)
|
nilq/baby-python
|
python
|
import pytest
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from picknmix import Layer
class TestLayer:
def test_different_numbers_of_preprocessor_and_models(self):
with pytest.raises(Exception):
assert Layer([LinearRegression(), LinearRegression()],
[MinMaxScaler()])
def test_fit_single_model_without_preprocess(self):
layer_model = Layer([LinearRegression()])
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
# X and y are linearly related, predictions will be almost perfect
result = layer_model.fit(X, y)
assert result.shape == (4,1)
assert np.allclose(result.flatten(), y)
    def test_fit_single_model_with_preprocess(self):
layer_model = Layer([LinearRegression()],
[MinMaxScaler()])
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
# X and y are linearly related, predictions will be almost perfect
result = layer_model.fit(X, y)
assert result.shape == (4,1)
assert np.allclose(result.flatten(), y)
def test_fit_single_model_with_2_class_proba(self):
layer_model = Layer([LogisticRegression(solver='liblinear')],
proba=True)
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.array([1, 1, 0, 0])
result = layer_model.fit(X, y)
assert result.shape == (4,2)
def test_fit_single_model_with_multi_class_proba(self):
layer_model = Layer([LogisticRegression(solver='lbfgs',
multi_class='multinomial')],
proba=True)
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.array([1, 1, 0, 2])
result = layer_model.fit(X, y)
assert result.shape == (4,3)
def test_fit_multiple_models(self):
layer_model = Layer([LinearRegression(), LinearRegression()],
[None, MinMaxScaler()])
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
# X and y are linearly related, predictions will be almost perfect
result = layer_model.fit(X, y)
assert result.shape == (4,2)
assert np.allclose(result[:,0], y)
assert np.allclose(result[:,1], y)
def test_fit_multiple_model_with_2_class_proba(self):
layer_model = Layer([LogisticRegression(solver='liblinear'),
LogisticRegression(solver='liblinear')],
proba=[True,False])
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.array([1, 1, 0, 0])
result = layer_model.fit(X, y)
assert result.shape == (4,3)
def test_predict_single_model_without_preprocess(self):
layer_model = Layer([LinearRegression()])
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
layer_model.fit(X, y)
result = layer_model.predict(np.array([[3, 5],[3, 5]]))
assert result.shape == (2,1)
assert np.allclose(result, np.array([[16],[16]]))
def test_predict_single_model_with_preprocess(self):
layer_model = Layer([LinearRegression()],
[MinMaxScaler()])
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
layer_model.fit(X, y)
result = layer_model.predict(np.array([[3, 5]]))
assert result.shape == (1,1)
assert np.allclose(result, np.array([[16]]))
def test_predict_single_model_with_2_class_proba(self):
layer_model = Layer([LogisticRegression(solver='liblinear')],
proba=True)
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.array([1, 1, 0, 0])
layer_model.fit(X, y)
result = layer_model.predict(np.array([[3, 5]]))
assert result.shape == (1,2)
def test_predict_single_model_with_multi_class_proba(self):
layer_model = Layer([LogisticRegression(solver='lbfgs',
multi_class='multinomial')],
proba=True)
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.array([1, 1, 0, 2])
layer_model.fit(X, y)
result = layer_model.predict(np.array([[3, 5]]))
assert result.shape == (1,3)
def test_predict_multiple_model(self):
layer_model = Layer([LinearRegression(), LinearRegression()],
[None, MinMaxScaler()])
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
layer_model.fit(X, y)
result = layer_model.predict(np.array([[3, 5]]))
assert result.shape == (1,2)
assert np.allclose(result, np.array([[16, 16]]))
def test_predict_multiple_model_with_2_class_proba(self):
layer_model = Layer([LogisticRegression(solver='liblinear'),
LogisticRegression(solver='liblinear')],
proba=[True,False])
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.array([1, 1, 0, 0])
layer_model.fit(X, y)
result = layer_model.predict(np.array([[3, 5], [2, 5]]))
assert result.shape == (2,3)
def test_using_proba_without_predict_proba_method(self):
with pytest.warns(Warning) as record:
layer_model = Layer([LinearRegression()],
proba=True)
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
layer_model.fit(X, y)
result = layer_model.predict(np.array([[3, 5],[3, 5]]))
assert result.shape == (2,1)
assert np.allclose(result, np.array([[16],[16]]))
assert record
|
nilq/baby-python
|
python
|
"""
Standard classes for the Converter module
"""
import logging
import pickle
import uuid
from datetime import date
from django.core.cache import cache
class ConverterLoadError(Exception):
"""
Exception when loading a converter from its redis pickle
"""
msg = 'Error while loading converter'
class BaseConverter:
"""
Base class for conversion
Mock up for usage in type hinting
"""
INITIATED_STATUS = 'initiated'
INSERTING_STATUS = 'inserting'
PENDING_STATUS = 'pending'
FINISHED = 'finished'
WITH_ERRORS = 'finished with errors'
class ConverterResultDetail:
"""
Details of a conversion
"""
unit = None
original_value = 0
date = None
conversion_rate = 0
converted_value = 0
def __init__(self, unit: str, original_value: float,
date: date, conversion_rate: float,
converted_value: float):
"""
Initialize details
:param unit: dimension as a string
:param original_value: value before conversion
:param date: date of conversion
:param conversion_rate: rate of conversion
:param converted_value: resulting value
"""
self.unit = unit
self.original_value = original_value
self.date = date
self.conversion_rate = conversion_rate
self.converted_value = converted_value
class ConverterResultError:
"""
Error from a conversion
"""
unit = None
original_value = None
date = None
error = None
def __init__(self, unit: str, original_value: float,
date: date, error: str):
"""
Initialize error
:param unit: string of the dimension
:param original_value: value before conversion
:param date: date of conversion
:param error: description of the error
"""
self.unit = unit
self.original_value = original_value
self.date = date
self.error = error
class ConverterResult:
"""
Result of a batch of conversions
"""
id = None
target = None
detail = []
sum = 0
status = None
errors = []
def __init__(self, id: str = None, target: str = None,
detail: [ConverterResultDetail] = None,
sum: float = 0, status: str = BaseConverter.INITIATED_STATUS,
errors: [ConverterResultError] = None):
"""
Initialize result
:param id: ID of the batch
:param target: target currency
:param detail: List of ConverterResultDetail
:param sum: sum of all detailed conversions
:param status: status of the batch
:param errors: List of conversion errors
"""
self.id = id
self.target = target
self.detail = detail or []
self.sum = sum
self.status = status
self.errors = errors or []
def increment_sum(self, value):
"""
Sum individual conversion results
They are all in the target currency
:param value: result of a conversion
"""
try:
float(value)
self.sum += value
        except (TypeError, ValueError):
            logging.error("invalid value %s, will not increment result sum", value)
def end_batch(self):
"""
Puts a final status on the batch
"""
if self.errors:
self.status = BaseConverter.WITH_ERRORS
else:
self.status = BaseConverter.FINISHED
return self.status
class BaseConverter:
"""
Base conversion class
"""
INITIATED_STATUS = 'initiated'
INSERTING_STATUS = 'inserting'
PENDING_STATUS = 'pending'
FINISHED = 'finished'
WITH_ERRORS = 'finished with errors'
id = None
status = INITIATED_STATUS
data = []
converted_lines = []
aggregated_result = {}
def __init__(self, id: str = None):
"""
Initialize BaseConverter
:param id: ID of the batch
"""
self.id = id or uuid.uuid4()
self.data = []
@classmethod
def load(cls, id: str) -> BaseConverter:
"""
Load Converter from cache
:param id: ID of the batch
"""
obj = cache.get(id)
if obj:
return pickle.loads(obj)
        raise ConverterLoadError(f"Converter with id {id} not found in cache")
def save(self):
"""
Save Converter to cache
"""
cache.set(self.id, pickle.dumps(self))
    def add_data(self, data: list) -> list:
"""
Check data and add it to the dataset
Return list of errors
:param data: list of items to convert
"""
if not data:
return [{'data': 'Empty data set', }]
errors = self.check_data(data)
if errors:
return errors
        self.status = self.INSERTING_STATUS
        self.data.extend(data)
        self.save()
        return []
def end_batch(self, status: str):
"""
set status of the batch
:param status: status from statuses
"""
self.status = status
def check_data(self, data):
"""
Validates data
        Not implemented
:param data: list of items to convert
"""
raise NotImplementedError
def convert(self) -> ConverterResult:
"""
Converts data to base currency
Not implemented
"""
raise NotImplementedError
class Batch:
"""
Batch class
"""
id = None
status = None
def __init__(self, id: str, status: str):
"""
Initialize the batch
:param id: ID of the batch
:param status: status of the batch
"""
self.id = id
self.status = status
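# Minimal usage sketch (illustrative subclass; the field names are assumptions,
# not part of this module): a concrete converter only needs check_data() and
# convert().
#   class NoopConverter(BaseConverter):
#       def check_data(self, data):
#           return [] if all('value' in item for item in data) else [{'data': 'missing value'}]
#       def convert(self) -> ConverterResult:
#           result = ConverterResult(id=self.id, target='EUR')
#           for item in self.data:
#               result.increment_sum(item['value'])
#           result.end_batch()
#           return result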
|
nilq/baby-python
|
python
|
# @author Kilari Teja
from halley.skills.tdl.utils import PropMap, Constants
import re
class OPERATOR(object):
DESCRIPTOR = None
@classmethod
def register(clas, tokenStore, statsCollector=None):
        OPERATOR.registerStatic(clas, tokenStore, statsCollector)
@staticmethod
def registerStatic(clas, tokenStore, statsCollector=None):
clas.StatsCollector = statsCollector
if isinstance(clas.DESCRIPTOR, list):
map(lambda d: d.setClass(clas), clas.DESCRIPTOR)
tokenStore += clas.DESCRIPTOR
else:
clas.DESCRIPTOR.setClass(clas)
tokenStore.append(clas.DESCRIPTOR)
def __init__(self, action, selfToken, *args):
assert len(args) > 0
self._args = args
self._actn = action
self.label = Constants.TOKEN_TYPES.COMPOUND_EXPR
def bool(self, text):
return self.eval(text).val >= 0
def eval(self, text):
return reduce(self._actn, map(lambda arg: arg.eval(text), self._args))
class OpDescriptor(PropMap):
def __init__(self, regex, precedence, label, **kargs):
super(OpDescriptor, self).__init__(
clas=None,
regex=regex,
label=label,
precedence=precedence,
**kargs
)
def setClass(self, clas):
self.clas = clas
class Result(PropMap):
_FALSE = None
def __init__(self, val, word):
super(Result, self).__init__(val=val, word=word)
@staticmethod
def FALSE():
if Result._FALSE is None:
Result._FALSE = Result(-1, None)
return Result._FALSE
def resolveBinaryParameterMagAndDirn(selector, reverseMagSym, paramText):
mag, dirn = re.match(selector, paramText), False
if mag is None:
return (None, dirn)
mag = mag.groups()[0]
dirn = not mag.startswith(reverseMagSym)
mag = int(mag[1:] if not dirn else mag)
return (mag, dirn)
# Supports >, <, ''
def resolve3WayParameter(selector, paramText):
paramText = str(paramText)[1:]
    # greater than
    if paramText.startswith(">"):
        return lambda num: num > int(paramText[1:])
    # less than
    elif paramText.startswith("<"):
        return lambda num: num < int(paramText[1:])
    # exact match
    else:
return lambda num: num == int(paramText)
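# Usage sketch (illustrative values): the first character of the parameter text
# is a marker that gets stripped; the remainder selects the comparison.
#   pred = resolve3WayParameter(None, "#>5")  # the selector argument is unused here
#   assert pred(6) and not pred(5)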
|
nilq/baby-python
|
python
|
from typing import List
def info_from_jenkins_auth(username, password, required_scopes):
"""
Check and retrieve authentication information from basic auth.
Returned value will be passed in 'token_info' parameter of your operation function, if there is one.
'sub' or 'uid' will be set in 'user' parameter of your operation function, if there is one.
    :param username: login provided by Authorization header
    :type username: str
    :param password: password provided by Authorization header
    :type password: str
    :param required_scopes: Always None. Used for other authentication methods
    :type required_scopes: None
:return: Information attached to user or None if credentials are invalid or does not allow access to called API
:rtype: dict | None
"""
return {'uid': 'user_id'}
def info_from_jwt_auth(api_key, required_scopes):
"""
Check and retrieve authentication information from api_key.
Returned value will be passed in 'token_info' parameter of your operation function, if there is one.
'sub' or 'uid' will be set in 'user' parameter of your operation function, if there is one.
    :param api_key: API key provided by Authorization header
    :type api_key: str
    :param required_scopes: Always None. Used for other authentication methods
    :type required_scopes: None
:return: Information attached to provided api_key or None if api_key is invalid or does not allow access to called API
:rtype: dict | None
"""
return {'uid': 'user_id'}
|
nilq/baby-python
|
python
|
#! /usr/bin/env python2
import os
filepath = os.path.join(
str(os.environ.get("GITHUB_WORKSPACE")), str(os.environ.get("FILE_TO_MODIFY"))
)
with open(filepath) as f:
newText = f.read().replace(
str(os.environ.get("FIND")), str(os.environ.get("REPLACE"))
)
with open(filepath, "w") as f:
f.write(newText)
with open(filepath, "r") as f:
print(f.read())
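# Usage sketch (assumption: run as a GitHub Actions step with these variables
# exported by the workflow):
#   GITHUB_WORKSPACE=/path/to/checkout FILE_TO_MODIFY=README.md \
#   FIND=old-text REPLACE=new-text python replace.py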
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.test.utils import override_settings, modify_settings
from django_dynamic_fixture import G
from django_webtest import WebTest
from fluent_pages.models.db import PageLayout
from fluent_contents.models import Placeholder
from fluent_contents.plugins.rawhtml.models import RawHtmlItem
from ..admin import PublishingAdmin
from ..models import PublishingModel
from ..pagetypes.fluentpage.models import FluentPage as Page
from ..utils import create_content_instance, get_draft_hmac  # , verify_draft_url, get_draft_url
User = get_user_model()
class ModelM(PublishingModel):
title = models.CharField(max_length=255)
class Meta:
app_label = 'fluentcms_publishing'
admin.site.register(ModelM, PublishingAdmin)
class AdminTest(WebTest):
""" Base utility methods to test interaction with the site admin. """
csrf_checks = False
def refresh(self, obj, obj_pk=None):
"""
Return the same object reloaded from the database, or optinally load
an arbitrary object by PK if this ID is provided.
"""
if obj_pk is None:
obj_pk = obj.pk
return obj.__class__.objects.get(pk=obj_pk)
def ct_for_model(self, model_class_or_obj):
return ContentType.objects.get_for_model(model_class_or_obj)
def assertNoFormErrorsInResponse(self, response):
"""
Fail if response content has any lines containing the 'errorlist'
keyword, which indicates the form submission failed with errors.
"""
errorlist_messages = [
l.strip()
for l in response.text.split('\n')
if 'errorlist' in l
]
self.assertEqual([], errorlist_messages)
def admin_publish_item(self, obj, user=None):
ct = self.ct_for_model(obj)
admin_app = '_'.join(ct.natural_key())
response = self.app.get(
reverse('admin:%s_publish' % admin_app, args=(obj.pk,)),
user=user,
)
self.assertNoFormErrorsInResponse(response)
self.assertEqual(302, response.status_code)
def admin_unpublish_item(self, obj, user=None):
ct = self.ct_for_model(obj)
admin_app = '_'.join(ct.natural_key())
response = self.app.get(
reverse('admin:%s_unpublish' % admin_app, args=(obj.pk,)),
user=user,
)
self.assertNoFormErrorsInResponse(response)
self.assertEqual(302, response.status_code)
class TestPublishingAdmin(AdminTest):
"""
Test publishing features via site admin.
"""
def setUp(self):
self.staff = G(
User,
is_staff=True,
is_active=True,
is_superuser=True,
)
self.model = ModelM.objects.create(title="O hai, world!")
def test_publish_model(self):
# Confirm model is unpublished and versioned as such
self.assertIsNone(self.model.publishing_linked)
# Check admin change model includes publish links, not unpublish ones
response = self.app.get(
reverse('admin:fluentcms_publishing_modelm_change',
args=(self.model.pk, )),
user=self.staff)
self.assertEqual(response.status_code, 200)
self.assertTrue([f for f in response.text.split('\n') if 'submit' in f if '_publish' in f])
self.assertFalse([f for f in response.text.split('\n') if 'submit' in f if '_unpublish' in f])
# Publish via admin
self.admin_publish_item(self.model, user=self.staff)
self.model = self.refresh(self.model)
self.assertIsNotNone(self.model.publishing_linked)
self.assertTrue(self.model.has_been_published)
self.assertTrue(self.model.get_published().has_been_published)
# Check admin change model includes unpublish link (published item)
response = self.app.get(
reverse('admin:fluentcms_publishing_modelm_change',
args=(self.model.pk, )),
user=self.staff)
self.assertEqual(response.status_code, 200)
self.assertFalse([f for f in response.text.split('\n') if 'submit' in f if '_publish' in f])
self.assertTrue([f for f in response.text.split('\n') if 'submit' in f if '_unpublish' in f])
# Publish again
self.model.title += ' - changed'
self.model.save()
self.admin_publish_item(self.model, user=self.staff)
self.model = self.refresh(self.model)
# Unpublish via admin
self.admin_unpublish_item(self.model, user=self.staff)
# New version has unpublished status
self.model = self.refresh(self.model)
self.assertIsNone(self.model.publishing_linked)
self.assertFalse(self.model.has_been_published)
# Check admin change model includes publish links, not unpublish ones
response = self.app.get(
reverse('admin:fluentcms_publishing_modelm_change',
args=(self.model.pk, )),
user=self.staff)
self.assertEqual(response.status_code, 200)
self.assertTrue([f for f in response.text.split('\n') if 'submit' in f if '_publish' in f])
self.assertFalse([f for f in response.text.split('\n') if 'submit' in f if '_unpublish' in f])
class TestPublishingAdminForPage(AdminTest):
def setUp(self):
self.ct = self.ct_for_model(Page)
self.admin = G(
User,
is_staff=True,
is_active=True,
is_superuser=True,
)
self.layout = G(
PageLayout,
template_path='default.html',
)
self.page = Page.objects.create(
author=self.admin,
title='Hello, world!',
slug='hello-world',
layout=self.layout,
)
self.content_instance = create_content_instance(
RawHtmlItem,
self.page,
placeholder_name='content',
html='<b>lorem ipsum dolor sit amet...</b>'
)
# Generate URL paths/links to test
self.admin_add_page_url = reverse(
'admin:fluentpage_fluentpage_add')
self.admin_change_page_url = reverse(
'admin:fluentpage_fluentpage_change',
args=(self.page.pk, ))
def test_admin_monkey_patch_slug_duplicates(self):
# Test our monkey patch works to fix duplicate `slug` field errors
# caused by draft and published copies of the same item sharing a slug.
# Confirm we have a draft publishable item that has a slug field
self.assertEqual('hello-world', self.page.slug)
self.assertIsNone(self.page.publishing_linked)
# Publish item via admin with same slug
self.admin_publish_item(self.page, user=self.admin)
self.page = self.refresh(self.page)
self.assertIsNotNone(self.page.publishing_linked)
self.assertEqual(
'hello-world', self.page.get_published().slug)
# Confirm we can update draft version via admin with shared slug
response = self.app.get(
self.admin_change_page_url,
user=self.admin)
self.assertEqual(response.status_code, 200)
form = response.forms['fluentpage_form']
form['title'].value = 'O hai, world!'
response = form.submit('_continue', user=self.admin)
self.assertNotContains(
response, 'This slug is already used by an other page at the same level', status_code=302,
)
        self.page = self.refresh(self.page)
self.assertEqual('hello-world', self.page.slug)
self.assertEqual('O hai, world!', self.page.title)
# Confirm we can re-publish draft version via admin with shared slug
self.admin_publish_item(self.page, user=self.admin)
self.page = self.refresh(self.page)
self.assertIsNotNone(self.page.publishing_linked)
self.assertEqual(
'hello-world', self.page.get_published().slug)
self.assertEqual(
'O hai, world!', self.page.get_published().title)
# Confirm we cannot create a different item via admin with same slug
response = self.app.get(
self.admin_add_page_url,
user=self.admin)
form = response.forms['page_form']
form['ct_id'].select(self.ct.pk) # Choose Page page type
response = form.submit(user=self.admin).follow()
self.assertNotContains(response, 'error')
form = response.forms['fluentpage_form']
form['layout'].select(self.layout.pk)
form['title'] = 'O hai, world'
form['slug'] = self.page.slug # Same slug as existing page
response = form.submit('_continue', user=self.admin)
self.assertContains(
response, 'This slug is already used by an other page at the same level',
)
def test_admin_monkey_patch_override_url_duplicates(self):
# Test our monkey patch works to fix duplicate `override_url` field
# errors caused by draft and published copies of the same item sharing
# an override URL.
# Add override URL to item
self.page.override_url = '/'
self.page.save()
# Publish item via admin with same override URL
self.admin_publish_item(self.page, user=self.admin)
self.page = self.refresh(self.page)
self.assertIsNotNone(self.page.publishing_linked)
self.assertEqual(
'/', self.page.get_published().override_url)
# Confirm we can update draft version via admin with same override URL
response = self.app.get(
self.admin_change_page_url,
user=self.admin)
self.assertEqual(response.status_code, 200)
form = response.forms['fluentpage_form']
form['title'].value = 'O hai, world!'
response = form.submit('_continue', user=self.admin)
self.assertNotContains(
response, 'This URL is already taken by an other page.', status_code=302,
)
self.page = self.refresh(self.page)
self.assertEqual('/', self.page.override_url)
self.assertEqual('O hai, world!', self.page.title)
# Confirm we can re-publish draft version via admin with same override
self.admin_publish_item(self.page, user=self.admin)
self.page = self.refresh(self.page)
self.assertIsNotNone(self.page.publishing_linked)
self.assertEqual(
'/', self.page.get_published().override_url)
self.assertEqual(
'O hai, world!', self.page.get_published().title)
# Confirm we cannot create a different item via admin with same
# override URL
response = self.app.get(
self.admin_add_page_url,
user=self.admin)
form = response.forms['page_form']
form['ct_id'].select(self.ct.pk) # Choose Page page type
response = form.submit(user=self.admin).follow()
self.assertNotContains(response, 'error')
form = response.forms['fluentpage_form']
form['layout'].select(self.layout.pk)
form['title'] = 'O hai, world!'
form['slug'] = 'o-hai-woorld'
form['override_url'] = self.page.override_url # Same override
response = form.submit('_continue', user=self.admin)
self.assertContains(
response, 'This URL is already taken by an other page.',
)
@modify_settings(MIDDLEWARE_CLASSES={'append': 'fluentcms_publishing.middleware.PublishingMiddleware'})
class TestPublishingForPageViews(AdminTest):
def setUp(self):
self.user = G(User)
self.admin = G(
User,
is_staff=True,
is_active=True,
is_superuser=True,
)
self.layout = G(
PageLayout,
template_path='default.html',
)
self.page = Page.objects.create(
author=self.admin,
title='Hello, world!',
slug='hello-world',
layout=self.layout,
)
self.content_instance = create_content_instance(
RawHtmlItem,
self.page,
placeholder_name='content',
html='<b>lorem ipsum dolor sit amet...</b>'
)
def test_url_routing_for_draft_and_published_copies(self):
# Unpublished page is not visible to anonymous users
response = self.app.get('/hello-world/', expect_errors=True)
self.assertEqual(response.status_code, 404)
# Unpublished page is visible to staff user with '?edit' param redirect
response = self.app.get(
'/hello-world/',
user=self.admin,
).follow()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Hello, world!')
# Publish page
self.page.publish()
self.assertEqual(
'/hello-world/',
self.page.get_published().get_absolute_url())
# Published page is visible to anonymous users
response = self.app.get('/hello-world/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Hello, world!')
# Change Title and slug (URL) of draft page
self.page.title = 'O hai, world!'
self.page.slug = 'o-hai-world'
self.page.save()
self.page = self.refresh(self.page)
self.assertEqual(
'/o-hai-world/', self.page.get_absolute_url())
# URL of published page remains unchanged
self.assertEqual(
'/hello-world/',
self.page.get_published().get_absolute_url())
# Published page is at unchanged URL
response = self.app.get('/hello-world/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Hello, world!')
# Draft page is at changed URL
response = self.app.get(
'/o-hai-world/',
user=self.admin,
).follow()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'O hai, world!')
# Draft page is visible at changed URL via ?edit URL
response = self.app.get(
'/o-hai-world/?edit',
user=self.admin,
).follow()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'O hai, world!')
# Draft page is *not* visible at ?edit URL of old (published page) URL
response = self.app.get(
'/hello-world/?edit',
user=self.admin,
)
self.assertEqual(response.status_code, 302)
response = response.follow(expect_errors=True)
self.assertEqual(response.status_code, 404)
def test_verified_draft_url_for_publishingmodel(self):
# Unpublished page is not visible to anonymous users
response = self.app.get(
self.page.get_absolute_url(),
user=self.user,
expect_errors=True)
self.assertEqual(response.status_code, 404)
# Unpublished page is visible to staff user with '?edit' param redirect
response = self.app.get(
self.page.get_absolute_url(),
user=self.admin)
self.assertEqual(response.status_code, 302)
self.assertTrue('?edit=' in response['Location'])
response = response.follow()
self.assertEqual(response.status_code, 200)
# Unpublished page is visible to any user with signed '?edit' param
salt = '123'
url_hmac = get_draft_hmac(salt, self.page.get_absolute_url())
response = self.app.get(
self.page.get_absolute_url() + '?edit=%s:%s' % (
salt, url_hmac),
user=self.user)
self.assertEqual(response.status_code, 200)
# Publish page
self.page.publish()
# Published page is visible to anonymous users
response = self.app.get(
self.page.get_absolute_url(),
user=self.user)
self.assertEqual(response.status_code, 200)
|
nilq/baby-python
|
python
|
"""
tests.support.pytest.fixtures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The purpose of this fixtures module is provide the same set of available fixture for the old unittest
test suite under ``test/integration``, ``tests/multimaster`` and ``tests/unit``.
Please refrain from adding fixtures to this module and instead add them to the appropriate
``conftest.py`` file.
"""
import os
import shutil
import stat
import sys
import pytest
import salt.utils.files
import salt.utils.path
import salt.utils.platform
from salt.serializers import yaml
from salt.utils.immutabletypes import freeze
from tests.support.runtests import RUNTIME_VARS
def _get_virtualenv_binary_path():
try:
return _get_virtualenv_binary_path.__virtualenv_binary__
except AttributeError:
# Under windows we can't seem to properly create a virtualenv off of another
# virtualenv, we can on linux but we will still point to the virtualenv binary
# outside the virtualenv running the test suite, if that's the case.
try:
real_prefix = sys.real_prefix
# The above attribute exists, this is a virtualenv
if salt.utils.platform.is_windows():
virtualenv_binary = os.path.join(
real_prefix, "Scripts", "virtualenv.exe"
)
else:
# We need to remove the virtualenv from PATH or we'll get the virtualenv binary
# from within the virtualenv, we don't want that
path = os.environ.get("PATH")
if path is not None:
path_items = path.split(os.pathsep)
for item in path_items[:]:
if item.startswith(sys.base_prefix):
path_items.remove(item)
os.environ["PATH"] = os.pathsep.join(path_items)
virtualenv_binary = salt.utils.path.which("virtualenv")
if path is not None:
# Restore previous environ PATH
os.environ["PATH"] = path
if not virtualenv_binary.startswith(real_prefix):
virtualenv_binary = None
if virtualenv_binary and not os.path.exists(virtualenv_binary):
# It doesn't exist?!
virtualenv_binary = None
except AttributeError:
# We're not running inside a virtualenv
virtualenv_binary = None
_get_virtualenv_binary_path.__virtualenv_binary__ = virtualenv_binary
return virtualenv_binary
@pytest.fixture(scope="session")
def integration_files_dir(salt_factories):
"""
Fixture which returns the salt integration files directory path.
Creates the directory if it does not yet exist.
"""
dirname = salt_factories.root_dir.join("integration-files")
dirname.ensure(dir=True)
return dirname
@pytest.fixture(scope="session")
def state_tree_root_dir(integration_files_dir):
"""
Fixture which returns the salt state tree root directory path.
Creates the directory if it does not yet exist.
"""
dirname = integration_files_dir.join("state-tree")
dirname.ensure(dir=True)
return dirname
@pytest.fixture(scope="session")
def pillar_tree_root_dir(integration_files_dir):
"""
Fixture which returns the salt pillar tree root directory path.
Creates the directory if it does not yet exist.
"""
dirname = integration_files_dir.join("pillar-tree")
dirname.ensure(dir=True)
return dirname
@pytest.fixture(scope="session")
def base_env_state_tree_root_dir(state_tree_root_dir):
"""
Fixture which returns the salt base environment state tree directory path.
Creates the directory if it does not yet exist.
"""
dirname = state_tree_root_dir.join("base")
dirname.ensure(dir=True)
RUNTIME_VARS.TMP_STATE_TREE = dirname.realpath().strpath
RUNTIME_VARS.TMP_BASEENV_STATE_TREE = RUNTIME_VARS.TMP_STATE_TREE
return dirname
@pytest.fixture(scope="session")
def prod_env_state_tree_root_dir(state_tree_root_dir):
"""
Fixture which returns the salt prod environment state tree directory path.
Creates the directory if it does not yet exist.
"""
dirname = state_tree_root_dir.join("prod")
dirname.ensure(dir=True)
RUNTIME_VARS.TMP_PRODENV_STATE_TREE = dirname.realpath().strpath
return dirname
@pytest.fixture(scope="session")
def base_env_pillar_tree_root_dir(pillar_tree_root_dir):
"""
Fixture which returns the salt base environment pillar tree directory path.
Creates the directory if it does not yet exist.
"""
dirname = pillar_tree_root_dir.join("base")
dirname.ensure(dir=True)
RUNTIME_VARS.TMP_PILLAR_TREE = dirname.realpath().strpath
RUNTIME_VARS.TMP_BASEENV_PILLAR_TREE = RUNTIME_VARS.TMP_PILLAR_TREE
return dirname
@pytest.fixture(scope="session")
def prod_env_pillar_tree_root_dir(pillar_tree_root_dir):
"""
Fixture which returns the salt prod environment pillar tree directory path.
Creates the directory if it does not yet exist.
"""
dirname = pillar_tree_root_dir.join("prod")
dirname.ensure(dir=True)
RUNTIME_VARS.TMP_PRODENV_PILLAR_TREE = dirname.realpath().strpath
return dirname
@pytest.fixture(scope="session")
def salt_syndic_master_config(request, salt_factories):
root_dir = salt_factories._get_root_dir_for_daemon("syndic_master")
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.CONF_DIR, "syndic_master")
) as rfh:
config_defaults = yaml.deserialize(rfh.read())
tests_known_hosts_file = root_dir.join("salt_ssh_known_hosts").strpath
with salt.utils.files.fopen(tests_known_hosts_file, "w") as known_hosts:
known_hosts.write("")
config_defaults["root_dir"] = root_dir.strpath
config_defaults["known_hosts_file"] = tests_known_hosts_file
config_defaults["syndic_master"] = "localhost"
config_defaults["transport"] = request.config.getoption("--transport")
config_overrides = {}
ext_pillar = []
if salt.utils.platform.is_windows():
ext_pillar.append(
{"cmd_yaml": "type {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
)
else:
ext_pillar.append(
{"cmd_yaml": "cat {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
)
# We need to copy the extension modules into the new master root_dir or
# it will be prefixed by it
extension_modules_path = root_dir.join("extension_modules").strpath
if not os.path.exists(extension_modules_path):
shutil.copytree(
os.path.join(RUNTIME_VARS.FILES, "extension_modules"),
extension_modules_path,
)
# Copy the autosign_file to the new master root_dir
autosign_file_path = root_dir.join("autosign_file").strpath
shutil.copyfile(
os.path.join(RUNTIME_VARS.FILES, "autosign_file"), autosign_file_path
)
# all read, only owner write
autosign_file_permissions = (
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
)
os.chmod(autosign_file_path, autosign_file_permissions)
config_overrides.update(
{
"ext_pillar": ext_pillar,
"extension_modules": extension_modules_path,
"file_roots": {
"base": [
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(RUNTIME_VARS.FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
],
},
"pillar_roots": {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
],
"prod": [RUNTIME_VARS.TMP_PRODENV_PILLAR_TREE],
},
}
)
return salt_factories.configure_master(
request,
"syndic_master",
order_masters=True,
config_defaults=config_defaults,
config_overrides=config_overrides,
)
@pytest.fixture(scope="session")
def salt_syndic_config(request, salt_factories, salt_syndic_master_config):
return salt_factories.configure_syndic(
request, "syndic", master_of_masters_id="syndic_master"
)
@pytest.fixture(scope="session")
def salt_master_config(request, salt_factories, salt_syndic_master_config):
root_dir = salt_factories._get_root_dir_for_daemon("master")
conf_dir = root_dir.join("conf").ensure(dir=True)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.CONF_DIR, "master")) as rfh:
config_defaults = yaml.deserialize(rfh.read())
tests_known_hosts_file = root_dir.join("salt_ssh_known_hosts").strpath
with salt.utils.files.fopen(tests_known_hosts_file, "w") as known_hosts:
known_hosts.write("")
config_defaults["root_dir"] = root_dir.strpath
config_defaults["known_hosts_file"] = tests_known_hosts_file
config_defaults["syndic_master"] = "localhost"
config_defaults["transport"] = request.config.getoption("--transport")
config_defaults["reactor"] = [
{"salt/test/reactor": [os.path.join(RUNTIME_VARS.FILES, "reactor-test.sls")]}
]
config_overrides = {"interface": "0.0.0.0"}
ext_pillar = []
if salt.utils.platform.is_windows():
ext_pillar.append(
{"cmd_yaml": "type {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
)
else:
ext_pillar.append(
{"cmd_yaml": "cat {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
)
ext_pillar.append(
{
"file_tree": {
"root_dir": os.path.join(RUNTIME_VARS.PILLAR_DIR, "base", "file_tree"),
"follow_dir_links": False,
"keep_newline": True,
}
}
)
config_overrides["pillar_opts"] = True
    # We need to copy the extension modules into the new master root_dir or
    # their configured path will be prefixed by the new root_dir
extension_modules_path = root_dir.join("extension_modules").strpath
if not os.path.exists(extension_modules_path):
shutil.copytree(
os.path.join(RUNTIME_VARS.FILES, "extension_modules"),
extension_modules_path,
)
# Copy the autosign_file to the new master root_dir
autosign_file_path = root_dir.join("autosign_file").strpath
shutil.copyfile(
os.path.join(RUNTIME_VARS.FILES, "autosign_file"), autosign_file_path
)
# all read, only owner write
autosign_file_permissions = (
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
)
os.chmod(autosign_file_path, autosign_file_permissions)
config_overrides.update(
{
"ext_pillar": ext_pillar,
"extension_modules": extension_modules_path,
"file_roots": {
"base": [
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(RUNTIME_VARS.FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
],
},
"pillar_roots": {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
],
"prod": [RUNTIME_VARS.TMP_PRODENV_PILLAR_TREE],
},
}
)
# Let's copy over the test cloud config files and directories into the running master config directory
for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
if not entry.startswith("cloud"):
continue
source = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
dest = conf_dir.join(entry).strpath
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copyfile(source, dest)
return salt_factories.configure_master(
request,
"master",
master_of_masters_id="syndic_master",
config_defaults=config_defaults,
config_overrides=config_overrides,
)
@pytest.fixture(scope="session")
def salt_minion_config(request, salt_factories, salt_master_config):
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.CONF_DIR, "minion")) as rfh:
config_defaults = yaml.deserialize(rfh.read())
config_defaults["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
config_defaults["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
config_defaults["transport"] = request.config.getoption("--transport")
config_overrides = {
"file_roots": {
"base": [
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(RUNTIME_VARS.FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
],
},
"pillar_roots": {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
],
"prod": [RUNTIME_VARS.TMP_PRODENV_PILLAR_TREE],
},
}
virtualenv_binary = _get_virtualenv_binary_path()
if virtualenv_binary:
config_overrides["venv_bin"] = virtualenv_binary
return salt_factories.configure_minion(
request,
"minion",
master_id="master",
config_defaults=config_defaults,
config_overrides=config_overrides,
)
@pytest.fixture(scope="session")
def salt_sub_minion_config(request, salt_factories, salt_master_config):
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.CONF_DIR, "sub_minion")
) as rfh:
config_defaults = yaml.deserialize(rfh.read())
config_defaults["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
config_defaults["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
config_defaults["transport"] = request.config.getoption("--transport")
config_overrides = {
"file_roots": {
"base": [
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(RUNTIME_VARS.FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
],
},
"pillar_roots": {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
],
"prod": [RUNTIME_VARS.TMP_PRODENV_PILLAR_TREE],
},
}
virtualenv_binary = _get_virtualenv_binary_path()
if virtualenv_binary:
config_overrides["venv_bin"] = virtualenv_binary
return salt_factories.configure_minion(
request,
"sub_minion",
master_id="master",
config_defaults=config_defaults,
config_overrides=config_overrides,
)
@pytest.hookspec(firstresult=True)
def pytest_saltfactories_syndic_configuration_defaults(
request, factories_manager, root_dir, syndic_id, syndic_master_port
):
"""
    Hook which should return a dictionary tailored for the provided syndic_id with 3 keys:
    * `master`: The default config for the master running along with the syndic
    * `minion`: The default config for the minion running along with the syndic
    * `syndic`: The default config for the syndic itself
    Stops at the first non-None result
"""
factory_opts = {"master": None, "minion": None, "syndic": None}
if syndic_id == "syndic":
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.CONF_DIR, "syndic")
) as rfh:
opts = yaml.deserialize(rfh.read())
opts["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
opts["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
opts["transport"] = request.config.getoption("--transport")
factory_opts["syndic"] = opts
return factory_opts
@pytest.hookspec(firstresult=True)
def pytest_saltfactories_syndic_configuration_overrides(
request, factories_manager, syndic_id, config_defaults
):
"""
    Hook which should return a dictionary tailored for the provided syndic_id.
    This dictionary will override the default_options dictionary.
    The returned dictionary should contain 3 keys:
    * `master`: The config overrides for the master running along with the syndic
    * `minion`: The config overrides for the minion running along with the syndic
    * `syndic`: The config overrides for the syndic itself
    The `default_options` parameter may be None or have 3 keys, `master`, `minion`, `syndic`,
    which will contain the default options for each of the daemons.
    Stops at the first non-None result
"""
@pytest.fixture(scope="session", autouse=True)
def bridge_pytest_and_runtests(
reap_stray_processes,
base_env_state_tree_root_dir,
prod_env_state_tree_root_dir,
base_env_pillar_tree_root_dir,
prod_env_pillar_tree_root_dir,
salt_factories,
salt_syndic_master_config,
salt_syndic_config,
salt_master_config,
salt_minion_config,
salt_sub_minion_config,
):
# Make sure unittest2 uses the pytest generated configuration
RUNTIME_VARS.RUNTIME_CONFIGS["master"] = freeze(salt_master_config)
RUNTIME_VARS.RUNTIME_CONFIGS["minion"] = freeze(salt_minion_config)
RUNTIME_VARS.RUNTIME_CONFIGS["sub_minion"] = freeze(salt_sub_minion_config)
RUNTIME_VARS.RUNTIME_CONFIGS["syndic_master"] = freeze(salt_syndic_master_config)
RUNTIME_VARS.RUNTIME_CONFIGS["syndic"] = freeze(salt_syndic_config)
RUNTIME_VARS.RUNTIME_CONFIGS["client_config"] = freeze(
salt.config.client_config(salt_master_config["conf_file"])
)
# Make sure unittest2 classes know their paths
RUNTIME_VARS.TMP_ROOT_DIR = salt_factories.root_dir.realpath().strpath
RUNTIME_VARS.TMP_CONF_DIR = os.path.dirname(salt_master_config["conf_file"])
RUNTIME_VARS.TMP_MINION_CONF_DIR = os.path.dirname(salt_minion_config["conf_file"])
RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR = os.path.dirname(
salt_sub_minion_config["conf_file"]
)
RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR = os.path.dirname(
salt_syndic_master_config["conf_file"]
)
RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR = os.path.dirname(
salt_syndic_config["conf_file"]
)
# Only allow star importing the functions defined in this module
__all__ = [
name
for (name, func) in locals().items()
if getattr(func, "__module__", None) == __name__
]
|
nilq/baby-python
|
python
|
import torch
import numpy as np
def colormap(N=256):
def bitget(byteval, idx):
return ((byteval & (1 << idx)) != 0)
dtype = 'uint8'
cmap = []
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7-j)
g = g | (bitget(c, 1) << 7-j)
b = b | (bitget(c, 2) << 7-j)
c = c >> 3
cmap.append((r, g, b))
return cmap
"""
Python implementation of the color map function for the PASCAL VOC data set.
Official Matlab version can be found in the PASCAL VOC devkit
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
"""
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def labelcolormap(N):
cmap = np.zeros((N, 3), dtype=np.uint8)
for i in range(N):
r = 0
g = 0
b = 0
id = i
for j in range(7):
str_id = uint82bin(id)
r = r ^ (np.uint8(str_id[-1]) << (7-j))
g = g ^ (np.uint8(str_id[-2]) << (7-j))
b = b ^ (np.uint8(str_id[-3]) << (7-j))
id = id >> 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
return cmap
class Colorize(object):
def __init__(self, n=22):
        self.cmap = labelcolormap(n)
self.cmap = torch.from_numpy(self.cmap[:n])
def __call__(self, gray_image):
size = gray_image.size()
color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
for label in range(0, len(self.cmap)):
mask = (label == gray_image[0]).cpu()
color_image[0][mask] = self.cmap[label][0]
color_image[1][mask] = self.cmap[label][1]
color_image[2][mask] = self.cmap[label][2]
return color_image
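# --- Usage sketch (not part of the original module) ---------------------------
# Turn a single-channel label map into an RGB image with the palette above.
# The 1x4x4 random label tensor is made up purely for illustration.
if __name__ == '__main__':
    gray = torch.randint(0, 22, (1, 4, 4), dtype=torch.long)  # fake label map
    rgb = Colorize(n=22)(gray)  # ByteTensor of shape (3, 4, 4)
    print(rgb.shape)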
|
nilq/baby-python
|
python
|
"""
Write a program that reads a student's two grades and computes the average,
showing a message at the end according to the average achieved:
— Average below 5.0: FAILED (REPROVADO)
— Average between 5.0 and 6.9: RETAKE (RECUPERAÇÃO)
— Average 7.0 or higher: PASSED (APROVADO)
"""
nt1 = float(input('Enter the grade of the first exam: '))
nt2 = float(input('Enter the grade of the second exam: '))
media = (nt1 + nt2) / 2
print('The student\'s average is \033[32m{:.2f}\033[m'.format(media))
if media < 5:
    print('The student is \033[31mFAILED!\033[m')
elif 5 <= media < 7:
    print('The student is in \033[33mRETAKE\033[m')
else:
    print('The student is \033[34mPASSED\033[m')
|
nilq/baby-python
|
python
|
STARS = {"Alpheratz": {'sidereal': '357d41.7', 'declination': '29d10.9'},
"Ankaa": {'sidereal': '353d14.1', 'declination': '-42d13.4'},
"Schedar": {'sidereal': '349d38.4', 'declination': '56d37.7'},
"Diphda": {'sidereal': '348d54.1', 'declination': '-17d54.1'},
"Achernar": {'sidereal': '335d25.5', 'declination': '-57d09.7'},
"Hamal": {'sidereal': '327d58.7', 'declination': '23d32.3'},
"Polaris": {'sidereal': '316d41.3', 'declination': '89d20.1'},
"Akamar": {'sidereal': '315d16.8', 'declination': '-40d14.8'},
"Menkar": {'sidereal': '314d13.0', 'declination': '4d09.0'},
"Mirfak": {'sidereal': '308d37.4', 'declination': '49d55.1'},
"Aldebaran": {'sidereal': '290d47.1', 'declination': '16d32.3'},
"Rigel": {'sidereal': '281d10.1', 'declination': '-8d11.3'},
"Capella": {'sidereal': '280d31.4', 'declination': '46d00.7'},
"Bellatrix": {'sidereal': '278d29.8', 'declination': '6d21.6'},
"Elnath": {'sidereal': '278d10.1', 'declination': '28d37.1'},
"Alnilam": {'sidereal': '275d44.3', 'declination': '-1d11.8'},
"Betelgeuse": {'sidereal': '270d59.1', 'declination': '7d24.3'},
"Canopus": {'sidereal': '263d54.8', 'declination': '-52d42.5'},
"Sirius": {'sidereal': '258d31.7', 'declination': '-16d44.3'},
"Adara": {'sidereal': '255d10.8', 'declination': '-28d59.9'},
"Procyon": {'sidereal': '244d57.5', 'declination': '5d10.9'},
"Pollux": {'sidereal': '243d25.2', 'declination': '27d59.0'},
"Avior": {'sidereal': '234d16.6', 'declination': '-59d33.7'},
"Suhail": {'sidereal': '222d50.7', 'declination': '-43d29.8'},
"Miaplacidus": {'sidereal': '221d38.4', 'declination': '-69d46.9'},
"Alphard": {'sidereal': '217d54.1', 'declination': '-8d43.8'},
"Regulus": {'sidereal': '207d41.4', 'declination': ''},
"Dubhe": {'sidereal': '193d49.4', 'declination': '61d39.5'},
"Denebola": {'sidereal': '182d31.8', 'declination': '14d28.9'},
"Gienah": {'sidereal': '175d50.4', 'declination': '-17d37.7'},
"Acrux": {'sidereal': '173d07.2', 'declination': '-63d10.9'},
"Gacrux": {'sidereal': '171d58.8', 'declination': '-57d11.9'},
"Alioth": {'sidereal': '166d19.4', 'declination': '55d52.1'},
"Spica": {'sidereal': '158d29.5', 'declination': '-11d14.5'},
"Alcaid": {'sidereal': '152d57.8', 'declination': '49d13.8'},
"Hadar": {'sidereal': '148d45.5', 'declination': '-60d26.6'},
"Menkent": {'sidereal': '148d05.6', 'declination': '-36d26.6'},
"Arcturus": {'sidereal': '145d54.2', 'declination': '19d06.2'},
"Rigil Kent.": {'sidereal': '139d49.6', 'declination': '-60d53.6'},
"Zubenelg.": {'sidereal': '137d03.7', 'declination': '-16d06.3'},
"Kochab": {'sidereal': '137d21.0', 'declination': '74d05.2'},
"Alphecca": {'sidereal': '126d09.9', 'declination': '26d39.7'},
"Antares": {'sidereal': '112d24.4', 'declination': '-26d27.8'},
"Atria": {'sidereal': '107d25.2', 'declination': '-69d03.0'},
"Sabik": {'sidereal': '102d10.9', 'declination': '-15d44.4'},
"Shaula": {'sidereal': '96d20.0', 'declination': '-37d06.6'},
"Rasalhague": {'sidereal': '96d05.2', 'declination': '12d33.1'},
"Etamin": {'sidereal': '90d45.9', 'declination': '51d29.3'},
"Kaus Aust.": {'sidereal': '83d41.9', 'declination': '-34d22.4'},
"Vega": {'sidereal': '80d38.2', 'declination': '38d48.1'},
"Nunki": {'sidereal': '75d56.6', 'declination': '-26d16.4'},
"Altair": {'sidereal': '62d06.9', 'declination': '8d54.8'},
"Peacock": {'sidereal': '53d17.2', 'declination': '-56d41.0'},
"Deneb": {'sidereal': '49d30.7', 'declination': '45d20.5'},
"Enif": {'sidereal': '33d45.7', 'declination': '9d57.0'},
"Alnair": {'sidereal': '27d42.0', 'declination': '-46d53.1'},
"Fomalhaut": {'sidereal': '15d22.4', 'declination': '-29d32.3'},
"Scheat": {'sidereal': '13d51.8', 'declination': '28d10.3'},
"Markab": {'sidereal': '13d36.7', 'declination': '15d17.6'},
}
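# --- Parsing sketch (not part of the original table) --------------------------
# The angles above use a 'DdM.M' encoding: whole degrees, then arc-minutes
# after the 'd'. This helper (a made-up name, for illustration) converts one
# such string to signed decimal degrees.
def angle_to_degrees(angle):
    degrees, minutes = angle.split('d')
    sign = -1.0 if degrees.startswith('-') else 1.0
    return float(degrees) + sign * float(minutes) / 60.0

# e.g. angle_to_degrees(STARS["Vega"]['sidereal']) -> 80.636...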
|
nilq/baby-python
|
python
|
import UnitTest
class WithTest(UnitTest.UnitTest):
class Dummy(object):
def __init__(self, value=None, gobble=False):
if value is None:
value = self
self.value = value
self.gobble = gobble
self.enter_called = False
self.exit_called = False
def __enter__(self):
self.enter_called = True
return self.value
def __exit__(self, *exc_info):
self.exit_called = True
self.exc_info = exc_info
if self.gobble:
return True
def testSimple(self):
with self.Dummy():
pass
with self.Dummy() as v:
pass
d = self.Dummy()
with d:
pass
self.assertTrue(d.enter_called)
self.assertTrue(d.exit_called)
z = None
with self.Dummy(10) as v:
z = v
self.assertEqual(z, 10)
self.fail("Bug #XXX - With statement fails for unknown reason")
return
d = self.Dummy(gobble=True) # Fails for unknown reason
with d:
raise Exception()
self.assertEqual(type(d.exc_info[1]), Exception)
def testNested(self):
l = None
with self.Dummy(1) as v1, self.Dummy(2) as v2, self.Dummy(3) as v3:
l = [v1, v2, v3]
self.assertEqual(l, [1,2,3])
l = None
with self.Dummy(1) as v1:
l = []
l.append(v1)
with self.Dummy(2) as v2:
l.append(v2)
with self.Dummy(3) as v3:
l.append(v3)
self.assertEqual(l, [1,2,3])
def testComplexAssign(self):
d = {1: [0, 1, 2]}
with self.Dummy('z') as d[1]:
self.assertEqual(d, {1:'z'})
d = {1: [0, 1, 2]}
with self.Dummy('z') as d[1][0]:
self.assertEqual(d[1][0], 'z')
self.assertEqual(d, {1: ['z', 1, 2]})
d = {1: [0, 1, 2]}
with self.Dummy('z') as d.values()[0][1]:
self.assertEqual(d, {1: [0, 'z', 2]})
d = {1: [0, 1, 2]}
with self.Dummy(['a', 'b', 'c']) as (d[1][0], d[1][1], d[1][2]):
self.assertEqual(d, {1: ['a', 'b', 'c']})
d = {1: [0, 1, 2]}
with self.Dummy(['a', 'b', 'c']) as (d[1], d[2], d[3]):
self.assertEqual(d, {1:'a', 2:'b', 3:'c'})
def testFlowControl(self):
        # Hard to make these flow-control cases work correctly!
        # The compiler would need to walk the AST and track return/break/continue
        # statements that exit a with block.
"""
def return_stmt():
for i in range(10):
with self.Dummy():
if i == 2:
return i
self.assertEqual(return_stmt(), 2)
def break_stmt():
x = 0
for i in range(10):
with self.Dummy():
x = i
if i == 2:
break
return x
self.assertEqual(break_stmt(), 2)
def continue_stmt():
x = 0
for i in range(10):
x += 1
with self.Dummy():
continue
x += 100
return x
self.assertEqual(continue_stmt(), 10)
"""
|
nilq/baby-python
|
python
|
import sys
import os
import select
import socket
import errno
import logging
try:
BrokenPipeError
except NameError:
BrokenPipeError = None
def ignore_broken_pipe(fn, *args):
try:
return fn(*args)
except OSError as e:
if e.errno == errno.EPIPE:
return None
raise
except BrokenPipeError:
return None
class StdSocket:
"""
Fake socket to read from stdin and write to stdout
conforming to the interface specified at
http://docs.paramiko.org/en/1.15/api/transport.html
"""
timeout = None
def settimeout(self, timeout):
self.timeout = timeout
def send(self, string):
if sys.stdout.closed:
return 0
return os.write(sys.stdout.fileno(), string)
def recv(self, count):
if sys.stdin.closed:
return b''
r, w, x = select.select([sys.stdin], [], [], self.timeout)
if sys.stdin in r:
return os.read(sys.stdin.fileno(), count)
raise socket.timeout()
def close(self):
sys.stdin.close()
sys.stdout.close()
class Stream:
STDOUT = 0
STDERR = 1
def pipe(self, key, stream, other, size):
output = (self.ready(key, stream) and self.read(key, size))
if output:
other.write(key, output)
return output
class ProcessStream(Stream):
def __init__(self, process):
self.stdin = process.stdin
self.stdout = process.stdout
self.stderr = process.stderr
self.streams = [self.stdout, self.stderr]
def read(self, key, n):
return os.read(self.streams[key].fileno(), n)
def write(self, key, buf):
return ignore_broken_pipe(os.write, self.stdin.fileno(), buf)
def ready(self, key, stream):
return stream is self.streams[key]
class ChannelStream(Stream):
def __init__(self, channel):
self.channel = channel
self.streams = [channel]
self.func_map = [
[self.channel.recv, self.channel.sendall, self.channel.recv_ready],
[self.channel.recv_stderr, self.channel.sendall_stderr, self.channel.recv_stderr_ready],
]
def read(self, key, n):
return self.func_map[key][0](n)
def write(self, key, buf):
return self.func_map[key][1](buf)
def ready(self, key, stream):
return self.func_map[key][2]()
def pipe_streams(input, output, size=1024):
done = False
while not done:
r, w, x = select.select(input.streams + output.streams, [], [])
for stream in r:
if stream in output.streams:
stdout = output.pipe(Stream.STDOUT, stream, input, size)
stderr = output.pipe(Stream.STDERR, stream, input, size)
if not (stdout or stderr):
logging.debug('Output streams closed')
done = True
if stream in input.streams:
stdin = input.pipe(Stream.STDOUT, stream, output, size)
if not stdin:
logging.debug('Input streams closed')
done = True
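# --- Usage sketch (not part of the original module) ---------------------------
def _example_pipe_subprocess_to_channel(channel):
    """Hedged example: wire a local subprocess to an already-connected
    paramiko channel so bytes flow both ways until either side closes.
    Assumptions: 'channel' is a paramiko.Channel obtained elsewhere (e.g. via
    transport.open_session()) and 'cat' exists on the system."""
    import subprocess
    proc = subprocess.Popen(['cat'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # ProcessStream reads the child's stdout/stderr and writes its stdin;
    # ChannelStream maps the matching recv/sendall pairs of the channel.
    pipe_streams(ProcessStream(proc), ChannelStream(channel))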
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from sentry.api.base import Endpoint
from sentry.api.permissions import assert_perm
from sentry.models import Group, GroupBookmark
from rest_framework.response import Response
class GroupBookmarkEndpoint(Endpoint):
def post(self, request, group_id):
group = Group.objects.get(
id=group_id,
)
assert_perm(group, request.user, request.auth)
bookmark = GroupBookmark(
project=group.project,
group=group,
user=request.user,
)
bookmark.save()
return Response()
|
nilq/baby-python
|
python
|
from collections import deque
water_reserve = int(input())
names = deque()
while True:
name = input()
if name == "Start":
while True:
input_row = input()
if input_row.startswith("refill"):
                # add liters to water_reserve
water_reserve += int(input_row.split(" ")[1])
elif input_row == "End":
break
else:
asked_liters = int(input_row)
# check for availability
if asked_liters <= water_reserve:
water_reserve -= asked_liters
print(f"{names.popleft()} got water")
else:
print(f"{names.popleft()} must wait")
        # print how many liters of water are left
print(f"{water_reserve} liters left")
break
else:
names.append(name)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#==============================================================================
# python3_test.py
#------------------------------------------------------------------------------
# description :This is a basic python script example with a file header
# author :l-althueser
#
# usage :python python3_test.py
# python_version :3.5.1
#
# changes/notes :20160425 :Added file header.
# :20160426 :Added ability to print "Hello World!"
#==============================================================================
# The following line will be printed
print("Hello World.")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from nose.tools import assert_equal, assert_true, assert_almost_equal, nottest, assert_false
from os.path import isdir,isfile
from os import listdir
import os
import sys
import subprocess
import hashlib
import pandas as p
file_path = os.path.realpath(__file__)
test_dir_path = os.path.dirname(file_path)
tmp_dir_path = test_dir_path + '/nose_tmp_output'
tmp_basename_dir = tmp_dir_path + '/1'
tmp_basename_dir2 = tmp_dir_path + '/2'
tmp_basename_file = tmp_dir_path + '/file'
CWD = os.getcwd()
class TestCMD(object):
def setUp(self):
"""Create temporary dir if necessary,
otherwise clear contents of it"""
if not isdir(tmp_dir_path):
os.mkdir(tmp_dir_path)
self.tearDown()
os.mkdir(tmp_basename_dir)
os.chdir(test_dir_path)
def tearDown(self):
"""remove temporary output files"""
for d in os.listdir(tmp_dir_path):
d_path = os.path.join(tmp_dir_path,d)
try:
os.remove(d_path)
            except OSError:
for f in os.listdir(d_path):
f_path = os.path.join(d_path,f)
os.remove(f_path)
os.rmdir(d_path)
assert os.listdir(tmp_dir_path) == []
def run_command(self,cov_file='coverage',comp_file='composition.fa',
tags=[],basename='nose_tmp_output/1'):
call_string = "concoct --coverage_file test_data/{0} --composition_file test_data/{1} --basename {2} -c 10 --no_total_coverage 2> /dev/null".format(cov_file,comp_file,basename)
for tag in tags:
call_string += " " + tag
self.c = 0 # Exit code
try:
self.op = subprocess.check_output(
call_string,
shell=True)
except subprocess.CalledProcessError as exc:
self.c = exc.returncode
def file_len(self,fh):
i=0
with open(fh) as f:
for i, l in enumerate(f):
pass
return i + 1
    def md5sum(self,fh):
        with open(fh, 'rb') as infile:
            content = infile.read()
        m = hashlib.md5()
        m.update(content)
        return m.hexdigest()
def test_no_errors(self):
self.run_command()
assert_equal(self.c,0,
msg = "Command exited with nonzero status")
def test_directory_creation(self):
self.run_command()
assert_true(isdir(tmp_basename_dir),
msg = "Temporary directory not created")
m_time_first = os.path.getmtime(tmp_basename_dir+'/clustering_gt1000.csv')
# Rerun the concoct and see that the directory is overwritten
self.run_command()
m_time_second = os.path.getmtime(tmp_basename_dir+'/clustering_gt1000.csv')
assert_true(m_time_first != m_time_second,
msg = "basename dir is not overwritten")
L = listdir(tmp_dir_path)
        assert_true(len(L) == 1,
                    msg = "Multiple output directories or files were created")
# File creation
self.run_command(basename=tmp_basename_file)
assert_true(isfile(tmp_basename_file+'_clustering_gt1000.csv'),
msg = "Clustering file is not created, when file is used as basename")
L = listdir(tmp_basename_dir)
assert_true(len(L) == 6,
msg = "Wrong number of output files, observed {0}".format(L))
def test_prior_to_clustering(self):
self.run_command()
d_p = os.path.join(tmp_basename_dir)
assert_true(isfile(d_p+ '/args.txt'),
msg="Args file is not created")
assert_true(isfile(d_p+ '/log.txt'),
msg="Log file is not created")
assert_true(isfile(d_p+ '/original_data_gt1000.csv'),
msg="Original data file is not created")
assert_true(isfile(d_p+ '/PCA_transformed_data_gt1000.csv'),
msg="PCA transformed data file is not created")
def test_output_files_creation(self):
# dir as basename
self.run_command()
d_p = os.path.join(tmp_basename_dir)
assert_true(
isfile(d_p+ '/clustering_gt1000.csv'),
msg='Large contigs clustering file is not created'
)
assert_true(
isfile(d_p+ '/PCA_transformed_data_gt1000.csv'),
msg='PCA file is not created'
)
assert_true(
isfile(d_p+ '/original_data_gt1000.csv'),
msg='Original data file is not created'
)
assert_true(
isfile(d_p+ '/log.txt'),
msg='Log file is not created'
)
# dir as file
self.run_command(basename=tmp_basename_file)
d_p = tmp_basename_file +'_'
assert_true(
isfile(d_p+ 'clustering_gt1000.csv'),
msg='Large contigs clustering file is not created'
)
assert_true(
isfile(d_p+ 'PCA_transformed_data_gt1000.csv'),
msg='PCA file is not created'
)
assert_true(
isfile(d_p+ 'original_data_gt1000.csv'),
msg='Original data file is not created'
)
assert_true(
isfile(d_p+ 'log.txt'),
msg='Log file is not created'
)
def test_threshold_functionality(self):
self.run_command()
d_p = tmp_basename_dir
od_1 = d_p+'/original_data_gt1000.csv'
clust_gt_1 = d_p+'/clustering_gt1000.csv'
odl_1 = self.file_len(od_1)
clust_gtl_1= self.file_len(clust_gt_1)
self.run_command(comp_file='composition_some_shortened.fa',
basename=tmp_basename_dir2+'/')
d_p2 = tmp_basename_dir2
od_2 = d_p2+'/original_data_gt1000.csv'
clust_gt_2 = d_p2+'/clustering_gt1000.csv'
odl_2 = self.file_len(od_2)
clust_gtl_2= self.file_len(clust_gt_2)
assert_true(odl_1!=odl_2,
msg='Original data have the same lengths')
assert_true(clust_gtl_1!=clust_gtl_2,
msg='Filtered clustering files have the same lengths')
def test_logging(self):
self.run_command()
with open(tmp_basename_dir+'/log.txt','r') as log:
log_content = log.read()
assert_true(len(log_content)>10,
"Log content is too small")
pca_report = [row for row in log_content.split('\n') if 'Performed PCA, resulted in ' in row][0]
pca_dimensions_log = int(pca_report.split()[-2])
with open(tmp_basename_dir+'/PCA_transformed_data_gt1000.csv', 'r') as pca_comps:
header = pca_comps.readlines()[0]
header = header.strip()
last_dim = int(header.split(',')[-1])
pca_dimensions = last_dim + 1
assert_equal(pca_dimensions, pca_dimensions_log)
def test_seed(self):
#Test default behaviour, seed = 11
self.run_command()
first_time = os.path.getmtime(tmp_basename_dir+'/clustering_gt1000.csv')
with open(tmp_basename_dir+'/clustering_gt1000.csv','r') as clustering:
first_file=clustering.read()
self.run_command()
second_time = os.path.getmtime(tmp_basename_dir+'/clustering_gt1000.csv')
with open(tmp_basename_dir+'/clustering_gt1000.csv','r') as clustering:
second_file=clustering.read()
assert_true(not (first_time==second_time),
msg='clustering_gt1000.csv did not change')
assert_true(first_file == second_file,
msg='Clustering outcomes were not the same with same seeds')
#Should be equal to both above since default seed is 1
self.run_command(tags=["--seed","1"])
first_time = os.path.getmtime(tmp_basename_dir+'/clustering_gt1000.csv')
with open(tmp_basename_dir+'/clustering_gt1000.csv','r') as clustering:
first_file=clustering.read()
assert_true(not (first_time==second_time),
msg='clustering_gt1000.csv did not change')
assert_true(first_file == second_file,
msg='Clustering outcomes were not the same with same seeds')
        #Test that seed 0 picks a random seed (results differ between runs)
self.run_command(tags=['--seed','0'])
first_time = os.path.getmtime(tmp_basename_dir+'/clustering_gt1000.csv')
with open(tmp_basename_dir+'/clustering_gt1000.csv','r') as clustering:
first_file=clustering.read()
#Should give different clustering
self.run_command(tags=['--seed','0'])
second_time = os.path.getmtime(tmp_basename_dir+'/clustering_gt1000.csv')
with open(tmp_basename_dir+'/clustering_gt1000.csv','r') as clustering:
second_file=clustering.read()
assert_true(not (first_time==second_time),
msg='clustering_gt1000.csv did not change')
assert_true(not (first_file == second_file),
msg='Clustering outcomes were the same with random seeds')
        #Test that two different seeds give different clustering
#Should give clustering 2
self.run_command(tags=['--seed','2'])
first_time = os.path.getmtime(tmp_basename_dir+'/clustering_gt1000.csv')
with open(tmp_basename_dir+'/clustering_gt1000.csv','r') as clustering:
first_file=clustering.read()
#Should give clustering 3
self.run_command(tags=['--seed','3'])
second_time = os.path.getmtime(tmp_basename_dir+'/clustering_gt1000.csv')
with open(tmp_basename_dir+'/clustering_gt1000.csv','r') as clustering:
second_file=clustering.read()
assert_true(not (first_time==second_time),
msg='clustering_gt1000.csv did not change')
assert_true(not (first_file == second_file),
msg='Clustering outcomes were the same with different seeds')
def test_log_coverage(self):
self.run_command()
original_coverage_data_path = os.path.join(tmp_basename_dir,'original_data_gt1000.csv')
df = p.io.parsers.read_csv(original_coverage_data_path,index_col=0,sep=',')
true_pseudo_cov = -1.3143
calc_pseudo_cov = df.sample_1[0]
assert_almost_equal(true_pseudo_cov,calc_pseudo_cov,places=4)
def test_log_coverage_no_cov_normalization(self):
self.run_command(tags=["--no_cov_normalization"])
original_coverage_data_path = os.path.join(tmp_basename_dir,'original_data_gt1000.csv')
df = p.io.parsers.read_csv(original_coverage_data_path,index_col=0,sep=',')
true_pseudo_cov = -1.8107
calc_pseudo_cov = df.sample_1[0]
assert_almost_equal(true_pseudo_cov,calc_pseudo_cov,places=4)
def test_big_file_validation(self):
""" Run Validate.pl on the result files after running a larger input
file and make sure the statistics are good enough. """
self.run_command(cov_file='large_contigs/coverage_table.tsv',
comp_file='large_contigs/contigs.fa',
basename=os.path.join(tmp_dir_path, 'large_contigs/'))
validate_path = os.path.join(test_dir_path, '..', 'scripts', 'Validate.pl')
clustering_reference = os.path.join(test_dir_path, 'test_data', 'large_contigs',
'clustering_gt1000_taxassign.csv')
clustering_file = os.path.join(tmp_dir_path,'large_contigs',
'clustering_gt1000.csv')
assert_true(isfile(validate_path))
assert_true(isfile(clustering_reference))
assert_true(isfile(clustering_file))
validate_so = subprocess.check_output(['perl', validate_path,
'--sfile={}'.format(clustering_reference),
'--cfile={}'.format(clustering_file) ])
print("Results for large clustering file: ")
print(validate_so)
headers = validate_so.split(b'\n')[0].split(b'\t')
stats = validate_so.split(b'\n')[1].split(b'\t')
stats_dict = dict(list(zip(headers, stats)))
assert_true(float(stats_dict[b'AdjRand']) > 0.85,
msg=("Insufficient adjusted rand index "
"reached, requires > 0.85"))
assert_true(float(stats_dict[b'Prec.']) > 0.95,
msg=("Insufficient precision reached, "
"requires > /0.95"))
assert_true(float(stats_dict[b'Rec.']) > 0.90,
msg=("Insufficient recall reached, "
"requires > 0.90"))
conf_file = os.path.join(test_dir_path, 'Conf.csv')
if isfile(conf_file):
os.remove(conf_file)
def test_one_contig_threshold(self):
"""Make sure we don't execute clustering of 0 or 1 contig"""
# Make sure the error code is not set before running command
assert_false(hasattr(self,"c"))
# Longest contig is 33356 so we put the threshold just below
self.run_command(tags=["--length_threshold 33350"])
# The command should have failed with code 255
assert_true(hasattr(self,"c"))
assert_equal(self.c,255)
|
nilq/baby-python
|
python
|
import os
import Threshold
import UsersBuilding
import Cluster
import configparser
import json
from collections import defaultdict
def get_project_path(file_name="README.md", actual_path=None):
"""
:param file_name: name of a file in the top level of the project
:param actual_path: actual path, if not specified its calculated
:return: global path of the project
"""
if not actual_path:
actual_path = os.path.dirname(os.path.abspath(file_name))
if os.path.isfile(actual_path+"/"+file_name):
return actual_path
else:
return get_project_path(file_name, os.path.abspath(os.path.join(actual_path, os.pardir)))
def init(paths_config="paths", exec_config="exec"):
"""
:param paths_config: name of paths config file
:param exec_config: name of exec config file
:return: none
"""
global actual_day, project_path, config_paths, config_exec, save_plots, save_jsons, save_csvs
# string to know the actual day through all files
actual_day = ""
project_path = get_project_path()+"/"
# Read the config file
config_paths = configparser.ConfigParser()
config_paths.read(project_path+'src/movements_characterization/configs/'+paths_config+'.ini')
config_exec = configparser.ConfigParser()
config_exec.read(project_path+'src/movements_characterization/configs/'+exec_config+'.ini')
save_jsons = config_exec.getboolean('aux_files','json_files')
save_plots = config_exec.getboolean('aux_files','plots')
save_csvs = config_exec.getboolean('aux_files','csvs')
def new_global(name, value):
globals()[name] = value
def get_zone_index(name):
return zones_names.index(name)
def get_data_from_json_or_calc(wanted_data, call_param = None):
dir_route = get_route_according_validation('final_data')
day = actual_day
file_route = dir_route+day+".json"
def calcValue():
if wanted_data=="Threshold": return Threshold.get_optimal_threshold(call_param)
elif wanted_data=="n_clusters_distortion" or wanted_data=="n_clusters_inertia": return Cluster.get_optimal_clusters(call_param)
elif wanted_data=="UsrCreationTime": return UsersBuilding.calc_usr_creation_time(call_param)
if os.path.isfile(file_route):
# Opening JSON file
f = open(file_route,)
# returns JSON object as a dictionary
dict_data = json.load(f)
try:
value = dict_data[day][wanted_data]
print(f"{wanted_data} found in memory, using it.")
f.close()
return value
except KeyError:
print(f"{wanted_data} not in memory, calculating...")
dict_data = defaultdict(dict, dict_data)
value = calcValue()
dict_data[day][wanted_data] = value
save_to_json(dict_data, file_route)
f.close()
return value
else:
print("File with different processed data dont found, creating...")
dict_data = defaultdict(dict)
print(f"{wanted_data} not in memory, calculating...")
value = calcValue()
dict_data[day][wanted_data] = value
create_dir_if_not_exists(dir_route)
save_to_json(dict_data, file_route)
return value
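# --- Usage sketch (not part of the original module) ---------------------------
# get_data_from_json_or_calc() is a compute-or-cache helper: it first looks the
# value up in <final_data>/<actual_day>.json and only recomputes (and persists)
# on a miss. A call site might look like the commented line below; 'df' stands
# in for whatever parameter the underlying calculation expects and is made up
# here.
#
#   threshold = get_data_from_json_or_calc("Threshold", call_param=df)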
def add_data_to_json_data(data, day, param):
file_route = get_route_according_validation('final_data')+actual_day+".json"
# Opening JSON file
f = open(file_route,)
# returns JSON object as a dictionary
dict_data = json.load(f)
dict_data = defaultdict(dict, dict_data)
dict_data[day][param] = data
save_to_json(dict_data, file_route)
f.close()
def save_to_json(data, route):
with open(route, "w") as fp:
json.dump(data, fp, indent=3)
def read_json_file(path):
with open(path) as json_file:
aux = json.load(json_file)
return aux
def create_dir_if_not_exists(dir):
if not os.path.isdir(dir):
f_dir = dir.split("/")
size = len(f_dir)
for sub_dir in f_dir:
if sub_dir == ".." or sub_dir == "":
size -= 1
if size > 1:
os.makedirs(dir)
else:
os.mkdir(dir)
def get_route_according_validation(element):
if 'validation' in globals():
if validation:
return project_path+config_paths['GeneralDirs']['validation']+"level"+str(zone_level)+"/"+config_paths['SharedDirs'][element]
# other cases
return project_path+config_paths['GeneralDirs']['model_creation']+"level"+str(zone_level)+"/"+config_paths['SharedDirs'][element]
def get_zone_name_from_dict(ap_name, zones_dict):
for zone, zone_vector in zones_dict.items():
if ap_name in zone_vector:
return zone
return "rm"
def check_if_study_zone(ap_name, zones_dict):
if ap_name in zones_dict[active_father_zone]:
return "yes"
return "rm"
|
nilq/baby-python
|
python
|
import os
import logging
import pytest
log = logging.getLogger(__name__)
from .testutils import check_serialize_parse
def _get_test_files_formats():
skiptests = []
for f in os.listdir("test/n3"):
if f not in skiptests:
fpath = "test/n3/" + f
if f.endswith(".rdf"):
yield fpath, "xml"
elif f.endswith(".n3"):
yield fpath, "n3"
def all_n3_files():
skiptests = [
"test/n3/example-lots_of_graphs.n3", # only n3 can serialize QuotedGraph, no point in testing roundtrip
]
for fpath, fmt in _get_test_files_formats():
if fpath in skiptests:
log.debug("Skipping %s, known issue" % fpath)
else:
yield fpath, fmt
@pytest.mark.parametrize(
"fpath,fmt",
_get_test_files_formats(),
)
def test_n3_writing(fpath, fmt):
check_serialize_parse(fpath, fmt, "n3")
|
nilq/baby-python
|
python
|
import math
import torch
from torch.autograd import Variable
from core.model_tools.deformations.exponential import Exponential
from core.models.abstract_statistical_model import AbstractStatisticalModel
from core.models.model_functions import create_regular_grid_of_points, compute_sobolev_gradient
from core.observations.deformable_objects.deformable_multi_object import DeformableMultiObject
from in_out.array_readers_and_writers import *
from in_out.dataset_functions import create_template_metadata, compute_noise_dimension
# Settings() is used throughout this class; assumed import path, consistent with the support.* imports below.
from support.utilities.general_settings import Settings
from support.probability_distributions.inverse_wishart_distribution import InverseWishartDistribution
from support.probability_distributions.multi_scalar_inverse_wishart_distribution import \
MultiScalarInverseWishartDistribution
from support.probability_distributions.normal_distribution import NormalDistribution
import logging
logger = logging.getLogger(__name__)
class BayesianAtlas(AbstractStatisticalModel):
"""
Bayesian atlas object class.
"""
####################################################################################################################
### Constructor:
####################################################################################################################
def __init__(self):
AbstractStatisticalModel.__init__(self)
self.template = DeformableMultiObject()
self.objects_name = []
self.objects_name_extension = []
self.objects_noise_dimension = []
self.multi_object_attachment = None
self.exponential = Exponential()
self.use_sobolev_gradient = True
self.smoothing_kernel_width = None
self.initial_cp_spacing = None
self.number_of_objects = None
self.number_of_control_points = None
self.bounding_box = None
# Dictionary of numpy arrays.
self.fixed_effects['template_data'] = None
self.fixed_effects['control_points'] = None
self.fixed_effects['covariance_momenta_inverse'] = None
self.fixed_effects['noise_variance'] = None
# Dictionary of probability distributions.
self.priors['covariance_momenta'] = InverseWishartDistribution()
self.priors['noise_variance'] = MultiScalarInverseWishartDistribution()
# Dictionary of probability distributions.
self.individual_random_effects['momenta'] = NormalDistribution()
self.freeze_template = False
self.freeze_control_points = False
####################################################################################################################
### Encapsulation methods:
####################################################################################################################
# Template data ----------------------------------------------------------------------------------------------------
def get_template_data(self):
return self.fixed_effects['template_data']
def set_template_data(self, td):
self.fixed_effects['template_data'] = td
self.template.set_data(td)
# Control points ---------------------------------------------------------------------------------------------------
def get_control_points(self):
return self.fixed_effects['control_points']
def set_control_points(self, cp):
self.fixed_effects['control_points'] = cp
self.number_of_control_points = len(cp)
# Covariance momenta inverse ---------------------------------------------------------------------------------------
def get_covariance_momenta_inverse(self):
return self.fixed_effects['covariance_momenta_inverse']
def set_covariance_momenta_inverse(self, cmi):
self.fixed_effects['covariance_momenta_inverse'] = cmi
self.individual_random_effects['momenta'].set_covariance_inverse(cmi)
def set_covariance_momenta(self, cm):
self.set_covariance_momenta_inverse(np.linalg.inv(cm))
# Noise variance ---------------------------------------------------------------------------------------------------
def get_noise_variance(self):
return self.fixed_effects['noise_variance']
def set_noise_variance(self, nv):
self.fixed_effects['noise_variance'] = nv
# Full fixed effects -----------------------------------------------------------------------------------------------
def get_fixed_effects(self):
out = {}
if not self.freeze_template:
for key, value in self.fixed_effects['template_data'].items():
out[key] = value
if not self.freeze_control_points:
out['control_points'] = self.fixed_effects['control_points']
return out
def set_fixed_effects(self, fixed_effects):
if not self.freeze_template:
template_data = {key: fixed_effects[key] for key in self.fixed_effects['template_data'].keys()}
self.set_template_data(template_data)
if not self.freeze_control_points:
self.set_control_points(fixed_effects['control_points'])
####################################################################################################################
### Public methods:
####################################################################################################################
def update(self):
"""
Final initialization steps.
"""
self.number_of_objects = len(self.template.object_list)
self.bounding_box = self.template.bounding_box
self.set_template_data(self.template.get_data())
if self.fixed_effects['control_points'] is None:
self._initialize_control_points()
else:
self._initialize_bounding_box()
self._initialize_momenta()
self._initialize_noise_variance()
def compute_log_likelihood(self, dataset, population_RER, individual_RER, mode='complete', with_grad=False):
"""
        Compute the log-likelihood of the dataset, given parameters fixed_effects and random effects realizations
        population_RER and individual_RER.
        Start by updating the class 1 fixed effects.
        :param dataset: LongitudinalDataset instance
        :param population_RER: Dictionary of population random effects realizations.
        :param individual_RER: Dictionary of individual random effects realizations.
        :param with_grad: Flag that indicates whether the gradient should be returned as well.
:return:
"""
# Initialize: conversion from numpy to torch -------------------------------------------------------------------
template_data, template_points, control_points = self._fixed_effects_to_torch_tensors(with_grad)
momenta = self._individual_RER_to_torch_tensors(individual_RER, with_grad and mode == 'complete')
# Deform, update, compute metrics ------------------------------------------------------------------------------
residuals = self._compute_residuals(dataset, template_data, template_points, control_points, momenta)
# Update the fixed effects only if the user asked for the complete log likelihood.
if mode == 'complete':
sufficient_statistics = self.compute_sufficient_statistics(dataset, population_RER, individual_RER,
residuals=residuals)
self.update_fixed_effects(dataset, sufficient_statistics)
# Compute the attachment, with the updated noise variance parameter in the 'complete' mode.
attachments = self._compute_individual_attachments(residuals)
attachment = torch.sum(attachments)
# Compute the regularity terms according to the mode.
regularity = 0.0
if mode == 'complete':
regularity = self._compute_random_effects_regularity(momenta)
regularity += self._compute_class1_priors_regularity()
if mode in ['complete', 'class2']:
regularity += self._compute_class2_priors_regularity(template_data, control_points)
# Compute gradient if needed -----------------------------------------------------------------------------------
if with_grad:
total = regularity + attachment
total.backward()
gradient = {}
gradient_numpy = {}
# Template data.
if not self.freeze_template:
if 'landmark_points' in template_data.keys():
gradient['landmark_points'] = template_points['landmark_points'].grad
if 'image_intensities' in template_data.keys():
gradient['image_intensities'] = template_data['image_intensities'].grad
# for key, value in template_data.items():
# if value.grad is not None:
# gradient[key] = value.grad
if self.use_sobolev_gradient and 'landmark_points' in gradient.keys():
gradient['landmark_points'] = compute_sobolev_gradient(
gradient['landmark_points'], self.smoothing_kernel_width, self.template)
# Control points.
if not self.freeze_control_points: gradient['control_points'] = control_points.grad
# Individual effects.
if mode == 'complete': gradient['momenta'] = momenta.grad
# Convert to numpy.
for (key, value) in gradient.items(): gradient_numpy[key] = value.data.cpu().numpy()
# Return as appropriate.
if mode in ['complete', 'class2']:
return attachment.detach().cpu().numpy(), regularity.detach().cpu().numpy(), gradient_numpy
elif mode == 'model':
return attachments.detach().cpu().numpy(), gradient_numpy
else:
if mode in ['complete', 'class2']:
return attachment.detach().cpu().numpy(), regularity.detach().cpu().numpy()
elif mode == 'model':
return attachments.detach().cpu().numpy()
def compute_sufficient_statistics(self, dataset, population_RER, individual_RER, residuals=None):
"""
Compute the model sufficient statistics.
"""
        if residuals is None:
            # Initialize: conversion from numpy to torch ---------------------------------------------------------------
            # (The helper performs the dict-aware numpy -> torch conversion of the template data, template points
            # and control points; momenta are converted without gradient tracking.)
            template_data, template_points, control_points = self._fixed_effects_to_torch_tensors(False)
            momenta = self._individual_RER_to_torch_tensors(individual_RER, False)
            # Compute residuals ----------------------------------------------------------------------------------------
            residuals = [torch.sum(residuals_i)
                         for residuals_i in self._compute_residuals(dataset, template_data, template_points,
                                                                    control_points, momenta)]
# Compute sufficient statistics --------------------------------------------------------------------------------
sufficient_statistics = {}
# Empirical momenta covariance.
momenta = individual_RER['momenta']
sufficient_statistics['S1'] = np.zeros((momenta[0].size, momenta[0].size))
for i in range(dataset.number_of_subjects):
sufficient_statistics['S1'] += np.dot(momenta[i].reshape(-1, 1), momenta[i].reshape(-1, 1).transpose())
# Empirical residuals variances, for each object.
sufficient_statistics['S2'] = np.zeros((self.number_of_objects,))
for k in range(self.number_of_objects):
sufficient_statistics['S2'][k] = residuals[k].detach().cpu().numpy()
# Finalization -------------------------------------------------------------------------------------------------
return sufficient_statistics
def update_fixed_effects(self, dataset, sufficient_statistics):
"""
Updates the fixed effects based on the sufficient statistics, maximizing the likelihood.
"""
# Covariance of the momenta update.
prior_scale_matrix = self.priors['covariance_momenta'].scale_matrix
prior_dof = self.priors['covariance_momenta'].degrees_of_freedom
        covariance_momenta = (sufficient_statistics['S1'] + prior_dof * np.transpose(prior_scale_matrix)) \
                             / (dataset.number_of_subjects + prior_dof)
self.set_covariance_momenta(covariance_momenta)
# Variance of the residual noise update.
noise_variance = np.zeros((self.number_of_objects,))
prior_scale_scalars = self.priors['noise_variance'].scale_scalars
prior_dofs = self.priors['noise_variance'].degrees_of_freedom
for k in range(self.number_of_objects):
            noise_variance[k] = (sufficient_statistics['S2'][k] + prior_scale_scalars[k] * prior_dofs[k]) \
                                / float(dataset.number_of_subjects * self.objects_noise_dimension[k] + prior_dofs[k])
self.set_noise_variance(noise_variance)
def initialize_template_attributes(self, template_specifications):
"""
Sets the Template, TemplateObjectsName, TemplateObjectsNameExtension, TemplateObjectsNorm,
TemplateObjectsNormKernelType and TemplateObjectsNormKernelWidth attributes.
"""
t_list, t_name, t_name_extension, t_noise_variance, t_multi_object_attachment = \
create_template_metadata(template_specifications)
self.template.object_list = t_list
self.objects_name = t_name
self.objects_name_extension = t_name_extension
self.multi_object_attachment = t_multi_object_attachment
self.template.update()
self.objects_noise_dimension = compute_noise_dimension(self.template, self.multi_object_attachment)
####################################################################################################################
### Private methods:
####################################################################################################################
def _compute_attachment(self, residuals):
"""
Fully torch.
"""
return torch.sum(self._compute_individual_attachments(residuals))
def _compute_individual_attachments(self, residuals):
"""
Fully torch.
"""
number_of_subjects = len(residuals)
attachments = Variable(torch.zeros((number_of_subjects,)).type(Settings().tensor_scalar_type),
requires_grad=False)
for i in range(number_of_subjects):
attachments[i] = - 0.5 * torch.sum(residuals[i] / Variable(
torch.from_numpy(self.fixed_effects['noise_variance']).type(Settings().tensor_scalar_type),
requires_grad=False))
return attachments
def _compute_random_effects_regularity(self, momenta):
"""
Fully torch.
"""
number_of_subjects = momenta.shape[0]
regularity = 0.0
# Momenta random effect.
for i in range(number_of_subjects):
regularity += self.individual_random_effects['momenta'].compute_log_likelihood_torch(momenta[i])
# Noise random effect.
for k in range(self.number_of_objects):
regularity -= 0.5 * self.objects_noise_dimension[k] * number_of_subjects \
* math.log(self.fixed_effects['noise_variance'][k])
return regularity
def _compute_class1_priors_regularity(self):
"""
Fully torch.
        Prior terms of the class 1 fixed effects, i.e. those for which we know a closed-form update. No derivative
        wrt those fixed effects will therefore be necessary.
"""
regularity = 0.0
# Covariance momenta prior.
regularity += self.priors['covariance_momenta'].compute_log_likelihood(
self.fixed_effects['covariance_momenta_inverse'])
# Noise variance prior.
regularity += self.priors['noise_variance'].compute_log_likelihood(self.fixed_effects['noise_variance'])
return regularity
def _compute_class2_priors_regularity(self, template_data, control_points):
"""
Fully torch.
        Prior terms of the class 2 fixed effects, i.e. those for which we do not know a closed-form update. Derivative
        wrt those fixed effects will therefore be necessary.
"""
regularity = 0.0
# Prior on template_data fixed effects (if not frozen). None implemented yet TODO.
if not self.freeze_template:
regularity += 0.0
# Prior on control_points fixed effects (if not frozen). None implemented yet TODO.
if not self.freeze_control_points:
regularity += 0.0
return regularity
def _compute_residuals(self, dataset, template_data, template_points, control_points, momenta):
"""
Core part of the ComputeLogLikelihood methods. Fully torch.
"""
# Initialize: cross-sectional dataset --------------------------------------------------------------------------
targets = dataset.deformable_objects
targets = [target[0] for target in targets]
# Deform -------------------------------------------------------------------------------------------------------
residuals = []
self.exponential.set_initial_template_points(template_points)
self.exponential.set_initial_control_points(control_points)
for i, target in enumerate(targets):
self.exponential.set_initial_momenta(momenta[i])
self.exponential.update()
deformed_points = self.exponential.get_template_points()
deformed_data = self.template.get_deformed_data(deformed_points, template_data)
residuals.append(self.multi_object_attachment.compute_distances(deformed_data, self.template, target))
return residuals
def _initialize_control_points(self):
"""
Initialize the control points fixed effect.
"""
if not Settings().dense_mode:
control_points = create_regular_grid_of_points(self.bounding_box, self.initial_cp_spacing)
else:
control_points = self.template.get_points()
self.set_control_points(control_points)
self.number_of_control_points = control_points.shape[0]
logger.info('Set of ' + str(self.number_of_control_points) + ' control points defined.')
def _initialize_momenta(self):
"""
Initialize the momenta fixed effect.
"""
self.individual_random_effects['momenta'].mean = \
np.zeros((self.number_of_control_points * Settings().dimension,))
self._initialize_covariance() # Initialize the prior and the momenta random effect.
def _initialize_covariance(self):
"""
Initialize the scale matrix of the inverse wishart prior, as well as the covariance matrix of the normal
random effect.
"""
assert self.exponential.kernel.kernel_width is not None
dimension = Settings().dimension # Shorthand.
rkhs_matrix = np.zeros((self.number_of_control_points * dimension, self.number_of_control_points * dimension))
for i in range(self.number_of_control_points):
for j in range(self.number_of_control_points):
cp_i = self.fixed_effects['control_points'][i, :]
cp_j = self.fixed_effects['control_points'][j, :]
kernel_distance = math.exp(
- np.sum((cp_j - cp_i) ** 2) / (self.exponential.kernel.kernel_width ** 2)) # Gaussian kernel.
for d in range(dimension):
rkhs_matrix[dimension * i + d, dimension * j + d] = kernel_distance
rkhs_matrix[dimension * j + d, dimension * i + d] = kernel_distance
self.priors['covariance_momenta'].scale_matrix = np.linalg.inv(rkhs_matrix)
self.set_covariance_momenta_inverse(rkhs_matrix)
def _initialize_noise_variance(self):
self.set_noise_variance(np.asarray(self.priors['noise_variance'].scale_scalars))
def _initialize_bounding_box(self):
"""
        Initialize the bounding box, which tightly encloses all template objects and the atlas control points.
Relevant when the control points are given by the user.
"""
assert (self.number_of_control_points > 0)
dimension = Settings().dimension
control_points = self.get_control_points()
for k in range(self.number_of_control_points):
for d in range(dimension):
if control_points[k, d] < self.bounding_box[d, 0]:
self.bounding_box[d, 0] = control_points[k, d]
elif control_points[k, d] > self.bounding_box[d, 1]:
self.bounding_box[d, 1] = control_points[k, d]
####################################################################################################################
### Private utility methods:
####################################################################################################################
def _fixed_effects_to_torch_tensors(self, with_grad):
"""
Convert the input fixed_effects into torch tensors.
"""
# Template data.
template_data = self.fixed_effects['template_data']
template_data = {key: Variable(torch.from_numpy(value).type(Settings().tensor_scalar_type),
requires_grad=(not self.freeze_template and with_grad))
for key, value in template_data.items()}
# Template points.
template_points = self.template.get_points()
template_points = {key: Variable(torch.from_numpy(value).type(Settings().tensor_scalar_type),
requires_grad=(not self.freeze_template and with_grad))
for key, value in template_points.items()}
# Control points.
if Settings().dense_mode:
control_points = template_data
else:
control_points = self.fixed_effects['control_points']
control_points = Variable(torch.from_numpy(control_points).type(Settings().tensor_scalar_type),
requires_grad=((not self.freeze_control_points) and with_grad))
return template_data, template_points, control_points
def _individual_RER_to_torch_tensors(self, individual_RER, with_grad):
"""
Convert the input individual_RER into torch tensors.
"""
# Momenta.
momenta = individual_RER['momenta']
momenta = torch.from_numpy(momenta).requires_grad_(with_grad).type(Settings().tensor_scalar_type)
return momenta
####################################################################################################################
### Printing and writing methods:
####################################################################################################################
def print(self, individual_RER):
pass
def write(self, dataset, population_RER, individual_RER, update_fixed_effects=True, write_residuals=True):
# Write the model predictions, and compute the residuals at the same time.
residuals = self._write_model_predictions(dataset, individual_RER,
compute_residuals=(update_fixed_effects or write_residuals))
# Optionally update the fixed effects.
if update_fixed_effects:
sufficient_statistics = self.compute_sufficient_statistics(dataset, population_RER, individual_RER,
residuals=residuals)
self.update_fixed_effects(dataset, sufficient_statistics)
# Write residuals.
if write_residuals:
residuals_list = [[residuals_i_k.detach().cpu().numpy() for residuals_i_k in residuals_i]
for residuals_i in residuals]
write_2D_list(residuals_list, self.name + "__EstimatedParameters__Residuals.txt")
# Write the model parameters.
self._write_model_parameters(individual_RER)
def _write_model_predictions(self, dataset, individual_RER, compute_residuals=True):
# Initialize.
template_data, template_points, control_points = self._fixed_effects_to_torch_tensors(False)
momenta = self._individual_RER_to_torch_tensors(individual_RER, False)
# Deform, write reconstructions and compute residuals.
self.exponential.set_initial_template_points(template_points)
self.exponential.set_initial_control_points(control_points)
residuals = [] # List of torch 1D tensors. Individuals, objects.
for i, subject_id in enumerate(dataset.subject_ids):
self.exponential.set_initial_momenta(momenta[i])
self.exponential.update()
deformed_points = self.exponential.get_template_points()
deformed_data = self.template.get_deformed_data(deformed_points, template_data)
if compute_residuals:
residuals.append(self.multi_object_attachment.compute_distances(
deformed_data, self.template, dataset.deformable_objects[i][0]))
names = []
for k, (object_name, object_extension) \
in enumerate(zip(self.objects_name, self.objects_name_extension)):
name = self.name + '__Reconstruction__' + object_name + '__subject_' + subject_id + object_extension
names.append(name)
self.template.write(names, {key: value.data.cpu().numpy() for key, value in deformed_data.items()})
return residuals
def _write_model_parameters(self, individual_RER):
# Template.
template_names = []
for i in range(len(self.objects_name)):
aux = self.name + "__EstimatedParameters__Template_" + self.objects_name[i] + self.objects_name_extension[i]
template_names.append(aux)
self.template.write(template_names)
# Control points.
write_2D_array(self.get_control_points(), self.name + "__EstimatedParameters__ControlPoints.txt")
# Momenta.
write_3D_array(individual_RER['momenta'], self.name + "__EstimatedParameters__Momenta.txt")
# Momenta covariance.
write_2D_array(self.get_covariance_momenta_inverse(),
self.name + "__EstimatedParameters__CovarianceMomentaInverse.txt")
# Noise variance.
write_2D_array(np.sqrt(self.get_noise_variance()), self.name + "__EstimatedParameters__NoiseStd.txt")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os, os.path, sys
import socket
if __name__ == "__main__":
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',))
print "PROJECT_ROOT=", PROJECT_ROOT
sys.path.append(PROJECT_ROOT)
# Add virtualenv dirs to python path
host = socket.gethostname()
print "HOSTNAME=%s" % host
if host=='irrigatorpro':
if "test" in PROJECT_ROOT:
VIRTUAL_ENV_ROOT = '/www/VirtualEnvs/test/'
else:
VIRTUAL_ENV_ROOT = '/www/VirtualEnvs/irrigator_pro/'
else:
VIRTUAL_ENV_ROOT = os.path.join( PROJECT_ROOT, 'VirtualEnvs', 'irrigator_pro')
print "VIRTUAL_ENV_ROOT='%s'" % VIRTUAL_ENV_ROOT
activate_this = os.path.join(VIRTUAL_ENV_ROOT, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
# Get settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "irrigator_pro.settings")
import django
django.setup()
from farms.auth_users_processing import extract_email, AuthUserException, add_users
from farms.models import Farm
print(extract_email('Leblanc, alain (aalebl@gmail.com) '))
print(extract_email('aalebl@gmail.com'))
try:
extract_email('aalebl@gmail')
print('Missed exception.')
except AuthUserException:
print('Caught exception ok')
new_users = ['alainleblanc@yahoo.com', 'isidore@laferme.ca']
farm = Farm.objects.get(pk=20)
add_users(farm, new_users)
|
nilq/baby-python
|
python
|
import csv
import requests
import io
import json
import uuid
from collections import OrderedDict, defaultdict, Counter
import urllib.parse
from functools import lru_cache
# for LRU cache
CACHE_MAX_SIZE = 65536
__all__ = ['RProperty', 'RQuery', 'PeriodoReconciler',
'CsvReconciler', 'non_none_values', 'grouper', 'CACHE_MAX_SIZE']
# a wrapper for
# https://github.com/periodo/periodo-reconciler/blob/master/API.md
# http://stackoverflow.com/questions/2348317/how-to-write-a-pager-for-python-iterators/2350904#2350904
def grouper(iterator, page_size):
"""
yield pages of results from the input iterable
Parameters
----------
iterator : Python iterator
the iterator to be converted into pages
page_size : int
page size
Returns
-------
iterator
an iterator of pages
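Examples
--------
>>> list(grouper(iter(range(5)), 2))
[[0, 1], [2, 3], [4]]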
"""
page = []
for item in iterator:
page.append(item)
if len(page) == page_size:
yield page
page = []
if len(page) > 0:
yield page
def non_none_values(dict_):
return dict([
(k, v) for (k, v) in dict_.items() if v is not None
])
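# e.g. non_none_values({'location': 'Loc', 'start': None}) -> {'location': 'Loc'}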
class RProperty(object):
def __init__(self, p, v):
self.p = p
self.v = v
def to_dict(self):
return {'p': self.p, 'v': self.v}
def __repr__(self):
return ("""RProperty({}, {})"""
.format(json.dumps(self.p), json.dumps(self.v)))
class RQuery(object):
def __init__(self, query, label=None, limit=None, properties=None):
self.query = query
if label is None:
self.label = str(uuid.uuid4())
else:
self.label = label
self.limit = limit
self.properties = properties
def to_key_value(self):
v = {'query': self.query}
if self.limit is not None:
v['limit'] = self.limit
if (self.properties is not None and len(self.properties)):
v['properties'] = [p.to_dict() for p in self.properties]
return (self.label, v)
def __repr__(self):
if (self.properties is not None) and (len(self.properties)):
properties_repr = (""", properties=[{}]"""
.format(",\n".join([repr(p)
for p in self.properties])))
else:
properties_repr = ""
if self.limit is not None:
limit_repr = ", limit={}".format(json.dumps(self.limit))
else:
limit_repr = ""
return ("""RQuery({}, label={}{}{})"""
.format(json.dumps(self.query),
json.dumps(
self.label),
limit_repr,
properties_repr))
class PeriodoReconciler(object):
def __init__(self, host='localhost:8142', protocol='http'):
self.host = host
self.protocol = protocol
self.base_url = '{}://{}/'.format(protocol, host)
def __repr__(self):
return ("""PeriodoReconciler(host={}, protocol={})"""
.format(json.dumps(self.host),
json.dumps(self.protocol)))
def describe(self):
r = requests.get(self.base_url)
return r.json()
@lru_cache(maxsize=CACHE_MAX_SIZE)
def _call_reconciler(self, query_dict_json, method='GET'):
if method.upper() == 'GET':
r = requests.get(self.base_url, params={
'queries': query_dict_json})
elif method.upper() == 'POST':
r = requests.post(self.base_url, data={
'queries': query_dict_json})
if r.status_code == 200:
return r.json()
else:
r.raise_for_status()
def _reconcile_query_by_query(self, queries, method='GET'):
queries_dict = OrderedDict([q.to_key_value() for q in queries])
results_dict = dict()
for (k, v) in queries_dict.items():
# don't let the label for the query mess up the caching
query_dict = {'_': v}
query_dict_json = json.dumps(query_dict, sort_keys=True)
result = self._call_reconciler(query_dict_json, method)
results_dict[k] = result['_']
return results_dict
def reconcile(self, queries, method='GET', query_by_query=False):
if query_by_query:
return self._reconcile_query_by_query(queries, method)
queries_dict = OrderedDict([q.to_key_value() for q in queries])
if method.upper() == 'GET':
r = requests.get(self.base_url, params={
'queries': json.dumps(queries_dict)})
elif method.upper() == 'POST':
r = requests.post(self.base_url, data={
'queries': json.dumps(queries_dict)})
if r.status_code == 200:
return r.json()
else:
r.raise_for_status()
def suggest_properties(self):
r = requests.get(urllib.parse.urljoin(
self.base_url, '/suggest/properties'))
if r.status_code == 200:
return r.json()['result']
def suggest_entities(self, prefix):
r = requests.get(urllib.parse.urljoin(
self.base_url, '/suggest/entities'), params={
'prefix': prefix
})
if r.status_code == 200:
return r.json()['result']
def preview_period(self, period_id, flyout=False):
params = {'id': period_id}
if flyout:
params['flyout'] = True
url = urllib.parse.urljoin(self.base_url, '/preview')
r = requests.get(url, params=params)
if r.status_code == 200:
return r.content
else:
r.raise_for_status()
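# A minimal usage sketch (assumes a periodo-reconciler service on
# localhost:8142; the query text and property values below are purely
# illustrative):
#
# p_recon = PeriodoReconciler()
# results = p_recon.reconcile([
#     RQuery('Bronze Age', properties=[RProperty('location', 'Britain')])
# ])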
class CsvReconciler(object):
match_column_fields = (
'match_num', 'match_name', 'match_id',
'candidates_count',
'match_fallback_id', 'match_fallback_name')
def __init__(self, csvfile, p_recon, query,
location=None, start=None, stop=None,
ignored_queries='',
transpose_query=False,
page_size=1000,
query_by_query=True,
match_column_prefix="",
match_top_candidate=True):
"""
"""
self.csvfile = csvfile
self.p_recon = p_recon
self.query = query
self.location = location
self.start = start
self.stop = stop
self.ignored_queries = ignored_queries
self.transpose_query = transpose_query
self.page_size = page_size
self.query_by_query = query_by_query
self.match_column_prefix = match_column_prefix
self.match_top_candidate = match_top_candidate
# if the query matches any entry in ignored_queries,
# throw out the match
# using csv.reader to parse ignored_queries because the parameter is
# a comma-delimited list
c_reader = csv.reader(io.StringIO(self.ignored_queries))
try:
self.ignored_queries_set = set(next(c_reader))
except StopIteration as e:
self.ignored_queries_set = set()
self.reader = csv.DictReader(csvfile)
# check that query, location, start, stop are in fieldnames
# TO DO: I may want to move away from using assert
for f in [query, location, start, stop]:
if f is not None:
assert f in self.reader.fieldnames
# which properties are included?
self.included_properties = non_none_values({
'location': location,
'start': start,
'stop': stop
})
# compute the columns names for the match results, which
# have an optional prefix (match_column_prefix)
self.match_column_names = OrderedDict(
[(name, f"{self.match_column_prefix}{name}")
for name in CsvReconciler.match_column_fields])
# initialize a summary count of the matches
self.match_summary = Counter()
def _transpose_query(self, q):
"""
transpose only if there is a single ","
"""
if not self.transpose_query:
return q
terms = [term.strip() for term in q.split(",")]
if (len(terms) == 2):
return terms[1] + " " + terms[0]
else:
return q
def results_with_rows(self):
# bin the input rows into pages and then feed the pages
# to the reconciler
# from the reconciler, yield each result
for (i, page) in enumerate(grouper(self.reader, self.page_size)):
queries = []
# TO DO: I might be unnecessarily reproducing the page in memory
page_dict = OrderedDict()
for (j, row) in enumerate(page):
label = str(j)
page_dict[label] = row
queries.append(RQuery(
self._transpose_query(row[self.query]),
label=label,
properties=[
RProperty(p, row[v]) for (p, v)
in self.included_properties.items()
]
))
responses = self.p_recon.reconcile(
queries,
method='post',
query_by_query=self.query_by_query)
for (label, row) in page_dict.items():
# print ('\r results_with_rows', i, label, end="")
yield(row, responses[label])
def _matches(self, results_with_rows=None):
"""
this method process the results to return only matches
"""
# assume that the new match_* names are not already field names
assert len(set(self.reader.fieldnames) &
set(self.match_column_names.values())) == 0
# return matches from the entire CSV if
# we're not processing the inputted subset of results
if results_with_rows is None:
results_with_rows = self.results_with_rows()
# compute a counter on the matches in the loop
# mapping query to match_id, match_name
self.matches_for_query = defaultdict(Counter)
for (row, response) in results_with_rows:
results = response['result']
matching_results = [
result for result in results if result['match']]
match_num = len(matching_results)
# I think that number of matches must be 0 or 1
# otherwise: a bug in the reconciler
assert match_num < 2
if (match_num == 1) or (self.match_top_candidate and len(results)):
match_name = results[0]['name']
match_id = results[0]['id']
# keep track of how many times a given query
# maps to a (match_id, match_name) tuple
(self.matches_for_query[row[self.query]]
.update([(match_id, match_name)]))
else:
match_name = ''
match_id = ''
row[self.match_column_names['candidates_count']] = len(results)
row[self.match_column_names["match_num"]] = match_num
row[self.match_column_names["match_name"]] = match_name
row[self.match_column_names["match_id"]] = match_id
row[self.match_column_names["match_fallback_id"]] = ''
row[self.match_column_names["match_fallback_name"]] = ''
# eliminate results in which the query is in ignored_queries
if row[self.query] in self.ignored_queries_set:
row[self.match_column_names["match_num"]] = 0
row[self.match_column_names["match_name"]] = ''
row[self.match_column_names["match_id"]] = ''
yield (row)
def matches(self, results_with_rows=None):
"""
_matches does the first pass; this pass then fills in fallback matches
for unmatched rows and builds the match summary
"""
rows = list(self._matches(results_with_rows))
self.match_summary = Counter()
# let's now calculate fallback for rows
# without matches
for row in rows:
if not row[self.match_column_names["match_id"]]:
# set as fallback as the most common match
# for the same query term
query = row[self.query]
c = self.matches_for_query[query].most_common(1)
if len(c):
((match_id, match_name), count) = c[0]
row[(self
.match_column_names["match_fallback_id"])] = match_id
row[(self
.match_column_names
["match_fallback_name"])] = match_name
self.match_summary.update([(
row[self.query],
row[self.location] if self.location is not None else '',
row[self.start] if self.start is not None else '',
row[self.stop] if self.stop is not None else '',
row[self.match_column_names["match_num"]],
row[self.match_column_names["match_name"]],
row[self.match_column_names["match_id"]],
row[self.match_column_names["candidates_count"]],
row[self.match_column_names["match_fallback_id"]],
row[self.match_column_names["match_fallback_name"]]
)])
yield row
def to_csv(self, csvfile, rows, fieldnames=None):
if fieldnames is None:
fieldnames = (
self.reader.fieldnames +
list(self.match_column_names.values())
)
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in rows:
writer.writerow(row)
def match_summary_to_csv(self, output):
"""
write self.match_summary as CSV
"""
headers = (['query', 'location', 'start', 'stop'] +
list(CsvReconciler.match_column_fields) + ['row_count'])
writer = csv.DictWriter(output, fieldnames=headers)
writer.writeheader()
for (v, c) in self.match_summary.most_common():
row = OrderedDict(zip(headers, list(v) + [c]))
writer.writerow(row)
|
nilq/baby-python
|
python
|
import os
from .. import FileBuilder
from .file_builder_test import FileBuilderTest
class BuildDirsTest(FileBuilderTest):
"""Tests correct determination of whether build directories are present.
Tests correct determination of whether the parent directories of
output files are present.
"""
def _build_dirs_build_file1(self, builder, filename):
"""The build file function for the first build function."""
self._write(filename, 'text')
def _build_dirs_build1(self, builder):
"""The first build function."""
builder.build_file(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output.txt'),
'build_file1', self._build_dirs_build_file1)
builder.build_file(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output.txt'),
'build_file1', self._build_dirs_build_file1)
def _build_dirs_build_file2(self, builder, filename):
"""The first build file function for the second build function."""
self.assertTrue(builder.exists(os.path.join(self._temp_dir, 'Dir1')))
raise RuntimeError()
def _build_dirs_build_file3(self, builder, filename):
"""The second build file function for the second build function."""
self.assertTrue(builder.is_dir(os.path.join(self._temp_dir, 'Dir2')))
self._write(filename, 'text')
def _build_dirs_build_file4(self, builder, filename):
"""The third build file function for the second build function."""
self._write(filename, 'text')
def _build_dirs_build_file5(self, builder, filename):
"""The fourth build file function for the second build function."""
raise RuntimeError()
def _build_dirs_build2(self, builder):
"""The second build function."""
self.assertFalse(builder.exists(os.path.join(self._temp_dir, 'Dir1')))
with self.assertRaises(RuntimeError):
builder.build_file(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output.txt'),
'build_file2', self._build_dirs_build_file2)
self.assertFalse(builder.exists(os.path.join(self._temp_dir, 'Dir1')))
self.assertFalse(
builder.exists(os.path.join(self._temp_dir, 'Dir1', 'Subdir')))
self.assertFalse(
builder.exists(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output.txt')))
with self.assertRaises(RuntimeError):
builder.build_file(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output2.txt'),
'build_file2', self._build_dirs_build_file2)
self.assertFalse(builder.exists(os.path.join(self._temp_dir, 'Dir1')))
self.assertFalse(
builder.exists(os.path.join(self._temp_dir, 'Dir1', 'Subdir')))
self.assertFalse(
builder.exists(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output.txt')))
builder.build_file(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output.txt'),
'build_file4', self._build_dirs_build_file4)
with self.assertRaises(RuntimeError):
builder.build_file(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'),
'build_file5', self._build_dirs_build_file5)
self.assertTrue(builder.is_dir(os.path.join(self._temp_dir, 'Dir3')))
self.assertTrue(
builder.is_dir(os.path.join(self._temp_dir, 'Dir3', 'Subdir')))
self.assertFalse(builder.exists(os.path.join(self._temp_dir, 'Dir2')))
builder.build_file(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output.txt'),
'build_file3', self._build_dirs_build_file3)
self.assertTrue(builder.is_dir(os.path.join(self._temp_dir, 'Dir2')))
self.assertTrue(builder.is_dir(os.path.join(self._temp_dir, 'Dir3')))
self.assertTrue(
builder.is_dir(os.path.join(self._temp_dir, 'Dir3', 'Subdir')))
def _build_dirs_build3(self, builder):
"""The third build function."""
self.assertFalse(
builder.exists(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output2.txt')))
self.assertTrue(builder.exists(os.path.join(self._temp_dir, 'Dir2')))
self.assertTrue(
builder.exists(os.path.join(self._temp_dir, 'Dir2', 'Subdir')))
self.assertTrue(builder.exists(os.path.join(self._temp_dir, 'Dir3')))
self.assertTrue(
builder.exists(os.path.join(self._temp_dir, 'Dir3', 'Subdir')))
builder.declare_read(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'))
self._check_contents(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'),
'text')
def test_build_dirs(self):
"""Test correct determination of whether build directories are present.
"""
FileBuilder.build(
self._cache_filename, 'build_dirs_test', self._build_dirs_build1)
FileBuilder.build(
self._cache_filename, 'build_dirs_test', self._build_dirs_build2)
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output.txt'),
'text')
self.assertFalse(os.path.exists(os.path.join(self._temp_dir, 'Dir1')))
self._write(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output2.txt'),
'text')
self._write(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'),
'text')
FileBuilder.build(
self._cache_filename, 'build_dirs_test', self._build_dirs_build3)
self.assertFalse(os.path.exists(os.path.join(self._temp_dir, 'Dir1')))
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output2.txt'),
'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'),
'text')
|
nilq/baby-python
|
python
|
from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from hashlib import sha512
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
import cbor
from sawtooth_sdk.protobuf.transaction_pb2 import Transaction
from sawtooth_sdk.protobuf.batch_pb2 import BatchHeader
from sawtooth_sdk.protobuf.batch_pb2 import Batch
from sawtooth_sdk.protobuf.batch_pb2 import BatchList
import urllib.request
from urllib.error import HTTPError
import hashlib
def _sha512(data):
return hashlib.sha512(data).hexdigest()
def _get_prefix():
return _sha512("soce".encode('utf-8'))[0:6]
def _get_address(name):
soce_prefix = _get_prefix()
name_address = _sha512(name.encode('utf-8'))[0:64]
return soce_prefix + name_address
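# Addresses are 70 hex characters: the 6-character namespace prefix derived
# from "soce" followed by the first 64 characters of the sha512 of the name,
# e.g. _get_address('voting1') -> prefix + sha512('voting1')[0:64].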
context = create_context('secp256k1')
private_key = context.new_random_private_key()
signer = CryptoFactory(context).new_signer(private_key)
action = 'create-voting'
name_id = 'voting1'
configurations_preferences_id = ['a', 'b']
sc_method = 'borda-voting'
payload = {
'action': action,
'name_id': name_id,
'configurations_preferences_id': configurations_preferences_id,
'sc_method': sc_method
}
address = _get_address(str(name_id))
address2 = _get_address(str(configurations_preferences_id))
#payload_bytes = cbor.dumps(payload)
payload_bytes = ";".join([str(action), str(name_id),
str(configurations_preferences_id),
str(None)]).encode()
txn_header_bytes = TransactionHeader(
family_name='soce',
family_version='1.0',
inputs=[address, address2],
outputs=[address, address2],
signer_public_key=signer.get_public_key().as_hex(),
# In this example, we're signing the batch with the same private key,
# but the batch can be signed by another party, in which case, the
# public key will need to be associated with that key.
batcher_public_key=signer.get_public_key().as_hex(),
# In this example, there are no dependencies. This list should include
# any previous transaction header signatures that must be applied for
# this transaction to successfully commit.
# For example,
# dependencies=['540a6803971d1880ec73a96cb97815a95d374cbad5d865925e5aa0432fcf1931539afe10310c122c5eaae15df61236079abbf4f258889359c4d175516934484a'],
dependencies=[],
payload_sha512=sha512(payload_bytes).hexdigest()
).SerializeToString()
signature = signer.sign(txn_header_bytes)
txn = Transaction(
header=txn_header_bytes,
header_signature=signature,
payload=payload_bytes
)
txns = [txn]
batch_header_bytes = BatchHeader(
signer_public_key=signer.get_public_key().as_hex(),
transaction_ids=[txn.header_signature for txn in txns],
).SerializeToString()
signature = signer.sign(batch_header_bytes)
batch = Batch(
header=batch_header_bytes,
header_signature=signature,
transactions=txns
)
batch_list_bytes = BatchList(batches=[batch]).SerializeToString()
try:
request = urllib.request.Request(
'http://localhost:8008/batches',
batch_list_bytes,
method='POST',
headers={'Content-Type': 'application/octet-stream'})
response = urllib.request.urlopen(request)
except HTTPError as e:
response = e.file
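# Show the validator's reply; both a success response and an HTTPError body
# end up in `response` (plain urllib, no extra dependencies assumed).
print(response.read().decode('utf-8'))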
|
nilq/baby-python
|
python
|
"""
Written by Muhammad on 09/02/2018
"""
import datetime as dt
import logging
import numpy as np
import pandas as pd
import ast
def csv_to_dict(fname, stime=None, etime=None, sep="|", orient="list"):
"""Reads data from a csv file and returns a dictionary.
Parameters
----------
fname : str
Full path of a csv file.
stime : Optional[datetime.datetime]
The start time of interest
etime : Optional[datetime.datetime]
The end time of interest.
If set to None, reads data to the end of a day
sep : str
Delimiter to use
orient : str
Orientation of the returned dict (see pandas.DataFrame.to_dict)
Returns
-------
data_dict : dict
A dictionary object that holds the data
"""
# Load to a pandas dataframe
print("Loading csv file to pandas dataframe")
date_parser = lambda x: dt.datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
df = pd.read_csv(fname, sep=sep, na_values="None",
parse_dates=['time'],
date_parser=date_parser)
if stime is not None:
df = df.loc[df.time >= stime, :]
if etime is not None:
df = df.loc[df.time <= etime, :]
# Convert to a dict
print("Converting pandas dataframe to dict")
# NOTE We'll use list orientation even though
# we need records orientation, because some of
# the columns from the DF are lists which
# get interpreted as strings by pandas
# and it becomes messy; this is a simple
# method Muhammad devised and I'm building on it.
data_dict = df.to_dict(orient="list")
print(df["ptab"].dtypes)
# Convert a string representation of list to a list
prm_keys = ["ptab", "ltab"]
fit_keys = ["elv", "gflg", "nlag", "p_l", "p_l_e", "p_s",
"p_s_e", "phi0", "phi0_e", "pwr0", "qflg", "slist", "v",
"v_e", "w_l", "w_l_e", "w_s", "w_s_e"]
keys_list = prm_keys + fit_keys
print("Converting string representation of lists to normal lists")
for ky in keys_list:
data_dict[ky] = [ast.literal_eval(x) for x in data_dict[ky]]
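# ast.literal_eval safely parses Python literal strings, e.g.
# ast.literal_eval("[28.5, 30.0]") -> [28.5, 30.0], without the risks of eval()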
#for x in data_dict[ky]:
# try:
# ast.literal_eval(x)
# except:
# import pdb
# pdb.set_trace()
# # if we need a list of dicts conver the dict of lists to the format
# if orient == "records":
# listDict = [dict(zip(data_dict,t)) for t in zip(*data_dict.values())]
# return listDict
return data_dict
# run the code
def main(orient="list"):
# Set the logging level
logging.getLogger().setLevel(logging.WARNING)
# input parameters
stime = None
etime = None
#stime = dt.datetime(2012,12,31)
#etime = dt.datetime(2012,12,31, 1, 0)
csv_sep = "|" # Delimiter to use
# Read the csv file and convert it to a dict
fdir = "./data/tmp/"
#fname = fdir + "20121231.000000.20130101.000000.fhe.fitacf.csv"
fname = fdir + "20130110.180000.20130111.180000.bks.fitacf.csv"
#data_dict = csv_to_dict(fname, stime=stime, etime=etime, sep=csv_sep)
data_dict = csv_to_dict(fname, stime=stime, etime=etime, sep=csv_sep, orient=orient)
return data_dict
if __name__ == "__main__":
data_dict = main()
|
nilq/baby-python
|
python
|
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.views import generic
from . import forms, models
class JoinUs(generic.CreateView):
form_class = forms.RegistrationForm
success_url = reverse_lazy('login')
template_name = 'membership/join-us.html'
class MemberDetail(PermissionRequiredMixin, generic.DetailView):
permission_required = ['assignments.view_member']
model = models.Member
slug_field = 'permalink'
class MemberList(PermissionRequiredMixin, generic.ListView):
permission_required = ['assignments.view_member']
model = models.Member
class ParentList(PermissionRequiredMixin, generic.ListView):
permission_required = ['assignments.view_member']
model = models.Parent
class ScoutList(PermissionRequiredMixin, generic.ListView):
permission_required = ['assignments.view_member']
model = models.Scout
class ContributorList(PermissionRequiredMixin, generic.ListView):
permission_required = ['assignments.view_member']
model = models.Contributor
|
nilq/baby-python
|
python
|
# vim: ts=4:sw=4:et:cc=120
from typing import Optional, Union
from ace.analysis import RootAnalysis
from ace.system.base import AlertingBaseInterface
class RemoteAlertTrackingInterface(AlertingBaseInterface):
async def register_alert_system(self, name: str) -> bool:
return await self.get_api().register_alert_system(name)
async def unregister_alert_system(self, name: str) -> bool:
return await self.get_api().unregister_alert_system(name)
async def get_alerts(self, name: str, timeout: Optional[int] = None) -> list[str]:
return await self.get_api().get_alerts(name, timeout=timeout)
async def submit_alert(self, root: Union[RootAnalysis, str]) -> bool:
raise NotImplementedError()
async def get_alert_count(self, name: str) -> int:
raise NotImplementedError()
|
nilq/baby-python
|
python
|
from jiminy.gym.envs.box2d.lunar_lander import LunarLander
from jiminy.gym.envs.box2d.lunar_lander import LunarLanderContinuous
from jiminy.gym.envs.box2d.bipedal_walker import BipedalWalker, BipedalWalkerHardcore
from jiminy.gym.envs.box2d.car_racing import CarRacing
|
nilq/baby-python
|
python
|
import datetime
class Commit:
def __init__(self, hash: str, message: str, date_time: datetime.datetime,
author: str, email: str, repository: 'Repository'):
self._hash = hash
self.message = message
self.datetime = date_time
self.author = author
self.email = email
self._repository = repository
@property
def hash(self):
return self._hash
@hash.setter
def hash(self, value):
raise Exception(
'It is not possible to set a new hash value, instance a new commit instead'
)
@property
def children(self):
return self._repository.get_commit_children(self.hash)
@property
def parents(self):
return self._repository.get_commit_parents(self.hash)
def __repr__(self):
return self.__str__()
def __str__(self):
return self._hash
def __hash__(self) -> int:
return self._hash.__hash__()
def __eq__(self, other: 'Commit') -> bool:
return self.hash == other.hash
|
nilq/baby-python
|
python
|
import os
import argparse
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from apex.parallel import DistributedDataParallel as DDP
from utils import AverageMeter, accuracy
from datasets import ImageList, pil_loader, cv2_loader
from datasets import get_val_transform, HybridValPipe
from networks import MobileNetV3_Large, MobileNetV3_Small
parser = argparse.ArgumentParser(
description="Basic Pytorch ImageNet Example. Testing.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# various paths
parser.add_argument('--val_root', type=str, required=True, help='root path to validating images')
parser.add_argument('--val_list', type=str, required=True, help='validating image list')
parser.add_argument('--weights', type=str, required=True, help='checkpoint for testing')
# testing hyper-parameters
parser.add_argument('--workers', type=int, default=8, help='number of workers to load dataset (global)')
parser.add_argument('--batch_size', type=int, default=512, help='batch size (global)')
parser.add_argument('--model', type=str, default='MobileNetV3_Large', help='type of model',
choices=['MobileNetV3_Large', 'MobileNetV3_Small'])
parser.add_argument('--num_classes', type=int, default=1000, help='class number of testing set')
parser.add_argument('--trans_mode', type=str, default='tv', help='mode of image transformation (tv/dali)')
parser.add_argument('--dali_cpu', action='store_true', default=False, help='runs CPU based DALI pipeline')
parser.add_argument('--ema', action='store_true', default=False, help='whether to use EMA')
# amp and DDP hyper-parameters
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--channels_last', type=str, default='False')
args, unparsed = parser.parse_known_args()
args.channels_last = eval(args.channels_last)
if hasattr(torch, 'channels_last') and hasattr(torch, 'contiguous_format'):
if args.channels_last:
memory_format = torch.channels_last
else:
memory_format = torch.contiguous_format
else:
memory_format = None
def main():
cudnn.enabled = True
cudnn.benchmark = True
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
# create model
if args.model == 'MobileNetV3_Large':
model = MobileNetV3_Large(args.num_classes, 0.0, False)
elif args.model == 'MobileNetV3_Small':
model = MobileNetV3_Small(args.num_classes, 0.0, False)
else:
raise Exception('invalid type of model')
model = model.cuda().to(memory_format=memory_format) if memory_format is not None else model.cuda()
# For distributed testing, wrap the model with apex.parallel.DistributedDataParallel.
if args.distributed:
# By default, apex.parallel.DistributedDataParallel overlaps communication with
# computation in the backward pass.
# delay_allreduce delays all communication to the end of the backward pass.
model = DDP(model, delay_allreduce=True)
else:
model = nn.DataParallel(model)
# define transform and initialize dataloader
batch_size = args.batch_size // args.world_size
workers = args.workers // args.world_size
if args.trans_mode == 'tv':
val_transform = get_val_transform()
val_dataset = ImageList(root=args.val_root,
list_path=args.val_list,
transform=val_transform)
val_sampler = None
if args.distributed:
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size, num_workers=workers,
pin_memory=True, sampler=val_sampler, shuffle=False)
elif args.trans_mode == 'dali':
pipe = HybridValPipe(batch_size=batch_size,
num_threads=workers,
device_id=args.local_rank,
root=args.val_root,
list_path=args.val_list,
size=256,
crop=224,
shard_id=args.local_rank,
num_shards=args.world_size,
dali_cpu=args.dali_cpu)
pipe.build()
val_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader")/args.world_size))
else:
raise Exception('invalid image transformation mode')
# restart from weights
if args.weights and os.path.isfile(args.weights):
if args.local_rank == 0:
print('loading weights from {}'.format(args.weights))
checkpoint = torch.load(args.weights, map_location=lambda storage,loc: storage.cuda(args.gpu))
if args.ema:
model.load_state_dict(checkpoint['ema'])
else:
model.load_state_dict(checkpoint['model'])
val_acc_top1, val_acc_top5 = validate(val_loader, model)
if args.local_rank == 0:
print('Val_acc_top1: {:.2f}'.format(val_acc_top1))
print('Val_acc_top5: {:.2f}'.format(val_acc_top5))
def validate(val_loader, model):
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
for data in tqdm(val_loader):
if args.trans_mode == 'tv':
x = data[0].cuda(non_blocking=True)
target = data[1].cuda(non_blocking=True)
elif args.trans_mode == 'dali':
x = data[0]['data'].cuda(non_blocking=True)
target = data[0]['label'].squeeze().cuda(non_blocking=True).long()
with torch.no_grad():
logits = model(x)
prec1, prec5 = accuracy(logits, target, topk=(1, 5))
if args.distributed:
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
top1.update(prec1.item(), x.size(0))
top5.update(prec5.item(), x.size(0))
return top1.avg, top5.avg
def reduce_tensor(tensor):
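# Average the metric across all distributed workers so every rank reports the global value.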
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= args.world_size
return rt
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as graph  # Python's MATLAB-style plotting library
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd  # to read the csv file
import csv
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
#import datamodify as dat
def datatobeTaken():
data = pd.read_csv("turboazmodified.csv")
dataframe = pd.DataFrame(data, columns=['Yurush', 'Qiymet', 'Buraxilish ili'])
yurush = data['Yurush']
qiymet = data['Qiymet']
buraxilishili = data['Buraxilish ili']
yurush = (yurush - yurush.mean()) / yurush.std()
yurush = np.c_[np.ones(yurush.shape[0]), yurush]
qiymet = (qiymet - qiymet.mean()) / qiymet.std()
buraxilishili = (buraxilishili - buraxilishili.mean()) / buraxilishili.std()
yurush = yurush.astype(float)
m = len(qiymet)
return yurush, qiymet, buraxilishili
data = pd.read_csv("turboazmodified.csv")
def firstplot():
yurush, qiymet, buraxilishili = datatobeTaken()
# datatobeTaken already returns numeric, normalized values, so no string
# clean-up of the mileage column is needed here.
graph.xlabel('Yurush')
graph.scatter(yurush[:, 1], qiymet, edgecolors='red')
graph.ylabel('Qiymet')
graph.title('Yurush vs Qiymet')
graph.show()
def secondplot():
yurush, qiymet, buraxilishili = datatobeTaken()
graph.scatter(buraxilishili, qiymet, edgecolor='b')
graph.xlabel('Buraxilish ili')
graph.ylabel('Qiymet')
graph.title('Buraxilish ili vs Qiymet')
graph.show()
def thirdplot():
yurush, qiymet, buraxilishili = datatobeTaken()
fig = graph.figure()
ax1 = fig.add_subplot(111, projection='3d')
ax1.scatter(yurush[:, 1], qiymet, buraxilishili)
graph.show()
def heuristicFunct(yurush, theta):
return np.dot(yurush, theta)
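# Squared-error cost for linear regression: J(theta) = (1/(2m)) * sum_i (h(x_i) - y_i)^2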
def costFunction(yurush, qiymet, theta):
m = len(qiymet)
sumofvariables = 0
for i in range(m):
sumofvariables += (heuristicFunct(yurush[i], theta) - qiymet[i]) ** 2
sumofvariables = sumofvariables * (1.0 / (2 * m))
return sumofvariables
def updateruletobeComputed(yurush, qiymet, theta, learningrate, numberofiterations):
# One batch gradient-descent step: theta_j -= alpha * (1/m) * sum_i (h(x_i) - y_i) * x_ij
m = len(qiymet)
errors = yurush.dot(np.asarray(theta)) - np.asarray(qiymet)
theta[0] = theta[0] - learningrate * np.sum(errors * yurush[:, 0]) / m
theta[1] = theta[1] - learningrate * np.sum(errors * yurush[:, 1]) / m
return theta
def plottingCostFunction(sumofvariables):
graph.title("Cost Function is plotted")
graph.xlabel("Number of iterations")
graph.ylabel("Cost")
graph.plot(sumofvariables)
graph.show()
def test1(theta):
# Predict the price of a 240,000 km listing (actual price: 11500) with the
# learned two-parameter model (mileage only; the production year is unused).
# The scalar input is normalized with the same statistics used for training,
# and the prediction is mapped back to the original price scale.
raw_yurush = data['Yurush']
raw_qiymet = data['Qiymet']
yurush = (240000 - raw_yurush.mean()) / raw_yurush.std()
predprice = theta[0] + theta[1] * yurush
normqiymet = predprice * raw_qiymet.std() + raw_qiymet.mean()
print('Predicted price: %s' % normqiymet)
print('Actual price: 11500')
def test2(theta):
# Same as test1 for a 415,558 km listing (actual price: 8800).
raw_yurush = data['Yurush']
raw_qiymet = data['Qiymet']
yurush = (415558 - raw_yurush.mean()) / raw_yurush.std()
predprice = theta[0] + theta[1] * yurush
normqiymet = predprice * raw_qiymet.std() + raw_qiymet.mean()
print('Predicted price: %s' % normqiymet)
print('Actual price: 8800')
def linearRegrTrain():
linearreg = LinearRegression()
yurush, qiymet, buraxilishili = datatobeTaken()
yurushTrain, yurushTest, buraxilishiliTrain, buraxilishiliTest = train_test_split(yurush, buraxilishili, test_size=1/3, random_state=0)
linearreg.fit(yurushTrain, buraxilishiliTrain)
buraxilishiliPredict = linearreg.predict(yurushTest)
# Plot against the mileage column only (column 0 is the bias column of ones)
graph.scatter(yurushTrain[:, 1], buraxilishiliTrain, color='black')
graph.plot(yurushTrain[:, 1], linearreg.predict(yurushTrain), color='red')
graph.title('Yurush vs Buraxilish ili (train)')
graph.xlabel('Yurush')
graph.ylabel('Buraxilish ili')
graph.show()
def linearRegrTest():
linearreg = LinearRegression()
yurush, qiymet, buraxilishili = datatobeTaken()
yurushTrain, yurushTest, buraxilishiliTrain, buraxilishiliTest = train_test_split(yurush, buraxilishili, test_size=1/3, random_state=0)
linearreg.fit(yurushTest, buraxilishiliTest)
buraxilishiliPredict = linearreg.predict(yurushTrain)
graph.scatter(yurushTest[:, 1], buraxilishiliTest, color='black')
graph.plot(yurushTest[:, 1], linearreg.predict(yurushTest), color='red')
graph.title('Yurush vs Buraxilish ili (test)')
graph.xlabel('Yurush')
graph.ylabel('Buraxilish ili')
graph.show()
def normequation(yurush, qiymet):
yurush, qiymet, buraxilishili = datatobeTaken()
yurushTranspose = yurush.T
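# Closed-form least squares (normal equation): theta = (X^T X)^{-1} X^T y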
normeq = inv(yurushTranspose.dot(yurush)).dot(yurushTranspose).dot(qiymet)
print("The value we get from Normal Equation is %s" % (normeq))
return normeq
def PolynomialModel(degree, yurush, qiymet):
yurush, qiymet, buraxilishili = datatobeTaken()
poly = PolynomialFeatures(degree=degree)
polyyurush = poly.fit_transform(yurush)
regs = LinearRegression()
regs.fit(polyyurush, qiymet)
graph.scatter(yurush[:, 1], qiymet, color='red')
# Sort by mileage so the fitted curve is drawn from left to right
order = np.argsort(yurush[:, 1])
graph.plot(yurush[order, 1], regs.predict(polyyurush[order]), color='blue')
graph.show()
def tobePrinted():
#theta = [1,1,1]
theta = [0,0]
numberofiterations = 5  # number of iterations to learn
learningrate = 0.01 #learning rate is 0.01
m = 1328
yurush, qiymet, buraxilishili = datatobeTaken()
for i in range(numberofiterations):
costfinished = costFunction(yurush, qiymet, theta) #getting cost from cost function
theta = (updateruletobeComputed(yurush, qiymet, theta, learningrate, numberofiterations))
print("Cost function in iteration %d is %s" % (i, costfinished))
print(theta[0],theta[1])
graph.scatter(buraxilishili, qiymet, edgecolors='b')
#graph.plot(buraxilishili, qiymet)
#graph.show(block = True)
#graph.close()
#PolynomialModel(2, yurush, qiymet)
#normequation(yurush, qiymet)
#test1(yurush, qiymet, buraxilishili)
#plottingCostFunction()
#firstplot()
#linearRegrTrain()
#linearRegrTest()
#secondplot()
#thirdplot()
test1(theta)
tobePrinted()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
###################################################################################################
##
## Project: Embedded Learning Library (ELL)
## File: test.py
## Authors: Chris Lovett
##
## Requires: Python 3.x
##
###################################################################################################
import picluster
import sys
import time
# This test script shows how to interact with the Azure pi data center cloud service.
# It uses the 'requests' module to do HTTP interactions with Json data.
# See http://docs.python-requests.org/en/v1.0.0/user/quickstart/
import endpoint
ip = "192.168.1.999" # make it invalid ip address on purpose so it never colides with real machine
entity = {'IpAddress': ip, 'OsName': 'Raspbian', 'OsVersion': 'Jesse', 'CurrentTaskName': "RollingBuild", 'CurrentUserName': '','Command':''}
user = "Test"
def test_assert(e, message):
status = "SUCCESS"
if not e:
status = "FAILED"
print("{}, {}".format(message, status))
# add or update
t = picluster.PiBoardTable(endpoint.url, endpoint.apikey, user)
a = picluster.PiBoardEntity(entity)
r = t.update(a)
test_assert(r is None or r.ip_address != ip, "add or update entity")
# get all
r = t.get_all()
test_assert(len(r) > 0 and ip in [x.ip_address for x in r], "get_all")
# get the entity we added
r = t.get(ip)
test_assert(r and r.ip_address == ip, "get the entity we added")
# locking
r = t.lock(ip, 'Test')
test_assert(r and r.ip_address == ip and r.current_user_name == t.username, "lock our machine")
# now try and free the device using wrong user name
saved = t.username
t.username = 'Chuck'
failed = False
try:
r = t.unlock(ip)
failed = False
except:
failed = True
t.username = saved
test_assert(failed, "try and free the device using wrong user name")
# double check this is really the case
r = t.get(ip)
test_assert(r and r.ip_address == ip, "ensure entity is still there")
# now try and free the device using correct user name
r = t.unlock(ip)
test_assert(r and r.ip_address == ip, "unlock our machine")
# check it really is not locked
r = t.get(ip)
test_assert(r and r.current_user_name != t.username, "lock is gone")
# delete
r = t.delete(ip)
test_assert(r and r.current_user_name != t.username, "delete our machine")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# This class was auto-generated.
#
from onlinepayments.sdk.data_object import DataObject
from onlinepayments.sdk.domain.decrypted_payment_data import DecryptedPaymentData
from onlinepayments.sdk.domain.mobile_payment_product320_specific_input import MobilePaymentProduct320SpecificInput
class MobilePaymentMethodSpecificInput(DataObject):
"""
| Object containing the specific input details for mobile payments
"""
__authorization_mode = None
__decrypted_payment_data = None
__encrypted_payment_data = None
__ephemeral_key = None
__payment_product320_specific_input = None
__payment_product_id = None
__public_key_hash = None
__requires_approval = None
@property
def authorization_mode(self):
"""
| Determines the type of the authorization that will be used. Allowed values:
| * FINAL_AUTHORIZATION - The payment creation results in an authorization that is ready for capture. Final authorizations can't be reversed and need to be captured for the full amount within 7 days.
| * PRE_AUTHORIZATION - The payment creation results in a pre-authorization that is ready for capture. Pre-authorizations can be reversed and can be captured within 30 days. The capture amount can be lower than the authorized amount.
| * SALE - The payment creation results in an authorization that is already captured at the moment of approval.
| Only used with some acquirers, ignored for acquirers that don't support this. In case the acquirer doesn't allow this to be specified the authorizationMode is 'unspecified', which behaves similar to a final authorization.
Type: str
"""
return self.__authorization_mode
@authorization_mode.setter
def authorization_mode(self, value):
self.__authorization_mode = value
@property
def decrypted_payment_data(self):
"""
| The payment data if you do the decryption of the encrypted payment data yourself.
Type: :class:`onlinepayments.sdk.domain.decrypted_payment_data.DecryptedPaymentData`
"""
return self.__decrypted_payment_data
@decrypted_payment_data.setter
def decrypted_payment_data(self, value):
self.__decrypted_payment_data = value
@property
def encrypted_payment_data(self):
"""
| The payment data if we will do the decryption of the encrypted payment data. Typically you'd use encryptedCustomerInput in the root of the create payment request to provide the encrypted payment data instead.
| * For Apple Pay, the encrypted payment data can be found in property data of the PKPayment.token.paymentData property.
Type: str
"""
return self.__encrypted_payment_data
@encrypted_payment_data.setter
def encrypted_payment_data(self, value):
self.__encrypted_payment_data = value
@property
def ephemeral_key(self):
"""
| Ephemeral Key
| A unique generated key used by Apple to encrypt data.
Type: str
"""
return self.__ephemeral_key
@ephemeral_key.setter
def ephemeral_key(self, value):
self.__ephemeral_key = value
@property
def payment_product320_specific_input(self):
"""
| Object containing information specific to Google Pay. Required for payments with product 320.
Type: :class:`onlinepayments.sdk.domain.mobile_payment_product320_specific_input.MobilePaymentProduct320SpecificInput`
"""
return self.__payment_product320_specific_input
@payment_product320_specific_input.setter
def payment_product320_specific_input(self, value):
self.__payment_product320_specific_input = value
@property
def payment_product_id(self):
"""
| Payment product identifier - Please see Products documentation for a full overview of possible values.
Type: int
"""
return self.__payment_product_id
@payment_product_id.setter
def payment_product_id(self, value):
self.__payment_product_id = value
@property
def public_key_hash(self):
"""
| Public Key Hash
| A unique identifier to retrieve key used by Apple to encrypt information.
Type: str
"""
return self.__public_key_hash
@public_key_hash.setter
def public_key_hash(self, value):
self.__public_key_hash = value
@property
def requires_approval(self):
"""
| * true = the payment requires approval before the funds will be captured using the Approve payment or Capture payment API
| * false = the payment does not require approval, and the funds will be captured automatically
Type: bool
"""
return self.__requires_approval
@requires_approval.setter
def requires_approval(self, value):
self.__requires_approval = value
def to_dictionary(self):
dictionary = super(MobilePaymentMethodSpecificInput, self).to_dictionary()
if self.authorization_mode is not None:
dictionary['authorizationMode'] = self.authorization_mode
if self.decrypted_payment_data is not None:
dictionary['decryptedPaymentData'] = self.decrypted_payment_data.to_dictionary()
if self.encrypted_payment_data is not None:
dictionary['encryptedPaymentData'] = self.encrypted_payment_data
if self.ephemeral_key is not None:
dictionary['ephemeralKey'] = self.ephemeral_key
if self.payment_product320_specific_input is not None:
dictionary['paymentProduct320SpecificInput'] = self.payment_product320_specific_input.to_dictionary()
if self.payment_product_id is not None:
dictionary['paymentProductId'] = self.payment_product_id
if self.public_key_hash is not None:
dictionary['publicKeyHash'] = self.public_key_hash
if self.requires_approval is not None:
dictionary['requiresApproval'] = self.requires_approval
return dictionary
def from_dictionary(self, dictionary):
super(MobilePaymentMethodSpecificInput, self).from_dictionary(dictionary)
if 'authorizationMode' in dictionary:
self.authorization_mode = dictionary['authorizationMode']
if 'decryptedPaymentData' in dictionary:
if not isinstance(dictionary['decryptedPaymentData'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['decryptedPaymentData']))
value = DecryptedPaymentData()
self.decrypted_payment_data = value.from_dictionary(dictionary['decryptedPaymentData'])
if 'encryptedPaymentData' in dictionary:
self.encrypted_payment_data = dictionary['encryptedPaymentData']
if 'ephemeralKey' in dictionary:
self.ephemeral_key = dictionary['ephemeralKey']
if 'paymentProduct320SpecificInput' in dictionary:
if not isinstance(dictionary['paymentProduct320SpecificInput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['paymentProduct320SpecificInput']))
value = MobilePaymentProduct320SpecificInput()
self.payment_product320_specific_input = value.from_dictionary(dictionary['paymentProduct320SpecificInput'])
if 'paymentProductId' in dictionary:
self.payment_product_id = dictionary['paymentProductId']
if 'publicKeyHash' in dictionary:
self.public_key_hash = dictionary['publicKeyHash']
if 'requiresApproval' in dictionary:
self.requires_approval = dictionary['requiresApproval']
return self
|
nilq/baby-python
|
python
|
bl_info = {
"name": "Run CGA Grammar",
"description": "",
"author": "JUSTOM",
"version": (0, 0, 0),
"blender": (2, 80, 0),
"location": "View3D > Tool Shelf",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "",
"tracker_url": "",
"category": "Object"
}
import bpy
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
EnumProperty,
PointerProperty,
)
from bpy.types import (Panel,
Menu,
Operator,
PropertyGroup,
)
# ------------------------------------------------------------------------
# Scene Properties
# ------------------------------------------------------------------------
class PsbProperties(PropertyGroup):
fName: StringProperty(
name = "File",
description="Choose a file:",
default="",
subtype='FILE_PATH'
)
"""
my_enum: EnumProperty(
name="Dropdown:",
description="Apply Data to attribute.",
items=[ ('OP1', "Option 1", ""),
('OP2', "Option 2", ""),
('OP3', "Option 3", ""),
]
)
"""
# ------------------------------------------------------------------------
# Operators
# ------------------------------------------------------------------------
class RunGrammar(Operator):
"""Run Grammar"""
bl_idname = "object.run_cga_grammar"
bl_label = "Run Grammar"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
bpy.ops.object.mode_set(mode='EDIT')
scene = context.scene
psbTool = scene.psb_tool
#context = bpy.context
print(psbTool.fName)
return {'FINISHED'} # Lets Blender know the operator finished successfully.
# ------------------------------------------------------------------------
# Menus
# ------------------------------------------------------------------------
"""
class OBJECT_MT_CustomMenu(bpy.types.Menu):
bl_label = "Select"
bl_idname = "OBJECT_MT_custom_menu"
def draw(self, context):
layout = self.layout
# Built-in operators
layout.operator("object.select_all", text="Select/Deselect All").action = 'TOGGLE'
layout.operator("object.select_all", text="Inverse").action = 'INVERT'
layout.operator("object.select_random", text="Random")
"""
# ------------------------------------------------------------------------
# Panel in Object Mode
# ------------------------------------------------------------------------
class PsbPanel(Panel):
bl_label = "PSB Panel"
bl_idname = "PsbPanel"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Tools"
bl_context = "objectmode"
@classmethod
def poll(self,context):
return context.object is not None
def draw(self, context):
layout = self.layout
scene = context.scene
psbTool = scene.psb_tool
layout.prop(psbTool, "fName")
layout.operator("object.run_cga_grammar")
"""
class OBJECT_PT_CustomPanel(Panel):
bl_label = "My Panel"
bl_idname = "OBJECT_PT_custom_panel"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Tools"
bl_context = "objectmode"
@classmethod
def poll(self,context):
return context.object is not None
def draw(self, context):
layout = self.layout
scene = context.scene
psbTool = scene.psb_tool
layout.prop(psbTool, "my_bool")
layout.prop(psbTool, "my_enum", text="")
layout.prop(psbTool, "my_int")
layout.prop(psbTool, "my_float")
layout.prop(psbTool, "my_float_vector", text="")
layout.prop(psbTool, "my_string")
layout.prop(psbTool, "my_path")
layout.operator("wm.hello_world")
layout.menu(OBJECT_MT_CustomMenu.bl_idname, text="Presets", icon="SCENE")
layout.separator()
"""
# ------------------------------------------------------------------------
# Registration
# ------------------------------------------------------------------------
classes = (
PsbProperties,
RunGrammar,
#OBJECT_MT_CustomMenu,
PsbPanel
)
def register():
from bpy.utils import register_class
for cls in classes:
register_class(cls)
bpy.types.Scene.psb_tool = PointerProperty(type=PsbProperties)
def unregister():
from bpy.utils import unregister_class
for cls in reversed(classes):
unregister_class(cls)
del bpy.types.Scene.psb_tool
if __name__ == "__main__":
register()
|
nilq/baby-python
|
python
|
from scipy import stats
import json
import operator
import subprocess
import statistics as stat
import tweetTextCleaner
from sklearn.feature_extraction.text import *
from datetime import datetime
from sklearn import cluster
import numpy
#import word2vecReader
#from tokenizer import simpleTokenize
filterTerms = ['iphone 7', 'pikachu', 'pokemon go', 'macbook pro', 'trump', 'note 7']
def processDate(inputDate):
dateTemp = inputDate.split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
date = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
return day, hour, datetime.strptime(date, '%b %d %Y')
def label(mode):
tweetIDSet = set()
print('extracting outliers...')
brandList = []
listFile = open('brand.list', 'r')
for line in listFile:
brandList.append(line.strip())
listFile.close()
'''
exceptionFile = open('dataset/exceptions/exceptions.list', 'r')
exceptionList = set()
for line in exceptionFile:
exceptionList.add(long(line.strip()))
exceptionFile.close()
'''
totalDisplayFile = open('dataset/experiment/clean.display', 'w')
totalOutputFile = open('dataset/experiment/clean.labeled', 'w')
statFile = open('dataset/analysis/stat.total', 'w')
#totalCleanScore = []
#totalCleanData = []
mentionList = set()
hashtagList = set()
totalBrandData = {}
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
temp = json.loads(line.strip())
brand = temp['brand']
if brand not in totalBrandData:
totalBrandData[brand] = [temp]
else:
totalBrandData[brand].append(temp)
inputFile.close()
for brand in brandList:
print(brand)
outLierFile = open('dataset/exceptions/'+brand+'.outliers', 'w')
brandData = []
brandScoreList = []
for data in totalBrandData[brand]:
tweetID = data['id']
#if tweetID not in exceptionList:
if tweetID not in tweetIDSet:
tweetIDSet.add(tweetID)
text = data['text'].encode('utf-8')
filtered = False
for term in filterTerms:
if term in text.lower():
filtered = True
break
if not filtered:
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic'])-1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite/retweet
statFile.write(str(favorite)+'\t'+str(retweet)+'\t'+str(followers)+'\t'+str(ratio)+'\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
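# Success score heuristic: retweets count double relative to favorites,
# normalized per 10,000 followers (an engagement-rate proxy).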
labelScore = (2.0 * retweet + favorite) * 10000 / followers
brandData.append({'brand': brand,'content': content, 'score': labelScore, 'id': tweetID, 'day': day, 'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count, 'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers})
brandScoreList.append(labelScore)
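# z-score each brand's score distribution; tweets more than 2 standard
# deviations above the brand mean are split off as outliers below.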
zScores = stats.zscore(brandScoreList)
if len(zScores) != len(brandData):
print('Z-score Error!')
outputData = []
for index, item in enumerate(brandData):
item['zScore'] = float(zScores[index])
outputData.append(item)
cleanData = []
cleanScore = []
sorted_output = sorted(outputData, key=lambda x: x['score'])
for item in reversed(sorted_output):
z = item['zScore']
if z > 2:
outLierFile.write(str(item['score'])+' | '+str(z)+' : '+' | '+str(item['id'])+' | '+item['content']+'\n')
else:
cleanData.append(item)
cleanScore.append(item['score'])
#totalCleanScore.append(item['score'])
#totalCleanData.append(item)
outLierFile.close()
maxScore = max(cleanScore)
minScore = min(cleanScore)
normalScores = []
for score in cleanScore:
normalScores.append((score - minScore) / (maxScore - minScore))
stdevScore = stat.stdev(normalScores)
meanScore = stat.mean(normalScores)
print('mean: ' + str(meanScore))
print('stdev: ' + str(stdevScore))
print('median: ' + str(stat.median(normalScores)))
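# Sanity check on the normalized scores: a standard deviation at or above
# the mean (coefficient of variation >= 1) signals a heavy-tailed distribution.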
if stdevScore >= meanScore:
print('CAUTION')
else:
print('PASS')
print()
if mode == 1:
# label post with 1-10 score
cleanSize = len(cleanScore)
binSize = cleanSize/10
threshold = binSize
labelScore = 10
for count, item in enumerate(cleanData):
if count <= threshold or labelScore == 1:
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
try:
totalDisplayFile.write(brand+' | '+str(labelScore)+' | '+day+' | '+hour+' | '+item['content']+' | '+str(item['id'])+' | '+hashtagOutput+' | '+mentionsOutput+'\n')
item['label'] = labelScore
totalOutputFile.write(json.dumps(item)+'\n')
except Exception:
print(item['content'])
else:
print(threshold)
threshold += binSize
labelScore -= 1
elif mode == 2:
# binary label (0, 1)
cleanSize = len(cleanScore)
for count, item in enumerate(cleanData):
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
if count <= 0.5 * cleanSize:
labelScore = 1
else:
labelScore = 0
item['label'] = labelScore
totalOutputFile.write(json.dumps(item) + '\n')
try:
totalDisplayFile.write(
brand + ' | ' + str(labelScore) + ' | ' + day + ' | ' + hour + ' | ' + item['content'] + ' | ' + str(
item['id']) + ' | ' + hashtagOutput + ' | ' + mentionsOutput + '\n')
except Exception:
print(item['content'])
else:
# label with normalized scores
scoreDistFile = open('dataset/stats/scoreDist.'+brand, 'w')
for index, normalScore in enumerate(normalScores):
item = cleanData[index]
score = normalScore * 10
scoreDistFile.write(str(score)+'\n')
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
try:
totalDisplayFile.write(brand+' | '+str(score)+' | '+day+' | '+hour+' | '+item['content']+' | '+str(item['id'])+' | '+hashtagOutput+' | '+mentionsOutput+'\n')
item['label'] = score
totalOutputFile.write(json.dumps(item)+'\n')
except Exception:
print(item['content'])
scoreDistFile.close()
hashtagFile = open('dataset/experiment/hashtag.list', 'w')
mentionFile = open('dataset/experiment/mention.list', 'w')
for ht in hashtagList:
hashtagFile.write(ht+'\n')
for ment in mentionList:
mentionFile.write(ment+'\n')
hashtagFile.close()
mentionFile.close()
statFile.close()
totalOutputFile.close()
def label_new(mode, inputFile):
totalDisplayFile = open('dataset/commTweets/clean.display', 'w')
totalOutputFile = open('dataset/commTweets/clean.json', 'w')
mentionList = set()
hashtagList = set()
totalBrandData = {}
inputFile = open(inputFile, 'r')
for line in inputFile:
temp = json.loads(line.strip())
brand = temp['brand']
if brand not in totalBrandData:
totalBrandData[brand] = [temp]
else:
totalBrandData[brand].append(temp)
inputFile.close()
for brand in totalBrandData:
print(brand)
outLierFile = open('dataset/commTweets/outliers/'+brand+'.outliers', 'w')
brandData = []
brandScoreList = []
for data in totalBrandData[brand]:
tweetID = data['id']
text = data['text']
content = tweetTextCleaner.tweetCleaner(text)
retweet = float(data['retweet_count'])
favorite = float(data['favorite_count'])
followers = float(data['user_followers_count'])
author_statuses_count = float(data['user_statuses_count'])
author_favorite_count = float(data['user_favorite_count'])
author_listed_count = float(data['user_listed_count'])
day, hour, postData_object = processDate(data['create_at'])
_, _, authorData_object = processDate(data['user_create_at'])
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
labelScore = (2.0 * retweet + favorite) * 10000 / followers
brandData.append({'brand': brand, 'content': content, 'score': labelScore, 'id': tweetID, 'day': day, 'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count, 'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers})
brandScoreList.append(labelScore)
zScores = stats.zscore(brandScoreList)
if len(zScores) != len(brandData):
print('Z-score Error!')
outputData = []
for index, item in enumerate(brandData):
item['zScore'] = float(zScores[index])
outputData.append(item)
cleanData = []
cleanScore = []
sorted_output = sorted(outputData, key=lambda x: x['score'])
for item in reversed(sorted_output):
z = item['zScore']
if z > 2:
outLierFile.write(str(item['score'])+' | '+str(z)+' : '+' | '+str(item['id'])+' | '+item['content']+'\n')
else:
cleanData.append(item)
cleanScore.append(item['score'])
#totalCleanScore.append(item['score'])
#totalCleanData.append(item)
outLierFile.close()
maxScore = max(cleanScore)
minScore = min(cleanScore)
normalScores = []
for score in cleanScore:
normalScores.append((score - minScore) / (maxScore - minScore))
stdevScore = stat.stdev(normalScores)
meanScore = stat.mean(normalScores)
#print('mean: ' + str(meanScore))
#print('stdev: ' + str(stdevScore))
#print('median: ' + str(stat.median(normalScores)))
if stdevScore >= meanScore:
print('CAUTION')
else:
print('PASS')
print()
if mode == 1:
# label post with 1-10 score
cleanSize = len(cleanScore)
binSize = cleanSize/10
threshold = binSize
labelScore = 10
for count, item in enumerate(cleanData):
if count <= threshold or labelScore == 1:
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
hashtagOutput = 'NONE' if hashtagOutput == '' else hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
mentionsOutput = 'NONE' if mentionsOutput == '' else mentionsOutput[:-1]
try:
totalDisplayFile.write(brand+' | '+str(labelScore)+' | '+day+' | '+hour+' | '+item['content']+' | '+str(item['id'])+' | '+hashtagOutput+' | '+mentionsOutput+'\n')
item['label'] = labelScore
totalOutputFile.write(json.dumps(item)+'\n')
except Exception:
print(item['content'])
else:
#print(threshold)
threshold += binSize
labelScore -= 1
elif mode == 2:
# binary label (0, 1)
cleanSize = len(cleanScore)
for count, item in enumerate(cleanData):
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
if count <= 0.5 * cleanSize:
labelScore = 1
else:
labelScore = 0
item['label'] = labelScore
totalOutputFile.write(json.dumps(item) + '\n')
try:
totalDisplayFile.write(
brand + ' | ' + str(labelScore) + ' | ' + day + ' | ' + hour + ' | ' + item['content'] + ' | ' + str(
item['id']) + ' | ' + hashtagOutput + ' | ' + mentionsOutput + '\n')
except Exception:
print(item['content'])
else:
# label with normalized scores
scoreDistFile = open('dataset/stats/scoreDist.'+brand, 'w')
for index, normalScore in enumerate(normalScores):
item = cleanData[index]
score = normalScore * 10
scoreDistFile.write(str(score)+'\n')
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
try:
totalDisplayFile.write(brand+' | '+str(score)+' | '+day+' | '+hour+' | '+item['content']+' | '+str(item['id'])+' | '+hashtagOutput+' | '+mentionsOutput+'\n')
item['label'] = score
totalOutputFile.write(json.dumps(item)+'\n')
except Exception:
print(item['content'])
scoreDistFile.close()
hashtagFile = open('dataset/commTweets/hashtag.list', 'w')
mentionFile = open('dataset/commTweets/mention.list', 'w')
for ht in hashtagList:
hashtagFile.write(ht+'\n')
for ment in mentionList:
mentionFile.write(ment+'\n')
hashtagFile.close()
mentionFile.close()
totalOutputFile.close()
def groupSampler(groupMode, groupSize, seed):
print(groupMode)
inputFile = open('dataset/experiment/labeled_data/' + groupMode + '_' + str(groupSize) + '.labeled', 'r')
groupData = {}
for num in range(int(groupSize)):
groupData[num] = {}
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
text = data['content'].replace('\n', ' ').replace('\r', ' ')
group = data['group']
groupData[group][tweetID] = text
inputFile.close()
outputFile = open('dataset/experiment/sample/' + groupMode + '_' + str(groupSize) + '.sample', 'w')
for groupIndex in range(int(groupSize)):
outputFile.write('Group: ' + str(groupIndex)+'\n')
print(len(groupData[groupIndex]))
for count, tweetID in enumerate(groupData[groupIndex]):
if count % seed == 0:
outputFile.write(groupData[groupIndex][tweetID]+'\t'+str(tweetID)+'\n')
outputFile.close()
def brandLabel(removeOutliers=True):
if removeOutliers:
totalOutputFile = open('dataset/experiment/brandGroup_0.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/brandGroup_0.content', 'w')
statFile = open('dataset/analysis/brandGroup_0.stat', 'w')
else:
totalOutputFile = open('dataset/experiment/brandGroup_0__full' + '.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/brandGroup_0__full' + '.content', 'w')
statFile = open('dataset/analysis/brandGroup_0_full' + '.stat', 'w')
totalData = {}
brandGroupData = {}
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
text = data['text']
filtered = False
for term in filterTerms:
if term in text.lower():
filtered = True
break
if not filtered:
brand = data['brand']
if brand not in brandGroupData:
brandGroupData[brand] = []
brandGroupData[brand].append(tweetID)
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic']) - 1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite / retweet
statFile.write(
str(favorite) + '\t' + str(retweet) + '\t' + str(followers) + '\t' + str(ratio) + '\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
successScore = (2.0 * retweet + favorite) * 10000 / followers
temp = {'brand': brand, 'content': content, 'success_score': successScore, 'id': tweetID, 'day': day,
'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count,
'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers}
totalData[tweetID] = temp
inputFile.close()
for brand, tweetIDs in brandGroupData.items():
print('Brand: ' + brand)
groupScoreList = []
IDList = []
for tweetID in tweetIDs:
if tweetID in totalData:
successScore = totalData[tweetID]['success_score']
groupScoreList.append(successScore)
IDList.append(tweetID)
cleanDataList = []
if removeOutliers:
zScores = stats.zscore(groupScoreList)
if len(zScores) != len(groupScoreList):
print('Z-score Error!')
for index, item in enumerate(IDList):
if removeOutliers:
zScore = float(zScores[index])
if zScore <= 2:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
else:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
print('Group Size: ' + str(len(cleanDataList)))
sorted_cleanDataList = sorted(cleanDataList, key=lambda x: x['success_score'], reverse=True)
# label post with 1-10 score
cleanSize = len(cleanDataList)
binSize = cleanSize / 10
threshold = binSize
labelScore = 10
for count, item in enumerate(sorted_cleanDataList):
tweetID = item['id']
if count <= threshold or labelScore == 1:
tempData = totalData[tweetID]
tempData['label'] = labelScore
tempData['group'] = brand
totalOutputFile.write(json.dumps(tempData) + '\n')
contentOutputFile.write(tempData['content']+'\n')
else:
#print threshold
threshold += binSize
labelScore -= 1
statFile.close()
totalOutputFile.close()
contentOutputFile.close()
def groupLabel(groupMode, groupSize, removeOutliers=True):
groupFile = open('dataset/experiment/group_indicies/'+groupMode+'.'+str(groupSize), 'r')
for line in groupFile:
groupData = json.loads(line.strip())
groupFile.close()
if removeOutliers:
totalOutputFile = open('dataset/experiment/labeled_data/'+groupMode+'_'+str(groupSize)+'.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/'+groupMode+'_'+str(groupSize)+'.content', 'w')
statFile = open('dataset/analysis/'+groupMode+'_'+str(groupSize)+'.stat', 'w')
else:
totalOutputFile = open('dataset/experiment/labeled_data/' + groupMode + '_' + str(groupSize) + '_full' + '.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/' + groupMode + '_' + str(groupSize) + '_full' + '.content', 'w')
statFile = open('dataset/analysis/' + groupMode + '_' + str(groupSize) + '_full' + '.stat', 'w')
totalData = {}
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
text = data['text']
filtered = False
for term in filterTerms:
if term in text.lower():
filtered = True
break
if not filtered:
brand = data['brand']
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic']) - 1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite / retweet
statFile.write(
str(favorite) + '\t' + str(retweet) + '\t' + str(followers) + '\t' + str(ratio) + '\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
successScore = (2.0 * retweet + favorite) * 10000 / followers
temp = {'brand': brand, 'content': content, 'success_score': successScore, 'id': tweetID, 'day': day,
'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count,
'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers}
totalData[tweetID] = temp
inputFile.close()
for groupIndex in range(int(groupSize)):
print(groupMode+': ' + str(groupIndex))
groupScoreList = []
IDList = []
for tweetID in groupData[str(groupIndex)]:
if tweetID in totalData:
successScore = totalData[tweetID]['success_score']
groupScoreList.append(successScore)
IDList.append(tweetID)
cleanDataList = []
if removeOutliers:
zScores = stats.zscore(groupScoreList)
if len(zScores) != len(groupScoreList):
print('Z-score Error!')
for index, item in enumerate(IDList):
if removeOutliers:
zScore = float(zScores[index])
if zScore <= 2:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
else:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
print('Group Size: ' + str(len(cleanDataList)))
sorted_cleanDataList = sorted(cleanDataList, key=lambda x: x['success_score'], reverse=True)
# label post with 1-10 score
cleanSize = len(cleanDataList)
binSize = cleanSize / 10
threshold = binSize
labelScore = 10
for count, item in enumerate(sorted_cleanDataList):
tweetID = item['id']
if count <= threshold or labelScore == 1:
tempData = totalData[tweetID]
tempData['label'] = labelScore
tempData['group'] = groupIndex
totalOutputFile.write(json.dumps(tempData) + '\n')
contentOutputFile.write(tempData['content']+'\n')
else:
#print threshold
threshold += binSize
labelScore -= 1
statFile.close()
totalOutputFile.close()
contentOutputFile.close()
def simpleLabel(groupVersion, removeOutliers=True):
if removeOutliers:
totalOutputFile = open('dataset/experiment/labeled_data/simple_'+str(groupVersion)+'.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/simple_'+str(groupVersion)+'.content', 'w')
statFile = open('dataset/analysis/simple_'+str(groupVersion)+'.stat', 'w')
else:
totalOutputFile = open('dataset/experiment/labeled_data/simple_'+str(groupVersion)+'_full.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/simple_'+str(groupVersion)+'_full.content', 'w')
statFile = open('dataset/analysis/simple_'+str(groupVersion)+'_full.stat', 'w')
totalData = {}
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
text = data['text']
filtered = False
for term in filterTerms:
if term in text.lower():
filtered = True
break
if not filtered:
brand = data['brand']
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic']) - 1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite / retweet
statFile.write(
str(favorite) + '\t' + str(retweet) + '\t' + str(followers) + '\t' + str(ratio) + '\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
successScore = (2.0 * retweet + favorite) * 10000 / followers
temp = {'brand': brand, 'content': content, 'success_score': successScore, 'id': tweetID, 'day': day,
'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count,
'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers}
totalData[tweetID] = temp
inputFile.close()
groupScoreList = []
IDList = []
for tweetID in totalData:
successScore = totalData[tweetID]['success_score']
groupScoreList.append(successScore)
IDList.append(tweetID)
cleanDataList = []
if removeOutliers:
zScores = stats.zscore(groupScoreList)
if len(zScores) != len(groupScoreList):
print('Z-score Error!')
for index, item in enumerate(IDList):
if removeOutliers:
zScore = float(zScores[index])
if zScore <= 2:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
else:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
print('Group Size: ' + str(len(cleanDataList)))
sorted_cleanDataList = sorted(cleanDataList, key=lambda x: x['success_score'], reverse=True)
# label post with 1-10 score
cleanSize = len(cleanDataList)
binSize = cleanSize / 10
threshold = binSize
labelScore = 10
for count, item in enumerate(sorted_cleanDataList):
tweetID = item['id']
if count <= threshold or labelScore == 1:
tempData = totalData[tweetID]
tempData['label'] = labelScore
tempData['group'] = 0
totalOutputFile.write(json.dumps(tempData) + '\n')
contentOutputFile.write(tempData['content']+'\n')
else:
#print threshold
threshold += binSize
labelScore -= 1
statFile.close()
totalOutputFile.close()
contentOutputFile.close()
def keywordLabel(keyword):
outputFile = open('dataset/experiment/'+keyword+'.labeled', 'w')
statFile = open('dataset/analysis/'+keyword+'.stat', 'w')
tweetData = {}
dataList = []
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
text = data['text']
if keyword in text.lower():
brand = data['brand']
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic']) - 1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite / retweet
statFile.write(
str(favorite) + '\t' + str(retweet) + '\t' + str(followers) + '\t' + str(ratio) + '\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
successScore = (2.0 * retweet + favorite) * 10000 / followers
temp = {'brand': brand, 'content': content, 'success_score': successScore, 'id': tweetID, 'day': day,
'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count,
'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers}
tweetData[tweetID] = temp
dataList.append({'id': tweetID, 'success_score': successScore})
inputFile.close()
print(len(dataList))
sorted_dataList = sorted(dataList, key=lambda x: x['success_score'], reverse=True)
# label post with 1-10 score
dataSize = len(dataList)
binSize = dataSize / 10
threshold = binSize
labelScore = 10
for count, item in enumerate(sorted_dataList):
tweetID = item['id']
if count <= threshold or labelScore == 1:
tempData = tweetData[tweetID]
tempData['label'] = labelScore
tempData['keyword'] = keyword
outputFile.write(json.dumps(tempData) + '\n')
else:
threshold += binSize
labelScore -= 1
statFile.close()
outputFile.close()
def scoreFileBlender():
data = []
listFile = open('brand.list', 'r')
for line in listFile:
brand = line.strip()
inputFile = open('dataset/stats/scoreDist.' + brand, 'r')
for line in inputFile:
data.append(float(line.strip()))
inputFile.close()
listFile.close()
sorted_data = sorted(data, reverse=True)
outputFile = open('dataset/stats/scoreDist.total', 'w')
for num in sorted_data:
outputFile.write(str(num)+'\n')
outputFile.close()
def maxIndex(input, num):
line = {}
for index in range(len(input)):
line[index] = float(input[index])
sorted_line = sorted(line.items(), key=operator.itemgetter(1), reverse=True)
output = []
for i in range(num):
output.append(sorted_line[i][0])
return output
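# Illustrative example: maxIndex(['0.1', '0.7', '0.2'], 2) -> [1, 2],
# i.e. the indices of the two largest values, in descending order.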
def dataGrouper(groupMode, groupSize, hierarchical=False):
inputFile = open('dataset/experiment/total.json', 'r')
tweetData = []
outputData = {}
for index in range(int(groupSize)):
outputData[str(index)] = []
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
text = data['text']
content = text.replace('\r', ' ').replace('\n', ' ')
brand = data['brand']
tweetData.append({'id': tweetID, 'content': content, 'brand': brand})
inputFile.close()
if groupMode == 'brandGroup':
print('running brand grouping...')
brandMapper = {}
groupFile = open('brandGroup.list', 'r')
for index, line in enumerate(groupFile):
brands = line.strip().split()
for brand in brands:
brandMapper[brand] = str(index)
groupFile.close()
for tweet in tweetData:
if tweet['brand'] in brandMapper:
outputData[brandMapper[tweet['brand']]].append(tweet['id'])
elif groupMode == 'topicGroup':
print('running LDA grouping...')
csvFile = open('TMT/LDAinput.csv', 'w')
for tweet in tweetData:
csvFile.write(tweetTextCleaner.tweetCleaner(tweet['content']).replace('"', '\'') + '\n')
csvFile.close()
subprocess.check_output('java -Xmx1024m -jar TMT/tmt-0.4.0.jar TMT/assign.scala', shell=True)
distFile = open('TMTSnapshots/document-topic-distributions.csv', 'r')
topicOut = {}
for line in distFile:
seg = line.strip().split(',')
if seg[1] != 'NaN':
topicOutList = maxIndex(seg[1:], int(groupSize))
topicOut[int(seg[0])] = topicOutList
distFile.close()
for index, value in topicOut.items():
outputData[str(value[0])].append(tweetData[index]['id'])
elif groupMode == 'simGroup_binary':
print('running kmeans clustering with binary representation...')
tweets = []
for tweet in tweetData:
tweets.append(tweetTextCleaner.tweetCleaner(tweet['content']))
vectorizer = CountVectorizer(analyzer='word', ngram_range=(1, 1), min_df=1, stop_words='english', binary=True)
matrix = vectorizer.fit_transform(tweets)
print(matrix.shape)
if hierarchical:
print()
#z = cluster.hierarchy.linkage(matrix, 'ward')
else:
kmeans = cluster.KMeans(n_clusters=int(groupSize), init='k-means++')
kmeans.fit(matrix)
for index, label in enumerate(kmeans.labels_):
outputData[str(label)].append(tweetData[index]['id'])
elif groupMode == 'simGroup_emb':
print('running kmeans clustering with CMU encoding...')
'''
contentFile = open('embedding/CMU_hashtag/tweet.content', 'w')
for tweet in tweetData:
contentFile.write(tweet['content']+'\n')
contentFile.close()
subprocess.check_output('python embedding/CMU_hashtag/preprocess.py', shell=True)
subprocess.check_output('python embedding/CMU_hashtag/encode_char.py embedding/CMU_hashtag/tweet.input embedding/CMU_hashtag/best_model embedding/CMU_hashtag/', shell=True)
'''
embData = numpy.load('embedding/CMU_hashtag/embeddings.npy')
print(len(embData))
if hierarchical:
print()
else:
kmeans = cluster.KMeans(n_clusters=int(groupSize), init='k-means++')
kmeans.fit(embData)
for index, label in enumerate(kmeans.labels_):
outputData[str(label)].append(tweetData[index]['id'])
outputFile = open('dataset/experiment/group_indicies/'+groupMode + '.' + str(groupSize), 'w')
outputFile.write(json.dumps(outputData))
outputFile.close()
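# Example call (hypothetical group size): dataGrouper('brandGroup', 5)
# writes the grouped tweet-id lists to
# dataset/experiment/group_indicies/brandGroup.5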
'''
def content2vec(model, content):
words = simpleTokenize(content)
tempList = []
for word in words:
if word in model.vocab:
tempList.append(model[word])
if len(tempList) < 1:
return numpy.zeros(400)
vecSize = len(tempList[0])
sumList = []
for i in range(vecSize):
sumList.append(0.0)
for vec in tempList:
for i in range(vecSize):
sumList[i] += vec[i]
output = []
dataSize = len(tempList)
for value in sumList:
output.append(value/dataSize)
return numpy.array(output)
'''
'''
def dataGrouperKey(groupMode, groupSize):
keyData = {}
keyFile = open('dataset/experiment/parser/total.key', 'r')
for line in keyFile:
if line.strip().startswith(':: '):
keyData[int(line.strip().replace(':: ', ''))] = 'NONE'
else:
temp = line.strip().split(' :: ')
keyData[int(temp[1])] = temp[0]
keyFile.close()
inputFile = open('dataset/experiment/total.json', 'r')
tweetData = []
outputData = {}
for index in range(int(groupSize)):
outputData[str(index)] = []
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
text = data['text']
key = keyData[tweetID]
content = text.replace('\r', ' ').replace('\n', ' ')
brand = data['brand']
tweetData.append({'id': tweetID, 'content': content, 'brand': brand, 'key': key})
inputFile.close()
if groupMode == 'topicGroup':
print('running LDA grouping...')
csvFile = open('TMT/LDAinput.csv', 'w')
for tweet in tweetData:
csvFile.write(tweet['key'].replace('"', '\'') + '\n')
csvFile.close()
subprocess.check_output('java -Xmx1024m -jar TMT/tmt-0.4.0.jar TMT/assign.scala', shell=True)
distFile = open('TMTSnapshots/document-topic-distributions.csv', 'r')
topicOut = {}
for line in distFile:
seg = line.strip().split(',')
if seg[1] != 'NaN':
topicOutList = maxIndex(seg[1:], int(groupSize))
topicOut[int(seg[0])] = topicOutList
distFile.close()
for index, value in topicOut.items():
outputData[str(value[0])].append(tweetData[index]['id'])
elif groupMode == 'simGroup_binary':
print('running kmeans clustering with binary representation...')
tweets = []
for tweet in tweetData:
tweets.append(tweet['key'])
vectorizer = CountVectorizer(analyzer='word', ngram_range=(1, 1), min_df=1, stop_words='english', binary=True)
matrix = vectorizer.fit_transform(tweets)
print(matrix.shape)
kmeans = cluster.KMeans(n_clusters=int(groupSize), init='k-means++')
kmeans.fit(matrix)
for index, label in enumerate(kmeans.labels_):
outputData[str(label)].append(tweetData[index]['id'])
elif groupMode == 'simGroup_emb':
w2v = word2vecReader.Word2Vec()
embModel = w2v.loadModel()
contents = []
for tweet in tweetData:
tweetVec = content2vec(embModel, tweet['key'])
contents.append(tweetVec)
matrix = numpy.array(contents)
print(matrix.shape)
kmeans = cluster.KMeans(n_clusters=int(groupSize), init='k-means++')
kmeans.fit(matrix)
for index, label in enumerate(kmeans.labels_):
outputData[str(label)].append(tweetData[index]['id'])
outputFile = open('dataset/experiment/group_indicies/' + groupMode + '.' + str(groupSize), 'w')
outputFile.write(json.dumps(outputData))
outputFile.close()
'''
def dataAligner(groupMode, groupSize):
tweetData = {}
inputDataFile = open('dataset/experiment/'+groupMode+'_'+str(groupSize)+'.labeled', 'r')
for line in inputDataFile:
temp = json.loads(line.strip())
tweetData[str(temp['id'])] = temp['label']
orderTweetIDList = []
cleanDataFile = open('dataset/experiment/clean.labeled', 'r')
for line in cleanDataFile:
temp = json.loads(line.strip())
orderTweetIDList.append(temp['id'])
if __name__ == "__main__":
label_new(1, 'dataset/commTweets.json')
#label2(1)
#scoreFileBlender()
#dataGrouper('topicGroup', 7.2)
#dataGrouperKey('topicGroup', 2.4)
#groupLabel('topicGroup', 2.4, True)
#simpleLabel(1.1, True)
#groupSampler('simGroup_emb', 5.4, 300)
#groupSampler('topicGroup', 2.2, 3000)
#groupSampler('topicGroup', 2.1, 1000)
#groupSampler('topicGroup', 2.2, 1000)
#brandLabel()
#keywordLabel('trump')
#keywordLabel('iphone')
|
nilq/baby-python
|
python
|
import pytest
from collections import Counter
from asttools import (
quick_parse,
)
from ..pattern_match import (
pattern,
UnhandledPatternError,
config_from_subscript,
split_case_return
)
class Hello:
def __init__(self, greeting):
self.greeting = greeting
class Unhandled:
def __repr__(self):
return 'Unhandled'
def test_single_pattern():
@pattern
def pat(val):
meta[match: val] # noqa: F821
~ 'dale' | "DALE"
~ 'list' | []
~ str | val
~ int | 'int'+str(val)
~ Hello | val.greeting
~ default | 'default_' + str(val) # noqa: F821
obj = Hello("Welcome Friend")
assert pat(obj) == "Welcome Friend"
assert pat('dale') == "DALE"
assert pat('some_string') == "some_string"
assert pat(101) == "int101"
assert pat('list') == []
assert pat(Unhandled()) == 'default_Unhandled'
def test_multi_return():
@pattern
def multi_return(x):
meta[match: x] # noqa: F821
~ float | type(x), x, x
~ int | type(x), x
assert multi_return(1) == (int, 1)
assert multi_return(1.1) == (float, 1.1, 1.1)
def test_when():
@pattern
def multi_return(x):
meta[match: x] # noqa: F821
~ float [when: x > 1] | type(x), x, x # noqa: F821, E211
~ int [when: x > 100 and x < 150] | x, 'Between 100 and 150' # noqa: F821, E211, E501
~ int [when: x > 10] | 'INT OVER 10' # noqa: F821, E211
~ int | type(x), x
assert multi_return(1) == (int, 1)
assert multi_return(11) == "INT OVER 10"
assert multi_return(122) == (122, "Between 100 and 150")
assert multi_return(1.1) == (float, 1.1, 1.1)
with pytest.raises(UnhandledPatternError):
assert multi_return(0.1) == (float, 1.1, 1.1)
def test_config_from_subscript():
node = quick_parse("bob[match: x]").value
meta = config_from_subscript(node)
assert meta['match'][0].id == 'x'
assert Counter(list(meta)) == Counter(['match'])
node = quick_parse("bob[match: x, second: 1]").value
meta = config_from_subscript(node)
assert meta['match'][0].id == 'x'
assert meta['second'][0].n == 1
assert Counter(list(meta)) == Counter(['match', 'second'])
node = quick_parse("bob[match: x, y, second: 1]").value
meta = config_from_subscript(node)
assert meta['match'][0].id == 'x'
assert meta['match'][1].id == 'y'
assert meta['second'][0].n == 1
assert Counter(list(meta)) == Counter(['match', 'second'])
def test_split_case_return():
node = quick_parse("~ x | type(x), y").value
case_nodes, return_nodes = split_case_return(node)
assert len(case_nodes) == 1
assert len(return_nodes) == 2
def test_multi_pattern():
@pattern
def multi(x, y):
meta[match: x, y] # noqa: F821
~ float, 3 | type(x), x, y
~ int, 3 | type(x), x, 'int'
~ int, int | 'INT'
assert multi(1, 2) == 'INT'
assert multi(1, 3) == (int, 1, 'int')
assert multi(1.0, 3) == (float, 1, 3)
def test_pattern_match_doc():
# should ignore doc string.
@pattern
def docstring(x, y):
"""
doc string
"""
meta[match: x, y] # noqa: F821
_missing = object()
def test_pattern_match_object():
# test again object() sentinels
@pattern
def match(x):
meta[match: x] # noqa: F821
~ _missing | "MISSING"
~ default | x # noqa: F821
assert match(_missing) == "MISSING"
assert match(100) == 100
@pattern
def multimatch(x, y):
meta[match: x, y] # noqa: F821
~ 1, _missing | x, "MISSING"
~ default | x, y # noqa: F821
assert multimatch(1, _missing) == (1, "MISSING")
assert multimatch(_missing, 100) == (_missing, 100)
|
nilq/baby-python
|
python
|
import math
from functools import reduce
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import display
from matplotlib.dates import DateFormatter
from scipy.stats import linregress
from utils import get_vlines, fmt_number, fmt_pct
class CovidDataViz(object):
"""
A class to make plots from processed COVID-19 and World Bank data.
"""
def __init__(self, path='../data/processed'):
self.path = path
self.data = dict()
self.data['Confirmed'] = pd.read_csv(f'{path}/confirmed_cases.csv')
self.data['Confirmed chg'] = pd.read_csv(f'{path}/confirmed_cases_daily_change.csv')
self.data['Confirmed t0'] = pd.read_csv(f'{path}/confirmed_cases_since_t0.csv')
self.data['Recovered'] = pd.read_csv(f'{path}/recovered_cases.csv')
self.data['Dead'] = pd.read_csv(f'{path}/dead_cases.csv')
self.data['Active'] = pd.read_csv(f'{path}/active_cases.csv')
self.data['Mortality'] = pd.read_csv(f'{path}/mortality_rate.csv')
self.data['Coordinates'] = pd.read_csv(f'{path}/coordinates.csv')
self.data['Continents'] = pd.read_csv(f'{path}/continents.csv')
self.data['Ctry to cont'] = pd.read_csv(f'{path}/country_to_continent.csv')
self.data['Country stats'] = pd.read_csv(f'{path}/country_stats.csv')
self.data['World bank'] = pd.read_csv(f'{path}/world_bank.csv')
for _, df in self.data.items():
if 'Date' in df.columns:
df['Date'] = pd.to_datetime(df['Date'])
self.all_countries = sorted(set(self.data['Coordinates']['Country']))
self.all_continents = sorted(set(self.data['Continents']['Continent']))
def list_highest_mortality(self, n=10):
"""
Generate a list of countries with the highest mortality rate.
Notes
-----
mortality = dead / confirmed.
"""
df = self._sort_ctry_stats(stat_name='Mortality', n=n)
return df
def get_country_ts(self, country):
"""
Extract country level cases time series.
"""
dfs = [self.data['Confirmed'][['Date', country]],
self.data['Recovered'][['Date', country]],
self.data['Dead'][['Date', country]],
self.data['Active'][['Date', country]]]
df = reduce(lambda x, y: pd.merge(x, y, on='Date', how='outer'), dfs)
df.columns = ['Date', 'Confirmed', 'Recovered', 'Dead', 'Active']
return df
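# Usage sketch (hypothetical country name):
# cdv = CovidDataViz()
# cdv.get_country_ts('Poland')  # columns: Date, Confirmed, Recovered, Dead, Active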
def get_continent_ts(self, continent):
"""
Get continent level cases time series.
"""
cont = self.data['Continents'].copy()
cont = cont[cont['Continent'] == continent]
cont = pd.merge(self.data['Coordinates'], cont, on='Country')
countries = sorted(list(cont['Country']))
cases = ['Confirmed', 'Recovered', 'Dead', 'Active']
dfs = []
for c in cases:
tmp = self.data[c][countries].sum(axis=1)
tmp.name = c
tmp = tmp.to_frame()
tmp['Date'] = self.data[c]['Date']
dfs.append(tmp)
df = reduce(lambda x, y: pd.merge(x, y, on='Date', how='outer'), dfs)
df = df[['Date'] + cases]
return df
def get_world_ts(self):
"""
Get world level cases time series.
"""
cases = ['Confirmed', 'Recovered', 'Dead', 'Active']
dfs = []
for case in cases:
tmp = self.data[case].drop('Date', axis=1).sum(axis=1)
tmp.name = case
tmp = tmp.to_frame()
tmp['Date'] = self.data[case]['Date']
dfs.append(tmp)
df = reduce(lambda x, y: pd.merge(x, y, on='Date', how='outer'), dfs)
return df
def get_highest_mortality(self, n_countries, min_cases=10 ** 4):
"""
List countries with the highest mortality rate.
"""
df = self.data['Country stats']
df = df[df['Confirmed'] > min_cases]
df = df.sort_values('Mortality', ascending=False).copy()
df = df.reset_index(drop=True)
df = df.head(n_countries)
df = df[['Country', 'Mortality']]
return df
def get_most_cases(self, case_type, n=10):
"""
Get n countries with most cases.
"""
df = self._sort_ctry_stats(stat_name=case_type, n=n)
return df
def plot_world_cases(self):
"""
Create world cases line plot.
"""
df = self.get_world_ts()
self.plot_ts(df=df, title='World', suffix='cases')
def plot_country_cases(self, country):
"""
Create individual country cases line plot.
"""
df = self.get_country_ts(country=country)
self.plot_ts(df, country, 'cases')
def plot_continent_cases(self, continent):
"""
Create continent cases line plot.
"""
df = self.get_continent_ts(continent=continent)
self.plot_ts(df, continent, 'cases')
def plot_ts(self, df, title, suffix):
"""
Draw individual time series as a line plot.
Inputs
------
df : pd.DataFrame
A dataframe with a `Date` column and cases data.
title : str
The title of the plot.
suffix : str
Suffix appended to the output file name.
Notes
-----
This will create a time series plot of cases and
save it to ../img/{title}_{suffix}.png (title lowercased).
"""
# Set proper aspect ratio and dpi
width = 1000
height = width / 1.78
dpi = 300
fontsize = 3
fontfamily = 'serif'
plt.figure(figsize=(width/dpi, height/dpi), dpi=dpi)
ax = plt.subplot(111)
# Extend x axis so that labels fit inside the plot
extend_x_axis = pd.Timedelta('7 days')
# Extend the y axis by 4% to make space between
# plot and title
extend_y_axis = 0.04
# Disable spines
ax.spines['top'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
# Set spine width
ax.spines['left'].set_linewidth(1/5)
ax.spines['bottom'].set_linewidth(1/5)
# Force ticks to bottom left
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Get min and max values to set limits so that all
# points fit inside the plot.
xmin = df['Date'].min()
xmax = df['Date'].max() + extend_x_axis
ymin = df.drop(['Date'], axis=1).min().min()
ymax = df.drop(['Date'], axis=1).max().max()
yticks, ylabels = get_vlines(ymin, ymax, k=5)
plt.yticks(ticks=yticks, labels=ylabels,
fontsize=fontsize, family=fontfamily)
plt.xticks(fontsize=fontsize, family=fontfamily)
# Display label of every other month
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m'))
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=2))
# Plot horizontal greyed out lines so that people can
# actually see the data without squinting
for y_val in yticks:
ax.plot(df['Date'], np.full((len(df), 1), y_val), c='black',
linestyle='dashed', linewidth=1/6, alpha=3/10)
# Use colours from ColorBrewer.
colours = ['#d7191c', '#fdae61', '#a6d96a', '#1a9641']
# Extract list of columns in alphabetical order
cols = sorted(df.drop('Date', axis=1).columns)
# Plot the actual data
for col,c in zip(cols, colours):
# Line plot
ax.plot(df['Date'], df[col], linewidth=1/3, alpha=9/10, c=c)
# Plot marker at end of x axis
x = df['Date'].tail(1)
y = df[col].tail(1)
ax.scatter(x=x, y=y, linewidth=1/3, c=c, marker='.', alpha=9/10)
# Plot label outside plot
ax.text(x=df['Date'].tail(1) + pd.Timedelta('7 days'),
y=df[col].tail(1), s=col, fontsize=fontsize, c=c,
family=fontfamily, horizontalalignment='left',
verticalalignment='center')
# Display title left aligned to y axis
plt.title(label=title, fontsize=fontsize + 1, family=fontfamily,
weight='bold', loc='center')
# Set plot limits; y starts at 0 and the top is
# extended by extend_y_axis so curves clear the frame
plt.xlim(xmin, xmax)
plt.ylim(0, (1 + extend_y_axis) * ymax)
plt.tick_params(axis='both', which='both',
bottom=False, top=False,
labelbottom='on', left=False,
right=False, labelleft='on')
plt.tight_layout()
plt.savefig(f'../img/{title.lower()}_{suffix}.png', bbox_inches='tight')
def plot_highest_country_stats(self, statistic, n=10):
"""
Bar plot of countries with the most cases of a certain type.
"""
df = self.get_most_cases(case_type=statistic)
df.loc[df['Country'] == 'United Kingdom', 'Country'] = 'UK'
# Set proper aspect ratio and dpi
width = 1000
height = width / 1.33
dpi = 300
fontsize = 3
fontfamily = 'serif'
plt.figure(figsize=(width/dpi, height/dpi), dpi=dpi)
ax = plt.subplot(111)
# Spines
ax.spines['top'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(1/5)
ax.spines['bottom'].set_linewidth(1/5)
# Plot
x = df['Country']
y = df[statistic]
ax.bar(x=x, height=y, width=1/2)
# Ticks
plt.xticks(rotation=90, fontsize=fontsize, family=fontfamily)
if statistic == 'Mortality':
ymin, ymax = math.floor(y.min()), y.max()
yticks, ylabels = get_vlines(ymin, ymax, k=5, shift=ymin)
ylabels = [lab+'%' for lab in ylabels]
bar_labels = [ fmt_pct(y) for y in list(df[statistic]) ]
else:
ymin, ymax = 0, y.max()
yticks, ylabels = get_vlines(ymin, ymax, k=5, shift=0)
bar_labels = [ fmt_number(y) for y in list(df[statistic]) ]
plt.tick_params(axis='both', which='both',
bottom=False, top=False,
labelbottom='on', left=False,
right=False, labelleft='on')
plt.yticks(ticks=yticks, labels=ylabels,
fontsize=fontsize, family=fontfamily)
ax.tick_params(width=1/5, color='black')
# Limits
plt.xlim(-1/2, len(df) - 1/2)
plt.ylim(ymin, ymax + (0.02 * ymax))
# Horizontal lines
for y_val in yticks:
ax.plot(np.linspace(-1, len(x), 1000), np.full((1000, 1), y_val), c='black',
linestyle='dashed', linewidth=1/5, alpha=3/10)
# Annotations
rects = ax.patches
for rect, label in zip(rects, bar_labels):
height = rect.get_height()
ax.text(x=rect.get_x() + rect.get_width() / 2,
y=height + (0.02 * ymax), s=label, ha='center', va='bottom',
fontsize=fontsize, family=fontfamily)
# Labels
if statistic == 'Mortality':
plt.ylabel('Mortality rate in percent', fontsize=fontsize, family=fontfamily)
else:
plt.ylabel('Number of cases', fontsize=fontsize, family=fontfamily)
# Title
plt.title(label=f'{statistic}', fontsize=fontsize + 1,
family=fontfamily, weight='bold', loc='center')
plt.tight_layout()
plt.savefig(fname=f'../img/{statistic.lower()}_cases_most.png',
bbox_inches='tight')
plt.show()
def plot_growth(self, countries, periods, steps=60, save=False):
"""
Plot growth curves, log scale.
Inputs
------
countries : list
List of countries
periods : list of ints
Doubling periods for growth curves.
steps : int
Number of data points to use.
"""
countries = sorted(countries)
# Extract mean and use as starting point for
# exponential growth curves.
a = self.data['Confirmed t0'].mean(axis=1)[0]
b = 2
# List of growth curves
growth = list()
for period in periods:
g = exp_growth(a=a,
b=b,
t=np.arange(steps),
tau=period)
g = np.log(g)
growth.append(list(g))
# Plot
# Set proper aspect ratio and dpi
width = 1000
height = width / 1.33
dpi = 300
fontsize = 3
fontfamily = 'serif'
plt.figure(figsize=(width/dpi, height/dpi), dpi=dpi)
ax = plt.subplot(111)
ymax = 0
for g,p in zip(growth, periods):
# Draw growth curves
ax.plot(range(steps), g, c='grey', linestyle='dashed',
lw=1/3, alpha=1/2)
if p == 1:
s = 'Double every day'
else:
s = f'Double every {p} days'
# Draw marker
x = steps - 1
y = g[steps - 1]
ax.scatter(x=x, y=y, linewidth=1/12, c='grey', alpha=1/2, marker='.')
# Draw text outside
x = steps
y = g[steps - 1]
ax.text(x=x, y=y, s=s, alpha=1, fontsize=fontsize, c='grey',
family=fontfamily, horizontalalignment='left',
verticalalignment='center', rotation_mode='anchor')
if g[-1] >= ymax:
ymax = g[-1]
# Spines
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# Draw country level data
plot_df = self.data['Confirmed t0'][countries].head(steps)
for c in countries:
ax.plot(range(len(plot_df)), np.log(plot_df[c]), label=c, lw=1/3)
# Ticks
plt.xticks(fontsize=fontsize, family=fontfamily)
plt.yticks(fontsize=fontsize, family=fontfamily)
plt.tick_params(axis='both', which='both',
bottom=False, top=False,
labelbottom='on', left=False,
right=False, labelleft='on')
# Spines
for axis in ['top', 'bottom','left', 'right']:
ax.spines[axis].set_linewidth(1/5)
# Limits
plt.xlim(0, steps)
plt.ylim(np.log(a), ymax + 1/2)
# Legend
legend = ax.legend(loc='upper left',
fancybox=False, prop={'family': fontfamily,
'size': fontsize})
legend.get_frame().set_linewidth(1/5)
legend.get_frame().set_edgecolor('black')
# Labels
plt.ylabel(ylabel='Confirmed cases, log scale', fontsize=fontsize,
family=fontfamily)
plt.xlabel(xlabel='Days since 100 cases', fontsize=fontsize,
family=fontfamily)
plt.title(label='Doubling rate', fontsize=fontsize + 1, family=fontfamily,
weight='bold', loc='center')
plt.tight_layout()
if save:
plt.savefig(fname='../img/growth_plot.png', bbox_inches='tight')
plt.show()
def plot_country_cases_chg(self, country, n=7):
"""
Plot country level change in cases with n day moving average.
"""
df = self.data['Confirmed chg'][['Date', country]].copy()
df[f'{n} day average \n of new cases'] = df[country].rolling(n).mean()
df = df.drop(country, axis=1)
self.plot_ts(df=df, title=country, suffix='chg')
def plot_with_slope(self, x, y):
"""
Create scatter plot with regression line and
greyed out R squared.
"""
X = self.data['World bank'][x]
Y = self.data['World bank'][y]
X_reg = np.linspace(np.min(X), np.max(X), 1000)
# Estimate Y = aX +b
a, b, c, p, _ = linregress(X, Y)
# Get r squared
r = c * c
Y_reg = a * X_reg + b
label_reg = f'y = {round(a, 4)}x + {round(b, 4)}'
text_reg = r'$R^{2}$' + f'={round(r, 2)}'# + '\n' + r'$p$-value' + f'={round(p, 2)}'
plt.figure(figsize=(5,5))
plt.scatter(x=X, y=Y, s=4, alpha=2/3)
plt.plot(X_reg, Y_reg,
linewidth=1,
color='black',
label=label_reg)
plt.text(x=(np.min(X) + np.max(X))/2,
y=(np.min(Y) + np.max(Y))/2,
s=text_reg,
alpha=1/4,
fontsize=30,
verticalalignment='center',
horizontalalignment='center')
plt.xlabel(f'{x}')
plt.ylabel(f'{y}')
# plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
def _sort_ctry_stats(self, stat_name, min_cases=5000, n=10):
"""
Sort the dataframe of country statistics using a cutoff
of `min_cases` and return top `n` countries.
"""
df = self.data['Country stats'].copy()
df = df[df['Confirmed'] > min_cases]
df = df.sort_values(stat_name, ascending=False)
df = df.reset_index(drop=True)
df = df[['Country', stat_name]]
df = df.head(n)
return df
def show_corr_mat(self):
"""
Display a colourful correlation matrix of cases with socioeconomic factors.
"""
C = self.data['World bank'].corr()
C = C.style.background_gradient(cmap='coolwarm')
C = C.set_precision(2)
C = C.set_table_attributes('style="font-size: 13px"')
display(C)
def exp_growth(a, b, t, tau):
"""
Calculate exponential growth.
Parameters
----------
a : int
Initial value.
b : int
Growth factor.
t : int
Time.
tau : int
Time required for increase by factor of b.
Notes
-----
See https://en.wikipedia.org/wiki/Exponential_growth
for details.
"""
return a * np.power(b, t / tau)
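# Worked example: exp_growth(100, 2, t=np.arange(3), tau=1)
# -> array([100., 200., 400.]), i.e. doubling every step when tau == 1.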
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
import argparse
import usb.core
import usb.util
import array
import sys
import hashlib
import csv
from progressbar.bar import ProgressBar
class PrecursorUsb:
def __init__(self, dev):
self.dev = dev
self.RDSR = 0x05
self.RDSCUR = 0x2B
self.RDID = 0x9F
self.WREN = 0x06
self.WRDI = 0x04
self.SE4B = 0x21
self.BE4B = 0xDC
self.PP4B = 0x12
self.registers = {}
self.regions = {}
self.gitrev = ''
def register(self, name):
return int(self.registers[name], 0)
def peek(self, addr, display=False):
_dummy_s = '\x00'.encode('utf-8')
data = array.array('B', _dummy_s * 4)
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
read_data = int.from_bytes(data.tobytes(), byteorder='little', signed=False)
if display == True:
print("0x{:08x}".format(read_data))
return read_data
def poke(self, addr, wdata, check=False, display=False):
if check == True:
_dummy_s = '\x00'.encode('utf-8')
data = array.array('B', _dummy_s * 4)
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
read_data = int.from_bytes(data.tobytes(), byteorder='little', signed=False)
print("before poke: 0x{:08x}".format(read_data))
data = array.array('B', wdata.to_bytes(4, 'little'))
numwritten = self.dev.ctrl_transfer(bmRequestType=(0x00 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
if check == True:
_dummy_s = '\x00'.encode('utf-8')
data = array.array('B', _dummy_s * 4)
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
read_data = int.from_bytes(data.tobytes(), byteorder='little', signed=False)
print("after poke: 0x{:08x}".format(read_data))
if display == True:
print("wrote 0x{:08x} to 0x{:08x}".format(wdata, addr))
def burst_read(self, addr, length):
_dummy_s = '\x00'.encode('utf-8')
maxlen = 4096
ret = bytearray()
packet_count = length // maxlen
if (length % maxlen) != 0:
packet_count += 1
for pkt_num in range(packet_count):
cur_addr = addr + pkt_num * maxlen
if pkt_num == packet_count - 1:
if length % maxlen != 0:
bufsize = length % maxlen
else:
bufsize = maxlen
else:
bufsize = maxlen
data = array.array('B', _dummy_s * bufsize)
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(cur_addr & 0xffff), wIndex=((cur_addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
if numread != bufsize:
print("Burst read error: {} bytes requested, {} bytes read at 0x{:08x}".format(bufsize, numread, cur_addr))
exit(1)
ret = ret + data
return ret
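# Reads are chunked into 4096-byte control transfers; e.g. load_csrs()
# below fetches the 32 KiB csr.csv block (0x8000 bytes) as 8 packets.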
def burst_write(self, addr, data):
if len(data) == 0:
return
maxlen = 4096
packet_count = len(data) // maxlen
if (len(data) % maxlen) != 0:
packet_count += 1
for pkt_num in range(packet_count):
cur_addr = addr + pkt_num * maxlen
if pkt_num == packet_count - 1:
if len(data) % maxlen != 0:
bufsize = len(data) % maxlen
else:
bufsize = maxlen
else:
bufsize = maxlen
wdata = array.array('B', data[(pkt_num * maxlen):(pkt_num * maxlen) + bufsize])
numwritten = self.dev.ctrl_transfer(bmRequestType=(0x00 | 0x43), bRequest=0,
wValue=(cur_addr & 0xffff), wIndex=((cur_addr >> 16) & 0xffff),
data_or_wLength=wdata, timeout=500)
if numwritten != bufsize:
print("Burst write error: {} bytes requested, {} bytes written at 0x{:08x}".format(bufsize, numwritten, cur_addr))
exit(1)
def ping_wdt(self):
self.poke(self.register('wdt_watchdog'), 1, display=False)
self.poke(self.register('wdt_watchdog'), 1, display=False)
def spinor_command_value(self, exec=0, lock_reads=0, cmd_code=0, dummy_cycles=0, data_words=0, has_arg=0):
return ((exec & 1) << 1 |
(lock_reads & 1) << 24 |
(cmd_code & 0xff) << 2 |
(dummy_cycles & 0x1f) << 11 |
(data_words & 0xff) << 16 |
(has_arg & 1) << 10
)
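# Worked example: flash_rdsr(lock_reads=1) issues
# spinor_command_value(exec=1, lock_reads=1, cmd_code=0x05,
#                      dummy_cycles=4, data_words=1, has_arg=1)
# == 0x01012416 (cmd code in bits 9:2, has_arg bit 10, dummy cycles in
# bits 15:11, data words in bits 23:16, lock_reads bit 24, exec bit 1).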
def flash_rdsr(self, lock_reads):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=lock_reads, cmd_code=self.RDSR, dummy_cycles=4, data_words=1, has_arg=1)
)
return self.peek(self.register('spinor_cmd_rbk_data'), display=False)
def flash_rdscur(self):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.RDSCUR, dummy_cycles=4, data_words=1, has_arg=1)
)
return self.peek(self.register('spinor_cmd_rbk_data'), display=False)
def flash_rdid(self, offset):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, cmd_code=self.RDID, dummy_cycles=4, data_words=offset, has_arg=1)
)
return self.peek(self.register('spinor_cmd_rbk_data'), display=False)
def flash_wren(self):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.WREN)
)
def flash_wrdi(self):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.WRDI)
)
def flash_se4b(self, sector_address):
self.poke(self.register('spinor_cmd_arg'), sector_address)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.SE4B, has_arg=1)
)
def flash_be4b(self, block_address):
self.poke(self.register('spinor_cmd_arg'), block_address)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.BE4B, has_arg=1)
)
def flash_pp4b(self, address, data_bytes):
self.poke(self.register('spinor_cmd_arg'), address)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.PP4B, has_arg=1, data_words=(data_bytes//2))
)
def load_csrs(self):
LOC_CSRCSV = 0x20277000 # this address shouldn't change because it's how we figure out our version number
csr_data = self.burst_read(LOC_CSRCSV, 0x8000)
hasher = hashlib.sha512()
hasher.update(csr_data[:0x7FC0])
digest = hasher.digest()
if digest != csr_data[0x7fc0:]:
print("Could not find a valid csr.csv descriptor on the device, aborting!")
exit(1)
csr_len = int.from_bytes(csr_data[:4], 'little')
csr_extracted = csr_data[4:4+csr_len]
decoded = csr_extracted.decode('utf-8')
# strip comments
stripped = []
for line in decoded.split('\n'):
            if not line.startswith('#'):
stripped.append(line)
# create database
csr_db = csv.reader(stripped)
for row in csr_db:
if len(row) > 1:
if 'csr_register' in row[0]:
self.registers[row[1]] = row[2]
if 'memory_region' in row[0]:
self.regions[row[1]] = [row[2], row[3]]
if 'git_rev' in row[0]:
self.gitrev = row[1]
print("Using SoC {} registers".format(self.gitrev))
# addr is relative to the base of FLASH (not absolute)
def flash_program(self, addr, data, verify=True):
flash_region = int(self.regions['spiflash'][0], 0)
flash_len = int(self.regions['spiflash'][1], 0)
if (addr + len(data) > flash_len):
print("Write data out of bounds! Aborting.")
exit(1)
# ID code check
code = self.flash_rdid(1)
print("ID code bytes 1-2: 0x{:08x}".format(code))
if code != 0x8080c2c2:
print("ID code mismatch")
exit(1)
code = self.flash_rdid(2)
print("ID code bytes 2-3: 0x{:08x}".format(code))
if code != 0x3b3b8080:
print("ID code mismatch")
exit(1)
# block erase
progress = ProgressBar(min_value=0, max_value=len(data), prefix='Erasing ').start()
erased = 0
while erased < len(data):
self.ping_wdt()
if (len(data) - erased >= 65536) and ((addr & 0xFFFF) == 0):
blocksize = 65536
else:
blocksize = 4096
while True:
self.flash_wren()
status = self.flash_rdsr(1)
if status & 0x02 != 0:
break
if blocksize == 4096:
self.flash_se4b(addr + erased)
else:
self.flash_be4b(addr + erased)
erased += blocksize
while (self.flash_rdsr(1) & 0x01) != 0:
pass
result = self.flash_rdscur()
if result & 0x60 != 0:
print("E_FAIL/P_FAIL set on erase, programming may fail, but trying anyways...")
if self.flash_rdsr(1) & 0x02 != 0:
self.flash_wrdi()
while (self.flash_rdsr(1) & 0x02) != 0:
pass
if erased < len(data):
progress.update(erased)
progress.finish()
print("Erase finished")
# program
# pad out to the nearest word length
if len(data) % 4 != 0:
data += bytearray([0xff] * (4 - (len(data) % 4)))
written = 0
progress = ProgressBar(min_value=0, max_value=len(data), prefix='Writing ').start()
while written < len(data):
self.ping_wdt()
if len(data) - written > 256:
chunklen = 256
else:
chunklen = len(data) - written
while True:
self.flash_wren()
status = self.flash_rdsr(1)
if status & 0x02 != 0:
break
self.burst_write(flash_region, data[written:(written+chunklen)])
self.flash_pp4b(addr + written, chunklen)
written += chunklen
if written < len(data):
progress.update(written)
progress.finish()
print("Write finished")
if self.flash_rdsr(1) & 0x02 != 0:
self.flash_wrdi()
while (self.flash_rdsr(1) & 0x02) != 0:
pass
# dummy reads to clear the "read lock" bit
self.flash_rdsr(0)
# verify
self.ping_wdt()
if verify:
print("Performing readback for verification...")
self.ping_wdt()
rbk_data = self.burst_read(addr + flash_region, len(data))
if rbk_data != data:
print("Errors were found in verification, programming failed")
exit(1)
else:
print("Verification passed.")
else:
print("Skipped verification at user request")
self.ping_wdt()
def auto_int(x):
return int(x, 0)
def main():
parser = argparse.ArgumentParser(description="Update/upload to a Precursor device running Xous 0.8/0.9")
parser.add_argument(
"--soc", required=False, help="'Factory Reset' the SoC gateware. Note: this will overwrite any secret keys stored in your device!", type=str, nargs='?', metavar=('SoC gateware file'), const='../precursors/soc_csr.bin'
)
parser.add_argument(
"-s", "--staging", required=False, help="Stage an update to apply", type=str, nargs='?', metavar=('SoC gateware file'), const='../precursors/soc_csr.bin'
)
parser.add_argument(
"-l", "--loader", required=False, help="Loader", type=str, nargs='?', metavar=('loader file'), const='../target/riscv32imac-unknown-xous-elf/release/loader.bin'
)
parser.add_argument(
"-k", "--kernel", required=False, help="Kernel", type=str, nargs='?', metavar=('kernel file'), const='../target/riscv32imac-unknown-xous-elf/release/xous.img'
)
parser.add_argument(
"-e", "--ec", required=False, help="EC gateware", type=str, nargs='?', metavar=('EC gateware package'), const='ec_fw.bin'
)
parser.add_argument(
"-w", "--wf200", required=False, help="WF200 firmware", type=str, nargs='?', metavar=('WF200 firmware package'), const='wf200_fw.bin'
)
parser.add_argument(
"--audiotest", required=False, help="Test audio clip (must be 8kHz WAV)", type=str, nargs='?', metavar=('Test audio clip'), const="testaudio.wav"
)
parser.add_argument(
"--peek", required=False, help="Inspect an address", type=auto_int, metavar=('ADDR')
)
parser.add_argument(
"--poke", required=False, help="Write to an address", type=auto_int, nargs=2, metavar=('ADDR', 'DATA')
)
parser.add_argument(
"--check-poke", required=False, action='store_true', help="Read data before and after the poke"
)
parser.add_argument(
"--config", required=False, help="Print the descriptor", action='store_true'
)
parser.add_argument(
"-i", "--image", required=False, help="Manually specify an image and address. Offset is relative to bottom of flash.", type=str, nargs=2, metavar=('IMAGEFILE', 'ADDR')
)
parser.add_argument(
"--verify", help="Readback verification. May fail for large files due to WDT timeout.", default=False, action='store_true'
)
parser.add_argument(
"--force", help="Ignore gitrev version on SoC and try to burn an image anyways", action="store_true"
)
parser.add_argument(
"--bounce", help="cycle the device through a reset", action="store_true"
)
args = parser.parse_args()
    if len(sys.argv) <= 1:
print("No arguments specified, doing nothing. Use --help for more information.")
exit(1)
dev = usb.core.find(idProduct=0x5bf0, idVendor=0x1209)
if dev is None:
raise ValueError('Precursor device not found')
dev.set_configuration()
if args.config:
cfg = dev.get_active_configuration()
print(cfg)
pc_usb = PrecursorUsb(dev)
    verify = args.verify
    if args.peek is not None:
pc_usb.peek(args.peek, display=True)
# print(burst_read(dev, args.peek, 256).hex())
exit(0)
if args.poke:
addr, data = args.poke
pc_usb.poke(addr, data, check=args.check_poke, display=True)
# import os
# d = bytearray(os.urandom(8000))
# burst_write(dev, addr, d)
# r = burst_read(dev, addr, 8000)
# print(r.hex())
# if d != r:
# print("mismatch")
# else:
# print("match")
exit(0)
pc_usb.load_csrs() # prime the CSR values
if "v0.8" in pc_usb.gitrev:
LOC_SOC = 0x00000000
LOC_STAGING= 0x00280000
LOC_LOADER = 0x00500000
LOC_KERNEL = 0x00980000
LOC_WF200 = 0x07F80000
LOC_EC = 0x07FCE000
LOC_AUDIO = 0x06340000
LEN_AUDIO = 0x01C40000
elif "v0.9" in pc_usb.gitrev:
LOC_SOC = 0x00000000
LOC_STAGING= 0x00280000
LOC_LOADER = 0x00500000
LOC_KERNEL = 0x00980000
LOC_WF200 = 0x07F80000
LOC_EC = 0x07FCE000
LOC_AUDIO = 0x06340000
LEN_AUDIO = 0x01C40000
elif args.force == True:
# try the v0.9 offsets
LOC_SOC = 0x00000000
LOC_STAGING= 0x00280000
LOC_LOADER = 0x00500000
LOC_KERNEL = 0x00980000
LOC_WF200 = 0x07F80000
LOC_EC = 0x07FCE000
LOC_AUDIO = 0x06340000
LEN_AUDIO = 0x01C40000
else:
print("SoC is from an unknow rev '{}', use --force to continue anyways with v0.8 firmware offsets".format(pc_usb.load_csrs()))
exit(1)
vexdbg_addr = int(pc_usb.regions['vexriscv_debug'][0], 0)
pc_usb.ping_wdt()
print("Halting CPU.")
pc_usb.poke(vexdbg_addr, 0x00020000)
if args.image:
image_file, addr_str = args.image
addr = int(addr_str, 0)
print("Burning manually specified image '{}' to address 0x{:08x} relative to bottom of FLASH".format(image_file, addr))
with open(image_file, "rb") as f:
image_data = f.read()
pc_usb.flash_program(addr, image_data, verify=verify)
    if args.ec is not None:
print("Staging EC firmware package '{}' in SOC memory space...".format(args.ec))
with open(args.ec, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_EC, image, verify=verify)
    if args.wf200 is not None:
print("Staging WF200 firmware package '{}' in SOC memory space...".format(args.wf200))
with open(args.wf200, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_WF200, image, verify=verify)
    if args.staging is not None:
        print("Staging SoC gateware {}".format(args.staging))
with open(args.staging, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_STAGING, image, verify=verify)
    if args.kernel is not None:
print("Programming kernel image {}".format(args.kernel))
with open(args.kernel, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_KERNEL, image, verify=verify)
    if args.loader is not None:
print("Programming loader image {}".format(args.loader))
with open(args.loader, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_LOADER, image, verify=verify)
    if args.soc is not None:
        if args.force:
print("Programming SoC gateware {}".format(args.soc))
with open(args.soc, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_SOC, image, verify=verify)
else:
print("This will overwrite any secret keys in your device. Continue? (y/n)")
confirm = input()
if len(confirm) > 0 and confirm.lower()[:1] == 'y':
print("Programming SoC gateware {}".format(args.soc))
with open(args.soc, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_SOC, image, verify=verify)
    if args.audiotest is not None:
print("Loading audio test clip {}".format(args.audiotest))
with open(args.audiotest, "rb") as f:
image = f.read()
if len(image) >= LEN_AUDIO:
print("audio file is too long, aborting audio burn!")
else:
pc_usb.flash_program(LOC_AUDIO, image, verify=verify)
print("Resuming CPU.")
pc_usb.poke(vexdbg_addr, 0x02000000)
print("Resetting SOC...")
try:
pc_usb.poke(pc_usb.register('reboot_soc_reset'), 0xac, display=False)
except usb.core.USBError:
pass # we expect an error because we reset the SOC and that includes the USB core
# print("If you need to run more commands, please unplug and re-plug your device in, as the Precursor USB core was just reset")
if __name__ == "__main__":
main()
exit(0)
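
# Example invocations (script filename assumed; the paths mirror the argparse
# defaults above and are only illustrative):
#   python3 usb_update.py --peek 0x20277000
#   python3 usb_update.py --kernel ../target/riscv32imac-unknown-xous-elf/release/xous.img --verify
#   python3 usb_update.py --image myimage.bin 0x500000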
|
nilq/baby-python
|
python
|
from tkinter import *
import math
import numpy as np
import os.path
########################################################
#Reading the output
if os.path.exists('../../build/output/ODE/ODE.txt'):
t, x, y = np.loadtxt('../../build/output/ODE/ODE.txt', skiprows = 0, unpack = True)
else:
print("No output file found")
exit()
########################################################
#Animation class in which I draw and set the positions of the objects
class Animation:
def __init__(self, gw):
#Window
self.window = gw
#Initial conditions
self.xoff, self.yoff = 300, 300
self.angle = 150*math.pi/180
self.sina = math.sin(self.angle)
self.cosa = math.cos(self.angle)
#Rod
self.rodLength = 150
self.rodx0, self.rody0 = self.xoff, self.yoff
self.rx1 = self.rodx0
self.ry1 = self.rody0
self.rx2 = self.xoff + self.rodLength*self.sina
self.ry2 = self.yoff + self.rodLength*self.cosa
#Pendulum
self.bobRadius = 15
self.bobCenter = self.rodLength + self.bobRadius
self.bx1 = self.xoff - self.bobRadius + self.bobCenter*self.sina
self.by1 = self.yoff - self.bobRadius + self.bobCenter*self.cosa
self.bx2 = self.xoff + self.bobRadius + self.bobCenter*self.sina
self.by2 = self.yoff + self.bobRadius + self.bobCenter*self.cosa
#Others
self.step = 0
self.xText = 500
self.yText = 20
# create / fill canvas:
self.cnv = Canvas(gw, bg='white')
self.cnv.pack(fill=BOTH, expand=True)
radius = 4
self.cnv.create_oval(300-radius, 300-radius,
300+radius, 300+radius,
fill='black')
self.bob = self.cnv.create_oval(self.bx1,
self.by1,
self.bx2,
self.by2,
fill='red',
width=2)
self.rod = self.cnv.create_line(self.rx1,
self.ry1,
self.rx2,
self.ry2,
fill='black',
width=4)
self.time = self.cnv.create_text(self.xText,
self.yText,
font=("courier", 15, "bold"),
text='Time = 0 s')
self.animate()
def animate(self):
self.angle = x[self.step]
self.sina = math.sin(self.angle)
self.cosa = math.cos(self.angle)
self.rx1 = self.rodx0
self.ry1 = self.rody0
self.rx2 = self.xoff + self.rodLength*self.sina
self.ry2 = self.yoff + self.rodLength*self.cosa
self.bx1 = self.xoff - self.bobRadius + self.bobCenter*self.sina
self.by1 = self.yoff - self.bobRadius + self.bobCenter*self.cosa
self.bx2 = self.xoff + self.bobRadius + self.bobCenter*self.sina
self.by2 = self.yoff + self.bobRadius + self.bobCenter*self.cosa
self.cnv.itemconfigure(self.time, text= 'Time = {:.1f} s'.format(t[self.step]))
self.step += 1
self.cnv.coords(self.rod,
self.rx1,
self.ry1,
self.rx2,
self.ry2)
self.cnv.coords(self.bob,
self.bx1,
self.by1,
self.bx2,
self.by2)
self.window.update()
        #Once the last sample has been drawn, close the window
if self.step < len(x):
self.cnv.after(10, self.animate)
else:
exit()
#Tkinter project definition
root = Tk()
root.title('Pendulum')
root.geometry('600x600')
root.resizable(False, False)
#Class
a = Animation(root)
#Loop
root.mainloop()
|
nilq/baby-python
|
python
|
'''
@Author: your name
@Date: 2020-05-10 18:23:54
@LastEditors: wei
@LastEditTime: 2020-05-12 14:04:09
@Description: file content
'''
import importlib
from torch.utils.data import DataLoader
def find_dataset_using_name(dataset_name):
"""Find dataset using name
    Arguments:
        dataset_name {str} -- name of the dataset; resolved to the module
            ``dataset.<dataset_name>_dataset``

    Returns:
        type -- the dataset class whose lowercased name matches
        ``<dataset_name>dataset``
    """
dataset_file_name = 'dataset.' + dataset_name + '_dataset'
dataset_lib = importlib.import_module(dataset_file_name)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in dataset_lib.__dict__.items():
if name.lower() == target_dataset_name.lower():
dataset = cls
if dataset is None:
        print('Dataset "{}" not found; check that dataset/{}_dataset.py defines a matching class'.format(dataset_name, dataset_name))
        exit(1)
return dataset
def create_dataset(cfg, mode, transform):
"""Create dataset
    Arguments:
        cfg -- configuration object providing dataset_name, batch_size and num_workers
        mode {str} -- dataset split, e.g. 'train' or 'test'
        transform -- transform applied to each sample

    Returns:
        DataLoader -- shuffled loader over the instantiated dataset
    """
dataset = find_dataset_using_name(cfg.dataset_name)
instance = dataset(cfg, mode, transform)
print("Dataset {} {} was created, there are {} images in all".format(cfg.dataset_name, mode, len(instance)))
dataloader = DataLoader(instance, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.num_workers)
return dataloader
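
# Hypothetical usage sketch (cfg fields and module layout are assumptions, not
# part of this file): with dataset/mnist_dataset.py defining a class named
# MNISTDataset, the factory resolves it purely by naming convention:
#
#   loader = create_dataset(cfg, mode='train', transform=None)
#   for batch in loader:
#       ...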
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2008 Daniel Truemper truemped@googlemail.com
#
# setup.py 04-Jan-2011
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# under the License.
#
#
from setuptools import setup, find_packages
import re
__version__ = re.search(r"__version__\s*=\s*'(.*)'", open('src/spyder/__init__.py').read(), re.M).group(1)
assert __version__
long_description = open("README.rst").read()
assert long_description
tests_require = ['coverage>=3.4', 'nose==1.1.2']
setup(
name = "spyder",
version = __version__,
description = "A python spider",
long_description = long_description,
author = "Daniel Truemper",
author_email = "truemped@googlemail.com",
url = "",
license = "Apache 2.0",
package_dir = { '' : 'src' },
packages = find_packages('src'),
include_package_data = True,
test_suite = 'nose.collector',
install_requires = [
'pyzmq>=2.0.10',
'tornado>=1.1',
'thrift>=0.5.0',
'pycurl>=7.19.0',
'pytz>=2010o',
'brownie>=0.4.1',
],
tests_require = tests_require,
extras_require = {'test': tests_require},
entry_points = {
'console_scripts' : [
'spyder = spyder:spyder_admin_main',
]
},
classifiers = [
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
]
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
import math
from io import StringIO
def show_tree(tree, total_width=36, fill=' '):
"""Pretty-print a tree."""
output = StringIO()
last_row = -1
for i, n in enumerate(tree):
if i:
row = int(math.floor(math.log(i+1, 2)))
else:
row = 0
if row != last_row:
output.write('\n')
columns = 2**row
col_width = int(math.floor((total_width * 1.0) / columns))
output.write(str(n).center(col_width, fill))
last_row = row
    print(output.getvalue())
    print('-' * total_width)
    print()
return
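
if __name__ == '__main__':
    # Demo (not in the original module): pretty-print a 7-element binary heap.
    show_tree([1, 3, 2, 7, 4, 6, 5])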
|
nilq/baby-python
|
python
|
def f(x=4, a=[]):
    # The default list is created once, when the function is defined, so every
    # call that falls back to the default shares (and mutates) the same list.
    a.append(x)
    print(a)

f()            # [4]
f(2)           # [4, 2]   (same shared default list)
f(7, [7, 7])   # [7, 7, 7] (a fresh list was passed in)
f("still")     # [4, 2, 'still'] (the shared default list again)
|
nilq/baby-python
|
python
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torchmultimodal.architectures.clip import CLIPArchitecture
from torchmultimodal.modules.encoders.clip_resnet_encoder import ResNetForCLIP
from torchmultimodal.modules.encoders.clip_text_encoder import CLIPTextEncoder
from torchmultimodal.utils.common import get_current_device
from torchvision.models.vision_transformer import VisionTransformer
class TestCLIPModule(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
self.device = get_current_device()
self.context_length = 77
def test_clip_resnet_forward(self):
resnet_encoder = ResNetForCLIP(
layers=(3, 4, 6, 3),
output_dim=12,
heads=10,
width=20,
)
text_encoder = CLIPTextEncoder(
embedding_dim=12,
context_length=self.context_length,
vocab_size=100,
width=512,
heads=8,
layers=12,
)
clip_resnet = CLIPArchitecture(
vision_encoder=resnet_encoder,
text_encoder=text_encoder,
)
clip_resnet = clip_resnet.to(self.device)
self.assertTrue(isinstance(clip_resnet, torch.nn.Module))
text = torch.randint(1, 79, (self.context_length,), dtype=torch.long).unsqueeze(
0
)
image = torch.randn(3, 224, 224).unsqueeze(0)
clip_resnet_scores = clip_resnet(image=image, text=text)
self.assertEqual(clip_resnet_scores["image"].size(), torch.Size((1, 12)))
self.assertEqual(clip_resnet_scores["text"].size(), torch.Size((1, 12)))
def test_clip_vit_forward(self):
vit_encoder = VisionTransformer(
image_size=224,
patch_size=16,
num_layers=12,
num_heads=12,
hidden_dim=768,
mlp_dim=3072,
num_classes=12,
)
text_encoder = CLIPTextEncoder(
embedding_dim=12,
context_length=self.context_length,
vocab_size=100,
width=512,
heads=8,
layers=12,
)
text = torch.randint(1, 79, (self.context_length,), dtype=torch.long).unsqueeze(
0
)
image = torch.randn(3, 224, 224).unsqueeze(0)
clip_vit = CLIPArchitecture(
vision_encoder=vit_encoder, text_encoder=text_encoder
)
clip_vit = clip_vit.to(self.device)
self.assertTrue(isinstance(clip_vit, torch.nn.Module))
clip_vit_scores = clip_vit(image=image, text=text)
self.assertEqual(clip_vit_scores["image"].size(), torch.Size((1, 12)))
self.assertEqual(clip_vit_scores["text"].size(), torch.Size((1, 12)))
|
nilq/baby-python
|
python
|
from .production import *
CONFIG_FILE_IN_USE = get_file_name_only(__file__) # Custom setting
# Custom settings for dynamically-generated config files
PROJECT_NAME = PROJECT_NAME+'-staging'
UWSGI_PORT = 9002
HTTP_PORT = 81
HTTPS_PORT = 444
# Override database setting
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DATA_DIR, 'staging.sqlite3'),
},
}
|
nilq/baby-python
|
python
|
from line_factory.sliding_window.frame import Frame
from line_factory.sliding_window.detection_area import DetectionArea
class SlidingWindowLineDetector:
def __init__(self, sliding_window_container):
self.sliding_window_container = sliding_window_container
def detect(self, bw_image, start_x):
frame = Frame(bw_image)
current_x = start_x
line_pieces = []
image_height = bw_image.shape[0]
windows = self.sliding_window_container.get_windows(image_height)
for window in windows:
detection_boundaries = window.detection_area(current_x)
line_points = frame.get_line_points(detection_boundaries)
detection_area = DetectionArea(current_x, line_points, window.shape)
current_x = detection_area.center_x
line_pieces.append(detection_area)
return line_pieces
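
# Hypothetical usage sketch -- the container below is an assumption standing in
# for whatever object provides get_windows(), and is not defined in this module:
#
#   detector = SlidingWindowLineDetector(sliding_window_container)
#   pieces = detector.detect(binary_warped_image, start_x=320)
#   centers = [piece.center_x for piece in pieces]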
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
"""Alta3 Research - Exploring OpenAPIs with requests"""
# documentation for this API is at
# https://anapioficeandfire.com/Documentation
import pprint
import requests
AOIF_BOOKS = "https://www.anapioficeandfire.com/api/books"
def main():
## Send HTTPS GET to the API of ICE and Fire books resource
gotresp = requests.get(AOIF_BOOKS)
## Decode the response
got_dj = gotresp.json()
## print the response
## using pretty print so we can read it
pprint.pprint(got_dj)
if __name__ == "__main__":
main()
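
# Optional variation (sketch): the API documentation linked above describes
# pagination; the parameter names below are taken from those docs.
#   resp = requests.get(AOIF_BOOKS, params={"page": 1, "pageSize": 3})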
|
nilq/baby-python
|
python
|