id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8026484 | <reponame>irom-lab/AMR-Policies<gh_stars>1-10
import Robot
from Networks import *
import Environment
import warnings
import numpy as np
import Train
import Task
import torch as pt
# Learning Parameters **************************************************************************************************
# Hyperparameters for the AMR policy training run below.
num_epochs = 300
max_m_dim = 10 # memory
batch_size = 100
output_dim = 5 # obs
input_dim = 5 # actions
horizon = 8
rnn_horizon = 1
lr = 0.1
reg = 0
seed = 553
# Simulation Parameters ************************************************************************************************
# Build a 5x5 grid world containing an automaton robot and a goal-navigation
# task; the task samples the initial state distribution.
robot = Robot.Automaton()
env = Environment.GridWorld(5, robot, empty=True)
map = env.generate_obstacles()  # NOTE(review): shadows the builtin `map`
goal = pt.tensor([[5., 13.]]) # Goal in grid world
task = Task.AutGoalNav(map, goal, alpha=0.)
state = task.sample_initial_dist(empty=True)
# Train ****************************************************************************************************************
# Seed torch for reproducibility, build the discrete RNN policy, and train it.
warnings.filterwarnings("ignore", category=UserWarning)
pt.manual_seed(seed)
net = RNNDiscrete(batch_size, output_dim, max_m_dim, input_dim, rnn_horizon, seed=seed)
Train.train_AMR_discrete_one(env, net, num_epochs, horizon, max_m_dim, batch_size, task, lr, reg, minibatch_size=0,
                             opt_iters=1, seed=seed)
| StarcoderdataPython |
9741786 | <reponame>xplorfin/prototype
# Copyright (c) 2020 <NAME>
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
from moneysocket.nexus.rendezvous.outgoing import OutgoingRendezvousNexus
from moneysocket.layer.layer import Layer
class OutgoingRendezvousLayer(Layer):
    """Protocol layer performing the outgoing half of a rendezvous.

    When a nexus below announces itself, it is wrapped in an
    OutgoingRendezvousNexus and a rendezvous is started using an ID derived
    from the shared seed.
    """
    def __init__(self):
        super().__init__()
    def announce_nexus(self, below_nexus):
        # Wrap the lower nexus and register it with the Layer's bookkeeping.
        rendezvous_nexus = OutgoingRendezvousNexus(below_nexus, self)
        self._track_nexus(rendezvous_nexus, below_nexus)
        # The rendezvous ID is derived deterministically from the shared
        # seed, so both endpoints can compute the same hex ID independently.
        shared_seed = below_nexus.get_shared_seed()
        rid = shared_seed.derive_rendezvous_id().hex()
        rendezvous_nexus.start_rendezvous(rid, self.rendezvous_finished_cb)
    def rendezvous_finished_cb(self, rendezvous_nexus):
        # Invoked by the nexus once the rendezvous completes: mark the nexus
        # announced and propagate the event upward.
        self._track_nexus_announced(rendezvous_nexus)
        self.send_layer_event(rendezvous_nexus, "NEXUS_ANNOUNCED")
        # `onannounce` is presumably an optional hook set by the base Layer
        # or by the layer's owner -- TODO confirm against moneysocket.layer.
        if self.onannounce:
            self.onannounce(rendezvous_nexus)
| StarcoderdataPython |
3334516 | import decimal
# Conversion factors between bitcoin's two standard denominations.
satoshi_to_btc = decimal.Decimal(1e8)
btc_to_satoshi = 1 / satoshi_to_btc


def convert_to_satoshis(btc):
    """ Converts an amount in BTC to satoshis.

    Handles rounding and quantization (IEEE-754 representation) issues:
    any floating-point digits beyond 1e-8 are rounded to the nearest
    satoshi, and the result is verified by a round-trip check.

    Args:
        btc (float): Amount in BTC.

    Returns:
        int: Amount in satoshis.

    Raises:
        ValueError: if the satoshi amount does not round-trip back to the
            quantized BTC amount.
    """
    # Quantize to 8 decimal places first, dropping sub-satoshi digits.
    quantized = decimal.Decimal(btc).quantize(btc_to_satoshi)
    satoshis = int((quantized * satoshi_to_btc).to_integral_value())
    # Round-trip the integer amount back to BTC and confirm it matches.
    roundtrip = decimal.Decimal(satoshis / satoshi_to_btc).quantize(btc_to_satoshi)
    if roundtrip != quantized:
        raise ValueError("Improper rounding or quantization.")
    return satoshis


def convert_to_btc(satoshis):
    """ Converts an amount in satoshis to BTC.

    The return value should only be used for display purposes; all
    internal calculations should be done in satoshis (integers).

    Args:
        satoshis (int): Amount in satoshis.

    Returns:
        decimal: Amount in BTC.

    Raises:
        TypeError: if `satoshis` is not an integer.
    """
    if isinstance(satoshis, int):
        return decimal.Decimal(satoshis) / satoshi_to_btc
    raise TypeError("satoshis must be an integer.")
class BaseWallet(object):
    """ An abstract wallet class.

    Subclasses must override every method below; the base implementations
    all raise NotImplementedError.
    """

    # Configuration options available for the wallet: maps each option
    # name to its possible values,
    # e.g. {key_style: ["HD", "Brain", "Simple"], ...}
    config_options = {}

    @staticmethod
    def is_configured():
        """ Report whether the wallet is configured and ready to use.

        Returns:
            bool: True once the wallet has been configured, else False.
        """
        raise NotImplementedError('Abstract class, `is_configured` must be overridden')

    @staticmethod
    def configure(config_options):
        """ Configure the wallet from the provided option dict.
        """
        raise NotImplementedError('Abstract class, `auto_configure` must be overridden')

    def __init__(self):
        super(BaseWallet, self).__init__()

    @property
    def addresses(self):
        """ All addresses belonging to this wallet.

        Returns:
            list: The current list of addresses in this wallet.
        """
        raise NotImplementedError('Abstract class, `addresses` must be overridden')

    @property
    def current_address(self):
        """ The preferred payment address.

        Returns:
            str: The current preferred payment address.
        """
        raise NotImplementedError('Abstract class, `current_address` must be overridden')

    def balance(self):
        """ Confirmed balance of the wallet in satoshis.

        Alias for confirmed_balance().
        """
        return self.confirmed_balance()

    def confirmed_balance(self):
        """ Current confirmed balance of the wallet in satoshis.
        """
        raise NotImplementedError('Abstract class `confirmed_balance` must be overridden')

    def unconfirmed_balance(self):
        """ Current unconfirmed balance of the wallet in satoshis.
        """
        raise NotImplementedError('Abstract class, `unconfirmed_balance` must be overridden')

    def broadcast_transaction(self, tx):
        """ Broadcast a serialized transaction to the Bitcoin network.

        Args:
            tx (str): Hex string serialization of the transaction.

        Returns:
            str: The name (txid) of the broadcasted transaction.
        """
        raise NotImplementedError('Abstract class, `broadcast_transaction` must be overridden')

    def make_signed_transaction_for(self, address, amount):
        """ Build raw signed, unbroadcasted transactions for `amount`.

        Args:
            address (str): The address to send the Bitcoin to.
            amount (number): The amount of Bitcoin to send.

        Returns:
            list(dict): Dicts of transaction names and raw transactions,
                e.g. [{"txid": txid0, "txn": txn_hex0}, ...]
        """
        raise NotImplementedError('Abstract class, `make_signed_transaction_for` must be overridden')

    def send_to(self, address, amount):
        """ Send `amount` of Bitcoin to `address`.

        Args:
            address (str): The address to send the Bitcoin to.
            amount (number): The amount of Bitcoin to send.

        Returns:
            list(dict): Dicts of transaction names and raw transactions,
                e.g. [{"txid": txid0, "txn": txn_hex0}, ...]
        """
        raise NotImplementedError('Abstract class, `send_to` must be overridden')
| StarcoderdataPython |
11329791 | <reponame>uperetz/AstroTools<gh_stars>0
import re
from numpy import array, finfo
class Fitter(object):
    """Interactive fitting session tying together data, responses, models
    and plotting.

    NOTE(review): most behavior lives in the mixin modules whose functions
    are imported into the class body below (_datadefs, _modeling, _error,
    _plotdefs); this class holds shared state plus thin wrappers delegating
    to the active model in ``self.current``.
    """
    from ._plotdefs import CHANNEL,ENERGY,WAVE
    def __init__(self, data = None, resp = None, noinit = False, text = None):
        # Shared plotting/fitting state used by the mixin methods.
        self.axisz = None
        self.dataz = None
        self.ptype = self.CHANNEL
        self.models = []
        self.ionlabs = []
        self.current = None
        self.binfactor = 1
        self.result = []
        self.xstart = None
        self.xstop = None
        self.ystart = None
        self.ystop = None
        self.plotmodel = array(())
        self.labelions = False
        self.area = array(())
        # Machine epsilon; presumably used as a lower bound in fits -- TODO confirm.
        self.eps = finfo(float).eps
        # Default fit statistic (chi-squared); switchable via setStat().
        self.stat = self.chisq
        self.axisOverride=[None,None]
        if not noinit: self.initplot()
        if data is not None:
            self.loadData(data,text)
        if resp is not None:
            self.loadResp(resp)
    #Exceptions
    from ._datadefs import dataResponseMismatch, noIgnoreBeforeLoad
    from ._modeling import NotAModel
    from ._error import errorNotConverging, newBestFitFound
    from ._plotdefs import badPlotType, badZoomRange, labelAxis,toggleLog
    from ._plotdefs import unlabelAxis, toggleIonLabels, plotEff
    #Methods
    from ._datadefs import loadResp, loadData, loadBack, loadAncr
    from ._datadefs import checkLoaded, transmit, ignore, notice, set_channels, reset
    from ._datadefs import untransmit, div, group, updateIonLabels
    from ._modeling import chisq,reduced_chisq,append,delete,cstat
    from ._modeling import activate,nameModel,energies,tofit,toMinimize,fit
    from ._error import error
    from ._plotdefs import zoomto,rebin,setplot,plot,shift,removeShift
    from ._plotdefs import initplot,plotModel, plotDiv, toggle_area
    #Model wrappers -- thin delegations to the active model (self.current).
    def thaw(self, *params):
        self.current.thaw(*params)
    def freeze(self, *params):
        self.current.freeze(*params)
    def getThawed(self):
        return self.current.getThawed()
    def getParams(self):
        return self.current.getParams()
    def printParams(self):
        self.current.printParams()
    def initArgs(self):
        return self.current.initArgs()
    def tie(self,what,to):
        self.current.tie(what,to)
    def is_tied(self,index,param):
        return self.current.is_tied(index,param)
    def setp(self,pDict):
        self.current.setp(pDict)
    def setStat(self,s):
        # Swap the fit statistic (e.g. chisq or cstat).
        self.stat = s
    def calc(self, pDict=None):
        """Apply parameter overrides, evaluate the model, and re-plot."""
        # BUG FIX: the original signature used a mutable default argument
        # (pDict={}); a None sentinel avoids cross-call sharing.
        if pDict is None:
            pDict = {}
        self.setp(pDict)
        self.result = self.tofit(self.energies())
        self.plot()
| StarcoderdataPython |
1788707 | # DESCRIPTION: Tests the performance of the engine.
# 4920646f6e5c2774206361726520696620697420776f726b73206f6e20796f7572206d61636869
# 6e652120576520617265206e6f74207368697070696e6720796f7572206d616368696e6521
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import time, unittest
from lib import chessboard, core, engine, movegenerator, pieces, usercontrol
class Timer(object):
    """Context manager that measures wall-clock time of the enclosed block.

    Usage:
        with Timer() as t:
            ...
        # t.secs / t.msecs then hold the elapsed time.
    """
    def __init__(self, verbose=False):
        # When True, the elapsed time is printed on context exit.
        self.verbose = verbose
    def __enter__(self):
        self.start = time.time()
        return self
    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # millisecs
        if self.verbose:
            # BUG FIX (portability): the original used the Python-2-only
            # `print` statement; the parenthesized call form behaves
            # identically under Python 2 and also works under Python 3.
            print('elapsed time: %f ms' % self.msecs)
class TestMoveGenerator(unittest.TestCase):
    """Tests the performance of the move generator."""
    # NOTE(review): these are timing benchmarks, not correctness tests;
    # each method runs one generator call `numberofloops` times and prints
    # the elapsed wall time (legacy Python 2 syntax: print statement, xrange).
    def setUp(self):
        # Minimal position -- two lone kings -- so generation is cheap.
        self.numberofloops = 10
        self.board = chessboard.ChessBoard()
        self.board[27] = pieces.KingPiece('white')
        self.board[45] = pieces.KingPiece('black')
        self.generator = movegenerator.MoveGenerator(self.board)
        return None
    def test_basicmoves(self):
        # Raw pseudo-legal move generation.
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.generator.basicmoves('white')
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_pawnpushmoves(self):
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.generator.pawnpushmoves('white')
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_pawncapturemoves(self):
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.generator.pawncapturemoves('white')
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_castlemoves(self):
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.generator.castlemoves('white')
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_enpassantmoves(self):
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.generator.enpassantmoves('white')
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_onlylegalmoves(self):
        # Legality filtering benchmarked on a pre-generated move list.
        moves = self.generator.basicmoves('white')
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.generator.onlylegalmoves('white', moves)
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_illegalmove(self):
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.generator.illegalmove((27, 36), 'white')
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_kingincheck(self):
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.generator.kingincheck('white')
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_generatemovelist(self):
        # Full move-list generation on an already-initialised generator.
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.generator.generatemovelist('white')
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_initialise_and_generate(self):
        # Includes MoveGenerator construction cost in each iteration.
        with Timer() as t:
            for x in xrange(self.numberofloops):
                movegenerator.MoveGenerator(self.board).generatemovelist('white')
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
class TestChessboard(unittest.TestCase):
    """Tests the performance of the chessboard."""
    # NOTE(review): timing benchmarks (printed), not assertions.
    def setUp(self):
        self.numberofloops = 10
        self.board = chessboard.ChessBoard()
        self.board.setupnormalboard()
        return None
    def test_duplicateboard(self):
        # Time duplicating a fully set-up board.
        with Timer() as t:
            for x in xrange(self.numberofloops):
                self.board.duplicateboard()
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    def test_move(self):
        # Each iteration moves a piece out and back, hence loops/2.
        with Timer() as t:
            for x in xrange(self.numberofloops/2):
                self.board.move(12, 28)
                self.board.move(28, 12)
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
class TestNodeAndTree(unittest.TestCase):
    """Looks at the node and tree in the recursive search."""
    # NOTE(review): every benchmark here is currently skipped while the
    # engine's node/tree structures are under redevelopment.
    def setUp(self):
        self.numberofloops = 10
        self.node = engine.Node
        return None
    @unittest.skip("Under redevelopment.")
    def test_node_noparent(self):
        # Set up the board state.
        state = chessboard.ChessBoard()
        state[5] = pieces.RookPiece('white')
        # Then time it.
        with Timer() as t:
            for x in xrange(self.numberofloops):
                engine.Node(None, (1, 5), state)
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    @unittest.skip("Under redevelopment.")
    def test_node_parent(self):
        # Make the parent first.
        parentstate = chessboard.ChessBoard()
        parentstate[1] = pieces.RookPiece('white')
        parent = engine.Node(None, (0, 1), parentstate)
        # Set up the board state.
        state = chessboard.ChessBoard()
        state[5] = pieces.RookPiece('white')
        # Then time it.
        with Timer() as t:
            for x in xrange(self.numberofloops):
                engine.Node(parent, (1, 5), state)
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    @unittest.skip("Under redevelopment.")
    def test_tree_initialise(self):
        with Timer() as t:
            for x in xrange(self.numberofloops):
                engine.TreeStructure()
        print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
        return None
    @unittest.skip("Under redevelopment.")
    def test_tree_addnode(self):
        # Initialise tree.
        tree = engine.TreeStructure()
        # Make the parent first.
        parentstate = chessboard.ChessBoard()
        parentstate[1] = pieces.RookPiece('white')
        parent = engine.Node(None, (0, 1), parentstate)
        # Set up the board state.
        state = chessboard.ChessBoard()
        state[5] = pieces.RookPiece('white')
        child = engine.Node(parent, (1, 5), state)
        # Note: uses a fixed 10000-iteration count, not numberofloops.
        with Timer() as t:
            for x in xrange(10000):
                tree.addnode(child)
        print '\n\t=> elapsed time for 10000 loops: %s s' % t.secs
        return None
class TestEngine(unittest.TestCase):
    """Looks at the search and evaluate of the engine and how fast it is."""
    def setUp(self):
        self.search = engine.ChessEngine()
        return None
    def test_search(self):
        # TODO: search benchmark not implemented yet.
        return None
    def test_evaluate(self):
        # TODO: evaluate benchmark not implemented yet.
        return None
# Run the performance suite directly; verbosity=2 lists each test by name.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| StarcoderdataPython |
3298030 | <reponame>eggarcia28/itp-u4-c2-hangman-game
from .exceptions import *
from random import choice
# Complete with your own, just for fun :)
# Default word pool used when start_new_game() is called without a list.
LIST_OF_WORDS = ['python', 'javascript', 'linux', 'computer', 'programming', 'windows' ]
def _get_random_word(list_of_words):
if list_of_words:
return choice(list_of_words)
raise InvalidListOfWordsException
def _mask_word(word):
if word:
return '*' * len(word)
raise InvalidWordException
def _uncover_word(answer_word, masked_word, character):
#Exceptions
if not answer_word or not masked_word:
raise InvalidWordException
if len(answer_word) != len(masked_word):
raise InvalidWordException
if len(character) > 1:
raise InvalidGuessedLetterException
#Uncover Word
character = character.lower()
answer_word = answer_word.lower()
if character not in answer_word:
return masked_word
#character in answer_word
masked_list = [char for char in masked_word] # a list of masked and unmasked characters
for index, char in enumerate(answer_word):
if char == character:
masked_list[index] = character
masked_word = ''.join(masked_list)
return masked_word
def guess_letter(game, letter):
    """Process one guess, mutating `game` in place.

    Raises InvalidGuessedLetterException for repeated guesses,
    GameFinishedException when the game is already over, and
    GameLostException / GameWonException when this guess ends the game.
    """
    guess = letter.lower()
    # Reject repeated guesses and guesses against a finished game.
    if guess in game['previous_guesses']:
        raise InvalidGuessedLetterException
    if game['remaining_misses'] == 0 or game['answer_word'] == game['masked_word']:
        raise GameFinishedException
    # Record the guess and apply it.
    game['previous_guesses'].append(guess)
    if guess in game['answer_word'].lower():
        game['masked_word'] = _uncover_word(
            game['answer_word'], game['masked_word'], letter)
    else:
        game['remaining_misses'] -= 1
    # Terminal-state checks: losing is checked before winning.
    if game['remaining_misses'] == 0:
        raise GameLostException
    if game['answer_word'] == game['masked_word']:
        raise GameWonException
def start_new_game(list_of_words=None, number_of_guesses=5):
    """Build and return a fresh game-state dict.

    Falls back to the module-level LIST_OF_WORDS when no word list is given.
    """
    words = LIST_OF_WORDS if list_of_words is None else list_of_words
    answer = _get_random_word(words)
    return {
        'answer_word': answer,
        'masked_word': _mask_word(answer),
        'previous_guesses': [],
        'remaining_misses': number_of_guesses,
    }
| StarcoderdataPython |
3469668 | <filename>application/sample/views.py
# -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
from archon import *
from manager import Manager
#===============================================================================
# Create your views here.
#===============================================================================
# Placeholder views: registered with the archon framework but render nothing
# beyond the framework defaults.
@pageview(Manager)
def core(R, M, V):
    pass
@pageview(Manager)
def deco(R, M, V):
    pass
@pageview(Manager)
def action(R, M, V):
    pass
# Sample rows (first name, last name, position, office, start date, salary)
# used by the table demo views below.
table_data1 = [
    ["Airi","Satou","Accountant","Tokyo","28th Nov 08","$162,700"],
    ["Angelica","Ramos","Chief Executive Officer (CEO)","London","9th Oct 09","$1,200,000"],
    ["Ashton","Cox","Junior Technical Author","San Francisco","12th Jan 09","$86,000"],
    ["Bradley","Greer","Software Engineer","London","13th Oct 12","$132,000"],
    ["Brenden","Wagner","Software Engineer","San Francisco","7th Jun 11","$206,850"],
    ["Brielle","Williamson","Integration Specialist","New York","2nd Dec 12","$372,000"],
    ["Bruno","Nash","Software Engineer","London","3rd May 11","$163,500"],
    ["Caesar","Vance","Pre-Sales Support","New York","12th Dec 11","$106,450"],
    ["Cara","Stevens","Sales Assistant","New York","6th Dec 11","$145,600"],
    ["Cedric","Kelly","Senior Javascript Developer","Edinburgh","29th Mar 12","$433,060"]
]
# Variant rows embedding archon HTML widgets in the first cell, served for
# non-first pages of the async table demo.
table_data2 = [
    [STRONG().html("airi"),"satou","Accountant","Tokyo","28th Nov 08","$162,700"],
    [PARA().html(SMALL().html("angelica"), STRONG().html('!!!')),"ramos","Chief Executive Officer (CEO)","London","9th Oct 09","$1,200,000"],
    [GET('/sample/samples/table').html("ashton"),"cox","Junior Technical Author","San Francisco","12th Jan 09","$86,000"],
    ["bradley","greer","Software Engineer","London","13th Oct 12","$132,000"],
    ["brenden","wagner","Software Engineer","San Francisco","7th Jun 11","$206,850"],
    ["brielle","williamson","Integration Specialist","New York","2nd Dec 12","$372,000"],
    ["bruno","nash","Software Engineer","London","3rd May 11","$163,500"],
    ["caesar","vance","Pre-Sales Support","New York","12th Dec 11","$106,450"],
    ["cara","stevens","Sales Assistant","New York","6th Dec 11","$145,600"],
    ["cedric","kelly","Senior Javascript Developer","Edinburgh","29th Mar 12","$433,060"]
]
@TABLE.ASYNC.pageview()
def sub_table(R, M, V):
    # Server-side data source for the async table demo. Logs the request
    # parameters supplied by the framework (legacy Python 2 print syntax).
    print 'Draw :', R.Draw
    print 'Leng :', R.Length
    print 'Start :', R.Start
    print 'Search :', R.Search
    print 'OrderC :', R.OrderCol
    print 'OrderD :', R.OrderDir
    # Hard-coded totals for the demo dataset.
    total = 57
    count = 47
    ret = TABLE.ASYNCDATA(R.Draw, total, count)
    # First page serves table_data1; any later offset serves table_data2.
    if R.Start == 0:
        for td in table_data1: ret.Record(*td)
    else:
        for td in table_data2: ret.Record(*td)
    return ret
@pageview(Manager, sub_table=sub_table)
def table(R, M, V):
    # Demo page: a basic (static) table, an async (server-side) table fed by
    # sub_table above, and a "flip" table over the same sample rows.
    basic = TABLE.BASIC('First Name', '<NAME>', 'Position', 'Office', 'Start Data', 'Salary')
    for record in table_data1: basic.Record(*record)
    V.Page.html(basic)
    asyn = TABLE.ASYNC('sample/samples/table/sub_table', 'First Name', '<NAME>', 'Position', 'Office', 'Start Data', 'Salary')
    V.Page.html(asyn)
    flip = TABLE.FLIP('First Name', 'Last Name', '+Position', '+Office', '+Start Data', 'Salary')
    for record in table_data1: flip.Record(*record)
    V.Page.html(flip)
@pageview(Manager)
def dimple(R, M, V):
    # Line-chart demo: the same series rendered with different option sets
    # and themes, first single-series then three-series.
    labels = ['A', 'B', 'C', 'D']
    data1 = [None, 10, 30, 90]
    data2 = [30, 2, 100, 20]
    data3 = [70, None, 80, 50]
    options1 = {
        'pivot' : True,
        'stack' : True,
        'xkey' : 'Category',
        'ykey' : 'Value',
        'legend' : True,
    }
    options2 = {
        'legend' : True,
    }
    chart = CHART.LINE
    V.Page.html(chart(*labels).Data('Line1', *data3))
    V.Page.html(chart(*labels, **options1).Data('Line1', *data3))
    V.Page.html(chart(*labels, **options2).Data('Line1', *data3))
    V.Page.html(chart(*labels, **CHART.THEME_HEALTH).Data('Line1', *data3))
    V.Page.html(chart(*labels, **CHART.THEME_UTIL).Data('Line1', *data3))
    V.Page.html(chart(*labels).Data('Line1', *data1).Data('Line2', *data2).Data('Line3', *data3))
    V.Page.html(chart(*labels, **options1).Data('Line1', *data1).Data('Line2', *data2).Data('Line3', *data3))
    V.Page.html(chart(*labels, **options2).Data('Line1', *data1).Data('Line2', *data2).Data('Line3', *data3))
@pageview(Manager)
def peity(R, M, V):
    # Sparkline/figure demo: line, bar, pie and donut figures in default and
    # themed variants, then pie/donut progressions from 0% to 100%.
    V.Page.html(
        DIV().html(
            FIGURE.LINE(*[100, 4, 20, 50, 70, 10, 30], height=100, width=200),
            FIGURE.BAR(*[100, 4, 20, 50, 70, 10, 30], height=100, width=200),
            FIGURE.PIE(1,5, height=100, width=100),
            FIGURE.DONUT(1,5, height=100, width=100)
        )
    )
    V.Page.html(
        DIV().html(
            FIGURE.LINE(*[100, 4, 20, 50, 70, 10, 30], height=100, width=200, **FIGURE.THEME_HEALTH),
            FIGURE.BAR(*[100, 4, 20, 50, 70, 10, 30], height=100, width=200, **FIGURE.THEME_UTIL),
            FIGURE.PIE(20, 80, height=100, width=100, color=FIGURE.COLOR_HEALTH),
            FIGURE.DONUT(20, 80, height=100, width=100, color=FIGURE.COLOR_UTIL)
        )
    )
    V.Page.html(
        DIV().html(
            FIGURE.PIE(0, 100, height=100, width=100, **FIGURE.THEME_HEALTH),
            FIGURE.PIE(25, 75, height=100, width=100, **FIGURE.THEME_HEALTH),
            FIGURE.PIE(50, 50, height=100, width=100, **FIGURE.THEME_HEALTH),
            FIGURE.PIE(75, 25, height=100, width=100, **FIGURE.THEME_HEALTH),
            FIGURE.PIE(100, 0, height=100, width=100, **FIGURE.THEME_HEALTH),
        )
    )
    V.Page.html(
        DIV().html(
            FIGURE.DONUT(0, 100, height=100, width=100, hole=10, **FIGURE.THEME_UTIL),
            FIGURE.DONUT(25, 75, height=100, width=100, hole=20, **FIGURE.THEME_UTIL),
            FIGURE.DONUT(50, 50, height=100, width=100, **FIGURE.THEME_UTIL),
            FIGURE.DONUT(75, 25, height=100, width=100, **FIGURE.THEME_UTIL),
            FIGURE.DONUT(100, 0, height=100, width=100, **FIGURE.THEME_UTIL),
        )
    )
@pageview(Manager)
def arbor(R, M, V):
    # Minimal topology demo: two nodes joined by a single edge.
    topo = TOPO(height=0)
    topo.Node('Test1', 'Test1OK')
    topo.Node('Test2', 'Test2')
    topo.Edge('Test1', 'Test2')
    V.Page.html(topo)
@pageview(Manager)
def justgage(R, M, V):
    # Gauge demo showing a single fixed value of 40.
    gauge = GAUGE('Test1', 40)
    V.Page.html(gauge)
@pageview(Manager)
def html(R, M, V):
    # Placeholder view: intentionally renders nothing beyond the defaults.
    # BUG FIX: removed a dataset artifact (" | StarcoderdataPython |") that
    # had been fused onto the `pass` line, making the file unparsable.
    pass
#!/usr/bin/env python3.8
# Copyright 2022 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import urllib.parse
def generate_omaha_client_config(configs):
    """Generate the omaha-client eager package config.

    Args:
        configs: list of per-package config dicts, each with 'url',
            'realms' (list of {'app_id', 'channels'}), and optionally
            'flavor' and 'default_channel'.

    Returns:
        dict with a 'packages' list in the omaha-client config format.

    Raises:
        ValueError: if a package URL is pinned (carries a 'hash' query
            parameter) or a 'default_channel' is not listed in any realm.
    """
    packages = []
    for config in configs:
        package = {}
        url = config['url']
        # Eager package URLs must be unpinned; a hash-qualified URL would
        # defeat eager updating.
        query = urllib.parse.urlparse(url).query
        if 'hash' in urllib.parse.parse_qs(query):
            raise ValueError(f"pinned URL not allowed: {url}")
        package['url'] = url
        if 'flavor' in config:
            package['flavor'] = config['flavor']
        channel_config = {'channels': []}
        if 'default_channel' in config:
            default_channel = config['default_channel']
            # BUG FIX: this check was an `assert`, which is stripped under
            # `python -O`; config validation must always run, so raise an
            # explicit error instead.
            if not any(default_channel in realm['channels']
                       for realm in config['realms']):
                raise ValueError(
                    f"default_channel {default_channel} not in any realm")
            channel_config['default_channel'] = default_channel
        # One channel entry per (realm, channel); the repo name mirrors the
        # channel name and the appid comes from the owning realm.
        for realm in config['realms']:
            for channel in realm['channels']:
                channel_config['channels'].append(
                    {
                        'name': channel,
                        'repo': channel,
                        'appid': realm['app_id'],
                    })
        package['channel_config'] = channel_config
        packages.append(package)
    return {'packages': packages}
def generate_pkg_resolver_config(configs):
    """Generate the pkg-resolver eager package config.

    Each entry keeps the package 'url' and, when present in the source
    config, its 'executable' flag.
    """
    entries = []
    for config in configs:
        entry = {'url': config['url']}
        if 'executable' in config:
            entry['executable'] = config['executable']
        entries.append(entry)
    return {'packages': entries}
def main():
    """CLI entry point: read per-package JSON configs, write both outputs."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--out-omaha-client-config",
        type=argparse.FileType('w'),
        help="path to the generated eager package config file for omaha-client",
    )
    parser.add_argument(
        "--out-pkg-resolver-config",
        type=argparse.FileType('w'),
        help="path to the generated eager package config file for pkg-resolver",
    )
    parser.add_argument(
        "eager_package_config_files",
        nargs='+',
        type=argparse.FileType('r'),
        help="JSON config files, one for each eager package",
    )
    args = parser.parse_args()
    # Parse every input file up front, then emit both derived configs with
    # stable (sorted) key order for reproducible output.
    configs = [json.load(config_file)
               for config_file in args.eager_package_config_files]
    json.dump(generate_omaha_client_config(configs),
              args.out_omaha_client_config, sort_keys=True)
    json.dump(generate_pkg_resolver_config(configs),
              args.out_pkg_resolver_config, sort_keys=True)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
9650828 | <filename>helper/auth.py<gh_stars>1-10
from passlib.context import CryptContext
class AuthHandler():
    """Password hashing helpers backed by a passlib bcrypt context."""
    # Shared CryptContext; deprecated="auto" lets passlib flag hashes
    # produced by schemes other than the configured bcrypt.
    pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

    def get_password_hash(self, password):
        """Return a salted bcrypt hash of `password`."""
        return self.pwd_context.hash(password)

    def verify_password(self, plain_password, hashed_password):
        """Return True if `plain_password` matches `hashed_password`."""
        # BUG FIX: removed a dataset artifact (" | StarcoderdataPython |")
        # fused onto this line, which made the file unparsable.
        return self.pwd_context.verify(plain_password, hashed_password)
5033026 | <filename>home/hairygael/GESTURES/brooke3.py
def brooke3():
    """Scripted robot gesture sequence; chains into brooke4() at the end.

    NOTE(review): `i01`, `fullspeed`, `gestureforlondon4`, `sleep` and
    `brooke4` are presumably globals injected by the hosting gesture
    runtime -- confirm against the surrounding environment.
    """
    i01.attach()
    fullspeed()
    gestureforlondon4()
    sleep(2)
    i01.detach()
    sleep(30)
    # BUG FIX: removed a dataset artifact (" | StarcoderdataPython |")
    # fused onto this call, which made the file unparsable.
    brooke4()
4892261 | <filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages
# read the contents of the README file
from os import path
# Resolve the README relative to this file so builds work from any CWD.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'Readme.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Runtime dependencies (minimum versions).
requires = [
    "aiohttp>=3.6.2",
    "bibtexparser>=1.1.0",
    "crossref-commons-reverse>=0.0.7.1",
    "fuzzywuzzy>=0.18.0",
    "isbnlib>=3.10.3",
    "Jinja2>=2.11.1",
    "nameparser>=1.0.6",
    "python-dateutil>=2.8.1",
    "ratelimit>=2.2.1",
    "python-Levenshtein>=0.12.0"
]
# Extra dependencies needed only to run the test suite.
test_requires = [
    "pytest>=5.4.3",
    "pytest-asyncio>=0.12.0",
    "pytest-datadir-ng>=1.1.1",
    "aioresponses>=0.6.4"
]
setup(
    name="bibchex",
    version="0.1.6",
    packages=find_packages(),
    setup_requires=["pytest-runner>=5.2"],
    install_requires=requires,
    tests_require=requires + test_requires,
    python_requires='>=3.6',
    author="<NAME>",
    author_email="<EMAIL>",
    description="Check your BibTeX files for consistency and sanity.",
    long_description=long_description,
    long_description_content_type='text/x-rst',
    keywords="bibtex latex bibliography",
    url="http://github.com/tinloaf/bibchex/", # project home page, if any
    project_urls={
        "Bug Tracker": "https://github.com/tinloaf/bibchex/issues",
        "Documentation": "https://tinloaf.github.io/bibchex/",
    },
    classifiers=[
        "License :: OSI Approved :: MIT License",
    ],
    entry_points={
        'console_scripts': [
            "bibchex = bibchex.__main__:main"
        ]
    },
    package_data={
        "": ["data/*"],
    }
)
| StarcoderdataPython |
"""
Copyright (c) 2019 <NAME>
For suggestions and questions:
<<EMAIL>>
This file is distributed under the terms of the same license,
as the Kivy framework.
"""
def crop_image(cutting_size, path_to_image, path_to_save_crop_image,
               corner=0, blur=0, corner_mode='all'):
    """Crop an image, then optionally round its corners and/or blur it.

    cutting_size: target size the image is fitted/cropped to;
    path_to_image: path of the source image;
    path_to_save_crop_image: path the processed image is written to;
    corner: corner rounding radius (0 disables rounding);
    blur: blur radius (0 disables blurring);
    corner_mode: 'all'/'top'/'bottom' - which corners to round.
    """
    image = _crop_image(cutting_size, path_to_image, path_to_save_crop_image)
    if corner:
        image = add_corners(image, corner, corner_mode)
    if blur:
        image = add_blur(image, blur)
    try:
        image.save(path_to_save_crop_image)
    except IOError:
        # Retry the save forcing the JPEG encoder.
        image.save(path_to_save_crop_image, 'JPEG')
def add_blur(im, mode):
    """Return `im` with a Gaussian blur of radius `mode` applied."""
    from PIL import ImageFilter
    return im.filter(ImageFilter.GaussianBlur(mode))
def _crop_image(cutting_size, path_to_image, path_to_save_crop_image):
    """Fit the source image to `cutting_size`, save it, and return it."""
    from PIL import Image, ImageOps
    fitted = ImageOps.fit(Image.open(path_to_image), cutting_size)
    fitted.save(path_to_save_crop_image)
    return fitted
def add_corners(im, corner, corner_mode):
    """Round the corners of `im` via an alpha mask and return the image.

    corner: rounding radius in pixels;
    corner_mode: 'all' / 'top' / 'bottom' - which corners to round.
    """
    from PIL import Image, ImageDraw

    # A filled circle whose four quadrants become the corner masks.
    circle = Image.new('L', (corner * 2, corner * 2), 0)
    draw = ImageDraw.Draw(circle)
    draw.ellipse((0, 0, corner * 2, corner * 2), fill=255)

    alpha = Image.new('L', im.size, 255)
    w, h = im.size

    # BUG FIX: removed leftover `print(corner)` debug statements that the
    # original helpers executed on every call.
    def add_top_corners():
        # Paste the upper-left and upper-right quadrants of the circle.
        alpha.paste(circle.crop((0, 0, corner, corner)), (0, 0))
        alpha.paste(circle.crop(
            (corner, 0, corner * 2, corner)), (w - corner, 0))

    def add_bottom_corners():
        # Paste the lower-left and lower-right quadrants of the circle.
        alpha.paste(circle.crop(
            (0, corner, corner, corner * 2)), (0, h - corner))
        alpha.paste(
            circle.crop((corner, corner, corner * 2, corner * 2)),
            (w - corner, h - corner))

    if corner_mode == 'all':
        add_top_corners()
        add_bottom_corners()
    elif corner_mode == 'top':
        add_top_corners()
    elif corner_mode == 'bottom':
        add_bottom_corners()

    im.putalpha(alpha)
    return im
| StarcoderdataPython |
4875088 | <reponame>loonydev/smartt
# -*- coding: utf-8 -*-
from __future__ import print_function
import sqlite3
import pyxhook
import time
import pyperclip
import requests
from lxml import html
import time
def create_db_file(db_name):
    """Open (or create) the SQLite database `db_name`.

    Returns a (connection, cursor) pair. check_same_thread=False allows
    the connection to be used from another thread's callback.
    """
    connection = sqlite3.connect(db_name, check_same_thread=False)
    return connection, connection.cursor()
def get_word(word, cursor):
    """Look up `word` in the dictionary table.

    Returns a (possibly empty) list of 1-tuples of the ANS column.
    """
    # BUG FIX: the query was built by string concatenation, which is an SQL
    # injection vector and breaks on words containing a quote. Use a
    # parameterized query instead; results for ordinary words are unchanged.
    cursor.execute("SELECT ANS FROM dictionary WHERE KEY LIKE ?", (word,))
    result = cursor.fetchall()
    return result
def get_word_api_reverso(word):
    """Scrape context.reverso.net for translations/examples of `word`.

    NOTE(review): the percent-encoded URL path decodes to Russian text for
    the English-to-Russian context page -- confirm direction. The function
    prints the parsed example sentences and returns None (the commented-out
    `return` was never finished).
    """
    url = 'http://context.reverso.net/%D0%BF%D0%B5%D1%80%D0%B5%D0%B2%D0%BE%D0%B4/%D0%B0%D0%BD%D0%B3%D0%BB%D0%B8%D0%B9%D1%81%D0%BA%D0%B8%D0%B9-%D1%80%D1%83%D1%81%D1%81%D0%BA%D0%B8%D0%B9/'+word+'%3F'
    # Browser-like headers; presumably required so the site does not
    # reject the scraper -- TODO confirm.
    headers = {
        'Host': 'context.reverso.net',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'close',
        'Upgrade-Insecure-Requests': '1'
    }
    # Crude request timing: elapsed seconds are printed.
    sta=time.time()
    response = requests.get(url, headers=headers)
    print(time.time()-sta)
    tree = html.fromstring(response.content)
    # Translations and example sentence pairs, located by CSS-class XPaths.
    word_list = tree.xpath('//div[@class = "translation ltr dict no-pos"]')
    example_list_en = tree.xpath('.//div[@class="example"]/div[@class="src ltr"]/span[@class="text"]')#.find('//span[@class="text"]')
    example_list_ru = tree.xpath('.//div[@class="example"]/div[@class="trg ltr"]/span[@class="text"]')
    print(word_list)
    # Print each source/target example pair, appending the highlighted term.
    for i in range(len(example_list_en)):
        #print('-----')
        print(str(example_list_en[i].text)+str(example_list_en[i].find('em').text))
        print(str(example_list_ru[i].text)+str(example_list_ru[i].find('a').find('em').text))
        #print(ell.find('em').text)
    # out_test=open('out_test.txt','w')
    # out_test.write(response.content)
    # out_test.close()
    #return (result)
# def get_all_ans(string):
# all_answer=[]
# index=0
# len_ex=1
# while index<len(string):
# index=string.find('>',index)
# if index ==-1:
# break
# index_next_ex=string.find('\n',index+len_ex)
# if(index_next_ex==-1):
# all_example.append(string[index+len_ex:])
# else:
# all_example.append(string[index+len_ex:index_next_ex])
# index=index+len_ex
def get_all_by_word(string, req, next):
    """Collect every substring of *string* that sits between an occurrence
    of the marker *req* and the following occurrence of *next* (or the end
    of the string when *next* is not found after the marker)."""
    results = []
    marker_len = len(req)
    pos = 0
    while pos < len(string):
        pos = string.find(req, pos)
        if pos == -1:
            break
        start = pos + marker_len
        end = string.find(next, start)
        results.append(string[start:] if end == -1 else string[start:end])
        pos = start
    return results
# def get_all_ex(string):
# all_example=[]
# index=0
# len_ex=5
# len_an=2
# while index<len(string):
# index=string.find('_Ex:\n',index)
# if index ==-1:
# break
# index_next_ex=string.find('\n',index+len_ex)
# #index_next_an=string.find('>',index+len_ex)
# if(index_next_ex==-1):
# all_example.append(string[index+len_ex:])
# else:
# all_example.append(string[index+len_ex:index_next_ex])
# index=index+len_ex
# for i in all_example:
# print('-----')
# print(i)
def clear_line(data, req):
    """Return a new list where, for every line that contains *req*,
    everything up to and including the first occurrence of *req* is
    removed; lines without *req* are copied unchanged."""
    skip = len(req)
    cleaned = []
    for line in data:
        hit = line.find(req)
        cleaned.append(line[hit + skip:] if hit != -1 else line)
    return cleaned
def pretty_string_gen(string):
    """Pretty-print one raw dictionary entry to stdout.

    Extracts the translation lines (introduced by the '> ' marker) and the
    usage-example lines (introduced by the example marker) via
    get_all_by_word() and prints them under their Russian headers
    ("Ответы" = answers, "Примеры" = examples).  Print-only; returns None.

    Changes vs. original: removed the unused ``synonims`` local and
    replaced the range(len(...)) index loops with direct iteration.
    """
    print('---------------------------------------------')
    all_example = get_all_by_word(string, '_Ex:\n ', '\n')
    all_answer = get_all_by_word(string, '> ', '\n')
    print("Ответы")
    for answer in all_answer:
        print(answer)
    print("\nПримеры")
    for example in all_example:
        print(example)
def kbevent(event):
    """pyxhook keyboard callback.

    Looks up the current clipboard text on what appears to be a
    Ctrl+C key sequence and stops the main loop on spacebar.
    Mutates the module globals ``running`` and ``prev_key``.
    """
    global running,prev_key,cursor
    # print key info
    #print(event.Ascii)
    # 227 followed by 99 ('c') — 227 is presumably the Ctrl key code on
    # this platform, so this fires right after a Ctrl+C copy. TODO confirm.
    if((prev_key==227) and(event.Ascii==99)):
        # result=get_word(pyperclip.paste(),cursor)
        # for res in result:
        #     print('##')
        #     for i in res:
        #         #print(i)
        #         pretty_string_gen(i)
        # Translate whatever was just copied to the clipboard.
        get_word_api_reverso(pyperclip.paste())
        print('------------------------------------------------------------')
        #pyperclip.copy('')
    # Remember this key so the next event can detect the Ctrl+C pair.
    prev_key=event.Ascii
    # If the ascii value matches spacebar, terminate the while loop
    if event.Ascii == 32:
        running = False
prev_key=0
connection,cursor=create_db_file("test.db")
# Create hookmanager
hookman = pyxhook.HookManager()
# Define our callback to fire when a key is pressed down
hookman.KeyDown = kbevent
# Hook the keyboard
hookman.HookKeyboard()
# Start our listener
hookman.start()
# Create a loop to keep the application running
running = True
while running:
time.sleep(0.1)
# Close the listener when we are done
hookman.cancel() | StarcoderdataPython |
4911629 | <reponame>Ehsan-Tavan/twitter_crawlers
import os
from data_loader import load_data
from configuration import get_config
from crawler import CrawlKeyWords, CrawlUserTweets, load_saved_users
from utils import extract_key_words, filter_tweets, save_tweets
ARGS = get_config()
def main():
    """Crawl tweets: find users via keyword search, then crawl each new
    user's timeline, filter it, and save the surviving tweets."""
    # Keywords used for the initial user discovery crawl.
    data = load_data(os.path.join(ARGS.data_dir, ARGS.key_words_path))
    key_words = extract_key_words(data, ARGS.key_words_header)
    crawl_key_words = CrawlKeyWords(ARGS)
    crawl_key_words.work_flow(key_words)
    user_names = crawl_key_words.user_names
    crawl_user_tweets = CrawlUserTweets(ARGS)
    # Second keyword list used to filter the crawled tweets.
    filtered_data = load_data(os.path.join(ARGS.data_dir, ARGS.filter_key_words_path))
    filtered_key_words = extract_key_words(filtered_data, ARGS.filter_key_words_header)
    # Users already crawled in earlier runs are skipped below.
    crawled_users = load_saved_users(ARGS)
    tweets = list()
    dates = list()
    for users in user_names:
        for user in users:
            if user not in crawled_users:
                try:
                    crawl_user_tweets.work_flow(user)
                    # NOTE(review): these assignments REPLACE the lists on
                    # every iteration, so only the last successfully crawled
                    # user's tweets reach save_tweets() below — if the intent
                    # was to accumulate across users, this is a bug; confirm.
                    tweets = crawl_user_tweets.crawled_data["tweet"]
                    dates = crawl_user_tweets.crawled_data["date"]
                    tweets, dates = filter_tweets(tweets, dates, filtered_key_words)
                    # Drop falsy entries left behind by the filter step.
                    tweets = list(filter(None, tweets))
                    dates = list(filter(None, dates,))
                except Exception as e:
                    # Best-effort crawl: log and continue with the next user.
                    print(e)
    save_tweets(tweets, dates, os.path.join(ARGS.data_dir, ARGS.final_tweets_path))
if __name__ == "__main__":
main()
| StarcoderdataPython |
6650292 | import onnx
import numpy
from .base_operator import QuantOperatorBase
from ..quant_utils import attribute_to_kwarg, ms_domain, QuantType
from onnx import onnx_pb as onnx_proto
'''
Quantize LSTM
'''
class LSTMQuant(QuantOperatorBase):
    """Quantizes an ONNX LSTM node into a DynamicQuantizeLSTM node.

    The input (W) and recurrent (R) weight initializers are quantized to
    INT8 per-channel, transposed to the layout DynamicQuantizeLSTM expects,
    and appended (with their scales/zero points) to the node's inputs.
    """
    def __init__(self, onnx_quantizer, onnx_node):
        super().__init__(onnx_quantizer, onnx_node)
    def quantize(self):
        '''
        parameter node: LSTM node.
        parameter new_nodes_list: List of new nodes created before processing this node.
        return: a list of nodes in topological order that represents the quantized LSTM node.
        '''
        node = self.node
        assert (node.op_type == "LSTM")
        # Fall back to the generic path when either weight tensor cannot be
        # quantized (e.g. it is not a constant initializer).
        if (not self.quantizer.is_valid_quantize_weight(node.input[1]) or
                not self.quantizer.is_valid_quantize_weight(node.input[2])):
            super().quantize()
            return
        model = self.quantizer.model
        W = model.get_initializer(node.input[1])
        R = model.get_initializer(node.input[2])
        # Standard ONNX LSTM weights are rank-3:
        # W is [num_dir, 4*hidden, input], R is [num_dir, 4*hidden, hidden].
        if (len(W.dims) != 3 or len(R.dims) != 3):
            super().quantize()
            return
        [W_num_dir, W_4_hidden_size, W_input_size] = W.dims
        [R_num_dir, R_4_hidden_size, R_hidden_size] = R.dims
        # Per-channel quantization operates on axis 0, so temporarily fold
        # the first two dims together; they are restored via reshape below.
        if self.quantizer.is_per_channel():
            del W.dims[0]
            del R.dims[0]
            W.dims[0] = W_num_dir * W_4_hidden_size
            R.dims[0] = R_num_dir * R_4_hidden_size
        # NOTE(review): quantize_weight_per_channel is called even when
        # is_per_channel() is false — confirm that is intentional.
        quant_input_weight_tuple = self.quantizer.quantize_weight_per_channel(node.input[1], onnx_proto.TensorProto.INT8, 0)
        quant_recurrent_weight_tuple = self.quantizer.quantize_weight_per_channel(node.input[2], onnx_proto.TensorProto.INT8, 0)
        W_quant_weight = model.get_initializer(quant_input_weight_tuple[0])
        R_quant_weight = model.get_initializer(quant_recurrent_weight_tuple[0])
        # Restore rank-3 shape, then swap the last two axes so the weights
        # match the layout DynamicQuantizeLSTM expects.
        W_quant_array = onnx.numpy_helper.to_array(W_quant_weight)
        R_quant_array = onnx.numpy_helper.to_array(R_quant_weight)
        W_quant_array = numpy.reshape(W_quant_array, (W_num_dir, W_4_hidden_size, W_input_size))
        R_quant_array = numpy.reshape(R_quant_array, (R_num_dir, R_4_hidden_size, R_hidden_size))
        W_quant_array = numpy.transpose(W_quant_array, (0, 2, 1))
        R_quant_array = numpy.transpose(R_quant_array, (0, 2, 1))
        W_quant_tranposed = onnx.numpy_helper.from_array(W_quant_array, quant_input_weight_tuple[0])
        R_quant_tranposed = onnx.numpy_helper.from_array(R_quant_array, quant_recurrent_weight_tuple[0])
        # Replace the flat quantized initializers with the transposed ones.
        model.remove_initializers([W_quant_weight, R_quant_weight])
        model.add_initializer(W_quant_tranposed)
        model.add_initializer(R_quant_tranposed)
        W_quant_zp = model.get_initializer(quant_input_weight_tuple[1])
        R_quant_zp = model.get_initializer(quant_recurrent_weight_tuple[1])
        W_quant_scale = model.get_initializer(quant_input_weight_tuple[2])
        R_quant_scale = model.get_initializer(quant_recurrent_weight_tuple[2])
        # Zero points / scales carry one value per channel; reshape their
        # dims back to [num_dir, 4*hidden] to match the weight layout.
        if self.quantizer.is_per_channel():
            W_quant_zp.dims[:] = [W_num_dir, W_4_hidden_size]
            R_quant_zp.dims[:] = [R_num_dir, R_4_hidden_size]
            W_quant_scale.dims[:] = [W_num_dir, W_4_hidden_size]
            R_quant_scale.dims[:] = [R_num_dir, R_4_hidden_size]
        # Rebuild the input list: X, quantized W/R, then the optional LSTM
        # inputs 3..7 ("" when absent), then scales and zero points.
        inputs = []
        input_len = len(node.input)
        inputs.extend([node.input[0]])
        inputs.extend([quant_input_weight_tuple[0], quant_recurrent_weight_tuple[0]])
        inputs.extend([node.input[3]if input_len > 3 else ""])
        inputs.extend([node.input[4]if input_len > 4 else ""])
        inputs.extend([node.input[5]if input_len > 5 else ""])
        inputs.extend([node.input[6]if input_len > 6 else ""])
        inputs.extend([node.input[7]if input_len > 7 else ""])
        inputs.extend([quant_input_weight_tuple[2], quant_input_weight_tuple[1], quant_recurrent_weight_tuple[2], quant_recurrent_weight_tuple[1]])
        # Copy all original attributes and move the op into the MS domain.
        kwargs = {}
        for attribute in node.attribute:
            kwargs.update(attribute_to_kwarg(attribute))
        kwargs["domain"] = ms_domain
        quant_lstm_name = "" if node.name == "" else node.name + "_quant"
        quant_lstm_node = onnx.helper.make_node("DynamicQuantizeLSTM", inputs, node.output, quant_lstm_name, **kwargs)
        self.quantizer.new_nodes += [quant_lstm_node]
| StarcoderdataPython |
3392539 | """Defines helper functions for use throughout the application.
Copyright 2020 <NAME>, The Paperless Permission Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from io import BytesIO, StringIO
from csv import DictReader
import logging
def bytes_io_to_string_io(bytes_io):
    """Decode an in-memory bytes buffer (UTF-8) into an in-memory text buffer."""
    decoded_text = bytes_io.getvalue().decode()
    return StringIO(decoded_text)
def bytes_io_to_tsv_dict_reader(bytes_io):
    """Parse an in-memory TSV bytes buffer into a csv.DictReader.

    The buffer is decoded (UTF-8) to a text buffer and read as
    tab-separated values; the first row supplies the dict keys.
    """
    text_buffer = StringIO(bytes_io.getvalue().decode())
    return DictReader(text_buffer, delimiter='\t')
def disable_logging(f):
    """Decorator: silence logging at WARNING level and below while the
    wrapped function runs, then restore normal logging.

    Improvements over the original:
    * keyword arguments are forwarded (they were silently dropped);
    * logging is restored in a ``finally`` block, so an exception in *f*
      no longer leaves logging disabled for the rest of the process;
    * ``functools.wraps`` preserves the wrapped function's metadata.
    """
    from functools import wraps

    @wraps(f)
    def wrapper(*args, **kwargs):
        logging.disable(logging.WARNING)
        try:
            return f(*args, **kwargs)
        finally:
            logging.disable(logging.NOTSET)
    return wrapper
| StarcoderdataPython |
3207529 | #Package configuration information
import os.path
import sys
import re
import importlib
import tempfile
import logging
logger = logging.getLogger("QGL")
# Where to store AWG data
if os.getenv('AWG_DIR'):
AWGDir = os.getenv('AWG_DIR')
else:
logger.warning("AWG_DIR environment variable not defined. Unless otherwise specified, using temporary directory for AWG sequence file outputs.")
AWGDir = tempfile.mkdtemp(prefix="AWG")
# The db file, where the channel libraries are stored
db_resource_name = None
# The config file (executed upon channel library loading)
# config_file = None
# plotting options
plotBackground = '#EAEAF2'
gridColor = None
# select pulse library (standard or all90)
pulse_primitives_lib = "standard"
# select a CNOT implementation (a name of a Pulse function that implements
# CNOT in your gate set, e.g. CNOT_simple or CNOT_CR).
# This default can be overridden on a per-Edge case as a channel property
cnot_implementation = "CNOT_CR"
def load_config():
    """Import (and thereby execute) the configuration module named by the
    ``BBN_CONFIG`` environment variable, if it is set.

    The module's directory is appended to ``sys.path`` so the module can be
    imported by basename.  Sets the module-level ``config_file`` global.
    Does nothing (returns None) when ``BBN_CONFIG`` is unset.

    Raises:
        Exception: when the module cannot be imported; the underlying
            failure is chained as ``__cause__`` (the original used a bare
            ``except:``, which also swallowed KeyboardInterrupt/SystemExit
            and discarded the real error).
    """
    global config_file
    if os.getenv('BBN_CONFIG'):
        config_file = os.getenv("BBN_CONFIG")
        try:
            sys.path.append(os.path.dirname(config_file))
            importlib.import_module(os.path.splitext(os.path.basename(config_file))[0])
        except Exception as err:
            raise Exception(f"Could not import/execute the BBN_CONFIG {config_file}") from err
def load_db():
    """Read the ``BBN_DB`` environment variable into the module-level
    ``db_resource_name`` and return it.

    When the variable is unset (or empty) the global is left untouched
    and None is returned.
    """
    global db_resource_name
    env_value = os.getenv('BBN_DB')
    if not env_value:
        return None
    db_resource_name = env_value
    return db_resource_name
4864316 | """
LeetCode Problem: 59. Spiral Matrix II
Link: https://leetcode.com/problems/spiral-matrix-ii/
Language: Python
Written by: <NAME>
Time Complexity: O(n^2)
Space Complexity: O(n^2)
"""
class Solution:
    def generateMatrix(self, n: int) -> "List[List[int]]":
        """Fill an n x n matrix with 1..n*n in clockwise spiral order.

        Fixes: the ``List[List[int]]`` annotation was never imported, so
        merely importing this module raised NameError outside the LeetCode
        sandbox — the annotation is now a string and is not evaluated.
        The flag-based boundary tracking is replaced by the standard
        four-boundary ring fill, which is O(n^2) time / O(n^2) output space.
        """
        matrix = [[0] * n for _ in range(n)]
        top, bottom, left, right = 0, n - 1, 0, n - 1
        value = 1
        while value <= n * n:
            # Top row, left -> right.
            for col in range(left, right + 1):
                matrix[top][col] = value
                value += 1
            top += 1
            # Right column, top -> bottom.
            for row in range(top, bottom + 1):
                matrix[row][right] = value
                value += 1
            right -= 1
            # Bottom row, right -> left.
            for col in range(right, left - 1, -1):
                matrix[bottom][col] = value
                value += 1
            bottom -= 1
            # Left column, bottom -> top.
            for row in range(bottom, top - 1, -1):
                matrix[row][left] = value
                value += 1
            left += 1
        return matrix
4930997 | <filename>modules/SenseHatDisplay/test/UnitTests.py
import unittest
import json
import sys
sys.path.insert(0, '../')
import app.MessageParser
class UnitTests(unittest.TestCase):
def test_HighestProbabilityTagMeetingThreshold(self):
MessageParser = app.MessageParser.MessageParser()
message1 = json.loads(
"{\"iteration\": \"\",\"id\": \"\",\"predictions\": [{\"probability\": 0.3,\"tagName\": \"Apple\",\"tagId\": \"\",\"boundingBox\": null},{\"probability\": 0.4,\"tagName\": \"Banana\",\"tagId\": \"\",\"boundingBox\": null}],\"project\": \"\",\"created\": \"2019-12-10T04:37:49.657555\"}")
self.assertEqual(
MessageParser.highestProbabilityTagMeetingThreshold(message1, 0.5), 'none')
message2 = json.loads(
"{\"iteration\": \"\",\"id\": \"\",\"predictions\": [{\"probability\": 0.5,\"tagName\": \"Apple\",\"tagId\": \"\",\"boundingBox\": null},{\"probability\": 0.4,\"tagName\": \"Banana\",\"tagId\": \"\",\"boundingBox\": null}],\"project\": \"\",\"created\": \"2019-12-10T04:37:49.657555\"}")
self.assertEqual(MessageParser.highestProbabilityTagMeetingThreshold(
message2, 0.3), 'Apple')
message3 = json.loads(
"{\"iteration\": \"\",\"id\": \"\",\"predictions\": [{\"probability\": 0.038001421838998795,\"tagName\": \"Apple\",\"tagId\": \"\",\"boundingBox\": null},{\"probability\": 0.38567957282066345,\"tagName\": \"Banana\",\"tagId\": \"\",\"boundingBox\": null}],\"project\": \"\",\"created\": \"2019-12-10T04:37:49.657555\"}")
self.assertEqual(MessageParser.highestProbabilityTagMeetingThreshold(
message3, 0.3), 'Banana')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3463699 | <reponame>pmbrull/OpenMetaWrapper
"""OpenMetadata Catalogue API Wrapper"""
__version__ = "0.0.1"
| StarcoderdataPython |
1808129 | from flask import *
from datetime import datetime
import config
from app import db, utils
from .model import User
from ..changelog.model import ChangeLog
from ..transaction.model import Transaction
from ..trade.model import Trade
import re
from flask_cors import CORS
mod_user = Blueprint('user', __name__)
CORS(mod_user)
@mod_user.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate a user (POST).

    On success the session is populated (loggedin/user_id/username/
    usertype) and the user is redirected to the profile page matching
    their role; bad credentials yield a 400 response.
    """
    if request.method == 'POST' and 'username' in request.form and 'password' in request.form:
        username = request.form['username']
        password = request.form['password']
        user = User.query.filter(User.username == username).first()
        # check_password() verifies the submitted password against the
        # stored credential; same error for unknown user and wrong password
        # so attackers cannot enumerate accounts.
        if user is None or not user.check_password(password):
            return make_response('Invalid Username or Password', 400, None)
        session['loggedin'] = True
        session['user_id'] = user.id
        session['username'] = user.username
        session['usertype'] = user.user_type
        # Role-based landing page.
        if user.user_type == User.CLIENT:
            return redirect('/client/profile')
        elif user.user_type == User.TRADER:
            return redirect('/trader/profile')
        elif user.user_type == User.MANAGER:
            return redirect('/manager/profile')
    # GET, incomplete POST, or an unknown user_type falls through to the form.
    return render_template('login.html')
@mod_user.route('/register', methods=['GET', 'POST'])
def register():
    """Register a new user account.

    Anonymous visitors self-register as CLIENT accounts; a logged-in
    MANAGER may create accounts of any user type.  GET renders the form,
    POST validates the fields and creates the user.

    Fix: the original indexed ``session['usertype']`` directly, which
    raised KeyError (HTTP 500) for anonymous visitors — exactly the users
    this page exists for.  ``session.get`` returns None for them instead.
    """
    msg = ''
    if request.method == 'POST' and 'username' in request.form and 'password' in request.form and 'email' in request.form:
        username = request.form['username']
        password = request.form['password']
        email = request.form['email']
        first_name = request.form['firstname']
        last_name = request.form['lastname']
        mobile_phone = request.form['mobilephone']
        street = request.form['street']
        city = request.form['city']
        state = request.form['state']
        zipcode = request.form['zipcode']
        user = User.query.filter(User.username == username).first()
        if user:
            msg = 'User already exists !'
        elif not re.match(r'[^@]+@[^@]+\.[^@]+', email):
            msg = 'Invalid email address !'
        elif not re.match(r'[A-Za-z0-9]+', username):
            # NOTE(review): re.match only anchors at the start, so this does
            # not actually reject trailing punctuation — confirm intent.
            msg = 'Username must contain only characters and numbers !'
        elif not re.match(r'[0-9]{10}', mobile_phone):
            # NOTE(review): also accepts inputs longer than 10 digits.
            msg = 'Phone number must be of only 10 digits !'
        elif not username or not password or not email or not first_name or not last_name or not mobile_phone:
            msg = 'Please fill out the form !'
        else:
            user = None
            if session.get('usertype') == User.MANAGER:
                # Managers choose the role of the account they create.
                user_type = int(request.form['usertype'])
                print("usertype::: ", user_type)
                user = User(username, email, first_name, last_name, password, mobile_phone, user_type, street, city,
                            state, zipcode)
                print(user.level)
            else:
                # Self-registration is always a CLIENT account.
                user = User(username, email, first_name, last_name, password, mobile_phone, User.CLIENT, street, city,
                            state, zipcode)
            db.session.add(user)
            db.session.commit()
            msg = 'You have successfully registered !'
            if session.get('usertype') == User.MANAGER:
                return redirect('/manager/profile')
            return redirect('/login')
    elif request.method == 'POST':
        msg = 'Please fill out the form !'
    if session.get('usertype') == User.MANAGER:
        # NOTE(review): every other render_template call passes a '*.html'
        # name — 'trader_client' may be missing its extension; confirm.
        return render_template('trader_client')
    return render_template('register.html', msg=msg)
@mod_user.route('/logout')
def logout():
    """Drop the login-related session keys (when a user is logged in)
    and redirect to the home page."""
    if 'username' in session:
        for key in ('username', 'user_id', 'usertype', 'loggedin'):
            session.pop(key)
    return redirect('/')
@mod_user.route('/profile')
def get_user_profile():
    """Send a logged-in user to the profile page for their role;
    anonymous visitors (and unknown roles) go to the login page."""
    if 'username' not in session:
        return redirect('/login')
    role_pages = {
        User.CLIENT: '/client/profile',
        User.TRADER: '/trader/profile',
        User.MANAGER: '/manager/profile',
    }
    target = role_pages.get(session['usertype'])
    if target is not None:
        return redirect(target)
    return redirect('/login')
@mod_user.route('/client/profile', methods=['GET'])
def client_profile():
    """Client dashboard: show the logged-in client's profile together with
    their fiat deposits and trades, newest first.  403 for anyone who is
    not a logged-in CLIENT.
    """
    if 'username' not in session or session['usertype'] != User.CLIENT:
        abort(403)
    user_ = User.query.filter(User.username == session['username']).first()
    # Both querysets are lazy; the template iterates them.
    deposits_ = Transaction.query.filter_by(client_id=user_.id, xid_type='add_fund').order_by(
        Transaction.timestamp.desc())
    trades_ = Trade.query.filter_by(client_id=user_.id).order_by(Trade.timestamp.desc())
    return render_template('client/profile.html', user=user_, deposits=deposits_, trades=trades_)
@mod_user.route('/client/deposit_fiat', methods=['GET', 'POST'])
def deposit_fiat():
    """Let a logged-in CLIENT request a fiat deposit.

    POST records a *pending* ``add_fund`` Transaction plus a matching
    ChangeLog row and redirects back to the profile page; the balance
    itself is not credited here.  403 for non-clients.
    """
    if 'username' not in session or session['usertype'] != User.CLIENT:
        abort(403)
    if request.method == 'POST':
        client_id = session['user_id']
        # NOTE(review): unvalidated float() — a non-numeric form value
        # raises ValueError (HTTP 500); negative amounts are not rejected.
        amount = float(request.form['fiat_deposit_amount'])
        user = User.query.filter(User.id == client_id).first()
        # Balance update is deliberately commented out here — presumably
        # the pending transaction is approved (and credited) elsewhere;
        # TODO confirm the approval flow.
        # user.fiat_balance += amount
        transaction = Transaction(xid_type='add_fund',
                                  status='pending',
                                  client_id=user.id,
                                  timestamp=datetime.now(),
                                  fiat_amount=amount)
        db.session.add(transaction)
        # flush() assigns transaction.id so the ChangeLog can reference it
        # before the final commit.
        db.session.flush()
        changelog = ChangeLog(timestamp=datetime.now(),
                              xid=transaction.id,
                              status='pending',
                              xid_type='add_fund',
                              client_id=user.id)
        db.session.add(changelog)
        db.session.commit()
        return redirect('/client/profile')
    return render_template('client/deposit_fiat.html')
@mod_user.route('/client/trade', methods=['GET', 'POST'])
def trade():
    """Bitcoin buy/sell page for a logged-in CLIENT.

    GET renders the trade form with the client's balances and the current
    exchange rate.  POST re-authenticates via password, computes the USD
    total and commission, checks the relevant balance, and records a
    *pending* Trade plus ChangeLog row.  403 for non-clients or a wrong
    password.
    """
    if 'username' not in session or session['usertype'] != User.CLIENT:
        abort(403)
    exchange_rate = utils.get_current_rate()
    user = User.query.filter(User.id == session['user_id']).first()
    # Data the template needs for both GET and the invalid-POST re-render.
    account_ = {
        'bitcoin_balance': user.bitcoin_balance,
        'fiat_balance': user.fiat_balance,
        'level': user.level,
        'commission': config.commission_rates[user.level],
        'exchange_rate': exchange_rate
    }
    if request.method == 'POST':
        # Trades require a fresh password confirmation.
        password = request.form['password']
        if not user.check_password(password):
            abort(403)
        valid = True
        btc_amount = float(request.form['bitcoin_amount'])
        type_of_trade = request.form['buysell']
        commission_type = request.form['commissiontype']
        # Running totals of what the trade will cost the client, in each
        # currency; only the relevant one is debited below.
        btc_total = 0.0
        usd_total = 0.0
        total_usd_amount = btc_amount * exchange_rate
        # Commission is a per-level percentage of the USD notional.
        commission = config.commission_rates[user.level] * total_usd_amount / 100
        if type_of_trade == 'buy':
            usd_total = total_usd_amount
        else:
            btc_total = btc_amount
        # The client chooses which currency the commission is charged in.
        if commission_type == 'usd':
            usd_total += commission
        else:
            btc_total += commission / exchange_rate
        # Reject trades the balances cannot cover.
        if usd_total > user.fiat_balance:
            valid = False
        if btc_total > user.bitcoin_balance:
            valid = False
        if not valid:
            # NOTE(review): no error message is surfaced to the template —
            # the form silently re-renders; confirm that is intended.
            return render_template('client/trade.html', title='Client Trading', account=account_)
        trade_ = Trade(xid_type=type_of_trade,
                       status='pending',
                       client_id=user.id,
                       timestamp=datetime.now(),
                       fiat_amount=total_usd_amount,
                       bitcoin_amount=btc_amount,
                       exchange_rate=exchange_rate,
                       commission=commission,
                       commission_type=commission_type)
        db.session.add(trade_)
        # flush() assigns trade_.id for the ChangeLog foreign reference.
        db.session.flush()
        changelog = ChangeLog(timestamp=datetime.now(),
                              xid=trade_.id,
                              status='pending',
                              xid_type=type_of_trade,
                              client_id=user.id)
        db.session.add(changelog)
        db.session.commit()
        return redirect('/client/profile')
    return render_template('client/trade.html', title='Client Trading', account=account_)
| StarcoderdataPython |
35749 | <filename>finitewave/core/command/__init__.py
from finitewave.core.command.command import Command
from finitewave.core.command.command_sequence import CommandSequence
| StarcoderdataPython |
6482709 | <reponame>r-woo/elfai
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from elf.options import auto_import_options, PyOptionSpec
from rlpytorch import Model
from elfgames.go.multiple_prediction import MultiplePrediction
class Model_Policy(Model):
    """Go policy network: a stack of 3x3 conv layers (optionally with batch
    norm and leaky ReLU) followed by a final conv producing one plane per
    future action, each softmaxed over the flattened board."""
    @classmethod
    def get_option_spec(cls):
        # Command-line options controlling the architecture.
        spec = PyOptionSpec()
        spec.addBoolOption(
            'bn',
            'toggles batch norm',
            True)
        spec.addBoolOption(
            'leaky_relu',
            'toggles leaky ReLU',
            True)
        spec.addIntOption(
            'num_layer',
            'number of layers',
            39)
        spec.addIntOption(
            'dim',
            'model dimension',
            128)
        return spec
    @auto_import_options
    def __init__(self, option_map, params):
        super().__init__(option_map, params)
        self.board_size = params["board_size"]
        self.num_future_actions = params["num_future_actions"]
        self.num_planes = params["num_planes"]
        # print("#future_action: " + str(self.num_future_actions))
        # print("#num_planes: " + str(self.num_planes))
        # Simple method. multiple conv layers.
        # Plain lists of layers; each layer is also registered on the module
        # via setattr below so its parameters are tracked.
        self.convs = []
        self.convs_bn = []
        last_planes = self.num_planes
        for i in range(self.options.num_layer):
            conv = nn.Conv2d(last_planes, self.options.dim, 3, padding=1)
            # When batch norm is disabled, substitute an identity function.
            conv_bn = (nn.BatchNorm2d(self.options.dim)
                       if self.options.bn
                       else lambda x: x)
            setattr(self, "conv" + str(i), conv)
            self.convs.append(conv)
            setattr(self, "conv_bn" + str(i), conv_bn)
            self.convs_bn.append(conv_bn)
            last_planes = self.options.dim
        # One output plane per predicted future action.
        self.final_conv = nn.Conv2d(
            self.options.dim, self.num_future_actions, 3, padding=1)
        # Softmax as the final layer
        self.softmax = nn.Softmax(dim=1)
        self.relu = nn.LeakyReLU(0.1) if self.options.leaky_relu else nn.ReLU()
    def forward(self, x):
        # x["s"] is the board-feature tensor; _var() comes from the Model
        # base class (presumably wraps/moves the tensor — TODO confirm).
        s = self._var(x["s"])
        # conv -> activation -> (batch norm or identity), repeated.
        for conv, conv_bn in zip(self.convs, self.convs_bn):
            s = conv_bn(self.relu(conv(s)))
        output = self.final_conv(s)
        pis = []
        d = self.board_size * self.board_size
        # Channel i is the logits for future action i; softmax over the
        # flattened board gives a move distribution per action.
        for i in range(self.num_future_actions):
            pis.append(self.softmax(output[:, i].contiguous().view(-1, d)))
        return dict(pis=pis, pi=pis[0])
# Format: key, [model, method]
Models = {
"df_policy": [Model_Policy, MultiplePrediction]
}
| StarcoderdataPython |
6482649 | <filename>server/plugins/munkiversion/munkiversion.py
from django.db.models import Count
import sal.plugin
class MunkiVersion(sal.plugin.Widget):
    """Sal dashboard widget charting how many macOS machines run each
    installed version of Munki."""
    description = 'Chart of installed versions of Munki'
    # Munki only exists on macOS, so restrict the widget accordingly.
    supported_os_families = [sal.plugin.OSFamilies.darwin]
    def get_context(self, queryset, **kwargs):
        """Add per-version machine counts (ordered by version string) to
        the base template context under the 'data' key."""
        context = self.super_get_context(queryset, **kwargs)
        munki_info = queryset.values('munki_version').annotate(
            count=Count('munki_version')).order_by('munki_version')
        context['data'] = munki_info
        return context
    def filter(self, machines, data):
        """Narrow *machines* to those running Munki version *data*; return
        the filtered queryset and a page title describing it."""
        machines = machines.filter(munki_version=data)
        title = 'Machines running version {} of MSC'.format(data)
        return machines, title
| StarcoderdataPython |
1847499 | import os
from setuptools import setup
def localopen(fname):
    """Open *fname* relative to this setup script's directory and return
    the file object.

    UTF-8 is forced so long_description is read identically on every
    platform (the original used the locale-dependent default encoding).
    """
    return open(os.path.join(os.path.dirname(__file__), fname), encoding="utf-8")
setup(
name='pymps',
version='0.1',
description='Libary to parse fixed-format MPS files.',
author='<NAME>',
author_email='<EMAIL>',
py_modules=['pymps'],
include_package_data=True,
zip_safe=False,
keywords=['netlib', 'linear programming'],
url='https://github.com/simpleroseinc/pymps',
long_description=localopen('README.md').read(),
install_requires=['numpy', 'pandas'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: Other/Proprietary License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
)
| StarcoderdataPython |
341118 | import sublime
import sublime_plugin
import os
from .logging import debug, printf
from .configurations import config_for_scope, is_supported_view
from .workspace import get_project_path
from .types import ClientStates
from .sessions import create_session, Session
# typing only
from .rpc import Client
from .settings import ClientConfig, settings
assert Client and ClientConfig
try:
from typing import Any, List, Dict, Tuple, Callable, Optional, Set
assert Any and List and Dict and Tuple and Callable and Optional and Set
assert Session
except ImportError:
pass
clients_by_window = {} # type: Dict[int, Dict[str, Session]]
class CodeIntelTextCommand(sublime_plugin.TextCommand):
    """Base TextCommand for language-client commands: only visible for
    supported views, with a capability check against the view's session."""
    def is_visible(self, event=None):
        return is_supported_view(self.view)
    def has_client_with_capability(self, capability):
        """True iff this view has a READY session advertising *capability*."""
        session = session_for_view(self.view)
        if session and session.has_capability(capability):
            return True
        return False
def window_configs(window: sublime.Window) -> 'Dict[str, Session]':
    """Return the config-name -> Session map registered for *window*,
    or an empty dict when the window has no clients yet."""
    return clients_by_window.get(window.id(), {})
def is_ready_window_config(window: sublime.Window, config_name: str):
    """Tell whether *window* has a session registered under *config_name*
    that has reached the READY state."""
    session = window_configs(window).get(config_name)
    if session is None:
        return False
    return session.state == ClientStates.READY
# Startup
def can_start_config(window: sublime.Window, config_name: str):
return config_name not in window_configs(window)
def get_window_env(window: sublime.Window, config: ClientConfig):
    """Build the launch arguments and environment for a language server.

    Expands Sublime Text variables (and ``~``) in the configured command
    line, then overlays the config's env vars — with both ST and OS
    variable expansion — onto a copy of the current process environment.
    Returns ``(expanded_args, env)``.
    """
    # Create a dictionary of Sublime Text variables
    variables = window.extract_variables()
    # Expand language server command line environment variables
    expanded_args = list(
        sublime.expand_variables(os.path.expanduser(arg), variables)
        for arg in config.binary_args
    )
    # Override OS environment variables
    env = os.environ.copy()
    for var, value in config.env.items():
        # Expand both ST and OS environment variables
        env[var] = os.path.expandvars(sublime.expand_variables(value, variables))
    return expanded_args, env
def start_window_config(window: sublime.Window, project_path: str, config: ClientConfig,
on_created: 'Callable'):
args, env = get_window_env(window, config)
config.binary_args = args
session = create_session(config, project_path, env, settings,
on_created=on_created,
on_ended=lambda: on_session_ended(window, config.name))
clients_by_window.setdefault(window.id(), {})[config.name] = session
debug("{} client registered for window {}".format(config.name, window.id()))
def on_session_ended(window: sublime.Window, config_name: str):
configs = window_configs(window)
del configs[config_name]
if not configs:
debug("all clients unloaded")
if clients_unloaded_handler:
clients_unloaded_handler(window.id())
def set_config_stopping(window: sublime.Window, config_name: str):
window_configs(window)[config_name].state = ClientStates.STOPPING
def client_for_closed_view(view: sublime.View) -> 'Optional[Client]':
return _client_for_view_and_window(view, sublime.active_window())
def client_for_view(view: sublime.View) -> 'Optional[Client]':
return _client_for_view_and_window(view, view.window())
def session_for_view(view: sublime.View) -> 'Optional[Session]':
return _session_for_view_and_window(view, view.window())
def _session_for_view_and_window(view: sublime.View, window: 'Optional[sublime.Window]') -> 'Optional[Session]':
    """Resolve the READY session serving *view* within *window*.

    Returns None when the view has no window, exceeds the size cutoff,
    has no matching client config, or its session is not READY yet.
    """
    if not window:
        debug("no window for view", view.file_name())
        return None
    if view.size() > 1000000:
        printf("file is too big, ignoring!")
        # Fix: the original returned False here, contradicting the
        # Optional[Session] annotation; callers only truth-test the
        # result, so None is the type-correct equivalent.
        return None
    config = config_for_scope(view)
    if not config:
        debug("config not available for view", view.file_name())
        return None
    window_config_states = window_configs(window)
    if config.name not in window_config_states:
        debug(config.name, "not available for view",
              view.file_name(), "in window", window.id())
        return None
    session = window_config_states[config.name]
    if session.state == ClientStates.READY:
        return session
    return None
def _client_for_view_and_window(view: sublime.View, window: 'Optional[sublime.Window]') -> 'Optional[Client]':
    """Return the RPC client of the READY session for *view*, or None
    (logging the reason) when no usable session/client exists."""
    session = _session_for_view_and_window(view, window)
    if session:
        if session.client:
            return session.client
        else:
            # READY session without a client is unexpected — log the state.
            debug(session.config.name, "in state", session.state, " for view",
                  view.file_name())
            return None
    else:
        debug('no session found')
        return None
# Shutdown
def remove_window_client(window: sublime.Window, config_name: str):
del clients_by_window[window.id()][config_name]
def unload_all_clients():
    """End every session that is still in the STARTING state, across all
    open windows; sessions without a client or in any other state are only
    logged and left alone."""
    for window in sublime.windows():
        for config_name, session in window_configs(window).items():
            if not session.client:
                debug('ignoring session of config without client')
                continue
            if session.state != ClientStates.STARTING:
                debug('ignoring unload of session in state', session.state)
                continue
            session.end()
closing_window_ids = set() # type: Set[int]
def check_window_unloaded():
    """Detect windows that were closed while still owning sessions and
    unload those sessions.

    Compares the tracked window ids against the currently open windows;
    newly-missing windows are queued in ``closing_window_ids``, their
    sessions are ended, and the queue is cleared.
    """
    global clients_by_window
    open_window_ids = list(window.id() for window in sublime.windows())
    # Iterate over a copy because unload_window_sessions() mutates
    # clients_by_window.
    iterable_clients_by_window = clients_by_window.copy()
    # NOTE(review): the loop variable `id` shadows the builtin of the same
    # name within this function.
    for id, window_clients in iterable_clients_by_window.items():
        if id not in open_window_ids and window_clients:
            if id not in closing_window_ids:
                closing_window_ids.add(id)
                debug("window closed", id)
    for closed_window_id in closing_window_ids:
        unload_window_sessions(closed_window_id)
    closing_window_ids.clear()
def unload_window_sessions(window_id: int):
window_configs = clients_by_window.pop(window_id, {})
for config_name, session in window_configs.items():
window_configs[config_name].state = ClientStates.STOPPING
debug("unloading session", config_name)
session.end()
def unload_old_clients(window: sublime.Window):
project_path = get_project_path(window)
configs = window_configs(window)
for config_name, session in configs.items():
if session.client and session.state == ClientStates.READY and session.project_path != project_path:
debug('unload', config_name, 'project path changed from',
session.project_path, 'to', project_path)
session.end()
clients_unloaded_handler = None # type: Optional[Callable]
def register_clients_unloaded_handler(handler: 'Callable'):
global clients_unloaded_handler
clients_unloaded_handler = handler
| StarcoderdataPython |
11234901 | """
验证手机号是否注册了163邮箱
add spider by judy
2019/01/04
更新下载统一为ha
目前看来网易邮箱的只有cookie下载的方式
modify by judy 2020/04/13
cookie中出现两个NTES_SESS
"""
import base64
import json
import re
import time
import traceback
import urllib.parse
from datetime import datetime
import pytz
import requests
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
from commonbaby.httpaccess import ResponseIO, Response
from commonbaby.helpers.helper_str import substring
from idownclient.clientdatafeedback import CONTACT_ONE, CONTACT, EML, Folder, PROFILE
from .spidermailbase import SpiderMailBase
class Spider163(SpiderMailBase):
def __init__(self, task, appcfg, clientid):
super(Spider163, self).__init__(task, appcfg, clientid)
self._html = None
self.sid = None
self._pubkey = '''-----<KEY>
-----END RSA PUBLIC KEY-----'''
    def _check_registration(self):
        """Check whether the phone number in ``self.task.account`` has a
        163 mailbox registered (Chinese numbers need the +86 prefix).

        Posts the number to the 163 registration "checkBinding" endpoint
        after priming cookies via the registration entrance page, then
        matches the response body for ``{"code":201,`` — presumably the
        "already bound" status; TODO confirm against the API.

        :return: True when the number appears to be registered.
        """
        res = False
        isreg = re.compile(r'{"code":201,', re.S)
        # Entrance page: establishes the session cookies the check needs.
        ua = "http://reg.email.163.com/unireg/call.do?cmd=register.entrance&from=163mail_right"
        url = "http://reg.email.163.com/unireg/call.do?cmd=added.mobilemail.checkBinding"
        data = {
            'mobile': self.task.account
        }
        s = requests.Session()
        r = s.get(ua)
        response = s.post(url=url, data=data)
        signup = isreg.search(response.text)
        if signup:
            # Response matched the "registered" marker -> return True.
            # print(signup.group())
            res = True
        return res
    def _get_sid(self):
        """Extract the Coremail session id from the download helper's
        cookies.

        Returns None when no 'Coremail' cookie is present; when the cookie
        value contains '%', the sid is the substring between the first two
        '%' characters (Coremail wraps the sid in percent signs).
        """
        res = None
        sid = self._ha.cookies.get('Coremail')
        if not sid:
            return res
        if '%' in sid:
            sid = substring(sid, '%', '%')
        return sid
    def _get_rsa_string(self, text: str):
        """RSA-encrypt *text* with the 163 login public key
        (PKCS#1 v1.5 padding) and return the base64-encoded ciphertext as
        a str — the format the login endpoint expects for the password
        field.
        """
        keyPub = RSA.importKey(self._pubkey)
        cipher = PKCS1_v1_5.new(keyPub)
        cipher_text = cipher.encrypt(text.encode())
        b64_text = base64.b64encode(cipher_text)
        outtext = b64_text.decode('utf-8')
        return outtext
def _pwd_login(self) -> bool:
res = False
try:
# 获取pkid
url = 'https://mimg.127.net/p/freemail/index/unified/static/2020/js/163.4f169806760c6b91df41.js'
headers = '''
Referer: https://mail.163.com/
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36'''
resp_js = self._ha.getstring(url, headers=headers)
pkid = re.search(r'promark:"(.*?)"', resp_js).group(1)
# 获取cookie,不然gt_url返回不正确
ini_url = 'https://dl.reg.163.com/dl/ini?pd=mail163&pkid={}&pkht=mail.163.com'.format(pkid)
self._ha.getstring(ini_url)
# 获取tk参数
un = urllib.parse.quote_plus(self.task.account)
gt_url = 'https://dl.reg.163.com/dl/gt?un={}&pkid={}&pd=mail163'.format(un, pkid)
headers = '''
Accept: */*
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9
Connection: keep-alive
Host: dl.reg.163.com
Sec-Fetch-Dest: document
Sec-Fetch-Mode: navigate
Sec-Fetch-Site: none
Sec-Fetch-User: ?1
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36
'''
resp_json = self._ha.getstring(gt_url, headers=headers)
tk = re.search(r'"tk":"(.*?)"', resp_json).group(1)
# 提交账号密码
l_url = 'https://dl.reg.163.com/dl/l'
payload = {
'channel': '0',
'd': '10',
'domains': '',
'l': '0',
'pd': 'mail163',
'pkid': pkid,
'pw': self._get_rsa_string(self.task.password),
'pwdKeyUp': '1',
'rtid': 'AQZZzu1OKtfWaoPpHX7El7w6Odib91fc',
't': str(int(datetime.now(pytz.timezone('Asia/Shanghai')).timestamp() * 1000)),
'tk': tk,
'topURL': 'https://mail.163.com/',
'un': self.task.account
}
headers = '''
Accept: */*
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9
Connection: keep-alive
Host: dl.reg.163.com
Origin: https://dl.reg.163.com
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36
'''
# 加0.5等待时间,不然ret会返回409
time.sleep(0.5)
resp_json = self._ha.getstring(l_url, req_data='', json=payload, headers=headers)
ret = json.loads(resp_json)['ret']
if ret != '201':
if ret == '445':
msg = "Pwd login error: {}".format(ret) + ' 需要验证码'
self._logger.error(msg)
self._write_log_back(msg)
else:
self._logger.error("Pwd login error: {}".format(ret))
return res
# 登陆主页
ntesdoor_url = 'https://mail.163.com/entry/cgi/ntesdoor?'
data = '''
style: -1
df: mail163_letter
allssl: true
net:
language: -1
from: web
race:
iframe: 1
url2: https://mail.163.com/errorpage/error163.htm
product: mail163'''
headers = '''
accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
accept-encoding: gzip, deflate, br
accept-language: zh-CN,zh;q=0.9
cache-control: max-age=0
origin: https://mail.163.com
referer: https://mail.163.com/
sec-fetch-dest: iframe
sec-fetch-mode: navigate
sec-fetch-site: same-origin
sec-fetch-user: ?1
upgrade-insecure-requests: 1
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36'''
self._ha._managedCookie.add_cookies(".mail.163.com", f"nts_mail_user={self.task.account}:-1:1")
resp: Response = self._ha.get_response(ntesdoor_url, req_data=data, headers=headers)
res = self._cookie_login()
except Exception as ex:
self._logger.error("Pwd login error, err: {}".format(ex))
self._write_log_back("账密登录失败: {}".format(ex.args))
return res
def _cookie_login(self) -> bool:
res = False
try:
self._ha._managedCookie.add_cookies('163.com', self.task.cookie)
self.sid = self._get_sid()
if self.sid is None:
self._logger.error("Coremail.Cookie not found")
return res
url = f'https://mail.163.com/js6/main.jsp?sid={self.sid}&df=mail163_letter'
headers = '''User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36'''
html = self._ha.getstring(url, headers=headers)
if not html.__contains__('已发送'):
return res
self._userid = substring(html, "uid=", "&")
if not self._userid:
return res
res = True
except Exception:
self._logger.error(f"Cookie login error, err:{traceback.format_exc()}")
return res
def _get_profile(self) -> iter:
self.__before_download()
if self._html is None:
if self.sid is None:
self._logger.error("Invalid cookie")
url = "https://mail.163.com/js6/main.jsp"
querystring = {"sid": self.sid, "df": "mail163_letter"}
headers = '''
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
Cache-Control: no-cache
Connection: keep-alive
Host: mail.163.com
Pragma: no-cache
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36
'''
# response = requests.request("GET", url, headers=headers, params=querystring)
res_text = self._ha.getstring(url, headers=headers, params=querystring)
if "已发送" in res_text:
self._html = res_text
try:
re_uid = re.compile('uid:\'(.+?)\',suid')
uid = re_uid.search(self._html)
self._userid = uid.group(1)
re_username = re.compile("'true_name':'(.+?)'")
u_name = re_username.search(self._html)
p_data = PROFILE(self._clientid, self.task, self.task.apptype, self._userid)
if u_name:
p_data.nickname = u_name.group(1)
yield p_data
except Exception:
self._logger.error(f"Get profile info error, err:{traceback.format_exc()}")
def _get_contacts(self) -> iter:
# sid = self._get_sid()
if self.sid is None:
self._logger.error("Invalid cookie")
url = "https://mail.163.com/contacts/call.do"
querystring = {"uid": self._userid, "sid": self.sid, "from": "webmail",
"cmd": "newapi.getContacts", "vcardver": "3.0", "ctype": "all",
"attachinfos": "yellowpage,frequentContacts", "freContLim": "20"}
payload = '''order=[{"field":"N","desc":"false"}]'''
headers = f'''
Accept: */*
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
Cache-Control: no-cache
Connection: keep-alive
Content-Length: 36
Content-type: application/x-www-form-urlencoded
Host: mail.163.com
Origin: https://mail.163.com
Pragma: no-cache
Referer: https://mail.163.com/js6/main.jsp?sid={self.sid}&df=mail163_letter
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36
'''
try:
# response = requests.request("POST", url, data=payload, headers=headers, params=querystring)
res_test = self._ha.getstring(url, req_data=payload, headers=headers, params=querystring)
res_text_json = json.loads(res_test)
if res_text_json.get('code') != 200:
self._logger.error(f"Get contacts error, err:{traceback.format_exc()}")
return
con_list = res_text_json.get('data').get('contacts')
if con_list is None or len(con_list) == 0:
return
groups_dict = {}
groups = res_text_json.get('data').get('groups')
for group in groups:
group_cid = group['CID']
group_name = group['N']
groups_dict[group_cid] = group_name
con_all = CONTACT(self._clientid, self.task, self.task.apptype)
for one in con_list:
name = one.get('FN')
mail = one.get('EMAIL;type=INTERNET;type=pref')
con_one = CONTACT_ONE(self._userid, mail, self.task, self.task.apptype)
con_one.email = mail
con_one.nickname = name
if 'GROUPING' in one:
# 一个人可能有多个分组,用空格隔开
groupings = one.get('GROUPING').split(';')
for i in range(len(groupings)):
groupings[i] = groups_dict[groupings[i]]
group_names = ' '.join(groupings)
con_one.group = '=?utf-8?b?' + str(base64.b64encode(group_names.encode('utf-8')), 'utf-8')
con_all.append_innerdata(con_one)
if con_all.innerdata_len > 0:
yield con_all
except Exception:
self._logger.error(f"Get contact error, err:{traceback.format_exc()}")
def _get_folders(self) -> iter:
try:
if self._html is None:
# sid = self._get_sid()
if self.sid is None:
self._logger.error("Invalid cookie")
url = "https://mail.163.com/js6/main.jsp"
querystring = {"sid": self.sid, "df": "mail163_letter"}
headers = '''
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
Cache-Control: no-cache
Connection: keep-alive
Host: mail.163.com
Pragma: no-cache
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36
'''
# response = requests.request("GET", url, headers=headers, params=querystring)
res_text = self._ha.getstring(url, headers=headers, params=querystring)
if "已发送" in res_text:
self._html = res_text
re_id = re.compile("'id':(\d+),")
ids_all = re_id.findall(self._html)
re_name = re.compile("'name':'(.+?)',")
name_all = re_name.findall(self._html)
for folder_num in range(len(ids_all)):
folder = Folder()
folder.folderid = ids_all[folder_num]
folder.name = name_all[folder_num]
yield folder
except Exception:
self._logger.error(f"Get folder info error, err:{traceback.format_exc()}")
def _get_mails(self, folder: Folder) -> iter:
get_mail_num = 0
url = "https://mail.163.com/js6/s"
# sid = self._get_sid()
if self.sid is None:
self._logger.error("Invalid cookie")
# querystring = {"sid": self.sid, "func": "mbox:listMessages",
# "LeftNavfolder1Click": "1", "mbox_folder_enter": folder.folderid}
querystring = {"sid": self.sid, "func": "mbox:listMessages"}
while True:
payload_data = '<?xml version="1.0"?><object><int name="fid">{}</int>' \
'<string name="order">date</string><boolean name="desc">true</boolean>' \
'<int name="limit">50</int><int name="start">{}</int><boolean name="skipLockedFolders">' \
'false</boolean><string name="topFlag">top</string><boolean name="returnTag">' \
'true</boolean><boolean name="returnTotal">true</boolean></object>'.format(folder.folderid,
get_mail_num)
get_mail_num += 50
payload_url = urllib.parse.quote_plus(payload_data).replace('+', '%20')
payload = 'var=' + payload_url
headers = f'''
Accept: text/javascript
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
Cache-Control: no-cache
Connection: keep-alive
Content-Length: 539
Content-type: application/x-www-form-urlencoded
Host: mail.163.com
Origin: https://mail.163.com
Pragma: no-cache
Referer: https://mail.163.com/js6/main.jsp?sid={self.sid}&df=mail163_letter
Sec-Fetch-Dest: empty
Sec-Fetch-Mode: cors
Sec-Fetch-Site: same-origin
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36
'''
try:
# response = requests.request("POST", url, data=payload, headers=headers, params=querystring)
re_text = self._ha.getstring(url, req_data=payload, headers=headers, params=querystring)
re_mailid = re.compile("'id':'(.+?)',")
mailidall = re_mailid.findall(re_text)
for id_one in range(len(mailidall)):
mail_id = mailidall[id_one]
eml = EML(self._clientid, self.task, self._userid, mail_id, folder, self.task.apptype)
eml_info = self.__download_eml(mail_id)
eml.io_stream = eml_info[0]
eml.stream_length = eml_info[1]
yield eml
re_total = re.compile('\'total\':(\d+)')
total_res = re_total.search(re_text)
if total_res:
total = total_res.group(1)
if int(total) <= get_mail_num:
break
else:
self._logger.error("Cant get all email, something wrong")
break
except Exception:
self._logger.error(f"Get email info error, err:{traceback.format_exc()}")
def __download_eml(self, mail_id):
url = "https://mail.163.com/js6/read/readdata.jsp"
# sid = self._get_sid()
if self.sid is None:
self._logger.error("Invalid cookie")
querystring = {"sid": self.sid, "mid": mail_id,
"mode": "download", "action": "download_eml"}
headers = f'''
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
Cache-Control: no-cache
Connection: keep-alive
Host: mail.163.com
Pragma: no-cache
Referer: https://mail.163.com/js6/main.jsp?sid={self.sid}&df=mail163_letter
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36
'''
# eml = self._ha.get_response_stream(url, headers=headers, params=querystring)
# return eml
resp = self._ha.get_response(url, headers=headers, params=querystring)
stream_length = resp.headers.get('Content-Length', 0)
eml = ResponseIO(resp)
return eml, stream_length
def __before_download(self):
"""
设置邮箱已发送邮件的保存机制
:return:
"""
url = "https://mail.163.com/js6/s"
# sid = self._get_sid()
if self.sid is None:
self._logger.error("Invalid cookie")
querystring = {"sid": self.sid, "func": "user:setAttrs"}
payload = 'var=%3C%3Fxml%20version%3D%221.0%22%3F%3E%3Cobject%3E%3Cobject%20name%3D%22' \
'attrs%22%3E%3Cint%20name%3D%22save_sent%22%3E1%3C%2Fint%3E%3C%2Fobject%3E%3C%2Fobject%3E'
headers = f'''
Accept: text/javascript
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
Cache-Control: no-cache
Connection: keep-alive
Content-Length: 163
Content-type: application/x-www-form-urlencoded
Host: mail.163.com
Origin: https://mail.163.com
Pragma: no-cache
Referer: https://mail.163.com/js6/main.jsp?sid={self.sid}&df=mail163_letter
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36
'''
try:
# response = requests.request("POST", url, data=payload, headers=headers, params=querystring)
res_text = self._ha.getstring(url=url, req_data=payload, headers=headers, params=querystring)
if 'S_OK' in res_text:
# 设置成功
self._logger.info("Download attachment settings are successful.")
except Exception:
self._logger.error(f"Before donwload setting error, err:{traceback.format_exc()}")
finally:
return
| StarcoderdataPython |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import wiringpi
import time
def usleep(x):
    """Block the calling thread for *x* microseconds (best effort)."""
    microseconds_per_second = 1000000.0
    seconds = x / microseconds_per_second
    return time.sleep(seconds)
class Lcd1602:
    """Driver for an HD44780-style 16x2 LCD behind an I2C port expander.

    The expander's eight output bits map to the LCD control lines
    (RS / RW / STRB / backlight LED) and the 4-bit data bus D4..D7.
    All writes go through :meth:`digitalWrite`, which keeps a shadow copy
    (``old``) of the last byte sent over I2C.
    """

    # Bit positions on the I2C expander.
    RS = 0        # register select (0 = command, 1 = data)
    RW = 1        # read/write (held LOW: write-only)
    STRB = 2      # enable/strobe line
    LED = 3       # backlight
    D4 = 4
    D5 = 5
    D6 = 6
    D7 = 7

    # HD44780 control-register bits.
    LCD_BLINK_CTRL = 0x01
    LCD_CURSOR_CTRL = 0x02
    LCD_DISPLAY_CTRL = 0x04
    # HD44780 command bytes.
    LCD_CLEAR = 0x01
    LCD_HOME = 0x02
    LCD_ENTRY = 0x04
    LCD_CTRL = 0x08
    LCD_CDSHIFT = 0x10
    LCD_FUNC = 0x20
    LCD_CGRAM = 0x40
    LCD_DGRAM = 0x80
    # Function-set flags.
    LCD_FUNC_F = 0x04
    LCD_FUNC_N = 0x08
    LCD_FUNC_DL = 0x10
    LCD_CDSHIFT_RL = 0x04
    LCD_ENTRY_ID = 0x02

    _rows = 2
    _cols = 16
    _bits = 4
    _cx = 0       # current cursor column
    _cy = 0       # current cursor row
    _rowOff = [0x00, 0x40, 0x14, 0x54]  # DDRAM offset of each display row
    _lcdControl = 0                     # shadow of the display-control register
    old = 0       # shadow of the last byte written to the expander

    def __init__(self, addr, rows=2, cols=16, bits=4):
        self.lcd1602(addr, rows, cols, bits)

    def lcd1602(self, addr, rows=2, cols=16, bits=4):
        """Initialise the controller at I2C address *addr* into 4-bit mode.

        Follows the HD44780 power-up sequence: force 8-bit mode three
        times, switch to 4-bit mode, then configure display/cursor/entry.
        """
        self.fd = wiringpi.wiringPiI2CSetup(addr)
        self._rows = rows
        self._cols = cols
        self._bits = bits
        # Clear every expander output, then raise the backlight.
        for i in range(8):
            self.digitalWrite(i, wiringpi.LOW)
        self.digitalWrite(self.LED, wiringpi.HIGH)  # turn on LCD backlight
        self.digitalWrite(self.RW, wiringpi.LOW)    # allow writing to LCD
        self.digitalWrite(self.RS, wiringpi.LOW)
        self.digitalWrite(self.STRB, wiringpi.LOW)
        for i in range(self._bits):
            # BUG FIX: the data pin is D4 + i; the original wrote to
            # ``dataPin + i`` (i.e. D4 + 2*i), clearing the wrong lines.
            dataPin = self.D4 + i
            self.digitalWrite(dataPin, wiringpi.LOW)
        func = self.LCD_FUNC | self.LCD_FUNC_DL  # Set 8-bit mode 3 times
        self.put4Command(func >> 4)
        usleep(35000)
        self.put4Command(func >> 4)
        usleep(35000)
        self.put4Command(func >> 4)
        usleep(35000)
        func = self.LCD_FUNC  # 4th set: 4-bit mode
        self.put4Command(func >> 4)
        usleep(35000)
        func |= self.LCD_FUNC_N  # two-line display
        self.putCommand(func)
        usleep(35000)
        self.lcdDisplay(True)
        self.lcdCursor(False)
        self.lcdCursorBlink(False)
        self.lcdClear()
        self.putCommand(self.LCD_ENTRY | self.LCD_ENTRY_ID)
        self.putCommand(self.LCD_CDSHIFT | self.LCD_CDSHIFT_RL)

    def lcdPosition(self, x, y):
        """Move the cursor to 0-based column *x*, row *y*; ignore out-of-range."""
        # BUG FIX: use >= so that x == cols / y == rows (one past the last
        # valid 0-based position) is rejected too.
        if (x >= self._cols) or (x < 0):
            return
        if (y >= self._rows) or (y < 0):
            return
        self.putCommand(x + (self.LCD_DGRAM | self._rowOff[y]))
        self._cx = x
        self._cy = y

    def lcdPutchar(self, data):
        """Write one character byte at the cursor, wrapping at line/screen end."""
        self.digitalWrite(self.RS, 1)  # select the data register
        self.sendDataCmd(data)
        self._cx += 1
        if self._cx >= self._cols:
            self._cx = 0
            self._cy += 1
            if self._cy >= self._rows:
                self._cy = 0
            self.putCommand(self._cx + (self.LCD_DGRAM | self._rowOff[self._cy]))

    def lcdPuts(self, string):
        """Write *string*, encoded as Shift-JIS (for kana-capable modules)."""
        for ch in bytearray(string.encode("sjis")):
            self.lcdPutchar(ch)

    def digitalWrite(self, pin, value):
        """Set a single expander bit in the shadow byte and flush it over I2C."""
        bit = 1 << (pin & 7)
        if value == wiringpi.LOW:
            self.old &= ~bit
        else:
            self.old |= bit
        wiringpi.wiringPiI2CWrite(self.fd, self.old)

    def lcdDisplay(self, state):
        """Turn the display on/off; turning it off also drops the backlight."""
        if state:
            self._lcdControl |= self.LCD_DISPLAY_CTRL
        else:
            self._lcdControl &= ~self.LCD_DISPLAY_CTRL
            self.digitalWrite(self.LED, wiringpi.LOW)  # turn off LCD backlight
        self.putCommand(self.LCD_CTRL | self._lcdControl)

    def lcdCursor(self, state):
        """Show or hide the underline cursor."""
        if state:
            self._lcdControl |= self.LCD_CURSOR_CTRL
        else:
            self._lcdControl &= ~self.LCD_CURSOR_CTRL
        self.putCommand(self.LCD_CTRL | self._lcdControl)

    def lcdCursorBlink(self, state):
        """Enable or disable cursor blinking."""
        if state:
            self._lcdControl |= self.LCD_BLINK_CTRL
        else:
            self._lcdControl &= ~self.LCD_BLINK_CTRL
        self.putCommand(self.LCD_CTRL | self._lcdControl)

    def lcdClear(self):
        """Clear the screen and return the cursor to the origin."""
        self.putCommand(self.LCD_CLEAR)
        self.putCommand(self.LCD_HOME)
        self._cx = self._cy = 0
        usleep(5000)

    def strobe(self):
        """Pulse the enable line to latch the nibble currently on D4..D7."""
        self.digitalWrite(self.STRB, wiringpi.HIGH)
        usleep(50)
        self.digitalWrite(self.STRB, wiringpi.LOW)
        usleep(50)

    def sendDataCmd(self, data):
        """Send one byte as two 4-bit transfers: high nibble, then low."""
        for i in range(4):
            d = wiringpi.HIGH if (data & (0x10 << i)) else wiringpi.LOW
            self.digitalWrite(self.D4 + i, d)
        self.strobe()
        for i in range(4):
            d = wiringpi.HIGH if (data & (0x01 << i)) else wiringpi.LOW
            self.digitalWrite(self.D4 + i, d)
        self.strobe()

    def putCommand(self, command):
        """Send a full command byte (RS low) and wait for it to complete."""
        self.digitalWrite(self.RS, wiringpi.LOW)
        self.sendDataCmd(command)
        usleep(2000)

    def put4Command(self, command):
        """Send only the low nibble of *command* (used during mode init)."""
        self.digitalWrite(self.RS, wiringpi.LOW)
        for i in range(4):
            self.digitalWrite(
                self.D4 + i, wiringpi.HIGH if (command & (1 << i)) else wiringpi.LOW
            )
        self.strobe()
| StarcoderdataPython |
# var/spack/repos/builtin/packages/py-x21/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack.package import *
class PyX21(PythonPackage):
    """Used for unpacking this author's obfuscated libraries"""

    homepage = "https://pypi.org/project/x21/"
    list_url = "https://pypi.org/simple/x21/"

    def url_for_version(self, version):
        """Build the wheel URL for a composite "<pkg-version>-py<py-version>" version.

        The package ships binary wheels only, so the URL encodes the CPython
        ABI tag and the platform string.
        """
        url = "https://pypi.io/packages/cp{1}/x/x21/x21-{0}-cp{1}-cp{1}{2}-{3}.whl"
        if sys.platform == 'darwin':
            platform_string = "macosx_10_9_x86_64"
        elif sys.platform.startswith('linux'):
            platform_string = "manylinux_2_17_x86_64.manylinux2014_x86_64"
        # NOTE(review): platform_string is unbound on any other platform, so
        # this would raise UnboundLocalError -- presumably only macOS/Linux
        # are supported; confirm.
        # Split "0.2.6-py3.8" into package version and Python version parts.
        py_ver = Version(version.string.split('y')[1])
        return url.format(version.string.split('-')[0],
                          py_ver.joined,
                          # CPython 3.7 wheels carry the legacy 'm' ABI suffix.
                          'm' if py_ver == Version('3.7') else '',
                          platform_string)

    # One wheel checksum per (platform, Python minor version) combination.
    if sys.platform == 'darwin':
        version('0.2.6-py3.7',
                sha256='7367b7c93fba520e70cc29731baec5b95e7be32d7615dad4f1f034cd21c194bd',
                expand=False)
        version('0.2.6-py3.8',
                sha256='bbbfdb6b56562ecc81f0dc39e009713157011fbb50d47353eb25f633acf77204',
                expand=False)
        version('0.2.6-py3.9',
                sha256='d7b4f06a71ac27d05ae774752b3ca396134916427f371b5995b07f0f43205043',
                expand=False)
        version('0.2.6-py3.10',
                sha256='2cbda690757f1fc80edfe48fcb13f168068f1784f0cb8c300a0d8051714d0452',
                expand=False)
    elif sys.platform.startswith('linux'):
        version('0.2.6-py3.7',
                sha256='8b35248d0b049dd09985d1a45c6fa36dd39db2c9d805a96028ec3bf9dc80e0dd',
                expand=False)
        version('0.2.6-py3.8',
                sha256='64275052bcda784395bc613f750b8b5a6b1ddbfa4e7a590cb8e209543f0ca0c4',
                expand=False)
        version('0.2.6-py3.9',
                sha256='e20b29650fcbf0be116ac93511033bf10debc76261b7350e018ff91b92ff950d',
                expand=False)
        version('0.2.6-py3.10',
                sha256='7c5c58ff6dc81caac6815578f78cf545e719beb0bf4017f77120d38025d2bc7d',
                expand=False)

    # Pin each composite version to its matching Python minor release.
    depends_on('python@3.7.0:3.7', type=('build', 'run'), when='@0.2.6-py3.7')
    depends_on('python@3.8.0:3.8', type=('build', 'run'), when='@0.2.6-py3.8')
    depends_on('python@3.9.0:3.9', type=('build', 'run'), when='@0.2.6-py3.9')
    depends_on('python@3.10.0:3.10', type=('build', 'run'), when='@0.2.6-py3.10')
    depends_on('py-pynacl', type=('build', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-tomli', type=('build', 'run'))
    depends_on('py-tomli-w', type=('build', 'run'))
| StarcoderdataPython |
# repository: clegg89/altaudit
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 clegg <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
Gem and Enchant Lookup Tables
"""
"""
To save some API calls we're going to keep this data here. It'll cover most gems.
"""
gem_lookup = {
# Old Gems - Not listed. Quality 1
# 9.0 Uncommon Gems
173121 : { 'quality' : 2, 'name' : 'Deadly Jewel Doublet', 'icon' : 'inv_jewelcrafting_90_cutuncommon_orange', 'stat' : '+12 Critical Strike' },
173122 : { 'quality' : 2, 'name' : 'Quick Jewel Doublet', 'icon' : 'inv_jewelcrafting_90_cutuncommon_yellow', 'stat' : '+12 Haste' },
173123 : { 'quality' : 2, 'name' : 'Versatile Jewel Doublet', 'icon' : 'inv_jewelcrafting_90_cutuncommon_blue', 'stat' : '+12 Versatility' },
173124 : { 'quality' : 2, 'name' : 'Masterful Jewel Doublet', 'icon' : 'inv_jewelcrafting_90_cutuncommon_purple', 'stat' : '+12 Mastery' },
173125 : { 'quality' : 2, 'name' : 'Revitalizing Jewel Doublet', 'icon' : 'inv_jewelcrafting_90_cutuncommon_red', 'stat' : '+133 health every 10 seconds for each gem socketed' },
173126 : { 'quality' : 2, 'name' : 'Straddling Jewel Doublet', 'icon' : 'inv_jewelcrafting_90_cutuncommon_green', 'stat' : '+13 speed for each gem socketed' },
# 9.0 Rare Gems
173127 : { 'quality' : 3, 'name' : 'Deadly Jewel Cluster', 'icon' : 'inv_jewelcrafting_90_rarecut_orange', 'stat' : '+16 Critical Strike' },
173128 : { 'quality' : 3, 'name' : 'Quick Jewel Cluster', 'icon' : 'inv_jewelcrafting_90_rarecut_yellow', 'stat' : '+16 Haste' },
173129 : { 'quality' : 3, 'name' : 'Versatile Jewel Cluster', 'icon' : 'inv_jewelcrafting_90_rarecut_blue', 'stat' : '+16 Versatility' },
173130 : { 'quality' : 3, 'name' : 'Masterful Jewel Cluster', 'icon' : 'inv_jewelcrafting_90_rarecut_purple', 'stat' : '+16 Mastery' }
}
"""
There does not seem to be any actual API for looking up enchants by ID. So this is all we have
The name is now provided by the Equipment Profile API, so it is unused. Leaving b/c it helps when reading.
"""
enchant_lookup = {
# Cloak
6202 : { 'quality' : 3, 'name' : "Fortified Speed", 'description' : "+20 Stamina and +30 Speed" },
6203 : { 'quality' : 3, 'name' : "Fortified Avoidance", 'description' : "+20 Stamina and +30 Avoidance" },
6204 : { 'quality' : 3, 'name' : "Fortified Leech", 'description' : "+20 Stamina and +30 Leech" },
6208 : { 'quality' : 3, 'name' : "Soul Vitality", 'description' : "+30 Stamina" },
# Chest
6216 : { 'quality' : 2, 'name' : "Sacred Stats", 'description' : "+20 Primary Stat" },
6213 : { 'quality' : 3, 'name' : "Eternal Bulwark", 'description' : "+25 Armor and +20 Strength/Agility" },
6214 : { 'quality' : 3, 'name' : "Eternal Skirmish", 'description' : "+20 Strength/Agility and Shadow Damage Auto-Attack" },
6217 : { 'quality' : 3, 'name' : "Eternal Bounds", 'description' : "+20 Intellect and +6% Mana" },
6230 : { 'quality' : 3, 'name' : "Eternal Stats", 'description' : "+30 Primary Stat" },
6265 : { 'quality' : 3, 'name' : "Eternal Insight", 'description' : "+20 Intellect and Shadow Damage Spells" },
# Primary
## Wrist
6219 : { 'quality' : 2, 'name' : "Illuminated Soul", 'description' : "+10 Intellect" },
6220 : { 'quality' : 3, 'name' : "Eternal Intellect", 'description' : "+15 Intellect" },
## Hands
6209 : { 'quality' : 2, 'name' : "Strength of Soul", 'description' : "+10 Strength" },
6210 : { 'quality' : 3, 'name' : "Eternal Strength", 'description' : "+15 Strength" },
## Feet
6212 : { 'quality' : 2, 'name' : "Agile Soul", 'description' : "+10 Agility" },
6211 : { 'quality' : 3, 'name' : "Eternal Agility", 'description' : "+15 Agility" },
# Ring
# Bargain
6163 : { 'quality' : 2, 'name' : "Bargain of Critical Strike", 'description' : "+12 Critical Strike" },
6165 : { 'quality' : 2, 'name' : "Bargain of Haste", 'description' : "+12 Haste" },
6167 : { 'quality' : 2, 'name' : "Bargain of Mastery", 'description' : "+12 Mastery" },
6169 : { 'quality' : 2, 'name' : "Bargain of Versatility", 'description' : "+12 Versatility" },
# Tenet
6164 : { 'quality' : 3, 'name' : "Tenet of Critical Strike", 'description' : "+16 Critical Strike" },
6166 : { 'quality' : 3, 'name' : "Tenet of Haste", 'description' : "+16 Haste" },
6168 : { 'quality' : 3, 'name' : "Tenet of Mastery", 'description' : "+16 Mastery" },
6170 : { 'quality' : 3, 'name' : "Tenet of Versatility", 'description' : "+16 Versatility" },
# Weapons - All Valid
# Engineering
6195 : { 'quality' : 3, 'name' : "Infra-green Reflex Sight", 'description' : "Occasionally increase Haste by 303 for 12 sec" },
6196 : { 'quality' : 3, 'name' : "Optical Target Embiggener", 'description' : "Occasionally increase Critical Strike by 303 for 12 sec" },
# Enchant
6223 : { 'quality' : 3, 'name' : "Lightless Force", 'description' : "Chance to send out a wave of Shadow energy, striking 5 enemies" },
6226 : { 'quality' : 3, 'name' : "Eternal Grace", 'description' : "Sometimes cause a burst of healing on the target of your helpful spells and abilities" },
6227 : { 'quality' : 3, 'name' : "Ascended Vigor", 'description' : "Sometimes increase your healing received by 12% for 10 sec" },
6228 : { 'quality' : 3, 'name' : "Sinful Revelation", 'description' : "Your attacks sometimes cause enemies to suffer an additional 6% damage from you for 10 sec" },
6229 : { 'quality' : 3, 'name' : "Celestial Guidance", 'description' : "Sometimes increase your primary stat by 5%" },
# DK Runeforges
3370 : { 'quality' : 4, 'name' : "Rune of the Razorice", 'description' : "Causes extra weapon damage as Frost damage and increases enemies' vulnerability to your Frost attacks by 3%, stacking up to 5 times" },
3847 : { 'quality' : 4, 'name' : "Rune of the Stoneskin Gargoyle", 'description' : "Increase Armor by 5% and all stats by 5%" },
3368 : { 'quality' : 4, 'name' : "Rune of the Fallen Crusader", 'description' : "Chance to heal for 6% and increases total Strength by 15% for 15 sec." },
6241 : { 'quality' : 4, 'name' : "Rune of Sanguination", 'description' : "Cuases Death Strike to deal increased the target's missing health. When you fall below 35% health, you heal for 48% of your maximum health over 8 sec" },
6242 : { 'quality' : 4, 'name' : "Rune of Spellwarding", 'description' : "Deflect 3% of all spell damage. Taking magic damage has a chance to create a shield that absorbs magic damage equal to 10% of your max health. Damaging the shield causes enemies' cast speed to be reduced by 10% for 6 sec" },
6243 : { 'quality' : 4, 'name' : "Rune of Hysteria", 'description' : "Increases maximum Runic Power by 20 and attacks have a chance to increase Runic Power generation by 20% for 8 sec" },
6244 : { 'quality' : 4, 'name' : "Rune of Unending Thirst", 'description' : "Increase movement speed by 5%. Killing an enemy causes you to heal for 5% of your max health and gain 10% Haste and movement speed" },
6245 : { 'quality' : 4, 'name' : "Rune of the Apocalypse", 'description' : "Your ghoul's attacks have a chance to apply a debuff to the target" }
}
| StarcoderdataPython |
# tests/routes/test_hackers.py
# flake8: noqa
import json
from src.models.hacker import Hacker
from tests.base import BaseTestCase
from datetime import datetime
class TestHackersBlueprint(BaseTestCase):
    """Tests for the Hackers Endpoints"""

    """create_hacker"""
    def test_create_hacker(self):
        # Happy path: a valid multipart payload creates one Hacker document.
        now = datetime.now()
        res = self.client.post(
            "/api/hackers/",
            data={"hacker": json.dumps(
                {
                    "email": "<EMAIL>",
                    "date": now.isoformat(),
                }
            )},
            content_type="multipart/form-data",
        )
        self.assertEqual(res.status_code, 201)
        self.assertEqual(Hacker.objects.count(), 1)

    def test_create_hacker_invalid_json(self):
        # An empty hacker payload must be rejected without creating a record.
        res = self.client.post(
            "/api/hackers/", data={"hacker": json.dumps({})}, content_type="multipart/form-data"
        )
        data = json.loads(res.data.decode())
        self.assertEqual(res.status_code, 400)
        self.assertEqual(data["name"], "Bad Request")
        self.assertEqual(Hacker.objects.count(), 0)

    def test_create_hacker_duplicate_user(self):
        # Creating a hacker whose email already exists must yield 409 Conflict.
        # NOTE(review): `now` is unused in this test.
        now = datetime.now()
        Hacker.createOne(
            email="<EMAIL>"
        )
        res = self.client.post(
            "/api/hackers/",
            data={"hacker": json.dumps(
                {
                    "email": "<EMAIL>",
                    "date": now.isoformat(),
                }
            )},
            content_type="multipart/form-data",
        )
        data = json.loads(res.data.decode())
        self.assertEqual(res.status_code, 409)
        self.assertIn(
            "Sorry, that email already exists.", data["description"]
        )
        self.assertEqual(Hacker.objects.count(), 1)

    def test_create_hacker_invalid_datatypes(self):
        # A malformed email (and JSON body instead of multipart) must be rejected.
        res = self.client.post(
            "/api/hackers/",
            data=json.dumps(
                {"email": "notanemail"}
            ),
            content_type="application/json",
        )
        data = json.loads(res.data.decode())
        self.assertEqual(res.status_code, 400)
        self.assertEqual(data["name"], "Bad Request")
        self.assertEqual(Hacker.objects.count(), 0)

    """get_all_hackers"""
    def test_get_all_hackers(self):
        # NOTE(review): the GET endpoint returns 201 rather than 200 --
        # presumably mirroring the API's implementation; confirm intended.
        Hacker.createOne(
            email="<EMAIL>"
        )
        Hacker.createOne(
            email="<EMAIL>",
        )
        res = self.client.get("/api/hackers/get_all_hackers/")
        data = json.loads(res.data.decode())
        self.assertEqual(res.status_code, 201)
        self.assertEqual(data["hackers"][0]["email"], "<EMAIL>")
        self.assertEqual(data["hackers"][1]["email"], "<EMAIL>")

    def test_get_all_hackers_not_found(self):
        # With no hackers in the database the endpoint must 404.
        res = self.client.get("/api/hackers/get_all_hackers/")
        data = json.loads(res.data.decode())
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data["name"], "Not Found")
| StarcoderdataPython |
# tests/sources/local/python/1_decorator_prolog_epilog/src/modules/testPrologEpilog.py (repo: eflows4hpc/compss)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench Tasks
========================
"""
# Imports
import unittest
import os
from pycompss.api.task import task
from pycompss.api.api import compss_barrier as cb, compss_wait_on as cwo
from pycompss.api.binary import binary
from pycompss.api.mpi import mpi
from pycompss.api.mpmd_mpi import mpmd_mpi
from pycompss.api.prolog import prolog
from pycompss.api.parameter import *
from pycompss.api.epilog import epilog
@prolog(binary="echo", params="just a prolog")
@epilog(binary="echo", params="just an epilog")
@task()
def basic():
return True
@prolog(binary="date", params="-d {{a}}")
@epilog(binary="date", params="-d {{b}}")
@mpmd_mpi(runner="mpirun",
programs=[
dict(binary="echo", processes=2, params="program 1"),
dict(binary="echo", processes=2, params="program 2")
])
@task()
def params_mpmd(a, b):
pass
@prolog(binary="date", params="-date", fail_by_exit_value=False)
@epilog(binary="date", params="-date", fail_by_exit_value=False)
@task()
def skip_failure():
return True
@prolog(binary="date", params="-wrong", fail_by_exit_value=False)
@mpi(runner="mpirun", binary="echo", params="prolog failed successfully")
@task(returns=1)
def mpi_skip_failure():
pass
@prolog(binary="date", params="-wrong", fail_by_exit_value=False)
@binary(binary="echo", params="prolog failed successfully",
fail_by_exit_value=False)
@task(returns=1)
def mpi_skip_failure():
pass
@prolog(binary="cat", params="{{p_file}}")
@epilog(binary="cat", params="{{e_file}}")
@task(p_file=FILE_IN, e_file=FILE_IN)
def file_in(p_file, e_file):
return 1
@epilog(binary=os.getcwd() + "/src/misc/hello.sh",
params="{{text}} {{file_out}}")
@task(returns=1, file_out=FILE_OUT)
def std_out(ret_value, text, file_out):
return ret_value
@prolog(binary="echo", params="{{a}}_{{b}}")
@epilog(binary="echo", params="{{c}}_{{d}}")
@task(returns=4)
def task_1(a, b, c, d):
return a, b, c, d
@prolog(binary="echo", params="prolog_{{b}}")
@epilog(binary="echo", params="epilog_{{d}}")
@mpi(binary="echo", runner="mpirun", params="mpi_{{a}}")
@task(returns=1)
def task_2(a, b, c, d):
pass
class TestPrologEpilog(unittest.TestCase):
    """Integration tests for @prolog/@epilog combined with task decorators."""

    def testFBEV(self):
        # fail_by_exit_value=False: task must complete despite failing hooks.
        ev = cwo(skip_failure())
        self.assertTrue(ev, "ERROR: Prolog / Epilog failure shouldn't have "
                            "stopped the task execution")

    def testStdOutFile(self):
        text = "some text for epilog"
        outfile = "src/misc/outfile"
        ret_val = cwo(std_out(10, text, outfile))
        self.assertEqual(10, ret_val, "ERROR: testStdOutFile return value "
                                      "is NOT correct ")

    def testFileInParam(self):
        p_file = "src/misc/p_file"
        e_file = "src/misc/e_file"
        ret = cwo(file_in(p_file, e_file))
        self.assertEqual(ret, 1, "ERROR: testFileInParam ret value NOT correct")

    def testParams(self):
        # Fire-and-forget MPMD task; the barrier waits for completion.
        params_mpmd("next monday", "next friday")
        cb()

    def testBasic(self):
        # NOTE(review): basic() returns a future; assertTrue passes because
        # futures are truthy -- presumably intended, confirm.
        ret = basic()
        self.assertTrue(ret)

    def testMpiSkipFailure(self):
        cwo(mpi_skip_failure())
        cb()

    def testOutParam(self):
        # Chain task_1's four outputs into task_2's decorator parameters.
        t_1 = task_1("AAA", "BBB", "CCC", "DDD")
        t_2 = cwo(task_2(*t_1))
        self.assertEqual(t_2, 0, "ERROR: testOutParam exit value not 0.")
| StarcoderdataPython |
3447215 | #!/usr/bin/python3
#!python3
#encoding:utf-8
import os.path
import subprocess
import dataset
import database.src.Database
import database.src.account.Main
import cui.register.github.api.v3.authorizations.Authorizations
import web.sqlite.Json2Sqlite
class Main:
    """CLI backend for managing GitHub accounts in the local sqlite DBs:
    insert/update/delete accounts and import from TSV."""

    def __init__(self, path_dir_db):
        # Directory that holds the sqlite database files.
        self.path_dir_db = path_dir_db
        # JSON <-> sqlite helper (used to serialise token scope lists).
        self.j2s = web.sqlite.Json2Sqlite.Json2Sqlite()

    def Insert(self, args):
        """Register a new account: create GitHub API tokens, then store the
        account row and its token rows in the account DB.

        args: argparse namespace (username/password/mailaddress/SSH key/
        two-factor options) -- echoed below for debugging.
        """
        print('Account.Insert')
        print(args)
        print('-u: {0}'.format(args.username))
        print('-p: {0}'.format(args.password))
        print('-m: {0}'.format(args.mailaddress))
        print('-s: {0}'.format(args.ssh_public_key_file_path))
        print('-t: {0}'.format(args.two_factor_secret_key))
        print('-r: {0}'.format(args.two_factor_recovery_code_file_path))
        print('--auto: {0}'.format(args.auto))
        db = database.src.Database.Database()
        db.Initialize()
        print(db.account['Accounts'].find_one(Username=args.username))
        # Only insert when no account with this username exists yet.
        if None is db.account['Accounts'].find_one(Username=args.username):
            print('aaaaaaaaaaaaaaaaaaaaaaaaaa')
            # 1. Fetch the mail address via the API. https://developer.github.com/v3/users/emails/
            # 2. Create new access tokens
            auth = cui.register.github.api.v3.authorizations.Authorizations.Authorizations(args.username, args.password)
            token_repo = auth.Create(args.username, args.password, scopes=['repo'])
            token_delete_repo = auth.Create(args.username, args.password, scopes=['delete_repo'])
            token_user = auth.Create(args.username, args.password, scopes=['user'])
            token_public_key = auth.Create(args.username, args.password, scopes=['admin:public_key'])
            # 3. Create a new SSH key
            # 4. Register the account in the DB once everything succeeded
            # args.mailaddress = obtained via the API
            db.account['Accounts'].insert(self.__CreateRecordAccount(args))
            account = db.account['Accounts'].find_one(Username=args.username)
            if None is not args.two_factor_secret_key:
                db.account['AccessTokens'].insert(self.__CreateRecordTwoFactor(account['Id'], args))
            db.account['AccessTokens'].insert(self.__CreateRecordToken(account['Id'], token_repo))
            db.account['AccessTokens'].insert(self.__CreateRecordToken(account['Id'], token_delete_repo))
            db.account['AccessTokens'].insert(self.__CreateRecordToken(account['Id'], token_user))
            db.account['AccessTokens'].insert(self.__CreateRecordToken(account['Id'], token_public_key))
            # Create the new account's repository DB, plus DBs that need a
            # token to build (e.g. the license DB).
            db.Initialize()
        return db

    def __CreateRecordAccount(self, args):
        """Build the Accounts row for the given CLI args."""
        return dict(
            Username=args.username,
            MailAddress=args.mailaddress,
            # NOTE(review): "<PASSWORD>" is a dataset redaction placeholder
            # (not valid Python) -- restore args.password here.
            Password=<PASSWORD>,
            CreateAt="1970-01-01T00:00:00Z"
        )
        # The creation timestamp is obtained from the user-info API call.

    def __CreateRecordToken(self, account_id, j):
        """Build an AccessTokens row from the authorization JSON j."""
        return dict(
            AccountId=account_id,
            IdOnGitHub=j['id'],
            Note=j['note'],
            AccessToken=j['token'],
            Scopes=self.j2s.ArrayToString(j['scopes'])
        )

    def __CreateRecordTwoFactor(self, account_id, args):
        """Build the two-factor secret row for the account."""
        return dict(
            AccountId=account_id,
            # NOTE(review): args.args looks like a typo -- probably
            # args.two_factor_secret_key was intended.
            Secret=args.args.two_factor_secret_key
        )

    """
    def __BoolToInt(self, bool_value):
        if True == bool_value:
            return 1
        else:
            return 0
    def __ArrayToString(self, array):
        ret = ""
        for v in array:
            ret = v + ','
        return ret[:-1]
    """
    """
    # アカウントDBが存在しないなら作成する
    path_db_account = os.path.join(self.path_dir_db, 'GitHub.Accounts.sqlite3')
    if not(os.path.isfile(path_db_account)):
        db = database.src.account.Main.Main(self.path_dir_db)
        db.Create()
    db_account = dataset.connect('sqlite:///' + path_db_account)
    print(path_db_account)
    # DBから探す。指定ユーザ名のアカウントが存在するか否かを。
    account = db_account['Accounts'].find_one(Username=args.username)
    auth = cui.register.github.api.v3.authorizations.Authorizations.Authorizations(args.username, args.password)
    auth.Create(args.username, args.password, scopes=['public_repo'])
    if None is account:
        auth = cui.register.github.api.v3.authorizations.Authorizations.Authorizations(args.username, args.password)
        # 1. APIでメールアドレスを習得する。https://developer.github.com/v3/users/emails/
        # 2. Tokenの新規作成
        auth.Create(scopes=['public_repo'])
        auth.Create(scopes=['delete_repo'])
        auth.Create(scopes=['read:public_key'])
        auth.Create(scopes=['write:public_key'])
        # user(read:user, user:email, user:follow)
        # repo(repo:status, repo_deployment, public_repo)
        # auth.Create(args.username, args.password, scopes=['user:email'])
        # auth.Create(args.username, args.password, scopes=['repo'])
        # auth.Create(args.username, args.password, scopes=['repo', 'public_repo'])
        # auth.Create(args.username, args.password, scopes=['admin:public_key', 'read:public_key'])
        # auth.Create(args.username, args.password, scopes=['admin:public_key', 'write:public_key'])
        # 3. SSH鍵の新規作成
        # 4. 全部成功したらDBにアカウントを登録する
    # アカウントDBにレコードが1つでもある。かつ、ライセンスDBが存在しないなら
    # * LicenseマスターDB作成
    # 各ユーザのリポジトリDB作成
    for account in db_account['Accounts'].find():
        # まだ該当ユーザのリポジトリDBが存在しないなら作成する
        pass
    # * Otherリポジトリは作成しなくていい。今のところ使わないから。
    """

    def Update(self, args):
        """Update an existing account (currently only echoes the CLI args)."""
        print('Account.Update')
        print(args)
        print('-u: {0}'.format(args.username))
        print('-p: {0}'.format(args.password))
        print('-m: {0}'.format(args.mailaddress))
        print('-s: {0}'.format(args.ssh_public_key_file_path))
        print('-t: {0}'.format(args.two_factor_secret_key))
        print('-r: {0}'.format(args.two_factor_recovery_code_file_path))
        print('--auto: {0}'.format(args.auto))

    def Delete(self, args):
        """Delete an account (currently only echoes the CLI args)."""
        print('Account.Delete')
        print(args)
        print('-u: {0}'.format(args.username))
        print('--auto: {0}'.format(args.auto))

    def Tsv(self, args):
        """Bulk import accounts from a TSV file (currently only echoes args)."""
        print('Account.Tsv')
        print(args)
        print('path_file_tsv: {0}'.format(args.path_file_tsv))
        print('--method: {0}'.format(args.method))
| StarcoderdataPython |
9679868 | #!/usr/bin/env python3
import os
import sys
import subprocess
from jinja2 import Environment, FileSystemLoader
# CLI: argv[1] = image name, argv[2] = "cached" to build on the locally
# cached image instead of the upstream Arch base image.
image_name=sys.argv[1]
image_is_cached=True if sys.argv[2] == 'cached' else False

# Load jinja template file
TEMPLATE_FILE = 'Dockerfile.template'
template_loader = FileSystemLoader(searchpath='.')
template_env = Environment(loader=template_loader)
template = template_env.get_template(TEMPLATE_FILE)

# Collect the explicitly installed package names (internal + external
# repo lists), one package per line.
arch_pkgs = []
filename_pkg_explicit_internal = os.environ['HOME'] \
    + '/Documents/misc/arch/packages_explicit_internal'
with open(filename_pkg_explicit_internal) as f:
    content = f.read()
arch_pkgs.extend(content.split('\n'))
del content
filename_pkg_explicit_external = os.environ['HOME'] \
    + '/Documents/misc/arch/packages_explicit_external'
with open(filename_pkg_explicit_external) as f:
    content = f.read()
arch_pkgs.extend(content.split('\n'))
del content
# Per-image exclusion prefixes; [:-1] drops the trailing empty line.
filename_exclusions = os.environ['HOME'] \
    + f'/Documents/misc/arch/{image_name}-docker-image-package-exclusions.txt'
with open(filename_exclusions) as f:
    content = f.read()
exclusions = content.split('\n')[:-1]
del content
# print(f'{exclusions=}')
def item_included(item, exclusions):
    """Return True unless *item* starts with any of the exclusion prefixes.

    An empty exclusion list keeps everything; an empty-string exclusion
    excludes everything (every string starts with "").
    """
    # str.startswith accepts a tuple of prefixes, replacing the manual loop.
    return not item.startswith(tuple(exclusions))
# Drop excluded and empty package entries.
tmp_split = []
# print(f'{arch_pkgs=}')
for item in arch_pkgs:
    if item_included(item, exclusions) and item:
        tmp_split.append(item)
        # print(f'DEBUG included: {item}')
#    else:
#        print(f'DEBUG excluded: {item}')
# print(f'{tmp_split=}')
arch_pkgs = tmp_split
del tmp_split

image = 'docker.io/archlinux:base'
# Dockerfile snippet that builds yay from the AUR as a non-root user.
yay_install="""
# ----------
# yay cannot be run as root!
#
# taken from: https://github.com/justin8/docker-makepkg/blob/master/Dockerfile
#
ADD sudoers /etc/sudoers
RUN useradd -m -d /build build-user || true
WORKDIR /build
RUN sudo -u build-user git clone https://aur.archlinux.org/yay.git && cd yay && sudo -u build-user makepkg -sri --noconfirm && cd - && rm -rf yay
# ----------
#
"""
# A cached base image already contains yay, so skip the install snippet.
if image_is_cached:
    yay_install = ''
    image = f'localhost/{image_name}'
# Render the final Dockerfile from the jinja template.
with open('Dockerfile', 'w') as f:
    f.write(template.render(image=image,
                            arch_pkgs=' '.join(arch_pkgs),
                            yay_install=yay_install ))
| StarcoderdataPython |
1977303 | <reponame>Tom-Li1/games
import random, time, sys
#==============导入适用模块,进入函数区域===============
def drawBoard(board):
    """Print the 3x3 board; cells 7-9 are the top row, 1-3 the bottom row."""
    rows = [(7, 8, 9), (4, 5, 6), (1, 2, 3)]
    rendered = ['|'.join((board[a], board[b], board[c])) for a, b, c in rows]
    print('\n-+-+-\n'.join(rendered))
def inputPlayerLetter():
    """Prompt the player until they enter X or O.

    Returns [playerLetter, computerLetter].

    Bug fixed: the original condition `letter != 'O' or letter != 'X'` is
    always true, and the function returned on the very first input, so any
    invalid entry (e.g. "Q") silently assigned the player the letter O.
    Now the prompt repeats until a valid letter is given.
    """
    letter = ''
    while letter not in ('X', 'O'):
        print('Do you want to be X or O?')
        letter = input('>>>').upper()
    if letter == 'X':
        return ['X', 'O']
    return ['O', 'X']
def whoGoesFirst():
    """Randomly decide which side takes the first turn."""
    return 'player' if random.randint(0, 1) == 0 else 'computer'
def makeMove(board, letter, move):
    # Mutates `board` in place: writes the player's letter into cell `move` (1-9).
    board[move] = letter
def isWinner(bo, le):
    """Return True if letter `le` occupies any of the eight winning lines."""
    win_lines = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows (top, middle, bottom)
        (7, 4, 1), (8, 5, 2), (9, 6, 3),   # columns
        (7, 5, 3), (1, 5, 9),              # diagonals
    )
    return any(bo[a] == le and bo[b] == le and bo[c] == le
               for a, b, c in win_lines)
def getBoardCopy(board):
    """Return a shallow copy of the board list.

    Idiom fix: the manual append loop is replaced by the list constructor.
    """
    return list(board)
def isSpaceFree(board, move):
    # A cell is free while it still holds the initial single-space placeholder.
    return board[move] == ' '
def getPlayerMove(board):
    """Keep prompting until the player names a free cell (1-9); return it as int."""
    valid_cells = '1 2 3 4 5 6 7 8 9'.split()
    choice = ' '
    while choice not in valid_cells or not isSpaceFree(board, int(choice)):
        print("What is your next move? (1-9)")
        choice = input('>>>')
    return int(choice)
def chooseRandomMoveFormList(board, moveList):
    """Return a random free cell from moveList, or None when all are taken."""
    free = [m for m in moveList if isSpaceFree(board, m)]
    if not free:
        return None
    return random.choice(free)
def getComputerMove(board, computerLetter):
    """Pick the computer's move using the classic priority order:
    win now > block the player > take a corner > take the center > take a side.
    """
    playerLetter = 'O' if computerLetter == 'X' else 'X'

    # 1) If the computer can win this turn, do it.
    for cell in range(1, 10):
        trial = getBoardCopy(board)
        if isSpaceFree(trial, cell):
            makeMove(trial, computerLetter, cell)
            if isWinner(trial, computerLetter):
                return cell

    # 2) If the player could win next turn, block that cell.
    for cell in range(1, 10):
        trial = getBoardCopy(board)
        if isSpaceFree(trial, cell):
            makeMove(trial, playerLetter, cell)
            if isWinner(trial, playerLetter):
                return cell

    # 3) Corners, 4) center, 5) sides.
    corner = chooseRandomMoveFormList(board, [1, 3, 7, 9])
    if corner is not None:
        return corner
    if isSpaceFree(board, 5):
        return 5
    return chooseRandomMoveFormList(board, [2, 4, 6, 8])
def isBoardFull(board):
    """Return True when none of the cells 1-9 is free."""
    return not any(isSpaceFree(board, cell) for cell in range(1, 10))
#================== End of functions; enter the main game loop ======================
# Banner art. NOTE(review): the interior whitespace of this ASCII art was
# mangled by whitespace-stripping; reconstructed here on a best-effort basis.
print('''
  _______ _____ _____ _______       _____ _______ ____  ______
 |__   __|_   _/ ____|__   __|/\\   / ____|__   __/ __ \\|  ____|
    | |    | || |      | |  /  \\ | |      | | | |  | | |__
    | |    | || |      | | / /\\ \\| |      | | | |  | |  __|
    | |   _| || |____  | |/ ____ \\ |____  | | | |__| | |____
    |_|  |_____\\_____| |_/_/    \\_\\_____|  |_|  \\____/|______|
''')
string_1 = "Have you ever played a simple game that called the name showed above?\nNow you can play with a computer.\n\
Choose your letter and try to put them in the game board look like '#'.\nWhen three same letter in a same line, the letter's user will win.\n\
The side that go first will be chose randomly.\n\n====================GOOD LUCK HAVE FUN===================="
# Typewriter effect: print the intro one character at a time.
for i in range(len(string_1)):
    print(string_1[i], end = '')
    time.sleep(0.01)
    sys.stdout.flush()
print()
# Outer loop: one iteration per full game; inner loop: one per turn.
while True:
    theBoard = [' '] * 10
    playerLetter, computerLetter = inputPlayerLetter()
    turn = whoGoesFirst()
    print('The ' + turn + ' will go first.')
    gameIsPlaying = True
    while gameIsPlaying:
        if turn == 'player':
            drawBoard(theBoard)
            move = getPlayerMove(theBoard)
            makeMove(theBoard, playerLetter, move)
            if isWinner(theBoard, playerLetter):
                drawBoard(theBoard)
                print("Hooray! U have won the game!")
                gameIsPlaying = False
            else:
                if isBoardFull(theBoard):
                    drawBoard(theBoard)
                    print("This game is a tie.")
                    break
                else:
                    turn = 'computer'
        else:
            move = getComputerMove(theBoard, computerLetter)
            makeMove(theBoard, computerLetter, move)
            if isWinner(theBoard, computerLetter):
                drawBoard(theBoard)
                print('Computer has won the game! You are sillier than a computer.')
                gameIsPlaying = False
            else:
                if isBoardFull(theBoard):
                    drawBoard(theBoard)
                    print("This game is a tie.")
                    break
                else:
                    turn = 'player'
    print('Would u want to play again?(Yes / No)')
    if not input('>>>').lower().startswith('y'):
        break
| StarcoderdataPython |
3271484 | #Author: <NAME> <<EMAIL>>
#Based on the utorrent maraschino module
from flask import render_template
from datetime import timedelta
from maraschino import app, logger
from maraschino.tools import *
from rtorrent import RTorrent
def log_error(ex):
    # Funnel all of this module's exceptions into the shared Maraschino debug log.
    logger.log('RTORRENTDL :: EXCEPTION - %s' % ex, 'DEBUG')
@app.route('/xhr/rtorrentdl/')
@requires_auth
def xhr_rtorrentdl():
    """Render the rTorrent download-list partial for the Maraschino dashboard.

    Reads the rtorrent_* settings, connects to the configured XML-RPC
    endpoint, collects global up/down rates plus one status entry per
    torrent, and renders rtorrentdl.html. Any failure is logged and results
    in the "not connected" view.
    """
    # url qualification
    def url_qualify(url_proto,url_host,url_port):
        # partition keeps any path component after the host intact.
        url_host_part=str(url_host).partition('/')
        # validate host (kinda... just make sure it's not empty)
        if url_host_part[0]:
            # validate port
            if not url_port:
                # for http and https we can assume default service ports
                if url_proto == 'http':
                    url_port = 80
                elif url_proto == 'https':
                    url_port = 443
                else:
                    raise Exception('port must be defined for protocol %s' % (url_proto))
            else:
                try:
                    url_port=int(url_port)
                except ValueError:
                    raise Exception('port must be a numeric value')
            url_qualified='%s://%s:%i%s%s' % (
                url_proto,
                url_host_part[0],
                url_port,
                url_host_part[1],
                url_host_part[2]
            )
            return url_qualified
        else:
            raise Exception('invalid host: %s' % (url_host[0]))
    # initialize empty list, which will be later populated with listing
    # of active torrents
    torrentlist = list()
    # connection flag
    connected = False
    # global rates
    down_rate = 0.0
    up_rate = 0.0
    rtorrent_url = None
    rtorrent_user = None
    # NOTE(review): "<PASSWORD>" is a dataset redaction placeholder (not
    # valid Python) -- this should initialise rtorrent_password to None.
    rtorrent_password = <PASSWORD>
    try:
        if get_setting_value('rtorrent_host') is not None:
            rtorrent_url = url_qualify(
                get_setting_value('rtorrent_proto'),
                get_setting_value('rtorrent_host'),
                get_setting_value('rtorrent_port')
            )
    except Exception as ex:
        log_error(ex)
    try:
        if rtorrent_url:
            # user/password login is not implemented for scgi
            if get_setting_value('rtorrent_proto') != 'scgi':
                rtorrent_user = get_setting_value('rtorrent_user')
                rtorrent_password = get_setting_value('rtorrent_password')
            client = RTorrent(rtorrent_url,rtorrent_user,rtorrent_password,True)
            if client is not None:
                connected = True
                down_rate = client.get_down_rate()
                up_rate = client.get_up_rate()
                # loop through each job and add all torrents to torrentlist()
                for torrent in client.get_torrents():
                    # friendly status and time left
                    time_left = -1
                    if torrent.complete:
                        if torrent.active:
                            status = 'seeding'
                        else:
                            status = 'done'
                    else:
                        if torrent.active:
                            if torrent.down_rate > 0:
                                # ETA only computable while actually downloading.
                                time_left = str(timedelta(seconds = round(float(torrent.left_bytes) / torrent.down_rate)))
                                status = 'leeching'
                            else:
                                status = 'waiting'
                        else:
                            status = 'inactive'
                    # get torrent file list
                    # FIXME takes too much time and is not used anyway for now
                    #torrent_filelist = []
                    #for file_current in torrent.get_files():
                    #    torrent_filelist.append(os.path.join(torrent.directory,file_current.path))
                    # what's left?
                    progress = float(100.0 / torrent.size_bytes * (torrent.size_bytes-torrent.left_bytes))
                    # append to list
                    torrentlist.append({
                        'name': torrent.name,
                        'info_hash': torrent.info_hash,
                        'status': status,
                        'state': torrent.state,
                        'progress': progress,
                        'time_left': time_left,
                        'down_rate': torrent.down_rate,
                        'up_rate': torrent.up_rate,
                        'ratio': torrent.ratio
                        # 'folder': torrent.directory,
                        # 'files': '|'.join(torrent_filelist)
                    })
                # no torrents -> empty list
                if not torrentlist.__len__():
                    torrentlist = None
    except Exception as ex:
        log_error(ex)
        torrentlist = None
    return render_template('rtorrentdl.html',
        connected = connected,
        torrentlist_scroll = get_setting_value('rtorrent_list_scroll'),
        torrentlist = torrentlist,
        down_rate = down_rate,
        up_rate = up_rate
    )
| StarcoderdataPython |
5024102 | <reponame>exenGT/pymatgen
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License
"""
This module implements reading and writing of ShengBTE CONTROL files.
"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from monty.dev import requires
from monty.json import MSONable
from pymatgen.core.structure import Structure
from pymatgen.io.vasp import Kpoints
try:
import f90nml
except ImportError:
f90nml = None
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__email__ = "<EMAIL>, <EMAIL>"
__date__ = "June 27, 2019"
class Control(MSONable, dict):
    """
    Class for reading, updating, and writing ShengBTE CONTROL files.
    See https://bitbucket.org/sousaw/shengbte/src/master/ for more
    detailed description and default values of CONTROL arguments.
    """

    # Keys that must be present for ShengBTE to run; to_file() warns when
    # any of them is missing.
    required_params = [
        "nelements",
        "natoms",
        "ngrid",
        "lattvec",
        "types",
        "elements",
        "positions",
        "scell",
    ]
    # Keys belonging to each of the four CONTROL namelist sections.
    allocations_keys = ["nelements", "natoms", "ngrid", "norientations"]
    crystal_keys = [
        "lfactor",
        "lattvec",
        "types",
        "elements",
        "positions",
        "masses",
        "gfactors",
        "epsilon",
        "born",
        "scell",
        "orientations",
    ]
    params_keys = [
        "t",
        "t_min",
        "t_max",
        "t_step",
        "omega_max",
        "scalebroad",
        "rmin",
        "rmax",
        "dr",
        "maxiter",
        "nticks",
        "eps",
    ]
    flags_keys = [
        "nonanalytic",
        "convergence",
        "isotopes",
        "autoisotopes",
        "nanowires",
        "onlyharmonic",
        "espresso",
    ]

    def __init__(self, ngrid: Optional[List[int]] = None, temperature: Union[float, Dict[str, float]] = 300, **kwargs):
        """
        Args:
            ngrid: Reciprocal space grid density as a list of 3 ints.
            temperature: The temperature to calculate the lattice thermal
                conductivity for. Can be given as a single float, or a dictionary
                with the keys "min", "max", "step".
            **kwargs: Other ShengBTE parameters. Several parameters are required
                for ShengBTE to run - we have listed these parameters below:

                - nelements (int): number of different elements in the compound
                - natoms (int): number of atoms in the unit cell
                - lattvec (size 3x3 array): real-space lattice vectors, in units
                  of lfactor
                - lfactor (float): unit of measurement for lattice vectors (nm).
                  I.e., set to 0.1 if lattvec given in Angstrom.
                - types (size natom list): a vector of natom integers, ranging
                  from 1 to nelements, assigning an element to each atom in the
                  system
                - elements (size natom list): a vector of element names
                - positions (size natomx3 array): atomic positions in lattice
                  coordinates
                - scell (size 3 list): supercell sizes along each crystal axis
                  used for the 2nd-order force constant calculation
        """
        super().__init__()

        if ngrid is None:
            ngrid = [25, 25, 25]

        self["ngrid"] = ngrid

        # A scalar temperature sets "t"; a dict sets the t_min/t_max/t_step
        # sweep instead (the two forms are mutually exclusive in CONTROL).
        if isinstance(temperature, (int, float)):
            self["t"] = temperature

        elif isinstance(temperature, dict):
            self["t_min"] = temperature["min"]
            self["t_max"] = temperature["max"]
            self["t_step"] = temperature["step"]
        else:
            raise ValueError("Unsupported temperature type, must be float or dict")

        self.update(kwargs)

    @classmethod
    @requires(
        f90nml,
        "ShengBTE Control object requires f90nml to be installed. Please get it at https://pypi.org/project/f90nml.",
    )
    def from_file(cls, filepath: str):
        """
        Read a CONTROL namelist file and output a 'Control' object

        Args:
            filepath: Path of the CONTROL file.

        Returns:
            'Control' object with parameters instantiated.
        """
        nml = f90nml.read(filepath)
        sdict = nml.todict()

        # Flatten the four namelist sections into one flat dict.
        all_dict: Dict[str, Any] = {}
        all_dict.update(sdict["allocations"])
        all_dict.update(sdict["crystal"])
        all_dict.update(sdict["parameters"])
        all_dict.update(sdict["flags"])
        all_dict.pop("_start_index")  # remove unnecessary cruft

        return cls.from_dict(all_dict)

    @classmethod
    def from_dict(cls, control_dict: Dict):
        """
        Write a CONTROL file from a Python dictionary. Description and default
        parameters can be found at
        https://bitbucket.org/sousaw/shengbte/src/master/.
        Note some parameters are mandatory. Optional parameters default here to
        None and will not be written to file.

        Args:
            control_dict: A Python dictionary of ShengBTE input parameters.
        """
        return cls(**control_dict)

    @requires(
        f90nml,
        "ShengBTE Control object requires f90nml to be installed. Please get it at https://pypi.org/project/f90nml.",
    )
    def to_file(self, filename: str = "CONTROL"):
        """
        Writes ShengBTE CONTROL file from 'Control' object

        Args:
            filename: A file name.
        """
        # Warn (but do not fail) on missing mandatory keys.
        for param in self.required_params:
            if param not in self.as_dict():
                warnings.warn(f"Required parameter '{param}' not specified!")

        # Emit each namelist section in the order ShengBTE expects.
        alloc_dict = _get_subdict(self, self.allocations_keys)
        alloc_nml = f90nml.Namelist({"allocations": alloc_dict})
        control_str = str(alloc_nml) + "\n"

        crystal_dict = _get_subdict(self, self.crystal_keys)
        crystal_nml = f90nml.Namelist({"crystal": crystal_dict})
        control_str += str(crystal_nml) + "\n"

        params_dict = _get_subdict(self, self.params_keys)
        params_nml = f90nml.Namelist({"parameters": params_dict})
        control_str += str(params_nml) + "\n"

        flags_dict = _get_subdict(self, self.flags_keys)
        flags_nml = f90nml.Namelist({"flags": flags_dict})
        control_str += str(flags_nml) + "\n"

        with open(filename, "w") as file:
            file.write(control_str)

    @classmethod
    def from_structure(cls, structure: Structure, reciprocal_density: Optional[int] = 50000, **kwargs):
        """
        Get a ShengBTE control object from a structure.

        Args:
            structure: A structure object.
            reciprocal_density: If not None, the q-point grid ("ngrid") will be
                set using this density.
            kwargs: Additional options to be passed to the Control constructor.
                See the docstring of the __init__ method for more details

        Returns:
            A ShengBTE control object.
        """
        elements = list(map(str, structure.composition.elements))

        # Map each atomic number to a 1-based element-type index, as CONTROL
        # expects in its "types" vector.
        unique_nums = np.unique(structure.atomic_numbers)
        types_dict = dict(zip(unique_nums, range(len(unique_nums))))
        types = [types_dict[i] + 1 for i in structure.atomic_numbers]

        control_dict = {
            "nelements": structure.ntypesp,
            "natoms": structure.num_sites,
            "norientations": 0,
            "lfactor": 0.1,
            "lattvec": structure.lattice.matrix.tolist(),
            "elements": elements,
            "types": types,
            "positions": structure.frac_coords.tolist(),
        }

        if reciprocal_density:
            kpoints = Kpoints.automatic_density(structure, reciprocal_density)
            control_dict["ngrid"] = kpoints.kpts[0]

        control_dict.update(**kwargs)

        return Control(**control_dict)

    def get_structure(self) -> Structure:
        """
        Get a pymatgen Structure from a ShengBTE control object.

        The control object must have the "lattvec", "types", "elements", and
        "positions" settings otherwise an error will be thrown.

        Returns:
            The structure.
        """
        required = ["lattvec", "types", "elements", "positions"]
        if not all(r in self for r in required):
            raise ValueError("All of ['lattvec', 'types', 'elements', 'positions'] must be in control object")

        # Invert the 1-based type indices back to element symbols.
        unique_elements = self["elements"]
        n_unique_elements = len(unique_elements)
        element_map = dict(zip(range(1, n_unique_elements + 1), unique_elements))
        species = [element_map[i] for i in self["types"]]

        cell = np.array(self["lattvec"])

        if "lfactor" in self:
            cell *= self["lfactor"] * 10  # to nm then to Angstrom

        return Structure(cell, species, self["positions"])

    def as_dict(self):
        """
        Returns: MSONAble dict
        """
        return dict(self)
def _get_subdict(master_dict, subkeys):
"""Helper method to get a set of keys from a larger dictionary"""
return {k: master_dict[k] for k in subkeys if k in master_dict and master_dict[k] is not None}
| StarcoderdataPython |
8103840 | import json
import logging
from os import path
from pathlib import Path
import time
from reconstruction import reconstruct
import fnmatch
from pprint import pprint
from extension import reconstruct_slices, addition
from futils import timeit
from tqdm import tqdm
from matching import Library, Sequence, match_library
# Logging configuration
current_file = path.basename(__file__).split(".")[0]  # stem used in log-file names

# Switches between the lycopene (Crt*) and violacein (tu*) primer/slice
# naming schemes in iter_all_seq().
LYCOPENE = True
@timeit
def match_libs(seq, libs, threshold=0.5):
    """
    Match every library in `libs` against the sequence.

    Args:
        seq: Sequence object to search.
        libs: iterable of library dicts (template "component_sources" entries).
        threshold: minimum score for a candidate match to be kept.

    Returns:
        list of dicts, one per library: {"library": name,
        "candidates": [{"name", "score", "start", "length", "end"}, ...]}.
    """
    result = []
    for lib in tqdm(libs):
        pop = {}
        pop["library"] = lib["name"]
        # See algo1_lycopene for the parameter below in the template
        # threshold = lib["score_threshold"]
        candidates = match_library(seq, Library(lib), threshold, direction53=True)
        # Flatten the nested candidate groups into plain dicts for JSON output.
        cl = []
        for candidate in candidates:
            for c in candidate:
                cl.append(
                    {
                        "name": c.name,
                        "score": c.score,
                        "start": c.start,
                        "length": c.length,
                        "end": c.end,
                    }
                )
        pop["candidates"] = cl
        result.append(pop)
    return result
def get_sequences(dir_dict):
    """Collect sequence-file generators for every configured directory.

    Args:
        dir_dict: dict with keys "extension" (file suffix), "sequences_path"
            (base directory) and "seq_dir_names" (subdirectories to scan).

    Returns:
        list with one Path.rglob generator per entry in seq_dir_names.
    """
    ext = dir_dict["extension"]
    base = dir_dict["sequences_path"]
    return [
        Path(path.join(base, seq_dir)).rglob(f"*{ext}")
        for seq_dir in dir_dict["seq_dir_names"]
    ]
def get_slices_libs(template):
    """Map each template-slice name to its list of component-source libraries.

    Args:
        template: Template JSON data as a dict structure.

    Returns:
        dict: slice name -> list of library definitions resolved from
        template["component_sources"] via the 1-based structure positions.
    """
    structure = template["template"]["structure"]
    sources = template["component_sources"]
    return {
        sli["name"]: [
            sources[structure[pos - 1]["library_source"]]
            for pos in sli["template_slice"]
        ]
        for sli in template["template_slices"]
    }
@timeit
def iter_all_seq(
    input_sequences,
    template_json_file,
    match_output_filename,
    reconstruction_output_filename,
    extension_output_filename,
    threshold=0.99,
):
    """
    Iterate over sequences: match each target against its slice libraries,
    reconstruct per-slice assemblies, then extend to the full pathway.
    Results of each stage are written to the three given JSON files.

    Args:
        input_sequences (dict): Input dictionary with info about the input sequences:
        template_json_file (str): Path to the template JSON file.
        match_output_filename (str): Matching-stage output filename.
        reconstruction_output_filename (str): Reconstruction output filename.
        extension_output_filename (str): Full-pathway output filename.
        threshold (float): Minimum match score passed to match_libs.

    Example:
        input_sequences = {
            'extension' = ".seq"
            'sequences_path' = "/data/Imperial/src/lyc-basic-ass-ind/"
            'seq_dir_names' = ["output"]
        }
    """
    # Get sequences to match
    sequences = get_sequences(input_sequences)

    # Get the filenames in a list and not this freakin generator
    seq_filenames = []
    for seq in sequences:
        for filename in seq:
            seq_filenames.append(filename)

    # Get sequences TU in order of template
    with open(template_json_file) as json_file:
        template = json.load(json_file)

    # Get order of target sequences from the template slices order.
    # Slice names map to the primer substring expected in each file name.
    if LYCOPENE:
        slice_primer = {
            "revE": "CrtE",
            "revI": "CrtI",
            "revB": "CrtB",
        }
    else:
        slice_primer = {
            'TU-1': "tu1",
            'TU-2': "tu2",
            'TU-3': "tu3",
            'TU-4': "tu4",
            'TU-5': "tu5",
        }
    slices = template['template_slices']
    order_of_targets_paths = []
    order_of_targets_names = []
    for slice in slices:
        name = slice['name']
        primer = slice_primer[name]
        for f in seq_filenames:
            pattern = "*"+primer+"*"
            print(f, pattern)
            print("Name: ",f.name.split('.')[0])
            if f.match(pattern):
                order_of_targets_names.append(f.name.split('.')[0])
                order_of_targets_paths.append(f)
    # NOTE(review): duplicated debug print below -- one was probably meant
    # to show order_of_targets_names.
    print(order_of_targets_paths)
    print(order_of_targets_paths)

    # Loop over the sequences
    # NOTE(review): the loop variable `filename` is later reused as the
    # open() context target for the output files -- confusing shadowing.
    r = []
    for filename in order_of_targets_paths:
        sq = Sequence(filename)
        json_to_output = {}
        json_to_output["target"] = sq.name
        # Logging
        logging.info(f"Target sequence: {sq.name}")
        # Get libs from template
        libs = get_slices_libs(template)
        # Primer: extracted from the file-name convention of each scheme.
        print(sq.name)
        #TODO name shit
        if LYCOPENE:
            primer = sq.name.split("_")[-2]  # "BASIC_construct_UTR1-RBS-A12-UTR2-RBS-A12-UTR3-RBS-A12_CrtI_01"
            print("PRIMER:", primer)
            if primer == "CrtE":
                libs_to_match = libs['revE']
            elif primer == "CrtB":
                libs_to_match = libs['revB']
            elif primer == "CrtI":
                libs_to_match = libs['revI']
            else:
                raise OSError("Primer not found",sq.name)
        else:
            primer = sq.name.split("_")[2]  # "vio_000_tu5"
            if primer == "tu1":
                libs_to_match = libs['TU-1']
            elif primer == "tu2":
                libs_to_match = libs['TU-2']
            elif primer == "tu3":
                libs_to_match = libs['TU-3']
            elif primer == "tu4":
                libs_to_match = libs['TU-4']
            elif primer == "tu5":
                libs_to_match = libs['TU-5']
            else:
                raise OSError("Primer not found",sq.name)
        # Match sequence
        matches = match_libs(sq, libs_to_match, threshold=threshold)
        json_to_output["matches"] = matches
        r.append(json_to_output)

    # Write output result in JSON file
    with open(match_output_filename, "w") as filename:
        json.dump(r, filename, indent=2, separators=(",", ":"))

    # Reconstruction
    reconstruction_result = reconstruct(r)
    # Write reconstruction result in JSON file
    with open(reconstruction_output_filename, "w") as filename:
        json.dump(reconstruction_result, filename, indent=2, separators=(",", ":"))

    # Extension and Addition to reconstruct the full pathway
    print(reconstruction_result)
    ex = reconstruct_slices(reconstruction_result, template, order_of_targets_names)
    print(ex)
    ex_res = {
        'target_slices': order_of_targets_names,
        'full_reconstruction': ex
    }
    with open(extension_output_filename , "w") as filename:
        json.dump(ex_res, filename, indent=2, separators=(",", ":"))
def run_test(test_params):
    """
    Run one benchmark configuration `nbloop` times.

    test_params keys: "name", "id" (used in log/output file names),
    "targets" (input_sequences dict for iter_all_seq), "candidates"
    (template JSON path), "nbloop" (repetitions), "threshold".
    Per-run JSON results are written under output_results/.
    """
    targets = test_params["targets"]
    candidates = test_params["candidates"]
    nbloop = test_params["nbloop"]
    test_id = test_params["id"]
    test_name = test_params["name"]
    threshold = test_params["threshold"]

    OUTPUT_DIR = "output_results/"

    # Logging configuration
    timestr = time.strftime("%Y%m%d-%H%M%S")
    logging.basicConfig(
        format="%(asctime)s:%(levelname)s: %(message)s",
        filename=f"logs/{current_file}-{test_id}-{timestr}.log",
        encoding="utf-8",
        level=logging.DEBUG,
    )
    msg = f"{test_name} - Targets:{targets['seq_dir_names']} - Template:{path.basename(candidates)} - Nb runs: {nbloop}"
    logging.info(msg)

    # Iterate and match libs over the input sequences above
    for i in range(nbloop):
        msg = f"Test run: ({i+1}/{nbloop})"
        logging.info(msg)
        # Match results filename
        match_output_filename = f"{timestr}-matching-results-{current_file}-{test_id}-run-{i+1}-from-{nbloop}.json"
        match_output_filename = path.join(OUTPUT_DIR, match_output_filename)
        # Reconstruction results filename
        reconstruction_output_filename = f"{timestr}-reconstruction-{current_file}-{test_id}-run-{i+1}-from-{nbloop}.json"
        reconstruction_output_filename = path.join(
            OUTPUT_DIR, reconstruction_output_filename
        )
        # Final results filename
        extension_output_filename = f"{timestr}-extension-{current_file}-{test_id}-run-{i+1}-from-{nbloop}.json"
        extension_output_filename = path.join(
            OUTPUT_DIR, extension_output_filename
        )
        # Iterate and match libs
        iter_all_seq(
            targets,
            candidates,
            match_output_filename,
            reconstruction_output_filename,
            extension_output_filename,
            threshold,
        )
# /////////////////////////////////////////////////////////////////////////////////////////////
# /////////////////////////////////////////////////////////////////////////////////////////
def test_algo1_1_target_hard_th99():
    """
    Test Algo1
    1 Target Violacein (B0030, B0030, B0030, B0030, B0030) (hard)
    Candidate Template
    Threshold 0.99
    """
    # 10 repetitions; per-run JSON results land in output_results/.
    test_params = {
        "name": "Algo1 - 1 Target (5*RBS B0030) - 1 against All - Threshold 0.99 ",
        "id": "vio-1-target-B0030-B0030-B0030-B0030-B0030-th99",
        "targets": {
            "extension": ".seq",
            "sequences_path": "/data/Imperial/src/violacein-basic-ass",
            "seq_dir_names": ["output/rbs_one_hard/"],
        },
        "candidates": "/data/Imperial/src/matching/templates/template_violacein_02.json",
        "nbloop": 10,
        "threshold": 0.99,
    }
    run_test(test_params)
def test_algo1_1_target_easy_th75():
    """
    Test Algo1
    1 Target Violacein (B0030, B0031, B0032, B0033, B0064) (easy)
    Candidate Template
    Threshold 0.75
    """
    # 10 repetitions; per-run JSON results land in output_results/.
    test_params = {
        "name": "Algo1 - 1 Target (RBS30,31,32,33,64) - 1 against all - Threshold 0.75 ",
        "id": "vio-1-target-B0030-B0031-B0032-B0033-B0064-th75",
        "targets": {
            "extension": ".seq",
            "sequences_path": "/data/Imperial/src/violacein-basic-ass",
            "seq_dir_names": ["output/rbs_one_easy/"],
        },
        "candidates": "/data/Imperial/src/matching/templates/template_violacein_02.json",
        "nbloop": 10,
        "threshold": 0.75,
    }
    run_test(test_params)
def test_algo1_1_target_easy_th99():
    """
    Test Algo1
    1 Target RBS 30 31 32 33 64 (easy)
    Candidate Template
    Threshold 0.99
    """
    # 10 repetitions; per-run JSON results land in output_results/.
    test_params = {
        "name": "Algo1 - 1 Target (RBS30,31,32,33,64) - 1 against all - Threshold 0.99 ",
        "id": "vio-1-target-B0030-B0031-B0032-B0033-B0064-th99",
        "targets": {
            "extension": ".seq",
            "sequences_path": "/data/Imperial/src/violacein-basic-ass",
            "seq_dir_names": ["output/rbs_one_easy/"],
        },
        "candidates": "/data/Imperial/src/matching/templates/template_violacein_02.json",
        "nbloop": 10,
        "threshold": 0.99,
    }
    run_test(test_params)
# /////////////////////////////////////////////////////////////////////////////////////////
def test_algo1_1_target_1to1_hard_th75():
"""
Test Algo1
1 Target Violacein (B0030, B0030, B0030, B0030, B0030) (hard)
Candidate Template
Threshold 0.75
"""
test_params = {
"name": "Algo1 - 1 Target (5*RBS B0030) - 1 against 1 - Threshold 0.75",
"id": "vio-1-target-B0030-B0030-B0030-B0030-B0030-1to1-th75",
"targets": {
"extension": ".seq",
"sequences_path": "/data/Imperial/src/violacein-basic-ass",
"seq_dir_names": ["output/rbs_one_hard/"],
},
"candidates": "/data/Imperial/src/matching/templates/template_violacein_02_one_hard.json",
"nbloop": 10,
"threshold": 0.75,
}
run_test(test_params)
def test_algo1_1_target_1to1_hard_th99():
"""
Test Algo1
1 Target Violacein (B0030, B0030, B0030, B0030, B0030) (hard)
Candidate Template
Threshold 0.99
"""
test_params = {
"name": "Algo1 - 1 Target (5*RBS B0030) - 1 against 1 - Threshold 0.99",
"id": "vio-1-target-B0030-B0030-B0030-B0030-B0030-1to1-th99",
"targets": {
"extension": ".seq",
"sequences_path": "/data/Imperial/src/violacein-basic-ass",
"seq_dir_names": ["output/rbs_one_hard/"],
},
"candidates": "/data/Imperial/src/matching/templates/template_violacein_02_one_hard.json",
"nbloop": 10,
"threshold": 0.99,
}
run_test(test_params)
def test_algo1_1_target_1to1_easy_th75():
"""
Test Algo1
1 Target Violacein (B0030, B0031, B0032, B0033, B0064) (easy)
Candidate Template
Threshold 0.75
"""
test_params = {
"name": "Algo1 - 1 Target (RBS30,31,32,33,64) - 1 against 1 - Threshold 0.75",
"id": "vio-1-target-B0030-B0031-B0032-B0033-B0064-1to1-th75",
"targets": {
"extension": ".seq",
"sequences_path": "/data/Imperial/src/violacein-basic-ass",
"seq_dir_names": ["output/rbs_one_easy/"],
},
"candidates": "/data/Imperial/src/matching/templates/template_violacein_02_one_easy.json",
"nbloop": 10,
"threshold": 0.75,
}
run_test(test_params)
def test_algo1_1_target_1to1_easy_th99():
"""
Test Algo1
1 Target RBS 30 31 32 33 64 (easy)
Candidate Template
Threshold 0.99
"""
test_params = {
"name": "Algo1 - 1 Target (RBS30,31,32,33,64) - 1 against 1 - Threshold 0.99",
"id": "vio-1-target-B0030-B0031-B0032-B0033-B0064-1to1-th99",
"targets": {
"extension": ".seq",
"sequences_path": "/data/Imperial/src/violacein-basic-ass",
"seq_dir_names": ["output/rbs_one_easy/"],
},
"candidates": "/data/Imperial/src/matching/templates/template_violacein_02_one_easy.json",
"nbloop": 10,
"threshold": 0.99,
}
run_test(test_params)
# /////////////////////////////////////////////////////////////////////////////////////////
def test_algo1_2_targets_th99():
"""
Test Algo1
2 Targets
Candidate Template
Threshold 0.75
Candidate Template
"""
test_params = {
"name": "Algo1 - 2 Targets - Candidate Template",
"id": "2-targets-template-th99",
"targets": {
"extension": ".seq",
"sequences_path": "/data/Imperial/src/violacein-basic-ass",
"seq_dir_names": ["output/rbs_two/"],
},
"candidates": "/data/Imperial/src/matching/templates/template_violacein_02.json",
"nbloop": 10,
"threshold": 0.99,
}
run_test(test_params)
def test_algo1_2_targets_th75():
"""
Test Algo1
2 Targets
Candidate Template
Threshold 0.75
"""
test_params = {
"name": "Algo1 - 2 Targets - Candidate Template",
"id": "2-targets-template-th75",
"targets": {
"extension": ".seq",
"sequences_path": "/data/Imperial/src/violacein-basic-ass",
"seq_dir_names": ["output/rbs_two/"],
},
"candidates": "/data/Imperial/src/matching/templates/template_violacein_02.json",
"nbloop": 10,
"threshold": 0.75,
}
run_test(test_params)
def test_algo1_10_targets():
"""
Test Algo1
10 Target
Candidate Template
"""
test_params = {
"name": "Algo1 - 10 Targets - Candidate Template",
"id": "target-10-template",
"targets": {
"extension": ".seq",
"sequences_path": "/data/Imperial/src/violacein-basic-ass",
"seq_dir_names": ["output/rbs_ten/"],
},
"candidates": "/data/Imperial/src/matching/templates/template_violacein_02.json",
"nbloop": 1,
}
run_test(test_params)
def main():
    """Command-line entry point; currently a no-op placeholder.

    The module is exercised through the test_algo1_* functions, so running
    it directly does nothing.
    """
    return None


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3526996 | <reponame>ATrain951/01.python-com_Qproject
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
    """Exercise the ``solution`` module against canned stdin/stdout."""

    @patch('builtins.input', side_effect=[
        '3',
        '5',
        '2 6 2 1 7',
        '4',
        '15 2 1 3',
        '5',
        '2 4 12 4 7',
    ])
    def test_case_0(self, input_mock=None):
        """Feed the mocked input lines and compare the captured output."""
        captured = io.StringIO()
        with redirect_stdout(captured):
            # Importing the module runs it; its prints land in ``captured``.
            import solution
        expected = '4 1\nMotu\n1 3\nPatlu\n3 2\nMotu\n'
        self.assertEqual(captured.getvalue(), expected)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
9765172 | #########################################################
#Copyright (c) 2020-present, drliang219
#All rights reserved.
#
#This source code is licensed under the BSD-style license found in the
#LICENSE file in the root directory of this source tree.
##########################################################
#########################################################
#:Date: 2017/12/13
#:Version: 1
#:Authors:
# - <NAME> <<EMAIL>>
# - LSC <<EMAIL>>
#:Python_Version: 2.7
#:Platform: Unix
#:Description:
# This is a class which detects whether computing nodes happens error or not.
##########################################################
import time
import threading
import logging
import ConfigParser
import os
import FailureType
import InstanceState
import LayerConfig
from FaultDetectionStrategy import FaultDetectionStrategy
from RESTClient import RESTClient
from NovaClient import NovaClient
class DetectionThread(threading.Thread):
    """Per-node fault-detection loop.

    One thread is created per compute node: it periodically runs the fault
    detection strategy, asks the REST server to recover permanent faults,
    and keeps the cluster's instance bookkeeping (deletions / migrations)
    in sync with the controller.
    """
    def __init__(self, cluster_name, node, polling_interval, protected_layers_string, instance_update_queue):
        threading.Thread.__init__(self)
        self.node = node
        self.__node_name = self.node.get_name()
        # init config parser
        config = ConfigParser.RawConfigParser()
        config.read('/etc/hass.conf')
        # init a logger for this thread
        self.__logger = logging.getLogger('{}'.format(self.__node_name))
        self.__logger.setLevel(config.get("log", "level"))
        # -- create file handler which logs even debug messages
        fh = logging.FileHandler('{}{}.log'.format(config.get("log", "folder_path"), self.__node_name)
                                 )
        fh.setLevel(logging.DEBUG)
        # -- create formatter and add it to the handlers
        formatter = logging.Formatter("%(asctime)s [%(levelname)s] : %(message)s")
        fh.setFormatter(formatter)
        # -- add the handlers to the logger
        self.__logger.addHandler(fh)
        self.__logger.info("-- finish init {} logger --".format(self.__node_name))
        self.cluster_name = node.cluster_name
        self.ipmi_status = node.ipmi_status
        self.polling_interval = polling_interval
        self.cluster_protected_layers_string = protected_layers_string
        # Flag polled by run(); stop() sets it to end the loop.
        self.loop_exit = False
        self.fault_detection_strategy = FaultDetectionStrategy(node, self.cluster_protected_layers_string, instance_update_queue)
        self.server = RESTClient.get_instance()
        self.__nova_client = NovaClient.get_instance()
    def run(self):
        """Main polling loop: detect faults, trigger recovery, sync instances."""
        counter = 1
        while not self.loop_exit:
            try:
                state = None
                if self.node.get_status() == FailureType.HEALTH:
                    # step 1: check whether fault occurs, and, step 2: check whether the fault is permanent fault
                    state = self.fault_detection_strategy.detect()
                    # for merged layer function: the system will merge layer when some layer detectors are disabled
                    counter += 1
                    if counter >= 5:
                        # Re-check the detector layout roughly every 5 polls.
                        self.fault_detection_strategy.check_protected_layers_detector(self.cluster_protected_layers_string)
                        counter = 1
                else:
                    # Node already marked faulty: keep probing the highest
                    # host-level layer until it reports healthy again.
                    while True:
                        state = self.fault_detection_strategy.detect_host_level_highest_layer()
                        if state == FailureType.HEALTH:
                            self.node.set_status(state)
                            break
            except Exception as e:
                self.__logger.error("DT: run exception: "+str(e))
                continue
            # when a fault occurs and is a permanent fault
            if isinstance(state, tuple) and state[0] in FailureType.FAIL_LEVEL:
                # step 3: recover the permanent fault
                self.__logger.warning("node({}): DetectionThread (run) - detection result (fault type, instance name): {}".format(self.__node_name, str(state)))
                try:
                    recovery_list = self._get_recovery_list(state[0])
                    for fail_type in recovery_list:
                        recovery_result = self._recover(fail_type, state[1])
                        if recovery_result:
                            host_level_fault_list = FailureType.FAIL_LEVEL[LayerConfig.HOST_LEVEL_RANGE[0]: LayerConfig.HOST_LEVEL_RANGE[1]+1]
                            if not set(recovery_list).isdisjoint(host_level_fault_list):
                                # A host-level fault was involved: re-arm the
                                # libvirt detector after recovery.
                                self.fault_detection_strategy.setup_libvirt_detector(self.cluster_protected_layers_string)
                            break
                except Exception as e:
                    self.__logger.error("node({}): DetectionThread, run - Exception : {}".format(self.__node_name, str(e)))
                    self.stop()
                self.server.update_db()
            # check instance state to update instance information in the cluster, such as VM deletection and VM migration
            self.__update_instance_information_in_cluster()
            time.sleep(self.polling_interval)
    def stop(self):
        """Ask the polling loop to exit after the current iteration."""
        self.loop_exit = True
    def _recover(self, fault_type, failed_component):
        """Ask the REST server to recover one fault; update node/instance state.

        Returns the (truthy on success) result from the server.
        """
        result = self.server.recover(fault_type, self.cluster_name, failed_component)
        if result: # recover success
            # if fault is at node level
            if fault_type in FailureType.FAIL_LEVEL[LayerConfig.HOST_LEVEL_RANGE[0]:LayerConfig.HOST_LEVEL_RANGE[1]+1]:
                self.node.set_status(FailureType.HEALTH)
            # if fault is at VM level
            elif fault_type in FailureType.FAIL_LEVEL[LayerConfig.INSTANCE_LEVEL_RANGE[0]:LayerConfig.INSTANCE_LEVEL_RANGE[1]+1]:
                self.fault_detection_strategy.set_instance_to_default(failed_component)
        else: # recover fail
            self.__logger.error("recover fail , change node status")
            # if fault is at node level
            if fault_type in FailureType.FAIL_LEVEL[LayerConfig.HOST_LEVEL_RANGE[0]:LayerConfig.HOST_LEVEL_RANGE[1]+1]:
                self.node.set_status(fault_type+" and recover fail")
            # if fault is at VM level
            elif fault_type in FailureType.FAIL_LEVEL[LayerConfig.INSTANCE_LEVEL_RANGE[0]:LayerConfig.INSTANCE_LEVEL_RANGE[1]+1]:
                self.fault_detection_strategy.set_instance_state(failed_component, fault_type)
        return result
    def _get_recovery_list(self, state):
        """Return the slice of FAIL_LEVEL layers to attempt for ``state``.

        NOTE(review): get_active_layers() appears to return a '0'/'1' bitmask
        string (it is sliced, reversed and searched with str.find) -- confirm.
        """
        fail_level_list = FailureType.FAIL_LEVEL
        temp_layer = self.fault_detection_strategy.get_active_layers()
        sub_temp_layer = temp_layer[:fail_level_list.index(state)]
        rev_sub_temp_layer = sub_temp_layer[::-1]
        index = rev_sub_temp_layer.find("1")
        if index < 0:
            # No active lower layer: retry every layer up to the failed one.
            return fail_level_list[:fail_level_list.index(state)+1]
        else:
            # Start from the nearest active layer below the failed one.
            return fail_level_list[len(sub_temp_layer)-index:fail_level_list.index(state)+1]
    def __update_instance_host_on_controller(self, instance_id):
        # Tell the controller the instance's host changed (e.g. migration).
        res = self.server.update_instance_host(self.cluster_name, instance_id)
        if "succeed" in str(res):
            self.__logger.info("DetectionThread - update instance host success")
        else:
            self.__logger.warning("DetectionThread - update instance host response: {}".format(res))
    def __delete_ha_instance_on_controller(self, instance_id):
        # Remove the instance from the HA controller's bookkeeping.
        res = self.server.delete_instance(self.cluster_name, instance_id)
        self.__logger.info("DetectionThread - delete HA Instance response: {}".format(res))
    def __update_instance_information_in_cluster(self):
        """Sync destroyed / migrating / migrated VMs with the controller."""
        instance_name_list = self.fault_detection_strategy.get_instance_name_list()
        for instance_name in instance_name_list:
            instance_state = self.fault_detection_strategy.get_instance_state(instance_name)
            instance_id = self.fault_detection_strategy.get_instance_id(instance_name)
            if instance_state in InstanceState.VM_DESTROYED:
                try:
                    # If nova still knows the VM it was not actually deleted.
                    self.__nova_client._get_vm(instance_id)
                    continue
                except Exception as e:
                    self.__logger.info("DetectionThread, __update_instance_information_in_cluster - Remove instance from cluster")
                    self.fault_detection_strategy.set_instance_state(instance_name, InstanceState.VM_DELETED)
                    self.__delete_ha_instance_on_controller(instance_id)
            elif instance_state in InstanceState.VM_MIGRATING:
                self.__logger.info("DetectionThread, __update_instance_information_in_cluster - {} is migrating ...".format(instance_name))
                self.__update_instance_host_on_controller(instance_id)
            elif instance_state in InstanceState.VM_MIGRATED:
                self.__logger.info("DetectionThread, __update_instance_information_in_cluster - {} is migrated ".format(instance_name))
                self.__update_instance_host_on_controller(instance_id)
if __name__ == "__main__":
    # Module is intended to be imported by the HA service; nothing to do standalone.
    pass
| StarcoderdataPython |
4902869 | import numpy as np
import pandas as pd
from interface import implements
from six import viewvalues
from toolz import groupby, merge
from .base import PipelineLoader
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.loaders.frame import DataFrameLoader
from zipline.pipeline.loaders.utils import (
next_event_indexer,
previous_event_indexer,
)
def required_event_fields(next_value_columns, previous_value_columns):
    """
    Compute the set of resource columns required to serve
    ``next_value_columns`` and ``previous_value_columns``.
    """
    # Metadata columns needed to align event indexers, plus every raw field
    # name that a loadable column is mapped to.
    required = {TS_FIELD_NAME, SID_FIELD_NAME, EVENT_DATE_FIELD_NAME}
    required.update(viewvalues(next_value_columns))
    required.update(viewvalues(previous_value_columns))
    return required
def validate_column_specs(events, next_value_columns, previous_value_columns):
    """
    Verify that the columns of ``events`` can be used by an EventsLoader to
    serve the BoundColumns described by ``next_value_columns`` and
    ``previous_value_columns``.
    """
    required = required_event_fields(next_value_columns, previous_value_columns)
    received = set(events.columns)
    missing = required - received
    # Guard clause: nothing to report when every required column is present.
    if not missing:
        return
    raise ValueError(
        "EventsLoader missing required columns {missing}.\n"
        "Got Columns: {received}\n"
        "Expected Columns: {required}".format(
            missing=sorted(missing),
            received=sorted(received),
            required=sorted(required),
        )
    )
class EventsLoader(implements(PipelineLoader)):
    """
    Base class for PipelineLoaders that supports loading the next and previous
    value of an event field.
    Does not currently support adjustments.
    Parameters
    ----------
    events : pd.DataFrame
        A DataFrame representing events (e.g. share buybacks or
        earnings announcements) associated with particular companies.
        ``events`` must contain at least three columns::
            sid : int64
                The asset id associated with each event.
            event_date : datetime64[ns]
                The date on which the event occurred.
            timestamp : datetime64[ns]
                The date on which we learned about the event.
    next_value_columns : dict[BoundColumn -> str]
        Map from dataset columns to raw field names that should be used when
        searching for a next event value.
    previous_value_columns : dict[BoundColumn -> str]
        Map from dataset columns to raw field names that should be used when
        searching for a previous event value.
    """
    def __init__(self,
                 events,
                 next_value_columns,
                 previous_value_columns):
        validate_column_specs(
            events,
            next_value_columns,
            previous_value_columns,
        )
        # Events without an event date can never be served; drop them.
        events = events[events[EVENT_DATE_FIELD_NAME].notnull()]
        # We always work with entries from ``events`` directly as numpy arrays,
        # so we coerce from a frame to a dict of arrays here.
        self.events = {
            name: np.asarray(series)
            for name, series in (
                events.sort_values(EVENT_DATE_FIELD_NAME).iteritems()
            )
        }
        # Columns to load with self.load_next_events.
        self.next_value_columns = next_value_columns
        # Columns to load with self.load_previous_events.
        self.previous_value_columns = previous_value_columns
    def split_next_and_previous_event_columns(self, requested_columns):
        """
        Split requested columns into columns that should load the next known
        value and columns that should load the previous known value.
        Parameters
        ----------
        requested_columns : iterable[BoundColumn]
        Returns
        -------
        next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]
            ``requested_columns``, partitioned into sub-sequences based on
            whether the column should produce values from the next event or the
            previous event
        """
        def next_or_previous(c):
            if c in self.next_value_columns:
                return 'next'
            elif c in self.previous_value_columns:
                return 'previous'
            raise ValueError(
                "{c} not found in next_value_columns "
                "or previous_value_columns".format(c=c)
            )
        groups = groupby(next_or_previous, requested_columns)
        return groups.get('next', ()), groups.get('previous', ())
    def next_event_indexer(self, dates, sids):
        """Build a (dates x sids) indexer into the next known event rows."""
        return next_event_indexer(
            dates,
            sids,
            self.events[EVENT_DATE_FIELD_NAME],
            self.events[TS_FIELD_NAME],
            self.events[SID_FIELD_NAME],
        )
    def previous_event_indexer(self, dates, sids):
        """Build a (dates x sids) indexer into the previous known event rows."""
        return previous_event_indexer(
            dates,
            sids,
            self.events[EVENT_DATE_FIELD_NAME],
            self.events[TS_FIELD_NAME],
            self.events[SID_FIELD_NAME],
        )
    def load_next_events(self, domain, columns, dates, sids, mask):
        """Load AdjustedArrays for columns that want the *next* event value."""
        if not columns:
            return {}
        return self._load_events(
            name_map=self.next_value_columns,
            indexer=self.next_event_indexer(dates, sids),
            domain=domain,
            columns=columns,
            dates=dates,
            sids=sids,
            mask=mask,
        )
    def load_previous_events(self, domain, columns, dates, sids, mask):
        """Load AdjustedArrays for columns that want the *previous* event value."""
        if not columns:
            return {}
        return self._load_events(
            name_map=self.previous_value_columns,
            indexer=self.previous_event_indexer(dates, sids),
            domain=domain,
            columns=columns,
            dates=dates,
            sids=sids,
            mask=mask,
        )
    def _load_events(self,
                     name_map,
                     indexer,
                     domain,
                     columns,
                     dates,
                     sids,
                     mask):
        """Shared implementation: slot event values via ``indexer`` and wrap
        each column in a DataFrameLoader to produce AdjustedArrays."""
        def to_frame(array):
            return pd.DataFrame(array, index=dates, columns=sids)
        assert indexer.shape == (len(dates), len(sids))
        out = {}
        for c in columns:
            # Array holding the value for column `c` for every event we have.
            col_array = self.events[name_map[c]]
            if not len(col_array):
                # We don't have **any** events, so return col.missing_value
                # every day for every sid. We have to special case empty events
                # because in normal branch we depend on being able to index
                # with -1 for missing values, which fails if there are no
                # events at all.
                raw = np.full(
                    (len(dates), len(sids)), c.missing_value, dtype=c.dtype
                )
            else:
                # Slot event values into sid/date locations using `indexer`.
                # This produces a 2D array of the same shape as `indexer`,
                # which must be (len(dates), len(sids))`.
                raw = col_array[indexer]
                # indexer will be -1 for locations where we don't have a known
                # value. Overwrite those locations with c.missing_value.
                raw[indexer < 0] = c.missing_value
            # Delegate the actual array formatting logic to a DataFrameLoader.
            loader = DataFrameLoader(c, to_frame(raw), adjustments=None)
            out[c] = loader.load_adjusted_array(
                domain, [c], dates, sids, mask
            )[c]
        return out
    def load_adjusted_array(self, domain, columns, dates, sids, mask):
        """PipelineLoader entry point: dispatch to next/previous loaders and merge."""
        n, p = self.split_next_and_previous_event_columns(columns)
        return merge(
            self.load_next_events(domain, n, dates, sids, mask),
            self.load_previous_events(domain, p, dates, sids, mask),
        )
| StarcoderdataPython |
1736086 | <filename>aisutils/daemon.py<gh_stars>10-100
#!/usr/bin/env python
__author__ = '<NAME>'
__version__ = '$Revision: 11839 $'.split()[1]
__revision__ = __version__
__date__ = '$Date: 2009-05-05 17:34:17 -0400 (Tue, 05 May 2009) $'.split()[1]
__copyright__ = '2007, 2008'
__license__ = 'Apache 2.0'
__doc__ = '''
Daemon tool to detach from the terminal
@requires: U{epydoc<http://epydoc.sourceforge.net/>} > 3.0alpha3
@status: under development
@since: 2008-Feb-04
@undocumented: __doc__
@todo: Clean way to shut down
'''
import os
def stdCmdlineOptions(parser, skip_short=False):
    '''
    Standard command line options

    @param parser: OptionParser parser that will get the additional options
    @param skip_short: when True, only the long --daemon switch is registered
    '''
    # The short '-d' switch is optional so callers can reserve it for
    # something else.
    flags = ('--daemon',) if skip_short else ('-d', '--daemon')
    parser.add_option(*flags,
                      dest='daemon_mode',
                      default=False, action='store_true',
                      help='Detach from the terminal and run as a daemon service.'
                           + ' Returns the pid. [default: %default]')
    # FIX: have an option to make a default pid file location
    parser.add_option('--pid-file',
                      dest='pid_file',
                      default=None,
                      help='Where to write the process id when in daemon mode')
def start(pid_file=None):
    '''
    Jump to daemon mode.

    @param pid_file: optional path; when given, the daemon's pid is written
        there (one line, trailing newline)
    '''
    create()
    if pid_file is not None:
        with open(pid_file, 'w') as handle:
            handle.write(str(os.getpid()) + '\n')
def create():
"""
nohup like function to detach from the terminal. Best to call start(), not this.
"""
try:
pid = os.fork()
except OSError, except_params:
raise Exception, "%s [%d]" % (except_params.strerror, except_params.errno)
if (pid == 0):
# The first child.
os.setsid()
try:
pid = os.fork() # Fork a second child.
except OSError, except_params:
raise Exception, "%s [%d]" % (except_params.strerror, except_params.errno)
if (pid != 0):
os._exit(0) # Exit parent (the first child) of the second child.
else:
os._exit(0) # Exit parent of the first child.
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = 1024
# Iterate through and close all file descriptors.
if True:
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
# Send all output to /dev/null - FIX: send it to a log file
os.open('/dev/null', os.O_RDWR)
os.dup2(0, 1)
os.dup2(0, 2)
return (0)
| StarcoderdataPython |
340284 | <gh_stars>1-10
class Base:
WINDOW_W = 700
WINDOW_H = 550
GAME_WH = 500
SIZE = 5
FPS = 60
DEBUG = False
COLORS = {
'0': (205, 193, 180),
'2': (238, 228, 218),
'4': (237, 224, 200),
'8': (242, 177, 121),
'16': (245, 149, 99),
'32': (246, 124, 95),
'64': (246, 94, 59),
'128': (237, 207, 114),
'256': (237, 204, 97),
'512': (237, 200, 80),
'1024': (237, 197, 63),
'2048': (200, 63, 63),
'4096': (170, 30, 70),
'8192': (150, 30, 90),
'16384': (120, 30, 110)
}
# NOTE(review): "SupperFast" is presumably a typo for "SuperFast"; the name is
# kept unchanged for compatibility with existing callers.
class SupperFast(Base):
    """Fastest mode: no delay between steps and no animation."""
    STEP_TIME = 0
    ANIMATION = False
class Fast(Base):
    """Quick mode with animation and a short step delay."""
    STEP_TIME = 0.3
    ANIMATION = True
class Watch(Base):
    """Slow mode intended for watching the game play out."""
    STEP_TIME = 0.9
    ANIMATION = True
class Development(Base):
    """Debug mode: slowest stepping with DEBUG output enabled."""
    STEP_TIME = 1.5
    ANIMATION = True
    DEBUG = True
| StarcoderdataPython |
4846822 | <filename>app/models.py
from datetime import datetime
import sys
import json
import os
import re
import parsedatetime as pdt
from pymongo import MongoClient
MONGO_URI = os.environ.get('MONGODB_URI')
if not MONGO_URI:
    sys.exit("\nMONGODB_URI environment variable not set, see https://docs.mongodb.com/manual/reference/connection-string/\n")
# BUG FIX: the URI was validated above but never passed to MongoClient(), so
# the code always connected to localhost. Use the configured connection string.
CLIENT = MongoClient(MONGO_URI)
EMAIL_COLLECTION = CLIENT['futureboard']['emails']
EVENT_COLLECTION = CLIENT['futureboard']['events']
# Characters we don't want in our message ids
DEL_CHARS = ''.join(c for c in map(chr, range(256)) if not c.isalnum())
# Date layouts tried in order by get_date_format().
DATE_FORMATS = ['%a %b %d %X %Y', '%a, %d %b %Y %X', '%d %b %Y %X']
cal = pdt.Calendar()
def read_emails(fpath):
    """Load one parsed-email JSON file and return the decoded value.

    ``fpath`` is a path relative to the top-level ``parsed_data`` directory
    (e.g. ``"2017-01.json"``).
    """
    path = os.path.join(os.path.dirname(__file__), '..', 'parsed_data/', fpath)
    # Context manager ensures the file handle is closed even if parsing fails
    # (the original leaked the handle by calling open() without closing).
    with open(path, 'r') as emails:
        return json.load(emails)
def get_date_format(date_string):
    """Parse ``date_string`` against the known DATE_FORMATS.

    Returns a datetime for the first layout that matches, or None when no
    layout matches.
    """
    for time_format in DATE_FORMATS:
        try:
            return datetime.strptime(date_string, time_format)
        except ValueError:
            # Wrong layout for this candidate format; try the next one.
            # (Narrowed from a bare ``except`` that hid real bugs.)
            pass
    return None
def get_email_model(email_json):
    """Convert a JSON email message into the dict layout stored in Mongo."""
    # Strip the numeric UTC offset (" -0500" / " +0000") before parsing.
    raw_date = re.split('\s\-|\s\+', email_json["date"])[0]
    return {
        "message_id": email_json["id"],
        "text": email_json["text"],
        "subject": email_json["subject"],
        "date": get_date_format(raw_date),
        "author_email": email_json["author_email"],
        "author_name": email_json["author_name"],
        "replying_to": email_json["replying_to"],
    }
def identify_events(data, src_id, date, collection):
    """Finds dates in the subjects of emails or texts, and creates events from those dates. Data is a string, src_id is the
    unique id associated with the email or text in its respective collection, and date is the datetime the email or text was sent,
    and collection is the collection the email or text goes into
    """
    is_event = False
    # parseDT returns a (datetime, status) pair; a truthy status means a
    # date/time was recognised in the string, relative to ``date``.
    event_date = cal.parseDT(data, date)
    if event_date[1]:
        # NOTE(review): the dedupe check on src_id is commented out, so the
        # same source can create duplicate events -- confirm if intended.
        # if not EVENT_COLLECTION.find({"src_id": src_id}):
        EVENT_COLLECTION.insert({'data': data, 'date': event_date[0], 'collection': collection, 'src_id': src_id})
        is_event = True
    return is_event
def add_emails(date=None):
    """Add emails from the parsed_data directory to the database.

    When ``date`` is given, only that month's file is loaded; otherwise
    every file in parsed_data is loaded.
    """
    if date:
        # BUG FIX: read_emails() already joins its argument onto the
        # parsed_data directory, so pass just the file name (the original
        # passed a full path, which got joined onto parsed_data a second
        # time). This also matches the else branch below.
        emails = [get_email_model(email) for email in read_emails(date + ".json")]
        print(EMAIL_COLLECTION.insert_many(emails).inserted_ids)
    else:
        for email_chunk in os.listdir(os.path.join(os.path.dirname(__file__), '..', 'parsed_data/')):
            print(email_chunk)
            emails = [get_email_model(email) for email in read_emails(email_chunk)]
            EMAIL_COLLECTION.insert_many(emails).inserted_ids
# def parse_events(data=None):
# """Finds dates in the subjects of emails or texts, and creates events from those dates. Data is a list of id's; if not
# specified, parses all emails in EMAIL_COLLECTION and TEXT_COLLECTION
# """
# if data:
# # Find id's in database, update in database
# emails = EMAIL_COLLECTION.find({"_id": {"$in": data}})
# texts = TEXT_COLLECTION.find({"_id": {"$in": data}})
# else:
# emails = EMAIL_COLLECTION.find()
# texts = TEXT_COLLECTION.find()
# for email in emails:
# # If the subject is not in EVENTS, strip out an event and add that and the subject to EVENTS
# if not EVENT_COLLECTION.find({"src_id": emai['_id']}):
# EVENT_COLLECTION.insert({"subject": email['subject'], "body": email['text'], "type": 'email', "src_id": email["_id"]})
# for text in texts:
# # If the text is not in TEXTS, strip out an event and add that and the text to EVENTS
# if not EVENT_COLLECTION.find({"src_id": text['_id']}):
# EVENT_COLLECTION.insert({"body": text['body'], "type": 'text', "src_id": text["_id"]})
def reset_db():
    """
    Resets the database and adds all the JSONs stored in the parsed_data directory
    """
    # NOTE(review): only the email collection is purged; EVENT_COLLECTION is
    # left untouched -- confirm whether stale events should also be removed.
    EMAIL_COLLECTION.delete_many({})
    add_emails()
if __name__ == "__main__":
    reset_db()
| StarcoderdataPython |
11283773 | <filename>tests/__init__.py
class Test:
    """Tiny example class holding two operands."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Added for debuggability; construction args round-trip in the output.
        return f'{type(self).__name__}(x={self.x!r}, y={self.y!r})'

    def mult(self):
        """Return the product of the two stored operands."""
        return self.x * self.y
| StarcoderdataPython |
1989718 | from rest_framework import serializers
from api import models
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializer for UserProfile objects.

    The password is write-only so it never appears in API responses, and is
    rendered as a password input in the browsable API.
    """
    class Meta:
        model=models.UserProfile
        fields=('id','username','name','password')
        extra_kwargs={
            'password':{
                'write_only':True,
                'style':{'input_type':'password'}
            }
        }
    def create(self,validated_data):
        """Create a new user through the model manager so the password is hashed."""
        user=models.UserProfile.objects.create_user(
            username=validated_data['username'],
            name=validated_data['name'],
            password=validated_data['password']
        )
        return user
class ProfileTodoItemSerializer(serializers.ModelSerializer):
    """Serializer for todo items; the owning profile is read-only (set by the view)."""
    class Meta:
        model=models.ProfileTodoItem
        fields=('id','user_profile','todo','created_on')
        extra_kwargs={'user_profile':{'read_only':True}}
| StarcoderdataPython |
12854129 | import json
from typing import Callable, TypeVar, cast
from .constants import CUSTOM_LOG_FORMAT, CUSTOM_EVENT_NAME_MAP, CUSTOM_PAGE_NAME_MAP
from datetime import datetime
import logging
from airflow.settings import TIMEZONE
from airflow.utils.session import create_session
import functools
T = TypeVar("T", bound=Callable)
_logger = logging.getLogger(__name__)
def access_log(event, page, msg):
    '''
    Decorator that writes an access-log line for later scraping/analysis.
    ``event`` and ``page`` are keys into CUSTOM_EVENT_NAME_MAP and
    CUSTOM_PAGE_NAME_MAP; the line layout comes from CUSTOM_LOG_FORMAT
    (see constants.py for the format and valid keys).

    Example:
        @access_log('VIEW', 'CURVES', 'view the curve comparison page')
    '''
    def decorator(func: T) -> T:
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # FIX: raw argument dumps were logged at INFO level (leftover
            # debugging, and kwargs may contain sensitive values); keep them
            # at DEBUG so they stay out of the access log.
            _logger.debug(repr(args))
            _logger.debug(repr(kwargs))
            ret = func(*args, **kwargs)
            # Imported lazily so the module can load outside a Flask context.
            from flask_login import current_user  # noqa: F401
            full_msg = CUSTOM_LOG_FORMAT.format(
                datetime.now(tz=TIMEZONE).strftime("%Y-%m-%d %H:%M:%S"),
                current_user if current_user and current_user.is_active else 'anonymous',
                getattr(current_user, 'last_name', '') if current_user and current_user.is_active else 'anonymous',
                CUSTOM_EVENT_NAME_MAP[event], CUSTOM_PAGE_NAME_MAP[page], msg
            )
            _logger.info(full_msg)
            return ret
        return cast(T, wrapper)
    return decorator
| StarcoderdataPython |
1718955 | <reponame>jeshan/botodocs
from boto3.resources.model import Action, Waiter
from botocore.waiter import WaiterModel
import pythonic
from util import create_new_file, get_botostubs_message, get_link_to_client_function, write_lines, get_variable_name_for
def create_waiter_index(path, client_name, service_name, waiter_name):
    """Create the waiters index page and return its introductory lines."""
    create_new_file(path)
    snake_waiter = pythonic.xform_name(waiter_name)
    intro = f"""You get a waiter by calling `get_waiter` on a certain client:
```python
import boto3
client = boto3.client('{client_name}')
waiter = client.get_waiter('{snake_waiter}') # type: botostubs.{service_name}.{waiter_name}Waiter
```
"""
    return [
        f'# {service_name} waiters',
        intro,
        get_botostubs_message(),
        'The available client waiters are:',
    ]
def get_example_waiter_snippet(name, pythonic_name, client_name, service, fn_name, service_path):
    """Return the markdown usage snippet plus Accepts/Returns sections for a waiter page."""
    return f"""```python
import boto3
client = boto3.client('{client_name}')
waiter = client.get_waiter('{pythonic_name}') # type: botostubs.{service}.{name}Waiter
waiter.wait(
    WaiterConfig={{'Delay': 123, 'MaxAttempts': 123}}, OtherParams=...
)
```
{get_botostubs_message()}
### Accepts
_See {client_name}_client.[{fn_name}]({service_path}/client/operations/{fn_name}#Accepts) for other parameters that you can pass in._
### Returns
None
"""
def get_waiter_page(name, fn_name, client_name, class_name, waiter_path, service_path):
    """Assemble the markdown fragments for one waiter page.

    Returns (sidebar list item, usage snippet, description line, headline).
    """
    pythonic_name = pythonic.xform_name(name)
    headline = f'# {pythonic_name} waiter'
    signature = f"""
{get_example_waiter_snippet(name, pythonic_name, client_name, class_name, fn_name, service_path)}
    """
    documentation = f'Polls {client_name}_client.{get_link_to_client_function(fn_name, service_path)} every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.'
    list_item = f'- [{pythonic_name}]({waiter_path})'
    return list_item, signature, documentation, headline
def handle_waiters(client, client_name, class_name, service_name, service_path, sidebar_lines):
    """Generate the waiters index plus one page per waiter for a service client."""
    waiter_config = client._get_waiter_config()
    waiter_model = WaiterModel(waiter_config) if 'waiters' in waiter_config else None
    if not waiter_model:
        # Service has no waiters; nothing to document.
        return
    waiters_path = f'{service_path}/waiters'
    sidebar_lines.append(f'  - [Waiters]({waiters_path})')
    docs_waiters_path = f'docs/{waiters_path}.md'
    waiter_names = waiter_model.waiter_names
    # Any waiter works as the example in the index snippet; use the first.
    example_waiter_name = waiter_names[0]
    waiter_list_items = create_waiter_index(docs_waiters_path, client_name, service_name, example_waiter_name)
    for name in waiter_names:
        handle_waiter(class_name, client_name, name, service_path, waiter_list_items, waiter_model, waiters_path)
    write_lines(docs_waiters_path, waiter_list_items)
def handle_waiter(class_name, client_name, name, service_path, waiter_list_items, waiter_model, waiters_path):
    """Write the documentation page for one waiter and record its index entry."""
    waiter = waiter_model.get_waiter(name)
    pythonic_name = pythonic.xform_name(waiter.operation)
    waiter_path = f'{waiters_path}/{pythonic.xform_name(name)}'
    docs_waiter_path = f'docs/{waiter_path}.md'
    list_item, signature, documentation, headline = get_waiter_page(
        name, pythonic_name, client_name, class_name, waiter_path, service_path
    )
    # FIX: create_new_file was called twice in a row; once is enough.
    create_new_file(docs_waiter_path)
    write_lines(docs_waiter_path, [headline, documentation, signature])
    waiter_list_items.append(list_item)
def handle_sub_resource_waiters(resource: Action, resource_list_items, service_path):
    """Append waiter documentation sections for a sub-resource's waiters."""
    waiters = resource.resource.model.waiters
    if waiters:
        resource_list_items.extend(['# Waiters', 'The following waiters are available:'])
        waiters_path = f'{service_path}/waiters'
        waiter: Waiter
        for waiter in waiters:
            name = pythonic.xform_name(waiter.waiter_name)
            variable_name = get_variable_name_for(resource.name)
            resource_list_items.append(f'## {waiter.name}')
            resource_list_items.append(
                f"""```python
{variable_name}.{waiter.name}(...)
```
"""
            )
            # Resource waiters are thin wrappers around the client waiter;
            # link the reader to the client-level page.
            resource_list_items.append(
                f'> Note that this waiter delegates to the client [{name}]({waiters_path}/{name}) waiter'
            )
| StarcoderdataPython |
341639 | <filename>token.py
# Maps a lexeme (or abstract category such as INT/ID/EOF) to its token-type name.
tokentype = {
    'INT': 'INT',
    'FLOAT': 'FLOAT',
    'STRING': 'STRING',
    'CHAR': 'CHAR',
    '+': 'PLUS',
    '-': 'MINUS',
    '*': 'MUL',
    '/': 'DIV',
    '=': 'ASSIGN',
    '%': 'MODULO',
    ':': 'COLON',
    ';': 'SEMICOLON',
    '<': 'LT',
    '>': 'GT',
    '[': 'O_BRACKET',
    ']': 'C_BRACKET',
    '(': 'O_PAREN',
    ')': 'C_PAREN',
    '{': 'O_BRACE',
    '}': 'C_BRACE',
    '&': 'AND',
    '|': 'OR',
    '!': 'NOT',
    '^': 'EXPO',
    'ID': 'ID',
    'EOF': 'EOF'
}
class Token:
    """A lexical token: a type tag plus the literal value it carries."""

    def __init__(self, type, value):
        self.type = type
        self.value = value

    def __str__(self):
        # Same "<TYPE: value>" rendering as before, via str.format.
        return '<{0}: {1}>'.format(self.type, self.value)

    __repr__ = __str__
| StarcoderdataPython |
9776386 | <gh_stars>1-10
import os
import sys
from cps.base import BaseClient
class WebClient(BaseClient):
    # Python 2 command-line client for a hosted web service: manages
    # proxy/web/backend nodes, SSH keys, code versions and node migration.

    def info(self, service_id):
        # Print node IPs per role; per-node errors are collected and
        # reported once at the end as a single warning.
        service = BaseClient.info(self, service_id)
        nodes = self.callmanager(service['sid'], "list_nodes", False, {})
        if 'error' in nodes:
            return
        errmsg = ''
        for what in 'proxy', 'web', 'backend':
            print what,
            for proxy in nodes[what]:
                params = { 'serviceNodeId': proxy }
                details = self.callmanager(service['sid'], "get_node_info", False, params)
                if 'error' in details:
                    errmsg = errmsg + details['error'] + "\n"
                else:
                    print details['serviceNode']['ip'],
            print
        if errmsg:
            print 'WARNING: %s' % errmsg

    def upload_key(self, service_id, filename):
        # Upload an SSH public key; the manager replies with 'outcome'.
        contents = open(filename).read()
        files = [ ( 'key', filename, contents ) ]
        res = self.callmanager(service_id, "/", True, { 'method': "upload_authorized_key", }, files)
        if 'error' in res:
            print res['error']
        else:
            print res['outcome']

    def upload_code(self, service_id, filename):
        # Upload a new application code version.
        contents = open(filename).read()
        files = [ ( 'code', filename, contents ) ]
        res = self.callmanager(service_id, "/", True, { 'method': "upload_code_version", }, files)
        if 'error' in res:
            print res['error']
        else:
            print "Code version %(codeVersionId)s uploaded" % res

    def download_code(self, service_id, version):
        # Look up the stored filename for `version`, then download it
        # to that filename in the current directory.
        res = self.callmanager(service_id, 'list_code_versions', False, {})
        if 'error' in res:
            print res['error']
            sys.exit(0)
        filenames = [ code['filename'] for code in res['codeVersions']
                      if code['codeVersionId'] == version ]
        if not filenames:
            print "ERROR: Cannot download code: invalid version %s" % version
            sys.exit(0)
        destfile = filenames[0]
        params = { 'codeVersionId': version }
        res = self.callmanager(service_id, "download_code_version",
                               False, params)
        if 'error' in res:
            print res['error']
        else:
            open(destfile, 'w').write(res)
            print destfile, 'written'

    def delete_code(self, service_id, code_version):
        params = { 'codeVersionId': code_version }
        res = self.callmanager(service_id, "delete_code_version",
                               True, params)
        if 'error' in res:
            print res['error']
        else:
            print code_version, 'deleted'

    def migrate_nodes(self, service_id, migration_plan, delay=None):
        # `migration_plan` is a list of {'from_cloud', 'vmid', 'to_cloud'}
        # dicts; `delay` is an optional scheduling delay.
        data = {}
        data['migration_plan'] = migration_plan
        if delay is not None:
            data['delay'] = delay
        res = self.callmanager(service_id, "migrate_nodes", True, data)
        return res

    def usage(self, cmdname):
        # Extend the base usage text with web-service-specific sub-commands.
        BaseClient.usage(self, cmdname)
        print "    add_nodes         serviceid b w p [cloud]  # add b backend, w web and p proxy nodes"
        print "    remove_nodes      serviceid b w p    # remove b backend, w web and p proxy nodes"
        print "    list_keys         serviceid          # list authorized SSH keys"
        print "    upload_key        serviceid filename # upload an SSH key"
        print "    list_uploads      serviceid          # list uploaded code versions"
        print "    upload_code       serviceid filename # upload a new code version"
        print "    download_code     serviceid version  # download a specific code version"
        print "    delete_code       serviceid version  # delete a specific code version"
        # implemented in {php,java}.py
        print "    enable_code       serviceid version  # set a specific code version active"
        print "    migrate_nodes     serviceid from_cloud:vmid:to_cloud[,from_cloud:vmid:to_cloud]* [delay]"

    def main(self, argv):
        # Dispatcher: validates positional arguments per sub-command, then
        # either calls a method on self or issues the manager call directly.
        command = argv[1]

        # Check serviceid for all the commands requiring one
        if command in ( 'add_nodes', 'remove_nodes', 'list_keys',
                        'upload_key', 'list_uploads', 'upload_code',
                        'enable_code', 'download_code', 'delete_code' ):
            try:
                sid = int(argv[2])
            except (IndexError, ValueError):
                self.usage(argv[0])
                sys.exit(0)
            self.check_service_id(sid)

        # Check provided filename for all the commands requiring one
        if command in ( 'upload_key', 'upload_code' ):
            try:
                filename = argv[3]
                if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
                    raise IndexError
            except IndexError:
                self.usage(argv[0])
                sys.exit(0)
            getattr(self, command)(sid, filename)

        if command == 'list_keys':
            res = self.callmanager(sid, 'list_authorized_keys', False, {})
            for key in res['authorizedKeys']:
                print key

        if command == 'list_uploads':
            res = self.callmanager(sid, 'list_code_versions', False, {})

            # Mark the currently-active version with '*' in the table.
            def add_cur(row):
                if 'current' in row:
                    row['current'] = '     *'
                else:
                    row['current'] = ''
                return row

            data = [ add_cur(el) for el in res['codeVersions'] ]
            print self.prettytable(( 'current', 'codeVersionId', 'filename', 'description' ), data)

        if command in ( 'enable_code', 'download_code', 'delete_code' ):
            try:
                code_version = argv[3]
            except IndexError:
                self.usage(argv[0])
                sys.exit(0)
            getattr(self, command)(sid, code_version)

        if command in ( 'add_nodes', 'remove_nodes' ):
            try:
                params = {
                    'backend': int(argv[3]),
                    'web': int(argv[4]),
                    'proxy': int(argv[5])
                }
            except (IndexError, ValueError):
                self.usage(argv[0])
                sys.exit(0)
            if command == 'add_nodes':
                if len(argv) == 6:
                    params['cloud'] = 'default'
                else:
                    params['cloud'] = argv[6]
            # call the method
            res = self.callmanager(sid, command, True, params)
            if 'error' in res:
                print res['error']
            else:
                print "Service", sid, "is performing the requested operation (%s)" % command

        if command == 'migrate_nodes':
            try:
                sid = int(argv[2])
            except (IndexError, ValueError):
                print "ERROR: missing the service identifier argument after the sub-command name."
                sys.exit(1)
            self.check_service_id(sid)
            delay = None
            if len(sys.argv) < 4:
                print "ERROR: missing arguments to migrate_nodes sub-command."
                sys.exit(1)
            elif len(sys.argv) > 4:
                delay = sys.argv[4]
                # NOTE(review): `delay` comes from argv and is always a string,
                # so isinstance(delay, int) is always False — this warning
                # prints for every provided delay and execution continues.
                # int(delay) below will raise ValueError for non-numeric input.
                if not isinstance(delay, int) or int(delay) < 0:
                    print "ERROR: delay argument must be a positive or null integer."
                delay = int(delay)
            # NOTE(review): this branch is unreachable — any argv longer than
            # 5 already matched the `> 4` branch above.
            elif len(sys.argv) > 5:
                print "ERROR: too many arguments to migrate_nodes sub-command."
                sys.exit(1)
            migration_plan = []
            migr_arg = sys.argv[3].split(',')
            for migrate_node in migr_arg:
                try:
                    from_cloud, node_id, dest_cloud = migrate_node.split(':')
                    migr_node = {'from_cloud': from_cloud, 'vmid': node_id, 'to_cloud': dest_cloud}
                    migration_plan.append(migr_node)
                except:
                    print "ERROR: format error on migration argument '%s'." % migrate_node
                    sys.exit(1)
            res = self.migrate_nodes(sid, migration_plan, delay)
            if 'error' in res:
                print "ERROR: %s" % (res['error'])
            else:
                print "Migration started..."
| StarcoderdataPython |
def poly_consolidate(poly):
    """Merge terms that share the same exponent vector by summing coefficients.

    A polynomial is a list of [coefficient, [exponents...]] terms; the
    result preserves first-seen term order.
    """
    totals = {}
    for coeff, power in poly:
        key = tuple(power)
        if key in totals:
            totals[key] += coeff
        else:
            totals[key] = coeff
    return [[c, list(p)] for p, c in totals.items()]
def poly_degree(poly):
    """Return the total degree: the largest sum of exponents over all terms (0 if empty)."""
    return max((sum(power) for _, power in poly), default=0)
def poly_find_degree(poly, variable):
    """Return the highest exponent of the 1-indexed `variable` across all terms (0 if empty)."""
    return max((term[1][variable - 1] for term in poly), default=0)
def poly_remove_zeros(poly):
    """Replace zero-coefficient terms with the canonical zero term, in place.

    The canonical zero term is [0, [0.0, ..., 0.0]] with one exponent slot
    per variable. Returns the (mutated) input list.
    """
    zero_power = [0.0] * len(poly[0][1])
    for idx, (coeff, _) in enumerate(poly):
        if coeff == 0:
            poly[idx] = [0, zero_power]
    return poly
def poly_pop_zeros(poly):
    """Drop all zero-coefficient terms from `poly` (mutates and returns it).

    Terms are first normalised via poly_remove_zeros so every zero term
    compares equal to the canonical [0, [0.0, ...]] form.
    """
    zero_term = [0, [0.0] * len(poly[0][1])]
    poly_remove_zeros(poly)
    index = 0
    while index < len(poly):
        if poly[index] == zero_term:
            poly.pop(index)
        else:
            index += 1
    return poly
def poly_sort(poly):
    """Return a new list of terms sorted by exponents, descending.

    Variable 1 is most significant: repeated stable sorts from the last
    exponent axis to the first give a lexicographic descending order.
    """
    ordered = list(poly)
    for axis in reversed(range(len(poly[0][1]))):
        ordered.sort(key=lambda term, axis=axis: term[1][axis], reverse=True)
    return ordered
def poly_leading_coeff(poly):
    """Return the coefficient of the highest-order term under poly_sort ordering."""
    return poly_sort(poly)[0][0]
def poly_find_coefficient(poly, power):
    """Return the coefficient of the first term whose exponent vector equals `power`, else 0."""
    for coeff, term_power in poly:
        if term_power == power:
            return coeff
    return 0
# Might move this to univariate polynomials. Depending on how zeros are implemented.
def poly_quadratic_zeros(poly):
    """Return the two real roots of a univariate quadratic polynomial.

    Returns ('C', 'C') when the roots are not real and ('NaN', 'NaN') when
    `poly` is not of degree 2.
    """
    if poly_degree(poly) == 2:
        # Bug fix: exponent vectors are lists, so the coefficients must be
        # looked up by [power]; the original passed bare ints (2, 1, 0),
        # which never matched and made a/b/c always 0 (ZeroDivisionError).
        a = poly_find_coefficient(poly, [2])
        b = poly_find_coefficient(poly, [1])
        c = poly_find_coefficient(poly, [0])
        discriminant = b ** 2 - 4 * a * c
        if discriminant > 0:
            zero1 = (-b + discriminant ** 0.5) / (2 * a)
            zero2 = (-b - discriminant ** 0.5) / (2 * a)
        else:
            # NOTE(review): a zero discriminant (double real root) also
            # falls into this branch, matching the original behavior.
            print("The zeroes are complex")
            zero1 = 'C'
            zero2 = 'C'
    else:
        print("The polynomials is not quadratic")
        zero1 = 'NaN'
        zero2 = 'NaN'
    return zero1, zero2
def poly_addition(poly1, poly2):
    """Return poly1 + poly2, consolidated, sorted, with zero terms removed."""
    combined = poly_consolidate(poly1 + poly2)
    return poly_pop_zeros(poly_sort(poly_consolidate(poly_sort(combined))))
def poly_subtraction(poly1, poly2):
    """Return poly1 - poly2, consolidated, sorted, with zero terms removed."""
    negated = [[-coeff, power] for coeff, power in poly2]
    combined = poly_sort(poly_consolidate(poly1 + negated))
    return poly_pop_zeros(poly_sort(poly_consolidate(combined)))
def poly_scalar_multiplication(poly, scalar):
    """Return `poly` scaled by `scalar`, consolidated, sorted, zeros removed."""
    scaled = [[coeff * scalar, power] for coeff, power in poly]
    return poly_pop_zeros(poly_sort(poly_consolidate(scaled)))
def poly_poly_multiplication(poly1, poly2):
    """Return the product of two polynomials, consolidated, sorted, zeros removed."""
    product = []
    for coeff1, power1 in poly1:
        for coeff2, power2 in poly2:
            # Exponent vectors add component-wise; all terms are assumed to
            # share one arity (number of variables).
            combined = [power1[k] + power2[k] for k in range(len(power1))]
            product.append([coeff1 * coeff2, combined])
    return poly_pop_zeros(poly_sort(poly_consolidate(product)))
def poly_scalar_division(poly, scalar):
    """Return `poly` divided by a nonzero `scalar`, consolidated, sorted, zeros removed."""
    scaled = [[coeff / scalar, power] for coeff, power in poly]
    return poly_pop_zeros(poly_sort(poly_consolidate(scaled)))
def poly_poly_division(poly1, poly2):
    """Univariate polynomial long division: return (quotient, remainder).

    Repeatedly divides the leading term of the remainder by the leading
    term of `poly2` until the remainder's degree drops below the divisor's.
    """
    quotient = []
    remainder = poly1
    divisor_degree = poly_find_degree(poly2, 1)
    divisor_lc = poly_leading_coeff(poly2)
    while poly_find_degree(remainder, 1) >= divisor_degree:
        remainder = poly_remove_zeros(remainder)
        s_coeff = poly_leading_coeff(remainder) / divisor_lc
        s_degree = []
        # NOTE(review): `len(remainder[1])` indexes the second *term* (always
        # length 2), not the exponent vector — this looks like it should be
        # `len(remainder[0][1])`. Behaves the same only for univariate input.
        for i in range(len(remainder[1])):
            s_degree.append(remainder[0][1][i] - poly2[0][1][i])
        s = [s_coeff, s_degree]
        quotient.append(s)
        remainder = poly_subtraction(remainder, poly_poly_multiplication([s], poly2))
    return quotient, remainder
def partial_pr_derivative(poly, variables, wrt):
    """Differentiate `poly` with respect to the variable named `wrt`.

    Terms constant in `wrt` are dropped. Raises TypeError when `wrt` is not
    one of `variables`.
    """
    if wrt not in variables:
        raise TypeError("Invalid variable to differentiate with respect to.")
    # Match the original lookup, which kept the LAST occurrence of `wrt`.
    var_index = len(variables) - 1 - variables[::-1].index(wrt)
    derivative = []
    for coeff, powers in poly:
        exponent = powers[var_index]
        if exponent != 0:
            new_powers = [p - 1 if k == var_index else p for k, p in enumerate(powers)]
            derivative.append([coeff * exponent, new_powers])
    return derivative
def pr_indef_integral(poly, variables, wrt, initial_input, initial_value):
    """Antiderivative of `poly` with respect to `wrt`, with a fixed constant.

    The integration constant is chosen so the primitive evaluates to
    `initial_value` at the point `initial_input` (one coordinate per
    variable); it is appended as a final constant term. Raises TypeError
    when `wrt` is not one of `variables`.
    """
    var_index = -1
    for i in range(len(variables)):
        # Bug fix: removed a stray debug print(variables[i]) left in the
        # original lookup loop.
        if wrt == variables[i]:
            var_index = i
    if var_index >= 0:
        # Raise the `wrt` exponent of each term by one and divide the
        # coefficient by the new exponent.
        primitive = []
        for j in poly:
            power = []
            for k in range(len(j[1])):
                if k == var_index:
                    power.append(j[1][k] + 1)
                else:
                    power.append(j[1][k])
            primitive.append([j[0] / (j[1][var_index] + 1), power])
        # Solve for the constant: initial_value - primitive(initial_input).
        constant = initial_value
        for j in primitive:
            term = j[0]
            for k in range(len(j[1])):
                term *= initial_input[k] ** j[1][k]
            constant -= term
        zero = []
        for _ in range(len(poly[0][1])):
            zero.append(0.0)
        primitive.append([constant, zero])
    else:
        raise TypeError("Invalid variable to integrate with respect to.")
    return primitive
def pr_definite_integral(poly, variables, wrt, start, end):
    """Definite integral of `poly` over `wrt` from `start` to `end`.

    Returns a scalar when the result collapses to a single term; otherwise
    the remaining polynomial (list of terms) is returned unchanged.
    Raises TypeError when `wrt` is not one of `variables`.
    """
    var_index = -1
    for i in range(len(variables)):
        if wrt == variables[i]:
            var_index = i
    if var_index >= 0:
        # Build the antiderivative term-by-term.
        primitive = []
        for j in poly:
            power = []
            for k in range(len(j[1])):
                if k == var_index:
                    power.append(j[1][k] + 1)
                else:
                    power.append(j[1][k])
            primitive.append([j[0] / (j[1][var_index] + 1), power])
        # Evaluate the primitive at both bounds, substituting the bound
        # for `wrt` (its exponent becomes 0 in the resulting terms).
        start_primitive = []
        end_primitive = []
        for j in primitive:
            power = []
            for k in range(len(j[1])):
                if k == var_index:
                    power.append(0.0)
                else:
                    power.append(j[1][k])
            start_primitive.append([j[0] * (start ** j[1][var_index]), power])
            end_primitive.append([j[0] * (end ** j[1][var_index]), power])
        result = poly_subtraction(end_primitive, start_primitive)
        # Collapse a single remaining term to its bare coefficient.
        if len(result) == 1:
            result = result[0][0]
    else:
        raise TypeError("Invalid variable to integrate with respect to.")
    return result
| StarcoderdataPython |
3208782 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: find_in_wikipedia
:platform: Unix
:synopsis: the top-level submodule of Dragonfire.commands that contains the classes related to Dragonfire's simple if-else struct of searching in wikipedia ability.
.. moduleauthors:: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
import re # Regular expression operations
import wikipedia # Python library that makes it easy to access and parse data from Wikipedia
import wikipedia.exceptions # Exceptions of wikipedia library
import requests.exceptions # HTTP for Humans
from ava.utilities import nostderr # Submodule of Dragonfire to provide various utilities
class FindInWikiCommand():
    """Class to contains searching in wikipedia process with simply if-else struct.
    """

    def first_compare(self, doc, h, user_answering, userin, user_prefix):
        """Handle a "search/find ... wikipedia" command.

        Extracts the search query from the parsed command, fetches the top
        Wikipedia page, opens it in a browser and speaks its content. On a
        DisambiguationError, stores up to three options in `user_answering`
        so second_compare() can resolve the user's choice.

        Args:
            doc:                doc of com from __init__.py
            h:                  doc helper from __init__.py
            user_answering:     User answering string array.
            userin:             :class:`ava.utilities.TextToAction` instance.

        Keyword Args:
            user_prefix:        user's preferred titles.
        """
        if (h.check_lemma("search") or h.check_lemma("find")) and h.check_lemma("wikipedia"):
            with nostderr():
                # Build the query from every token that is not part of the
                # command phrase itself and not a stop word.
                search_query = ""
                for token in doc:
                    if not (
                            token.lemma_ == "search" or token.lemma_ == "find" or token.lemma_ == "wikipedia" or token.is_stop):
                        search_query += ' ' + token.text
                search_query = search_query.strip()
                if search_query:
                    try:
                        wikiresult = wikipedia.search(search_query)
                        if len(wikiresult) == 0:
                            userin.say(
                                "Sorry, " + user_prefix + ". But I couldn't find anything about " + search_query + " in Wikipedia.")
                            return True
                        wikipage = wikipedia.page(wikiresult[0])
                        # Strip non-ASCII characters and parenthesised asides
                        # before speaking the article text.
                        wikicontent = "".join([i if ord(i) < 128 else ' ' for i in wikipage.content])
                        wikicontent = re.sub(r'\([^)]*\)', '', wikicontent)
                        cmds = [{'distro': 'All', 'name': ["sensible-browser", wikipage.url]}]
                        userin.execute(cmds, search_query)
                        return userin.say(wikicontent, cmd=["sensible-browser", wikipage.url])
                    except requests.exceptions.ConnectionError:
                        cmds = [{'distro': 'All', 'name': [" "]}]
                        userin.execute(cmds, "Wikipedia connection error.")
                        return userin.say("Sorry, " + user_prefix + ". But I'm unable to connect to Wikipedia servers.")
                    except wikipedia.exceptions.DisambiguationError as disambiguation:
                        # Remember the pending disambiguation so the next
                        # user utterance can pick one of the options.
                        user_answering['status'] = True
                        user_answering['for'] = 'wikipedia'
                        user_answering['reason'] = 'disambiguation'
                        user_answering['options'] = disambiguation.options[:3]
                        notify = "Wikipedia disambiguation. Which one of these you meant?:\n - " + disambiguation.options[0]
                        msg = user_prefix + ", there is a disambiguation. Which one of these you meant? " + disambiguation.options[0]
                        for option in disambiguation.options[1:3]:
                            msg += ", or " + option
                            notify += "\n - " + option
                        notify += '\nSay, for example: "THE FIRST ONE" to choose.'
                        cmds = [{'distro': 'All', 'name': [" "]}]
                        userin.execute(cmds, notify)
                        return userin.say(msg)
                    except BaseException:
                        # NOTE(review): broad catch silently swallows any
                        # other failure from the wikipedia library.
                        pass
        return None

    def second_compare(self, com, user_answering, userin, user_prefix):
        """Resolve a pending Wikipedia disambiguation from first_compare().

        Args:
            com (str):          User's command.
            user_answering:     User answering string array.
            userin:             :class:`ava.utilities.TextToAction` instance.
            user_prefix:        user's preferred titles.
        """
        if user_answering['status'] and user_answering['for'] == 'wikipedia':
            if com.startswith("FIRST") or com.startswith("THE FIRST") or com.startswith("SECOND") or com.startswith(
                    "THE SECOND") or com.startswith("THIRD") or com.startswith("THE THIRD"):
                user_answering['status'] = False
                # Map the spoken ordinal onto the stored options index.
                selection = None
                if com.startswith("FIRST") or com.startswith("THE FIRST"):
                    selection = 0
                elif com.startswith("SECOND") or com.startswith("THE SECOND"):
                    selection = 1
                elif com.startswith("THIRD") or com.startswith("THE THIRD"):
                    selection = 2
                with nostderr():
                    search_query = user_answering['options'][selection]
                    try:
                        wikiresult = wikipedia.search(search_query)
                        if len(wikiresult) == 0:
                            userin.say(
                                "Sorry, " + user_prefix + ". But I couldn't find anything about " + search_query + " in Wikipedia.")
                            return True
                        wikipage = wikipedia.page(wikiresult[0])
                        wikicontent = "".join([i if ord(i) < 128 else ' ' for i in wikipage.content])
                        wikicontent = re.sub(r'\([^)]*\)', '', wikicontent)
                        cmds = [{'distro': 'All', 'name': ["sensible-browser", wikipage.url]}]
                        userin.execute(cmds, search_query)
                        return userin.say(wikicontent, cmd=["sensible-browser", wikipage.url])
                    except requests.exceptions.ConnectionError:
                        cmds = [{'distro': 'All', 'name': [" "]}]
                        userin.execute(cmds, "Wikipedia connection error.")
                        return userin.say(
                            "Sorry, " + user_prefix + ". But I'm unable to connect to Wikipedia servers.")
                    except Exception:
                        return False
        return None
| StarcoderdataPython |
111632 | <gh_stars>0
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from patrole_tempest_plugin import rbac_rule_validation
from patrole_tempest_plugin.tests.api.image import rbac_base
class ImageNamespacesObjectsRbacTest(rbac_base.BaseV2ImageRbacTest):
    """RBAC tests for Glance metadef-object policies.

    Each test performs any required setup as the admin role, switches to
    the RBAC role under test, then exercises exactly one policy action.
    """

    @rbac_rule_validation.action(service="glance",
                                 rule="add_metadef_object")
    @decorators.idempotent_id("772156f2-e33d-432e-8521-12385746c2f0")
    def test_create_metadef_object_in_namespace(self):
        """Create Metadef Object Namespace Test

        RBAC test for the glance add_metadef_object policy
        """
        namespace = self.create_namespace()
        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        # create a md object, it will be cleaned automatically after
        # cleanup of namespace
        object_name = data_utils.rand_name(
            self.__class__.__name__ + '-test-object')
        self.namespace_objects_client.create_namespace_object(
            namespace['namespace'],
            name=object_name)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.namespace_objects_client.delete_namespace_object,
                        namespace['namespace'], object_name)

    @rbac_rule_validation.action(service="glance",
                                 rule="get_metadef_objects")
    @decorators.idempotent_id("48b50ecb-237d-4909-be62-b6a05c47b64d")
    def test_list_metadef_objects_in_namespace(self):
        """List Metadef Object Namespace Test

        RBAC test for the glance get_metadef_objects policy
        """
        namespace = self.create_namespace()
        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        # list md objects
        self.namespace_objects_client.list_namespace_objects(
            namespace['namespace'])

    @rbac_rule_validation.action(service="glance",
                                 rule="modify_metadef_object")
    @decorators.idempotent_id("cd130b1d-89fa-479c-a90e-498d895fb455")
    def test_update_metadef_object_in_namespace(self):
        """Update Metadef Object Namespace Test

        RBAC test for the glance modify_metadef_object policy
        """
        # Setup (namespace + object) happens before the role toggle.
        namespace = self.create_namespace()
        object_name = data_utils.rand_name(
            self.__class__.__name__ + '-test-object')
        self.namespace_objects_client.create_namespace_object(
            namespace['namespace'],
            name=object_name)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.namespace_objects_client.delete_namespace_object,
                        namespace['namespace'], object_name)
        # Toggle role and modify object
        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        new_name = "Object New Name"
        self.namespace_objects_client.update_namespace_object(
            namespace['namespace'], object_name, name=new_name)

    @rbac_rule_validation.action(service="glance",
                                 rule="get_metadef_object")
    @decorators.idempotent_id("93c61420-5b80-4a0e-b6f3-4ccc6e90b865")
    def test_show_metadef_object_in_namespace(self):
        """Show Metadef Object Namespace Test

        RBAC test for the glance get_metadef_object policy
        """
        namespace = self.create_namespace()
        object_name = data_utils.rand_name(
            self.__class__.__name__ + '-test-object')
        self.namespace_objects_client.create_namespace_object(
            namespace['namespace'],
            name=object_name)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.namespace_objects_client.delete_namespace_object,
                        namespace['namespace'], object_name)
        # Toggle role and get object
        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.namespace_objects_client.show_namespace_object(
            namespace['namespace'],
            object_name)
| StarcoderdataPython |
6503061 | import pytest
from common.constants import LoginConstants
from models.auth import AuthData
class TestAuth:
    """Test suite for the user authentication (login) feature."""

    @pytest.mark.positive
    def test_auth_valid_data(self, app):
        """
        Steps
        1. Open main page
        2. Auth with valid data
        3. Check auth result
        """
        app.open_auth_page()
        # NOTE(review): the password value is a redacted placeholder from the
        # dataset; supply real credentials via config/fixtures instead.
        data = AuthData(login="andrei_inn", password="<PASSWORD>")
        app.login.auth(data)
        assert app.login.is_auth(), "We are not auth"

    @pytest.mark.negative
    def test_auth_invalid_data(self, app):
        """
        Steps
        1. Open main page
        2. Auth with invalid data
        3. Check auth result
        """
        app.open_auth_page()
        # Random credentials are assumed not to exist in the system.
        data = AuthData.random()
        app.login.auth(data)
        assert LoginConstants.AUTH_ERROR == app.login.auth_login_error(), "We are auth!"

    @pytest.mark.negative
    @pytest.mark.parametrize("field", ["login", "password"])
    def test_auth_empty_data(self, app, field):
        """
        Steps
        1. Open main page
        2. Auth with empty data
        3. Check auth result
        """
        app.open_auth_page()
        data = AuthData.random()
        # Blank out one credential field per parametrized run.
        setattr(data, field, None)
        app.login.auth(data)
        assert LoginConstants.AUTH_ERROR == app.login.auth_login_error(), "We are auth!"
| StarcoderdataPython |
9605902 | # Generated by Django 2.2.17 on 2021-03-05 16:22
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles two divergent migration branches of the
    # 'features' app (0028 and 0029). Intentionally performs no schema
    # operations.

    dependencies = [
        ('features', '0028_auto_20210216_1600'),
        ('features', '0029_auto_20210223_2106'),
    ]

    operations = [
    ]
| StarcoderdataPython |
12812880 | import unittest
from machinetranslation.translator import french_to_english
from machinetranslation.translator import english_to_french
# NOTE(review): `null` is defined but never used.
null = ''


class TestTranslator(unittest.TestCase):
    """Unit tests for the English/French machine-translation helpers."""

    def test_f2e(self):
        b = 'Bonjour'
        self.assertEqual(french_to_english(b), 'Hello')
        # NOTE(review): these bare `pass` statements are no-ops left over
        # from editing and can be removed.
        pass
        self.assertNotEqual(french_to_english(b), 'Goodbye')
        pass
        self.assertNotEqual(french_to_english('Null'), '')

    def test_e2f(self):
        h = 'Hello'
        self.assertEqual(english_to_french(h), 'Bonjour')
        pass
        self.assertNotEqual(english_to_french(h), 8)
        pass
        self.assertNotEqual(english_to_french('Null'), '')


if __name__ == "__main__":
    unittest.main()
    # NOTE(review): unittest.main() calls sys.exit() by default, so this
    # print is unreachable unless main(exit=False) is used.
    print('Passed All Tests')
| StarcoderdataPython |
9612839 | from torch.utils.data import Dataset
from PIL import Image
import numpy as np
import torch
class PositionDataset(Dataset):
    """Position encoding dataset"""
    def __init__(self, image_name):
        # Loads a single image, resizes it to height 512 (width scaled to
        # keep aspect ratio) and stores it as a float CHW tensor in [0, 1].
        self.image_name = image_name
        #open routine from fastaiv1
        with open(self.image_name, 'rb') as f:
            self.img = Image.open(f)
            w, h = self.img.size
            neww, newh = int(512*w/h), 512
            self.width, self.height = neww, newh
            self.img = self.img.resize((neww, newh), Image.BICUBIC)
            self.img = np.asarray(self.img.convert('RGB'))
        # The two transposes compose to (H, W, C) -> (C, H, W).
        self.img = np.transpose(self.img, (1, 0, 2))
        self.img = np.transpose(self.img, (2, 1, 0))
        self.img = torch.from_numpy(self.img.astype(np.float32, copy=False) )
        self.img.div_(255.)
        # Coordinate grid is built lazily on first __getitem__ call.
        self.p = None

    def __len__(self):
        # Length is hard-coded so a DataLoader can draw repeated samples
        # of the single image.
        return 10 # for now just single image but hardcoding for larger batches

    def __getitem__(self, idx):
        # Returns (positions, image): positions is a cached 2xHxW tensor of
        # normalized (x, y) coordinates in [0, 1); idx is ignored.
        if self.p is None:
            coordh = np.linspace(0,1, self.height, endpoint=False)
            coordw = np.linspace(0,1, self.width, endpoint=False)
            self.p = np.stack(np.meshgrid(coordw, coordh), 0)
            self.p = torch.from_numpy(self.p.astype(np.float32, copy=False))
        return self.p, self.img
| StarcoderdataPython |
265517 | # https://www.hackerrank.com/contests/w28/challenges/boat-trip
# Author : <NAME>
#!/bin/python3
# NOTE(review): `sys` and `n` (the passenger count) are read but never used.
import sys


# Read n (passengers), c (boat capacity), m (number of boats), then the
# list of group sizes p.
n,c,m = input().strip().split(' ')
n,c,m = [int(n),int(c),int(m)]
p = list(map(int, input().strip().split(' ')))
#print("max", max(p))
# The trip is possible when the largest group fits in the total boat
# capacity (m boats of capacity c) — per the HackerRank problem statement.
if max(p) <= m*c:
    print("Yes")
else:
    print("No")
| StarcoderdataPython |
5003767 | # Problem Set 4B
# Name: <NAME>
# Collaborators: None
# Time Spent: 02:30
import string
from copy import deepcopy
### HELPER CODE ###
def load_words(file_name):
    '''
    file_name (string): the name of the file containing
    the list of words to load

    Returns: a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    '''
    # Bug fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    wordlist = []
    with open(file_name, 'r') as in_file:
        for line in in_file:
            # Split on single spaces, matching the original's word format.
            wordlist.extend([word.lower() for word in line.split(' ')])
    return wordlist
def is_word(word_list, word):
    '''
    Determines if word is a valid word, ignoring
    capitalization and punctuation

    word_list (list): list of words in the dictionary.
    word (string): a possible word.

    Returns: True if word is in word_list, False otherwise

    Example:
    >>> is_word(word_list, 'bat') returns
    True
    >>> is_word(word_list, 'asdf') returns
    False
    '''
    # Normalise case, then trim surrounding punctuation before the lookup.
    cleaned = word.lower().strip(" !@#$%^&*()-_+={}[]|\:;'<>?,./\"")
    return cleaned in word_list
def get_story_string():
    """
    Returns: a story in encrypted text.
    """
    # Read the whole encrypted story from the working directory.
    with open("story.txt", "r") as story_file:
        return str(story_file.read())
### END HELPER CODE ###
WORDLIST_FILENAME = 'words.txt'
class Message(object):
    def __init__(self, text):
        '''
        Initializes a Message object

        text (string): the message's text

        a Message object has two attributes:
            self.message_text (string, determined by input text)
            self.valid_words (list, determined using helper function load_words)
        '''
        self.message_text = text
        self.valid_words = load_words(WORDLIST_FILENAME)

    def get_message_text(self):
        '''
        Used to safely access self.message_text outside of the class

        Returns: self.message_text
        '''
        return self.message_text

    def get_valid_words(self):
        '''
        Used to safely access a copy of self.valid_words outside of the class.
        This helps you avoid accidentally mutating class attributes.

        Returns: a COPY of self.valid_words
        '''
        return self.valid_words[:]

    def build_shift_dict(self, shift):
        '''
        Creates a dictionary that can be used to apply a cipher to a letter.
        The dictionary maps every uppercase and lowercase letter to a
        character shifted down the alphabet by the input shift. The dictionary
        has 52 keys of all the uppercase letters and all the lowercase
        letters only.

        shift (integer): the amount by which to shift every letter of the
        alphabet. 0 <= shift < 26

        Returns: a dictionary mapping a letter (string) to
                 another letter (string).
        '''
        assert shift >= 0 and shift < 26, "Can't shift more than 25 number!"
        # Bug fix: the docstring promises 52 keys (both cases), but the
        # original returned only the 26 lowercase mappings. Build both
        # alphabets, wrapping around with modular indexing.
        shift_map = {}
        for alphabet in (string.ascii_lowercase, string.ascii_uppercase):
            for i, letter in enumerate(alphabet):
                shift_map[letter] = alphabet[(i + shift) % len(alphabet)]
        return shift_map

    def apply_shift(self, shift):
        '''
        Applies the Caesar Cipher to self.message_text with the input shift.
        Creates a new string that is self.message_text shifted down the
        alphabet by some number of characters determined by the input shift

        shift (integer): the shift with which to encrypt the message.
        0 <= shift < 26

        Returns: the message text (string) in which every character is shifted
             down the alphabet by the input shift
        '''
        shift_dict = self.build_shift_dict(shift)
        # Letters (either case) are mapped through the shift dictionary;
        # all other characters pass through unchanged.
        return ''.join(shift_dict.get(char, char) for char in self.message_text)
# hello = Message("Hello, World!")
# print(hello.build_shift_dict(4))
# print(hello.apply_shift(4))
# print(hello.get_message_text())
class PlaintextMessage(Message):
    def __init__(self, text, shift):
        '''
        Initializes a PlaintextMessage object

        text (string): the message's text
        shift (integer): the shift associated with this message

        A PlaintextMessage object inherits from Message and has five attributes:
            self.message_text (string, determined by input text)
            self.valid_words (list, determined using helper function load_words)
            self.shift (integer, determined by input shift)
            self.encryption_dict (dictionary, built using shift)
            self.message_text_encrypted (string, created using shift)
        '''
        super().__init__(text)
        self.shift = shift
        self.encryption_dict = super().build_shift_dict(self.shift)
        self.message_text_encrypted = super().apply_shift(self.shift)

    def get_shift(self):
        '''
        Used to safely access self.shift outside of the class

        Returns: self.shift
        '''
        return self.shift

    def get_encryption_dict(self):
        '''
        Used to safely access a copy self.encryption_dict outside of the class

        Returns: a COPY of self.encryption_dict
        '''
        return deepcopy(self.encryption_dict)

    def get_message_text_encrypted(self):
        '''
        Used to safely access self.message_text_encrypted outside of the class

        Returns: self.message_text_encrypted
        '''
        return self.message_text_encrypted

    def change_shift(self, shift):
        '''
        Changes self.shift of the PlaintextMessage and updates other
        attributes determined by shift.

        shift (integer): the new shift that should be associated with this message.
        0 <= shift < 26

        Returns: nothing
        '''
        # Bug fix: the original called Message.build_shift_dict(self.shift)
        # and Message.apply_shift(self.shift) as unbound methods with no
        # `self` argument, which raises TypeError at runtime. Dispatch
        # through the instance instead.
        self.shift = shift
        self.encryption_dict = self.build_shift_dict(shift)
        self.message_text_encrypted = self.apply_shift(shift)
class CiphertextMessage(Message):
    def __init__(self, text):
        '''
        Initializes a CiphertextMessage object

        text (string): the message's text

        a CiphertextMessage object has two attributes:
            self.message_text (string, determined by input text)
            self.valid_words (list, determined using helper function load_words)
        '''
        super().__init__(text)

    def decrypt_message(self):
        '''
        Decrypt self.message_text by trying every possible shift value
        and find the "best" one. We will define "best" as the shift that
        creates the maximum number of real words when we use apply_shift(shift)
        on the message text. If s is the original shift value used to encrypt
        the message, then we would expect 26 - s to be the best shift value
        for decrypting it.

        Returns: a tuple of the best shift value used to decrypt the message
        and the decrypted message text using that shift value
        '''
        best_shift = 0
        best_score = -1
        for candidate in range(26):
            decoded_words = self.apply_shift(candidate).split()
            valid_count = sum(1 for w in decoded_words if is_word(self.valid_words, w))
            # Strict '>' keeps the smallest shift among equally good ones,
            # matching the original's stable-sort tie-breaking.
            if valid_count > best_score:
                best_score = valid_count
                best_shift = candidate
        return (best_shift, self.apply_shift(best_shift))
# test1 = Message("This is the greatest day of me life!")
# encrypted_message = test1.apply_shift(19)
# cipher = CiphertextMessage(encrypted_message)
# print(cipher.decrypt_message())
if __name__ == '__main__':
    #TODO: WRITE YOUR TEST CASES HERE
    # Smoke tests printing expected vs. actual output side by side.
    #Test case (PlaintextMessage)
    plaintext1 = PlaintextMessage('hello', 2)
    print("\n---PlainTest 1---")
    print('Expected Output: jgnnq')
    print('Actual Output:', plaintext1.get_message_text_encrypted())

    plaintext2 = PlaintextMessage('Hello World, this is me Vikram!', 12)
    print("\n---PlainTest 2---")
    print('Expected Output: Tqxxa Iadxp, ftue ue yq Huwdmy!')
    output2 = plaintext2.get_message_text_encrypted()
    print('Actual Output:', output2)

    print("\n---PlainTest 3---")
    plaintext3 = PlaintextMessage('Would you like to go on a date with me?', 17)
    print('Expected Output: Nflcu pfl czbv kf xf fe r urkv nzky dv?')
    output3 = plaintext3.get_message_text_encrypted()
    print('Actual Output:', output3)

    #Test case (CiphertextMessage)
    # Decryption of a message encrypted with shift s should report 26 - s.
    ciphertext1 = CiphertextMessage('jgnnq')
    print("\n---CiperTest 1---")
    print('Expected Output:', (24, 'hello'))
    print('Actual Output:', ciphertext1.decrypt_message())

    ciphertext2 = CiphertextMessage(output2)
    print("\n---CiperTest 2---")
    print('Expected Output:', (26-12, 'Hello World, this is me Vikram!'))
    print('Actual Output:', ciphertext2.decrypt_message())

    ciphertext3 = CiphertextMessage(output3)
    print("\n---CiperTest 3---")
    print('Expected Output:', (26-17, 'Would you like to go on a date with me?'))
    print('Actual Output:', ciphertext3.decrypt_message(), end="\n\n")

    #TODO: best shift value and unencrypted story
    story_text = get_story_string()
    cipherStory = CiphertextMessage(story_text)
    print("Output:", cipherStory.decrypt_message())
8121703 | <gh_stars>0
#! /usr/bin/python
from Tkinter import *
from Adafruit_BME280 import *
sensor = BME280(t_mode=BME280_OSAMPLE_8, p_mode=BME280_OSAMPLE_8, h_mode=BME280_OSAMPLE_8)
root = Tk()
frame=Frame(root)
frame.pack()
degrees = sensor.read_temperature()
pascals = sensor.read_pressure()
hectopascals = pascals / 100
humidity = sensor.read_humidity()
label1=Label(frame,text="Temperatura: " + str(degrees) + " C degrees")
label2=Label(frame,text="Precion: " + str(hectopascals) + " hpa")
label3=Label(frame,text="Humedad: " + str(humidity))
label1.pack()
label2.pack()
label3.pack()
time.sleep(3)
root.mainloop() | StarcoderdataPython |
11381267 | from mock import patch
from nose.tools import assert_equals
from prompter import yesno
# Each entry is (default answer, simulated user input, expected return value).
YESNO_COMBINATIONS = [
    # default 'yes': empty input and any yes-spelling -> True
    ('yes', 'yes', True),
    ('yes', 'YES', True),
    ('yes', 'Yes', True),
    ('yes', 'y', True),
    ('yes', 'Y', True),
    ('yes', '', True),
    ('yes', 'no', False),
    ('yes', 'NO', False),
    ('yes', 'No', False),
    ('yes', 'n', False),
    ('yes', 'N', False),
    # default 'no': empty input and any no-spelling -> True
    ('no', 'yes', False),
    ('no', 'YES', False),
    ('no', 'Yes', False),
    ('no', 'y', False),
    ('no', 'Y', False),
    ('no', '', True),
    ('no', 'no', True),
    ('no', 'NO', True),
    ('no', 'No', True),
    ('no', 'n', True),
    ('no', 'N', True),
    ]
def test_yesno_combinations():
    """Nose-style test generator: yields one check per combination above."""
    for (default, value, expected_result) in YESNO_COMBINATIONS:
        yield yesno_checker, default, value, expected_result
def yesno_checker(default, value, expected_result):
    """Assert ``yesno`` returns *expected_result* for a default/input pair."""
    # Patch the input source so ``yesno`` sees *value* as the typed answer.
    with patch('prompter.get_input', return_value=value):
        returned_value = yesno('Does this work?', default=default)
        assert_equals(returned_value, expected_result)
#@patch('prompter.get_input', return_value=' ')
#def test_prompt_returns_default_with_only_whitespace_input(mock_raw_input):
# returned_value = prompt('What is your name?', default='Dave')
# assert_equals(returned_value, 'Dave')
| StarcoderdataPython |
1950117 | ''' This plotter expects 3-tuples
(median, firstQuantile, thirdQuantile) as input.
The output is the default Boxerrorbar plot
from gnuplot.
'''
import os
import sys
import subprocess
separator = '|'
def _writePlotHeaderData(gnuplotFile, outputFileName, config, additionalCommands=None):
    """Write the shared gnuplot preamble (terminal, output file, styling).

    For the individual commands see the gnuplot manual and
    https://stackoverflow.com/questions/62848395/horizontal-bar-chart-in-gnuplot
    """
    preamble = [
        "set terminal pdfcairo enhanced color size 7cm, 7cm",
        'set output "{}"'.format(outputFileName),
        'set datafile separator "{}"'.format(separator),
        'set style fill solid',
        'unset key',
        'set boxwidth 0.4 relative',
        'set title "{}"'.format(config["plotTitle"]),
    ]
    if additionalCommands:
        preamble.append(additionalCommands)
    for command in preamble:
        print(command, file=gnuplotFile)
def plot(valuesOfSequences, outputPath, experimentId, config):
    """Write a <experimentId>.dat data file plus a gnuplot script, then run gnuplot.

    valuesOfSequences -- sequence id -> [(parameter, (median, q1, q3))]
    outputPath        -- directory receiving the generated .dat/.txt/.pdf files
    experimentId      -- base name for all generated files
    config            -- plot settings (names, colors, ordering, title, extras)
    """
    # initializations
    gnuplotFileName = experimentId + '.txt'
    dataFileName = f'{experimentId}.dat'
    colors = config.get('plotSequenceColors', {})
    # Create simple CSV file that can be referenced in gnuplot.
    with open(os.path.join(outputPath, dataFileName), 'w') as dataFile:
        # find the correct order of the sequences to plot
        orderSequences = None
        if 'plotArrangementSequences' in config:
            orderSequences = config.get('plotArrangementSequences')
        else:
            orderSequences = valuesOfSequences.keys()
        # write data according to the order
        for sequenceId in orderSequences:
            if sequenceId in valuesOfSequences.keys():
                sequence = valuesOfSequences[sequenceId]
                parameter, value = sequence[0]
                #dataFile.write(config['plotSequenceNames'][sequenceId] + separator + str(value[0]) + separator +
                #               str(value[1]) + separator + str(value[2]) + separator + str(colors.get(sequenceId, 1)) + '\n')
                # Row format: name|median|q1|q3|color
                dataFile.write(config['plotSequenceNames'][sequenceId] + separator + str(value[0]) + separator +
                               str(value[1]) + separator + str(value[2]) + separator + str(colors.get(sequenceId, 1)) + '\n')
    # create file for gnuplot and execute gnuplot
    gnuPlotFilePath = os.path.join(outputPath, gnuplotFileName)
    with open(gnuPlotFilePath, 'w') as gnuPlotFile:
        _writePlotHeaderData(
            gnuplotFile = gnuPlotFile,
            outputFileName = gnuplotFileName.replace('.txt', '.pdf'),
            config = config,
            additionalCommands = config.get('plotAdditionalGnuplotHeaderCommands') )
        plot_commands = []
        #plot_commands.append(f'"{dataFileName}" using ($0):2:3:4:xticlabels(1) with boxes linecolor variable')
        #plot_commands.append(f'"{dataFileName}" using ($0):2:5:xticlabels(1) with boxes linecolor variable, "" using ($0):2:3:4 with yerrorbars linecolor 0')
        # Boxes colored per sequence plus quartile error bars on top.
        plot_commands.append(f'"{dataFileName}" using ($0):2:5:xticlabels(1) with boxes linecolor variable, "" using ($0):2:3:4 with yerrorbars linecolor 0')
        print("plot " + ",\\\n".join(plot_commands), file=gnuPlotFile)
    try:
        subprocess.run(args=['gnuplot', gnuplotFileName], cwd=outputPath)
    except Exception as e:
        # gnuplot missing/broken is non-fatal: the .txt script is still on disk.
        print('Warning: Failed to run "gnuplot", run it manually.', file=sys.stderr)
        print(e, file=sys.stderr)
| StarcoderdataPython |
9667008 | #coding=utf8
from setuptools import setup
# Read the long description from the README; installation proceeds with an
# empty description if the file is missing or unreadable.
try:
    # Context manager guarantees the file handle is closed (the original
    # left the handle returned by open() dangling).
    with open('README.md', encoding='utf8') as readme:
        long_description = readme.read()
except Exception as e:
    print(e)
    long_description = ''
setup(
    name='myrsa',
    version='0.0.1',
    description='Simple use of RSA for asymmetric encryption and signature | 简单使用 rsa 进行非对称加密和签名',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='tao.py',
    author_email='<EMAIL>',
    maintainer='tao.py',
    maintainer_email='<EMAIL>',
    install_requires=['rsa'],
    license='MIT License',
    py_modules=['myrsa'],
    platforms=["all"],
    url='https://github.com/taojy123/myrsa',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries'
    ],
)
| StarcoderdataPython |
6698653 | # Exceptions
class CheckerException(Exception):
    """Base class for every checker-game error."""

    def __str__(self):
        return 'Base checker Exception'


class WrongColorError(CheckerException):
    """Raised when a piece color other than black or white is chosen."""

    def __repr__(self):
        return 'Wrong color'

    def __str__(self):
        return 'Wrong color - only black or white for choose'


class PositionError(CheckerException):
    """Raised when a board position does not exist."""

    def __str__(self):
        return 'Unreal position'

    __repr__ = __str__


class WrongMoveError(CheckerException):
    """Raised for an illegal move; *move* names the bad end ('from' or 'to')."""

    def __init__(self, move):
        self.move = move

    def __str__(self):
        return 'Unreal "{}" move'.format(self.move)

    __repr__ = __str__


class GameInternalError(CheckerException):
    """Raised on an internal inconsistency while the game is being played."""

    def __str__(self):
        return 'Wrong game playing'

    __repr__ = __str__
| StarcoderdataPython |
1954720 | <filename>math_question/math_so.py<gh_stars>1-10
import numpy as np
import pandas as pd
import tensorflow as tf
import ops
# Fixed seeds for reproducible runs.
tf.set_random_seed(0)
np.random.seed(0)
np.set_printoptions(precision=5, linewidth=120, suppress=True)
# 10x10 Hilbert matrix (entry 1/(i+j+1)) and a constant right-hand side.
mat_val = np.array([[1 / (i + j + 1) for i in range(10)] for j in range(10)])
rhs_val = 0.05 * np.ones([10, 1])
mat = tf.constant(value=mat_val, dtype=tf.float32)
rhs = tf.constant(value=rhs_val, dtype=tf.float32)
# Solve mat @ x = rhs by gradient descent on the squared residual.
x = tf.Variable(initial_value=tf.zeros_like(rhs), dtype=tf.float32)
loss = tf.reduce_sum(tf.square(ops.matmul(mat, x) - rhs))
train_op = tf.train.GradientDescentOptimizer(1e-2).minimize(loss)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    df = pd.DataFrame(columns=["step", "loss"])
    for i in range(100):
        if i % 5 == 0:
            # Record and print the loss every 5 gradient steps.
            loss_val = sess.run(loss)
            df.loc[i] = [str(i), str(loss_val)]
            print("step:{}\tloss:{}".format(i, loss_val))
        sess.run(train_op)
df.to_csv("./logs/csv/math_so.csv") | StarcoderdataPython |
3401646 | <reponame>yupeekiyay/ofta365
from django.urls import path
from . import views
app_name = 'events'
# Event detail/update/delete routes; every view is looked up by the event slug.
urlpatterns = [
    path('<slug>/', views.EventDetailView.as_view(), name='event-detail'),
    path('<slug>/update/', views.EventUpdateView.as_view(), name='event-update'),
    path('<slug>/delete/', views.EventDeleteView.as_view(), name='event-delete'),
] | StarcoderdataPython |
3591422 | '''
A quick and dirty skeleton for prototyping GLSL shaders. It consists of a
self contained slice-based volume renderer.
'''
import numpy, sys, wx
from OpenGL.GL import *
from OpenGL.GLU import *
from numpy import array
from transfer_function import TransferFunctionWidget
from wx.glcanvas import GLCanvas
# The skeleton
def box_side(w=1.0, z=0.0):
    '''Return the four edges of a w-by-w square outline at depth z.

    The result is a flat list of 8 vertices, one (start, end) pair per
    edge, suitable for drawing with GL_LINES.
    '''
    corners = [[0.0, 0.0, z], [w, 0.0, z], [w, w, z], [0.0, w, z]]
    edges = []
    for index in range(len(corners)):
        edges.append(corners[index])
        edges.append(corners[(index + 1) % len(corners)])
    return edges
def gen_plane(t, p=0.0, w=1.0):
    ''' Creates front facing planes

    t -- plane orientation: 'yz', 'xz' or 'xy'
    p -- position of the plane along the remaining axis
    w -- edge length of the (square) plane

    Returns the four corner vertices of the quad; raises Exception for an
    unknown orientation.
    '''
    try:
        return { 'yz': [(p, 0, 0), (p, w, 0), (p, w, w), (p, 0, w)],
                 'xz': [(0, p, w), (w, p, w), (w, p, 0), (0, p, 0)],
                 'xy': [(0, 0, p), (w, 0, p), (w, w, p), (0, w, p)]}[t]
    except KeyError:
        # Fix: the original used the Python-2-only statement form
        # ``raise Exception, msg`` which is a SyntaxError on Python 3;
        # the call form below is valid on both interpreters.
        raise Exception('What kind of planes do you want?')
# Unit-cube wireframe vertices: first the four z-axis edges as
# (bottom, top) vertex pairs for GL_LINES...
box = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0],
       [1.0, 0.0, 0.0], [1.0, 0.0, 1.0],
       [1.0, 1.0, 0.0], [1.0, 1.0, 1.0],
       [0.0, 1.0, 0.0], [0.0, 1.0, 1.0]]
# ...then the square outlines of the z=0 and z=1 faces.
box.extend(box_side())
box.extend(box_side(z=1.0))
# Number of slice planes generated per axis for the volume rendering.
plane_count = 1000
def compile_program(vertex_src, fragment_src):
    '''
    Compile a Shader program given the vertex
    and fragment sources

    Returns the linked GL program handle.  On a compile failure the
    driver's info log (if any) is printed to stderr and ValueError is
    raised.
    '''
    program = glCreateProgram()
    shaders = []
    for shader_type, src in ((GL_VERTEX_SHADER, vertex_src),
                             (GL_FRAGMENT_SHADER, fragment_src)):
        shader = glCreateShader(shader_type)
        glShaderSource(shader, src)
        glCompileShader(shader)
        shaders.append(shader)
        status = glGetShaderiv(shader, GL_COMPILE_STATUS)
        if not status:
            # Dump the driver's compile log before bailing out.
            if glGetShaderiv(shader, GL_INFO_LOG_LENGTH) > 0:
                log = glGetShaderInfoLog(shader)
                print >> sys.stderr, log.value
            glDeleteShader(shader)
            raise ValueError, 'Shader compilation failed'
        glAttachShader(program, shader)
    glLinkProgram(program)
    # Shader objects may be flagged for deletion once linked into the program.
    for shader in shaders:
        glDeleteShader(shader)
    return program
class TransferGraph(wx.Dialog):
    '''
    Floating dialog hosting the TransferFunctionWidget used to edit the
    volume transfer function.
    '''
    def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE):
        wx.Dialog.__init__(self, parent, id, title, pos, size, style)
        self.mainPanel = wx.Panel(self, -1)
        # Create some CustomCheckBoxes
        self.t_function = TransferFunctionWidget(self.mainPanel, -1, "", size=wx.Size(300, 150))
        # Layout the items with sizers
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer.Add(self.mainPanel, 1, wx.EXPAND)
        self.SetSizer(mainSizer)
        mainSizer.Layout()
class VolumeRenderSkeleton(GLCanvas):
    '''
    Self contained slice-based volume renderer for prototyping GLSL
    shaders.  Draws axis-aligned quads through a unit box, binds a 1D
    transfer-function texture, and hot-reloads shader sources from disk
    (press 'r'); 'l' toggles the lighting uniform.
    '''
    def __init__(self, parent):
        GLCanvas.__init__(self, parent, -1, attribList=[wx.glcanvas.WX_GL_DOUBLEBUFFER])
        self.t_graph = TransferGraph(self)
        wx.EVT_PAINT(self, self.OnDraw)
        wx.EVT_SIZE(self, self.OnSize)
        wx.EVT_MOTION(self, self.OnMouseMotion)
        wx.EVT_LEFT_DOWN(self, self.OnMouseLeftDown)
        wx.EVT_LEFT_UP(self, self.OnMouseLeftUp)
        wx.EVT_ERASE_BACKGROUND(self, lambda e: None)
        wx.EVT_CLOSE(self, self.OnClose)
        wx.EVT_CHAR(self, self.OnKeyDown)
        self.SetFocus()
        # So we know when new values are added / changed on the tgraph
        self.t_graph.Connect(-1, -1, wx.wxEVT_COMMAND_SLIDER_UPDATED, self.OnTGraphUpdate)
        self.init = False
        self.rotation_y = 0.0
        self.rotation_x = 0.0
        self.prev_y = 0
        self.prev_x = 0
        self.mouse_down = False
        self.width = 400
        self.height = 400
        # Fallback shader sources used when the files below are missing.
        self.fragment_shader_src = '''
        uniform sampler1D TransferFunction;
        uniform sampler3D VolumeData;
        void main(void)
        {
            gl_FragColor = vec4(1.0, 0.0, 0.0, 0.0);
        }
        '''
        self.vertex_shader_src = '''
        void main(void)
        {
            gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
        }
        '''
        self.fragment_src_file = 'earth.f.c'
        self.vertex_src_file = 'earth.v.c'
        self.lighting = False
        self.light_count = 1
        # List of textures that need to be freed
        self.texture_list = []
        # List of VBOs that need to be freed
        self.buffers_list = []
        # This is the transfer graph
        self.t_graph.Show()
    def OnTGraphUpdate(self, event):
        # Transfer-function widget changed: re-upload the 1D texture.
        self.UpdateTransferFunction()
        self.Refresh()
    def OnDraw(self, event):
        # Paint handler: wireframe box + shader-textured slice planes.
        self.SetCurrent()
        if not self.init:
            self.InitGL()
            self.init = True
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glTranslate(0.0, 0.0, -2.0)
        glRotate(self.rotation_y, 0.0, 1.0, 0.0)
        glRotate(self.rotation_x, 1.0, 0.0, 0.0)
        glTranslate(-0.5, -0.5, -0.5)
        glEnable(GL_BLEND)
        glEnable(GL_POLYGON_SMOOTH)
        # Draw the box
        glUseProgram(0)
        glColor(0.0, 1.0, 0.0)
        glDisable(GL_LIGHTING)
        glVertexPointerf(box)
        glDrawArrays(GL_LINES, 0, len(box))
        # Draw the slice planes
        glUseProgram(self.program)
        self.SetupUniforms()
        # Choose the correct set of planes
        if self.rotation_y < 45.0 or self.rotation_y >= 315.0:
            vertex_vbo = self.planes_vbo['xy'][0]
        elif self.rotation_y >= 45.0 and self.rotation_y < 135.0:
            vertex_vbo = self.planes_vbo['yz'][1]
        elif self.rotation_y >= 135.0 and self.rotation_y < 225.0:
            vertex_vbo = self.planes_vbo['xy'][1]
        elif self.rotation_y >= 225.0 and self.rotation_y < 315.0:
            vertex_vbo = self.planes_vbo['yz'][0]
        # Render the planes using VBOs
        glBindBuffer(GL_ARRAY_BUFFER, vertex_vbo)
        glVertexPointer(3, GL_FLOAT, 0, None)
        glDrawArrays(GL_QUADS, 0, 4*plane_count)
        glBindBuffer(GL_ARRAY_BUFFER, 0)
        self.SwapBuffers()
        return
    def InitGL(self):
        # One-time GL state setup, run lazily on the first paint.
        # Load the Shader sources from the files
        self.LoadShaderSources()
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_BLEND)
        glEnableClientState(GL_VERTEX_ARRAY)
        glDepthFunc(GL_LESS)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glShadeModel(GL_SMOOTH)
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, self.width/float(self.height), 0.1, 1000.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        self.SetupLighting()
        self.LoadVolumeData()
        self.LoadTransferFunction((self.t_graph.t_function.get_map() / array([255.0, 255.0, 255.0, 1.0])).flatten())
        self.program = compile_program(self.vertex_shader_src, self.fragment_shader_src)
        self.BuildGeometry()
    def SetupLighting(self):
        '''
        Initialize default lighting
        '''
        glLight(GL_LIGHT0, GL_AMBIENT, (1.0, 1.0, 1.0))
        glLight(GL_LIGHT0, GL_DIFFUSE, (1.0, 1.0, 1.0))
        glLight(GL_LIGHT0, GL_SPECULAR, (1.0, 1.0, 1.0))
        glLight(GL_LIGHT0, GL_POSITION, (-1.0, -1.0, -1.0))
        glEnable(GL_LIGHT0)
    def SetupUniforms(self):
        # Bind the transfer function and push the lighting uniforms.
        # Init the texture units
        glActiveTexture(GL_TEXTURE0)
        glBindTexture(GL_TEXTURE_1D, self.transfer_function)
        glUniform1i(glGetUniformLocation(self.program, "TransferFunction"), 0)
        glUniform1i(glGetUniformLocation(self.program, "EnableLighting"),
                    self.lighting)
        glUniform1i(glGetUniformLocation(self.program, "NumberOfLights"),
                    self.light_count)
    def BuildGeometry(self):
        # For each axis, build front-to-back and back-to-front plane stacks
        # and upload both orderings as static VBOs.
        self.planes_vbo = { 'xy':None, 'xz':None, 'yz':None }
        increment = 1.0 / (plane_count)
        for k in self.planes_vbo.keys():
            fwd = [gen_plane(p=(i*increment), t=k) for i in range(plane_count + 1)]
            rev = []
            rev.extend(fwd)
            rev.reverse()
            data = (array(fwd, dtype=numpy.float32).flatten(),
                    array(rev, dtype=numpy.float32).flatten())
            self.planes_vbo[k] = []
            for i in range(2):
                self.planes_vbo[k].append(glGenBuffers(1))
                glBindBuffer(GL_ARRAY_BUFFER, self.planes_vbo[k][i])
                glBufferData(GL_ARRAY_BUFFER, data[i], GL_STATIC_DRAW_ARB)
    def LoadTransferFunction(self, data):
        # Upload the 256-entry RGBA transfer function as a 1D texture.
        # Create Texture
        self.transfer_function = glGenTextures(1)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
        glBindTexture(GL_TEXTURE_1D, self.transfer_function)
        glTexParameterf(GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_CLAMP)
        glTexParameterf(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage1D(GL_TEXTURE_1D, 0, GL_RGBA, 256, 0, GL_RGBA, GL_FLOAT, data)
        return
    def UpdateTransferFunction(self):
        # Refresh the existing texture in place from the widget's color map.
        data = (self.t_graph.t_function.get_map() / array([255.0, 255.0, 255.0, 1.0])).flatten()
        glBindTexture(GL_TEXTURE_1D, self.transfer_function)
        glTexSubImage1D(GL_TEXTURE_1D, 0, 0, 256, GL_RGBA, GL_FLOAT, data)
    def LoadVolumeData(self):
        # Hook for subclasses/prototypes; no volume data loaded by default.
        pass
    def LoadShaderSources(self):
        # Read shader files if present; otherwise keep the built-in defaults.
        try:
            self.fragment_shader_src = open(self.fragment_src_file).read()
        except IOError, e:
            print 'Fragment source not found, using default'
        try:
            self.vertex_shader_src = open(self.vertex_src_file).read()
        except IOError, e:
            print 'Vertex source not found, using default'
    def OnSize(self, event):
        try:
            self.width, self.height = event.GetSize()
        except:
            self.width = event.GetSize().width
            self.height = event.GetSize().height
        self.Refresh()
        self.Update()
    def OnMouseMotion(self, event):
        # Dragging with the left button rotates the volume around Y.
        x = event.GetX()
        y = event.GetY()
        if self.mouse_down:
            self.rotation_y += (x - self.prev_x)/2.0
            self.rotation_y %= 360.0
            # self.rotation_x -= ((y - self.prev_y)/2.0
            # self.rotation_x %= 360.0
        self.prev_x = x
        self.prev_y = y
        self.Refresh()
        self.Update()
    def OnMouseLeftDown(self, event):
        self.mouse_down = True
        self.prev_x = event.GetX()
        self.prev_y = event.GetY()
    def OnMouseLeftUp(self, event):
        self.mouse_down = False
    def OnKeyDown(self, event):
        # 'r' recompiles the shaders from disk, 'l' toggles lighting.
        if event.GetKeyCode() == ord('r'):
            try:
                print 'Compiling shaders...',
                self.LoadShaderSources()
                program = compile_program(self.vertex_shader_src,
                                          self.fragment_shader_src)
                self.program = program
                self.Refresh()
            except Exception, e:
                print 'FAILED'
                print e
            print 'Done'
        elif event.GetKeyCode() == ord('l'):
            self.lighting = not self.lighting
            print 'Lighting', self.lighting
            self.Refresh()
    def OnClose(self):
        # Free GL resources registered in the cleanup lists.
        for t in self.texture_list:
            glDeleteTextures(t)
        for b in self.buffers_list:
            glDeleteBuffers(1, b)
if __name__ == '__main__':
    # Stand-alone demo: open a 600x600 window hosting the renderer.
    app = wx.App()
    frame = wx.Frame(None, -1, 'Volume Rendering Skeleton', wx.DefaultPosition, wx.Size(600, 600))
    canvas = VolumeRenderSkeleton(frame)
    frame.Show()
    app.MainLoop()
| StarcoderdataPython |
6483850 | #!/usr/bin/env python
from setuptools import setup
# Minimal packaging metadata for the ``fasp`` package and its search subpackage.
setup(name='fasp',
      version='1.0',
      packages=['fasp',
                'fasp.search'],
) | StarcoderdataPython |
360738 | import os
from JumpScale import j
import re
# requires sshfs package
class SshFS(object):
    '''
    Helper that temporarily mounts a remote directory over sshfs (under a
    fresh /mnt/<guid> mountpoint) and copies/moves/lists files through it.
    Requires the ``sshfs`` package to be installed on the host.
    '''
    server = None
    directory = None
    share = None
    filename = None
    end_type = None
    username = None
    password = None
    mntpoint = None
    _command = 'sshfs'
    def __init__(self,end_type,server,directory,username,password,is_dir,recursive,tempdir=j.dirs.tmpDir, Atype='copy'):
        """
        Initialize connection

        directory -- remote path; when is_dir is False its basename is the
                     target file and its dirname is mounted instead
        Atype     -- 'copy' or 'move' behaviour for upload()
        """
        self.is_dir = is_dir
        self.recursive = recursive
        self.end_type = end_type
        self.server = server
        self.share = directory
        self.tempdir=tempdir
        self.Atype = Atype
        self.curdir = os.path.realpath(os.curdir)
        # Normalise the remote path before splitting it into components.
        ldirectory = directory
        while ldirectory.startswith('/'):
            ldirectory = ldirectory.lstrip('/')
        while ldirectory.endswith('/'):
            ldirectory = ldirectory.rstrip('/')
        self.path_components = ldirectory.split('/')
        if not self.is_dir:
            self.filename = j.system.fs.getBaseName(directory)
            self.directory = os.path.dirname(self.share)
        else:
            self.directory = self.share
        self.username = re.escape(username)
        self.password = re.escape(password)
        # Unique per-instance mountpoint so concurrent mounts don't clash.
        self.mntpoint = '/'.join(['/mnt',j.base.idgenerator.generateGUID()])
        self.is_mounted = False
    def _connect(self):
        # Mount the remote share on self.mntpoint, feeding the password on stdin.
        j.system.fs.createDir(self.mntpoint)
        j.logger.log("SshFS: mounting share [%s] from server [%s] with credentials login [%s] and password [%s]" % (self.directory,self.server,self.username,self.password))
        command = "echo \"%s\" | %s %s@%s:%s %s -o password_stdin -o StrictHostKeyChecking=no" % (self.password,self._command,self.username,self.server,self.directory,self.mntpoint)
        j.logger.log("SshFS: executing command [%s]" % command)
        exitCode, output = j.system.process.execute(command,dieOnNonZeroExitCode=False, outputToStdout=False)
        if not exitCode == 0:
            raise RuntimeError('Failed to execute command %s'%command)
        else:
            self.is_mounted = True
    def exists(self):
        """
        Checks file or directory existance
        """
        self._connect()
        if self.is_dir:
            path = self.mntpoint
        else:
            path = j.system.fs.joinPaths(self.mntpoint, self.filename)
        return j.system.fs.exists(path)
    def upload(self,uploadPath):
        """
        Store file

        Copies or moves (per self.Atype) the local path into the mounted
        share; directories honour self.recursive.
        """
        self. _connect()
        if self.Atype == "move":
            if self.is_dir:
                if self.recursive:
                    j.system.fs.moveDir(uploadPath,self.mntpoint)
                else:
                    # walk tree and move
                    for file in j.system.fs.walk(uploadPath, recurse=0):
                        j.logger.log("SshFS: uploading directory - Copying file [%s] to path [%s]" % (file,self.mntpoint))
                        j.system.fs.moveFile(file,self.mntpoint)
            else:
                j.logger.log("SshFS: uploading file - [%s] to [%s]" % (uploadPath,self.mntpoint))
                j.system.fs.moveFile(uploadPath,j.system.fs.joinPaths(self.mntpoint,self.filename))
        else:
            if self.Atype == "copy":
                if self.is_dir:
                    if self.recursive:
                        j.system.fs.copyDirTree(uploadPath,self.mntpoint, update=True)
                    else:
                        # walk tree and copy
                        for file in j.system.fs.walk(uploadPath, recurse=0):
                            j.logger.log("SshFS: uploading directory - Copying file [%s] to path [%s]" % (file,self.mntpoint))
                            j.system.fs.copyFile(file,self.mntpoint)
                else:
                    j.logger.log("SshFS: uploading file - [%s] to [%s]" % (uploadPath,self.mntpoint))
                    j.system.fs.copyFile(uploadPath,j.system.fs.joinPaths(self.mntpoint,self.filename))
    def download(self):
        """
        Download file

        Returns the local (mounted) path of the directory or file; the
        caller reads it before cleanup() unmounts the share.
        """
        self. _connect()
        if self.is_dir:
            j.logger.log("SshFS: downloading from [%s]" % self.mntpoint)
            return self.mntpoint
        else:
            pathname = j.system.fs.joinPaths(self.mntpoint,self.filename)
            j.logger.log("SshFS: downloading from [%s]" % pathname)
            return pathname
    def cleanup(self):
        """
        Umount sshfs share
        """
        j.logger.log("SshFS: cleaning up and umounting share")
        command = "umount %s" % self.mntpoint
        exitCode, output = j.system.process.execute(command,dieOnNonZeroExitCode=False, outputToStdout=False)
        if not exitCode == 0:
            raise RuntimeError('Failed to execute command %s'%command)
        j.system.fs.removeDir(self.mntpoint)
        self.is_mounted = False
    def list(self):
        """
        List content of directory
        """
        self._connect()
        os.chdir(self.mntpoint)
        if self.path_components:
            if len(self.path_components) > 1:
                # Descend to the parent of the last component first.
                os.chdir('/' + '/'.join(self.path_components[:-1]))
                if os.path.isdir(self.path_components[-1]):
                    os.chdir(self.path_components[-1])
                else:
                    raise RuntimeError('%s is not a valid directory under %s' %('/'.join(self.path_components),self.sharename))
            if os.path.isdir(self.path_components[0]):
                os.chdir(self.path_components[0])
        flist = j.system.fs.walk(os.curdir,return_folders=1,return_files=1)
        os.chdir(self.curdir)
        j.logger.log("list: Returning content of SSH Mount [%s] which is tmp mounted under [%s]" % (self.share , self.mntpoint))
        return flist
    def __del__(self):
        # Best-effort unmount if the share is still mounted at GC time.
        if self.is_mounted:
            j.logger.log('SshFS GC')
            self.cleanup()
        os.chdir(self.curdir)
| StarcoderdataPython |
4882340 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `vimanga` package."""
import pytest
from vimanga.api.core import call_api, get_chapters, get_images
from vimanga.api.types import Manga, Chapters
@pytest.fixture
def manga():
    """Fixture: a known sample manga (id 21937) used by the tests below."""
    return Manga(21937, 'type', 'score', 'name', 'synopsis', 'genders')
@pytest.fixture
def chapters(manga):
    """Fixture: first page of chapters fetched for the sample manga."""
    return next(get_chapters(manga))
def test_api_works():
    """Smoke test: the remote API endpoint still answers successfully."""
    resp = call_api()
    assert resp.ok, 'Problem with page message={}'.format(resp.text)
def test_get_sample_manga(manga, chapters):
    """The sample manga reports 6 total chapters and a non-empty chapter list."""
    total = chapters.total
    _chapters = chapters.data
    message = 'Problem with manga id {} {} not is {}'
    assert total == 6, message.format(manga.id, 'total', 6)
    assert _chapters, 'Not fetch chapters'
def test_get_sample_images(chapters: Chapters):
    """The first chapter resolves to a list of more than 6 page images."""
    chapter = chapters.data[0]
    resp = list(get_images(chapter))
    assert isinstance(resp, list)
    assert len(resp) > 6
| StarcoderdataPython |
122587 | <reponame>gradut/cardboard
# Generated by Django 4.0.1 on 2022-01-07 02:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the HuntSettings model: per-hunt Google Drive and Discord
    configuration, linked one-to-one to ``hunts.Hunt`` as ``settings``."""

    dependencies = [
        ("hunts", "0006_start_end_times"),
    ]
    operations = [
        migrations.CreateModel(
            name="HuntSettings",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "google_drive_folder_id",
                    models.CharField(blank=True, max_length=128),
                ),
                (
                    "google_sheets_template_file_id",
                    models.CharField(blank=True, max_length=128),
                ),
                ("google_drive_human_url", models.URLField(blank=True)),
                ("discord_guild_id", models.CharField(blank=True, max_length=128)),
                (
                    "discord_puzzle_announcements_channel_id",
                    models.CharField(blank=True, max_length=128),
                ),
                (
                    "discord_text_category",
                    models.CharField(
                        blank=True, default="text [puzzles]", max_length=128
                    ),
                ),
                (
                    "discord_voice_category",
                    models.CharField(
                        blank=True, default="voice [puzzles]", max_length=128
                    ),
                ),
                (
                    "discord_archive_category",
                    models.CharField(blank=True, default="archive", max_length=128),
                ),
                (
                    "discord_devs_role",
                    models.CharField(blank=True, default="dev", max_length=128),
                ),
                (
                    "hunt",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="settings",
                        to="hunts.hunt",
                    ),
                ),
            ],
        ),
    ]
| StarcoderdataPython |
269807 | sequence = input("Enter your sequence: ")
# Strip the surrounding brackets, e.g. "[1, 2, -1]" -> ["1", "2", "-1"].
sequence_list = sequence[1:len(sequence)-1].split(", ")
subset_sum = False
# Quadratic scan over all ordered pairs.  NOTE(review): an element is also
# paired with itself, so a single 0 already yields True -- confirm intended.
for element in sequence_list:
    for other in sequence_list:
        if int(element) + int(other) == 0:
            subset_sum = True
print(str(subset_sum))
| StarcoderdataPython |
1620306 | <filename>src/utilsmodule/test.py
'''
Get all football match on direct
'''
import requests
from bs4 import BeautifulSoup
from urlvalidator import validate_url, ValidationError
from src import WilliamHillURLs as whurls
import pandas as pd
import csv
import os
if __name__ == "__main__":
    # Scrape the first live football match page and print its odds spans.
    # --------------------------------------------------------------------------------------------------------------
    # myVariable = whurls.WilliamHillURLs()
    # # 0 - List all matches availables
    # ListURLs = myVariable.GetAllUrlMatches(urlSport=myVariable.URL_FootballOnDirect)
    # # for i in myVariable.GetAllUrlMatches(urlSport=myVariable.URL_FootballOnDirect):
    # #     print(i)
    # # 1 - Chose the first URL for example.
    # URL = ListURLs[1]
    # # 2 - Get web page and extract it.
    # req = requests.get(URL)
    # soup = BeautifulSoup(req.content.decode('utf-8','ignore'), "html.parser")
    # # 3 - Extract bet and the tittle.
    # aux = soup.findAll("button", {"class": ['btn betbutton oddsbutton']})
    # print(URL.encode())
    # # 4 - Print all bets and
    # for i in aux:
    #     print(myVariable.ConvertFractionalBetToDecimalBet(i.text), ' ', i['data-name'])
    # --------------------------------------------------------------------------------------------------------------
    import sys
    import time
    from datetime import datetime
    from requests_html import HTMLSession
    count = 0
    # Single iteration for now; raise the bound to poll periodically.
    while( count < 1):
        myVariable = whurls.WilliamHillURLs()
        URL = myVariable.GetAllUrlMatches(myVariable.URL_FootballOnDirect)[0]
        req = requests.get(URL)
        soup = BeautifulSoup(req.content.decode('utf-8','ignore'), "html.parser")
        print('Count: ', count, ' - URL: ', URL)
        aux = soup.find('h2', {'class' : ['css-qbolbz']})
        allBets = soup.findAll("button", {"class": ['btn betbutton oddsbutton']})
        #result = soup.find_all(class_='tableCellMiddle mainScoreBox')
        result = soup.findAll('span', attrs={"class":"betbutton__odds"})
        print(result)
        # create an HTML Session object
        session = HTMLSession()
        # Use the object above to connect to needed webpage
        resp = session.get(URL)
        # Run JavaScript code on webpage
        resp.html.render()
        print(resp.html.find('span'))
        '''
        # for i in allBets:
        #     print(myVariable.ConvertFractionalBetToDecimalBet(i.text), ' ', i['data-name'])
        # print(aux)
        # print(len(aux))
        # print(aux.text)
        CSVFileName = aux.text.replace(' ','') + '.csv'
        pathCSVFile = '.././data/' + CSVFileName
        print(pathCSVFile)
        if not os.path.exists(pathCSVFile):
            if not os.path.exists('.././data/'):
                os.mkdir('.././data/')
            with open(pathCSVFile, 'a', newline='\n') as f:
                writer = csv.writer(f)
                f.close()
            df = pd.read_csv(pathCSVFile, header=0, names=['TimeStamp', 'LocalVictory', 'Draw', 'VisitanVictory','Result'])
            df.to_csv(pathCSVFile)
        now = datetime.now()
        fields=[now.strftime("%d/%m/%Y %H:%M:%S"), myVariable.ConvertFractionalBetToDecimalBet(allBets[0].text),
                myVariable.ConvertFractionalBetToDecimalBet(allBets[1].text),
                myVariable.ConvertFractionalBetToDecimalBet(allBets[2].text),
                result.text]
        with open(pathCSVFile, 'a', newline='\n') as f:
            writer = csv.writer(f)
            writer.writerow(fields)
            f.close()
        '''
        time.sleep(5) # Sleep 5 seconds
        count += 1
| StarcoderdataPython |
6685298 | import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
from typing import Tuple
class NNet(nn.Module):
    """Tiny two-layer MLP.

    ``forward`` takes a tuple of 2-D tensors that are concatenated along
    the feature axis into 3 features (2 + 1), passed through a 5-unit
    hidden ReLU layer, and reduced to a single softmax output.
    """

    def __init__(self):
        super(NNet, self).__init__()
        # Input is the concatenation of a 2-feature and a 1-feature tensor.
        self.layer1 = nn.Linear(2 + 1, 5)
        self.layer2 = nn.Linear(5, 1)

    def forward(self, x: Tuple):
        """Concatenate the input pair and return the softmax activation."""
        x = torch.cat(x, dim=1)
        x = F.relu(self.layer1(x))
        x = self.layer2(x)
        # dim=1 made explicit: implicit-dim softmax is deprecated, and the
        # cat(dim=1) above guarantees a 2-D (batch, features) input here.
        return F.softmax(x, dim=1)
def build_data(dat):
    '''
    Convert a sequence of nested float lists into a tuple of tensors.

    Data must be list of tensors of floats.
    Example: build_data(([2., 3.], [5., 6.])) -> (tensor([2., 3.]), tensor([5., 6.]))
    '''
    # Bug fix: the original iterated the module-level global ``data``
    # instead of the ``dat`` argument, so any other input was ignored.
    return tuple(torch.tensor(t) for t in dat)
if __name__ == '__main__':
    # Batch of two samples: a 1-column tensor and a 2-column tensor,
    # concatenated along dim=1 inside NNet.forward.
    data = (
        [
            [2.],
            [3]],
        [
            [3., 4],
            [5, 6]]
    )
    x = build_data(data)
    nnet = NNet()
    y = nnet(x)
    print(y)
| StarcoderdataPython |
385891 | #!/usr/bin/env python
import sys
import os
from subprocess import *
# Usage: easy.py training_file [testing_file]
# Pipeline: scale -> grid-search CV (grid.py) -> train -> (optionally) predict.
if len(sys.argv) <= 1:
	print('Usage: {0} training_file [testing_file]'.format(sys.argv[0]))
	raise SystemExit
# svm, grid, and gnuplot executable files
is_win32 = (sys.platform == 'win32')
if not is_win32:
	svmscale_exe = "../svm-scale"
	svmtrain_exe = "../svm-train-gpu"
	svmpredict_exe = "../svm-predict"
	grid_py = "./grid.py"
	gnuplot_exe = "/usr/bin/gnuplot"
else:
	# example for windows
	svmscale_exe = r"..\windows\svm-scale.exe"
	svmtrain_exe = r"..\windows\svm-train-gpu.exe"
	svmpredict_exe = r"..\windows\svm-predict.exe"
	gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
	grid_py = r".\grid.py"
assert os.path.exists(svmscale_exe), "svm-scale executable not found"
assert os.path.exists(svmtrain_exe), "svm-train executable not found"
assert os.path.exists(svmpredict_exe), "svm-predict executable not found"
assert os.path.exists(gnuplot_exe), "gnuplot executable not found"
assert os.path.exists(grid_py), "grid.py not found"
train_pathname = sys.argv[1]
assert os.path.exists(train_pathname), "training file not found"
file_name = os.path.split(train_pathname)[1]
scaled_file = file_name + ".scale"
model_file = file_name + ".model"
range_file = file_name + ".range"
if len(sys.argv) > 2:
	test_pathname = sys.argv[2]
	file_name = os.path.split(test_pathname)[1]
	assert os.path.exists(test_pathname), "testing file not found"
	scaled_test_file = file_name + ".scale"
	predict_test_file = file_name + ".predict"
# Scale the training data and save the scaling parameters to range_file.
cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file)
print('Scaling training data...')
Popen(cmd, shell=True, stdout=PIPE).communicate()
cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file)
print('Cross validation...')
f = Popen(cmd, shell=True, stdout=PIPE).stdout
line = ''
# Echo grid.py's progress; its final line carries the best (c, g, rate).
while True:
	last_line = line
	line = f.readline()
	if not line:
		break
	else:
		print line
c, g, rate = map(float, last_line.split())
print('Best c={0}, g={1} CV rate={2}'.format(c, g, rate))
# Train the final model with the best parameters found above.
cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe, c, g, scaled_file, model_file)
print('Training...')
Popen(cmd, shell=True, stdout=PIPE).communicate()
print('Output model: {0}'.format(model_file))
if len(sys.argv) > 2:
	# Scale the test data with the training range, then predict.
	cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
	print('Scaling testing data...')
	Popen(cmd, shell=True, stdout=PIPE).communicate()
	cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
	print('Testing...')
	Popen(cmd, shell=True).communicate()
	print('Output prediction: {0}'.format(predict_test_file))
| StarcoderdataPython |
173373 | <reponame>gour/holidata
[
{
'date': '2020-01-01',
'description': 'Nouvel An',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2020-04-12',
'description': 'Pâques',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2020-04-13',
'description': 'Lundi de Pâques',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2020-05-01',
'description': 'Fête du Travail',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2020-05-21',
'description': 'Ascension',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2020-05-31',
'description': 'Pentecôte',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2020-06-01',
'description': 'Lundi de Pentecôte',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2020-07-21',
'description': 'Fête nationale',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2020-08-15',
'description': 'Assomption',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2020-11-01',
'description': 'Toussaint',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2020-11-11',
'description': "Jour de l'armistice",
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2020-12-25',
'description': 'Noël',
'locale': 'fr-BE',
'notes': '',
'region': '',
'type': 'NRF'
}
] | StarcoderdataPython |
325421 | <filename>contrib/0.挖宝行动/youzidata-机坪跑道航空器识别/src/utils/label_converter.py
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from xml.dom import minidom
import random
import cv2
import os
def generateXml(xml_path, boxes, w, h, d):
    """Write a Pascal-VOC style annotation XML file.

    Args:
        xml_path: destination path for the XML file.
        boxes: iterable of (name, xmin, ymin, xmax, ymax) records.
        w, h, d: image width, height and channel depth.
    """
    impl = minidom.getDOMImplementation()
    doc = impl.createDocument(None, None, None)
    rootElement = doc.createElement('annotation')
    # <size> holds the image geometry.
    sizeElement = doc.createElement("size")
    for tag, value in (("width", w), ("height", h), ("depth", d)):
        node = doc.createElement(tag)
        node.appendChild(doc.createTextNode(str(value)))
        sizeElement.appendChild(node)
    rootElement.appendChild(sizeElement)
    # One <object> per bounding box; 'difficult' is always 0 here.
    for item in boxes:
        objElement = doc.createElement('object')
        nameElement = doc.createElement("name")
        nameElement.appendChild(doc.createTextNode(str(item[0])))
        objElement.appendChild(nameElement)
        difficultElement = doc.createElement("difficult")
        difficultElement.appendChild(doc.createTextNode(str(0)))
        objElement.appendChild(difficultElement)
        bndElement = doc.createElement('bndbox')
        for tag, value in zip(('xmin', 'ymin', 'xmax', 'ymax'), item[1:5]):
            node = doc.createElement(tag)
            node.appendChild(doc.createTextNode(str(value)))
            bndElement.appendChild(node)
        objElement.appendChild(bndElement)
        rootElement.appendChild(objElement)
    doc.appendChild(rootElement)
    # FIX: the original used bare open()/close(), leaking the handle if
    # writexml raised; the context manager guarantees the file is closed.
    with open(xml_path, 'w') as f:
        doc.writexml(f, addindent=' ', newl='\n')
# Running counter used to name exported image/annotation pairs across calls.
Index = 0
# Default export root; shadowed by the exp_path parameter of export().
exp_path='./DeepLeague100K/origin_data/train'
def export(npz_file_name, exp_path):
    """Dump every image/boxes pair of an .npz archive as JPEG + VOC XML."""
    global Index
    archive = np.load(npz_file_name)
    print (len(archive['images']))
    for frame, frame_boxes in zip(archive['images'], archive['boxes']):
        pixels = np.array(Image.fromarray(frame), dtype = np.uint8)
        pixels = cv2.cvtColor(pixels, cv2.COLOR_BGR2RGB)
        generateXml(exp_path + '/Annotations/' + str(Index) + '.xml',
                    frame_boxes, pixels.shape[0], pixels.shape[1], pixels.shape[2])
        cv2.imwrite(exp_path + '/Images/' + str(Index) + '.jpg', pixels)
        Index += 1
if __name__ == '__main__':
    # Convert the train split first, then the validation split
    # (exported under 'eval'), preserving the original processing order.
    splits = (('./DeepLeague100K/clusters_cleaned/train/', './DeepLeague100K/lol/train'),
              ('./DeepLeague100K/clusters_cleaned/val/', './DeepLeague100K/lol/eval'))
    for src_dir, dst_dir in splits:
        for archive_name in os.listdir(src_dir):
            export(os.path.join(src_dir, archive_name), dst_dir)
| StarcoderdataPython |
9670081 | import torch
import torch.nn as nn
from ..advtrainer import AdvTrainer
class GradAlign(AdvTrainer):
    r"""
    GradAlign in 'Understanding and Improving Fast Adversarial Training'
    [https://arxiv.org/abs/2007.02617]
    [https://github.com/tml-epfl/understanding-fast-adv-training]

    Adversarial trainer that combines FGSM training with a gradient-alignment
    regularizer: the cosine similarity between the input gradients taken at
    two independent random perturbations of the batch is pushed toward 1.

    Attributes:
        self.model : model.
        self.device : device where model is.
        self.optimizer : optimizer.
        self.scheduler : scheduler (Automatically updated).
        self.max_epoch : total number of epochs.
        self.max_iter : total number of iterations.
        self.epoch : current epoch starts from 1 (Automatically updated).
        self.iter : current iters starts from 1 (Automatically updated).
        * e.g., is_last_batch = (self.iter == self.max_iter)
        self.record_keys : names of items returned by do_iter.

    Arguments:
        model (nn.Module): model to train.
        eps (float): strength of the attack or maximum perturbation.
        alpha (float): alpha in the paper (step size multiplying grad.sign()).
        grad_align_cos_lambda (float): parameter for the regularization term.
    """
    def __init__(self, model, eps, alpha, grad_align_cos_lambda):
        super().__init__("GradAlign", model)
        self.record_keys = ["Loss", "CALoss", "GALoss"] # Must be same as the items returned by self._do_iter
        self.eps = eps
        self.alpha = alpha
        self.grad_align_cos_lambda = grad_align_cos_lambda
    def _do_iter(self, train_data):
        r"""
        Overridden. Runs one FGSM + gradient-alignment step and returns
        (total loss, adversarial CE loss, alignment loss) as Python floats.
        """
        images, labels = train_data
        X = images.to(self.device)
        Y = labels.to(self.device)
        # Calculate loss_gradalign: stack two copies of the batch and add an
        # independent uniform perturbation in [-eps, eps] to each copy.
        X_new = torch.cat([X.clone(), X.clone()], dim=0)
        Y_new = torch.cat([Y.clone(), Y.clone()], dim=0)
        delta1 = torch.empty_like(X).uniform_(-self.eps, self.eps)
        delta2 = torch.empty_like(X).uniform_(-self.eps, self.eps)
        delta1.requires_grad = True
        delta2.requires_grad = True
        X_new[:len(X)] += delta1
        X_new[len(X):] += delta2
        X_new = torch.clamp(X_new, 0, 1)
        logits_new = self.model(X_new)
        loss_gn = nn.CrossEntropyLoss()(logits_new, Y_new)
        # One backward pass yields the input gradients at both perturbations.
        grad1, grad2 = torch.autograd.grad(loss_gn, [delta1, delta2],
                                           retain_graph=False, create_graph=False)
        # FGSM step from the first random start, then project back into the
        # eps-ball around X and clamp to the valid [0, 1] image range.
        X_adv = X_new[:len(X)] + self.alpha*grad1.sign()
        delta = torch.clamp(X_adv - X, min=-self.eps, max=self.eps).detach()
        X_adv = torch.clamp(X + delta, min=0, max=1).detach()
        # Keep only samples with nonzero gradients on both sides to avoid
        # dividing by zero during normalization below.
        grads_nnz_idx = ((grad1**2).sum([1, 2, 3])**0.5 != 0) * ((grad2**2).sum([1, 2, 3])**0.5 != 0)
        grad1, grad2 = grad1[grads_nnz_idx], grad2[grads_nnz_idx]
        grad1_norms = self._l2_norm_batch(grad1)
        grad2_norms = self._l2_norm_batch(grad2)
        grad1_normalized = grad1 / grad1_norms[:, None, None, None]
        grad2_normalized = grad2 / grad2_norms[:, None, None, None]
        # Per-sample cosine similarity between the two input gradients.
        cos = torch.sum(grad1_normalized * grad2_normalized, (1, 2, 3))
        loss_gradalign = torch.tensor([0]).to(self.device)
        if len(cos) > 0:
            cos_aggr = cos.mean()
            loss_gradalign = (1.0 - cos_aggr)
        # Calculate loss_ce on the FGSM adversarial examples.
        logits_adv = self.model(X_adv)
        loss_ce_adv = nn.CrossEntropyLoss()(logits_adv, Y)
        cost = loss_ce_adv + self.grad_align_cos_lambda * loss_gradalign
        self.optimizer.zero_grad()
        cost.backward()
        self.optimizer.step()
        return cost.item(), loss_ce_adv.item(), loss_gradalign.item()
    def _l2_norm_batch(self, v):
        # Per-sample L2 norm over the non-batch dimensions (dims 1..3).
        norms = (v ** 2).sum([1, 2, 3]) ** 0.5
        # norms[norms == 0] = np.inf
        return norms
| StarcoderdataPython |
9600992 | import os
import sys
import pandas as pd
from yahoo_fin import stock_info as si
import csv
from datetime import datetime, date, time, timedelta, timezone
import alpaca_trade_api as tradeapi
import pytz;
from yahoo_fin import stock_info as si
import time;
import json
# NOTE(review): placeholder Alpaca API credentials hard-coded in source.
api = tradeapi.REST('XyZ','XYZ')
print ('Number of arguments:', len(sys.argv), 'arguments.')
print ('Argument List:', str(sys.argv))
# Screener whose most recent CSV output drives the sell decisions below;
# optionally overridden by the first CLI argument.
screenerName='looseCANSLIM'
if(len(sys.argv)>1):
    screenerName = sys.argv[1];
print("Using screenerName: "+screenerName);
# Stocks priced at or above this threshold are ignored.
maxStockPrice = 500.0;
account = api.get_account()
dir = 'screenerOutput'  # NOTE(review): shadows the builtin dir()
print("Available Cash $"+account.cash);
def getfiles(dirpath):
    """Return the file names in *dirpath* sorted by mtime, newest first.

    Only regular files are returned; subdirectories are skipped.
    """
    names = [s for s in os.listdir(dirpath)
             if os.path.isfile(os.path.join(dirpath, s))]
    # Single descending sort replaces the original sort-then-reverse dance.
    names.sort(key=lambda s: os.path.getmtime(os.path.join(dirpath, s)), reverse=True)
    return names
# Pick the newest screener CSV whose embedded HHMMSS timestamp is before
# noon (< 120000); getfiles() already returns newest-first.
fileToUse = None
for file in getfiles(dir):
    if (file.lower().startswith(screenerName.lower())
            and file.lower().endswith(".csv")
            and int(file.lower().split('-')[1].replace('.csv', '')) < 120000):
        fileToUse = file
        break
# Load the screener output, drop stocks above the price ceiling, and sort
# the survivors by price, most expensive first.
frame = pd.read_csv(os.path.join(dir, fileToUse))
print("Ignoring these (More than $"+str(maxStockPrice)+":\n "+str(frame[frame.Price >= maxStockPrice][["Ticker","Price"]]))
frame = frame[frame.Price < maxStockPrice]
frame = frame.sort_values(by=['Price'], ascending=False)
# Sell every open position whose ticker no longer appears in the screener.
positions = api.list_positions()
currentPositions = {}
for position in positions:
    cost_basis = position.__getattr__('cost_basis')
    market_value = position.__getattr__('market_value')
    price = position.__getattr__('current_price')
    symbol = position.__getattr__('symbol')
    qty = position.__getattr__('qty')
    relevantRow = frame.loc[frame['Ticker'] == symbol]
    if relevantRow.empty:
        print("Did not find "+symbol+" in screener. Selling for "+"{0:.3}".format(float(market_value)-float(cost_basis)) + " PL ")
        # BUG FIX: the original passed the undefined name 'ticker' here,
        # raising NameError whenever a sell was triggered; the position's
        # symbol is what the order needs.
        api.submit_order(
            symbol,
            int(qty),
            'sell',
            'market',
            'day'
        )
9695275 | <filename>DFS.py<gh_stars>0
'''
find biggest region of connected 1's in a grid of 1s and 0s
'''
def isSafe(grid, row, col, visited):
    """True when (row, col) is on the board, holds a 1, and is unvisited."""
    inside = 0 <= row < ROWS and 0 <= col < COLS
    return inside and grid[row][col] != 0 and visited[row][col] != 1
def DFS(grid, row, col, count, visited):
    """Flood-fill from (row, col) over 8-connected 1-cells, growing count[0]."""
    global ROWS, COLS
    visited[row][col] = 1
    # Same neighbour order as before: the eight surrounding offsets.
    offsets = ((-1, 0), (-1, -1), (-1, 1), (0, 1), (0, -1), (1, -1), (1, 0), (1, 1))
    for dr, dc in offsets:
        if isSafe(grid, row + dr, col + dc, visited):
            count[0] += 1
            DFS(grid, row + dr, col + dc, count, visited)
def largestRegion(grid):
    """Return the size of the largest 8-connected region of 1s in grid."""
    global ROWS, COLS
    visited = [[0] * COLS for _ in range(ROWS)]
    best = 0
    for r in range(ROWS):
        for c in range(COLS):
            if grid[r][c] == 1 and not visited[r][c]:
                tally = [1]  # counts the starting cell itself
                DFS(grid, r, c, tally, visited)
                best = max(best, tally[0])
    return best
# Sample 4x5 grid: 1 = filled cell, 0 = empty.
grid = [[0, 0, 1, 1, 0],
        [1, 0, 1, 1, 0],
        [0, 1, 0, 0, 0],
        [0, 0, 0, 0, 1]]
# Module globals read by isSafe/DFS/largestRegion.
ROWS = len(grid)
COLS = len(grid[0])
# Prints the size of the largest 8-connected region of 1s for this sample.
print(largestRegion(grid))
3405352 | import pylint
import reward
def test_reward_straight_track_success():
    """ A dead-straight upcoming track with no steering earns the 2x bonus. """
    track = {
        'waypoints': [[0, 1], [1, 2], [2, 3], [3, 4]],
        'closest_waypoints': [1, 2],
        'steering_angle': 0
    }
    assert reward.reward_straight_track(track) == 2
def test_reward_straight_track_end():
    """ Close to the end of the waypoint list no 2x bonus is granted. """
    track = {
        'waypoints': [[0, 1], [1, 2], [2, 3], [3, 4]],
        'closest_waypoints': [2, 3],
        'steering_angle': 0
    }
    assert reward.reward_straight_track(track) == 1
def test_reward_straight_track_oversteer():
    """ Steering hard on a straight segment is penalized down to 0.8x. """
    track = {
        'waypoints': [[0, 1], [1, 2], [2, 3], [3, 4]],
        'closest_waypoints': [1, 2],
        'steering_angle': 15
    }
    assert reward.reward_straight_track(track) == 0.8
def test_reward_straight_track_fail():
    """ A non-straight track keeps the neutral 1x reward. """
    track = {
        'waypoints': [[0, 1], [0, 2], [0, 3], [0, 15]],
        'closest_waypoints': [1, 2],
        'steering_angle': 0
    }
    assert reward.reward_straight_track(track) == 1
| StarcoderdataPython |
11391071 | <reponame>jkleczar/ttslabdev
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
import os
import multiprocessing
from glob import glob
import subprocess
def extract_lf0(parms):
    """Invoke the wav -> lf0 extraction script for one parameter dict.

    parms must provide 'infn', 'outfn', 'lowerf0' and 'upperf0'.
    """
    template = "python scripts/wav2lf0_fixocterrs.py %(infn)s %(outfn)s %(lowerf0)s %(upperf0)s"
    subprocess.call(template % parms, shell=True)
if __name__ == "__main__":
    # Shadow the builtin map() with a process-pool version so the call at
    # the bottom transparently parallelizes across CPU cores.
    try:
        import multiprocessing
        POOL = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        def map(f, i):
            return POOL.map(f, i, chunksize=1)
    except ImportError:
        pass
    # Expect exactly the lower/upper F0 bounds on the command line.
    argnames = ["lowerf0", "upperf0"]
    assert len(argnames) == len(sys.argv[1:])
    args = dict(zip(argnames, sys.argv[1:]))
    #make parms:
    parms = []
    for fn in glob(os.path.join("wav", "*.wav")):
        tempd = dict(args)
        tempd["infn"] = fn
        # BUG FIX: rstrip(".wav") strips *characters* from the set {., w, a, v}
        # and mangles names such as "audio_v.wav"; slice the extension instead
        # (the glob pattern guarantees every fn ends with ".wav").
        base = os.path.basename(fn)[:-len(".wav")]
        tempd["outfn"] = os.path.join("lf0", base + ".lf0")
        parms.append(tempd)
    #run:
    map(extract_lf0, parms)
| StarcoderdataPython |
3249996 | from flask import Flask, render_template
# Single Flask application instance for this module.
app = Flask(__name__)
@app.route("/")
def hello():
    """Serve the landing page template."""
    return render_template('index.html')
@app.route("/contact")
def about():
    """Serve the contact page (note: endpoint name 'about' differs from the /contact route it handles)."""
    return render_template('contact.html')
# In-memory fixture rendered by the /data page: one record per car.
cars = [
    {"car_id": "112093012309120310", "car_model": "Honda", "car_speed": "200"},
    {"car_id": "212309120310093012", "car_model": "BMW", "car_speed": "180"},
    {"car_id": "312012031093012309", "car_model": "Subaru", "car_speed": "100"},
    {"car_id": "419120310209301230", "car_model": "Nisan", "car_speed": "200"},
    {"car_id": "509301230129120310", "car_model": "Toyota", "car_speed": "50"},
    {"car_id": "613091203102093012", "car_model": "Tesla", "car_speed": "210"},
    {"car_id": "712093003101230912", "car_model": "Acura", "car_speed": "20"},
    {"car_id": "801230120939120310", "car_model": "Lexus", "car_speed": "90"},
    {"car_id": "912093091203100123", "car_model": "Ford", "car_speed": "150"}
]
@app.route("/data")
def data():
    """Render the data table page with the module-level cars list."""
    return render_template("data.html", cars=cars)
@app.route("/gallery")
def gallery():
    """Render the gallery page with the hosted screenshot URLs."""
    base = "https://user-images.githubusercontent.com/16161226/"
    suffixes = [
        "46444401-df6b0500-c73f-11e8-89cb-ee3080c5080f.jpg",
        "46444402-df6b0500-c73f-11e8-949a-e3e6675b11dd.jpg",
        "46444403-df6b0500-c73f-11e8-8cba-5d664fc627fa.jpg",
        "46444404-df6b0500-c73f-11e8-8c7f-44c8a8515d01.jpg",
    ]
    return render_template("gallery.html", links=[base + s for s in suffixes])
if __name__ == '__main__':
    # NOTE(review): binds to all interfaces with debug=True — development only.
    app.run(host='0.0.0.0', debug=True)
| StarcoderdataPython |
6697426 | import numpy as np
import random
import sys
import time
from TicTacToeView import TicTacToeView
from TicTacToeModel import TicTacToeModel
from stubView import stubView
# RGB color constants used by the game view.
BLUE = (0, 0, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
# Module-level copy of the win-length; set by TicTacToe.__init__ and read
# by the TicTacToeStatic helpers.
needToWinGLOBAL = 0
class TicTacToe:
    """Game controller tying a TicTacToeModel to a (possibly stub) view."""
    def __init__(self, needToWin, GRID_SIZE, player1=None, player2=None, stub = False):
        # Mirror the win-length into the module global so the static helpers
        # (TicTacToeStatic) can reach it without a controller instance.
        global needToWinGLOBAL
        needToWinGLOBAL = needToWin
        self.NEED_TO_WIN = needToWin
        self.Model = TicTacToeModel(needToWin, GRID_SIZE, player1, player2)
        self.stub = stub
        # A stub view is used for headless runs.
        if(stub == False):
            self.View = TicTacToeView(GRID_SIZE)
            self.View.controller = self
        else:
            self.View = stubView()
        self.GRID_SIZE = GRID_SIZE
        # Players are tagged with the mark they place: 1 and -1.
        if(player1 is not None):
            player1.x = 1
        if(player2 is not None):
            player2.x = -1
    def gameover(self, board, lastmove = None):
        """Delegate game-over detection to the model (also notifies the view)."""
        self.View.gameover()
        return self.Model.gameover(board, lastmove)
    def Win(self, board, x, lastmove = None):
        """Return the model's win check for mark x (1 or -1)."""
        return self.Model.win(board, x, lastmove)
    def Status(self, board, lastmove = None, cnt = None):
        """Return 1/-1 for a win, 0 for a draw, None while still playing.

        When cnt (number of moves played so far) is given, it is used as a
        cheap draw test instead of the full game-over scan.
        """
        if(self.Win(board, 1, lastmove) > 0):
            return 1
        if(self.Win(board, -1, lastmove) > 0):
            return -1
        if(cnt!=None):
            if(cnt<len(board)**2):
                return None
            else:
                return 0
        if(self.gameover(board, lastmove) > 0):
            return 0
        return None
    def printBoard(self, board):
        """Dump the board to stdout (debug helper)."""
        for x in board:
            for y in x:
                print (y, end='')
            print()
        print()
    def draw_board(self, board):
        """Forward the board to the view for rendering."""
        self.View.draw_board(board)
    def finish(self, board, players):
        """Show the end-of-game headline: player-1 win, player-2 win, or draw."""
        if(self.Model.win(board, 1)):
            self.View.headline(players[0].name + " Wins!!")
        elif(self.Model.win(board, -1)):
            self.View.headline(players[1].name + " Wins!")
        else:
            self.View.headline("Draw!!!")
    def draw_turn(self, player):
        """Show whose turn it is."""
        self.View.draw_turn(player.name)
    def run(self):
        """Play games until the back button fires (1000) or when stubbed.

        Returns the status of the last finished game (1 / -1 / 0), or 1000
        when the loop was left via the back button.
        """
        while(True):
            Status = self.run2()
            if(self.stub == True or Status == 1000 or self.View.isBackButtonClicked()):
                break
            wfc = self.View.WaitForAClick()
            if(wfc == 1000):
                break
        return Status
    def run2(self):
        """Play a single game; returns its status, or 1000 on back-button."""
        players = self.Model.players # [player1,player2]
        turn = 1
        Log = []
        board = self.Model.create_board()
        self.draw_board(board)
        players[0].now = None
        players[1].now = None
        while(self.gameover(board) == False):
            if(self.View.isBackButtonClicked()):
                return 1000
            self.draw_turn(players[(turn != 1)])
            # Time each move; make_a_move returns a flat cell index.
            start = time.time()
            next = players[(turn != 1)].make_a_move(board)
            end = time.time()
            print(end - start)
            # Illegal moves are rejected by the model; the same player retries.
            if(self.Model.tryMakingAMove(board, next, turn) == 0):
                continue
            move_now = [next//self.GRID_SIZE, next%self.GRID_SIZE]
            print(move_now)
            Log.append(move_now)
            turn *= -1
            self.draw_board(board)
        self.finish(board, players)
        #print(Log)
        return self.Status(board)
class TicTacToeStatic:
    """Stateless board helpers operating on numpy-style boards (1/-1/0)."""
    @staticmethod
    def available_moves(s):
        """Return all empty cells of the square board s as (row, col) tuples."""
        m = []
        length = len(s)
        for i in range(length):
            for j in range(length):
                if(s[i,j] == 0):
                    m.append((i,j))
        return m
    @staticmethod
    def Status(s, lastmove = None, cnt = None):
        """Return 1/-1 for a win, 0 for a draw, None if the game continues.

        Boards other than 3x3 are delegated to a throwaway TicTacToe
        controller; the 3x3 fast path checks rows, columns and diagonals.
        """
        if(len(s) != 3):
            TTT = TicTacToe(needToWinGLOBAL, len(s))
            return TTT.Status(s,lastmove,cnt)
        all = []  # NOTE(review): shadows the builtin all()
        for x in s.tolist():
            all.append(x)
        for x in [list(i) for i in zip(*s)]:
            all.append(x)
        all.append([s[0, 0], s[1, 1], s[2, 2]])
        all.append([s[2, 0], s[1, 1], s[0, 2]])
        e = 0
        if [1, 1, 1] in all:
            e = 1
        elif [-1, -1, -1] in all:
            e = -1
        else:
            # Any empty cell left means the game is still in progress.
            for i in range(3):
                for j in range(3):
                    if(s[i, j] == 0):
                        e = None
        return e
    @staticmethod
    def nearest(s,r,c):
        """Manhattan distance from (r, c) to the closest occupied cell,
        or 0 when the board is empty."""
        mi = 999999999
        length = len(s)
        for i in range(length):
            for j in range(length):
                if(s[i][j]!=0):
                    mi = min(mi,abs(i-r)+abs(j-c))
        if(mi == 999999999):
            mi = 0
        return mi
    @staticmethod
    def getNTW():
        """Accessor for the module-level win-length."""
        return needToWinGLOBAL
    @staticmethod
    def removecopies(s,m):
        """Prune candidate moves m on board s.

        A move is dropped when it is more than 2 cells away from every
        occupied cell, or when the board it produces (marked with the
        sentinel value 10) mirrors or rotates into an earlier candidate's
        board. Returns the surviving moves in their original order.
        """
        boards = []
        copies = []
        newboards = []
        for i in range(len(m)):
            move = m[i]
            new_s = s.copy()
            r = move[0]
            c = move[1]
            new_s[r][c] = 10
            if(TicTacToeStatic.nearest(s,r,c)>2):
                copies.append(i)
            boards.append(new_s)
            new_s=None
        if(len(s)!=10):
            for i in range(len(m)):
                for j in range(len(m)):
                    if(i>=j or i in copies or j in copies):
                        continue
                    # Symmetry tests: vertical flip, horizontal flip, and the
                    # three non-trivial rotations.
                    if(np.array_equal(boards[i],np.flipud(boards[j]))):
                        copies.append(j)
                    elif(np.array_equal(boards[i],np.fliplr(boards[j]))):
                        copies.append(j)
                    elif(np.array_equal(boards[i],np.rot90(boards[j]))):
                        copies.append(j)
                    elif(np.array_equal(boards[i],np.rot90(np.rot90(boards[j])))):
                        copies.append(j)
                    elif(np.array_equal(boards[i],np.rot90(np.rot90(np.rot90(boards[j]))))):
                        copies.append(j)
        for i in range(len(m)):
            if(i in copies):
                continue
            newboards.append(m[i])
        return newboards
# Semantic-version components for the apsjournals package.
__MAJOR__ = 0
__MINOR__ = 2
__MICRO__ = 0
__VERSION__ = (__MAJOR__, __MINOR__, __MICRO__)
__version__ = '.'.join(map(str, __VERSION__))
__github_url__ = 'http://github.com/JWKennington/apsjournals'
from apsjournals.journals import PRL, PRM, PRA, PRB, PRC, PRD, PRE, PRX, PRAB, PRApplied, PRFluids, PRMaterials, PRPER
from apsjournals.web.auth import authenticate
| StarcoderdataPython |
3533616 | <reponame>myepes2/MiSiCgui
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import get_file
import numpy as np
from skimage.transform import resize,rescale
from skimage.feature import shape_index
from skimage.util import random_noise
from skimage.io import imread,imsave
import matplotlib.pyplot as plt
from MiSiCgui.utils import *
#from utils_gui import *
class SegModel():
    """Wrapper around the pretrained MiSiC segmentation network."""
    def __init__(self):
        # Tile edge length fed to the network.
        self.size = 256
        # Scales at which the shape-index feature maps are computed.
        self.scalesvals = [1,1.5,2.0]
        #self.model = load_model(model_name)
        # Download (and locally cache) the pretrained weights, then load them.
        model_path = get_file(
            'MiSiDC04082020',
            'https://github.com/pswapnesh/Models/raw/master/MiSiDC04082020.h5', cache_dir='./cache')
        self.model = load_model(model_path,compile=False)
    def preprocess(self,im):
        """Stack shape-index maps of im at each scale into an (H, W, n) array.

        An all-zero image short-circuits to an all-zero feature stack.
        Reflect-padding by 15 px reduces border artifacts from shape_index.
        """
        n = len(self.scalesvals)
        sh = np.zeros((im.shape[0],im.shape[1],n))
        if np.max(im) ==0:
            return sh
        pw = 15
        im = np.pad(im,pw,'reflect')
        # Re-allocate at the padded size before filling per-scale maps.
        sh = np.zeros((im.shape[0],im.shape[1],n))
        for i in range(n):
            sh[:,:,i] = shape_index(im,self.scalesvals[i])
        return sh[pw:-pw,pw:-pw,:]
    def segment(self,im,invert = False):
        """Tile im, run the network on the tiles, and return the stitched prediction.

        invert=True flips the image contrast first — presumably for images
        where foreground is darker than background (TODO confirm).
        """
        im = normalize2max(im)
        pw = 16
        if invert:
            im = 1.0-im
        im = np.pad(im,pw,'reflect')
        sh = self.preprocess(im)
        tiles,params = extract_tiles(sh,size = self.size,exclude=12)
        yp = self.model.predict(tiles)
        # Crop the reflect-padding back off the stitched output.
        return stitch_tiles(yp,params)[pw:-pw,pw:-pw,:]
11212789 | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List command for gcloud debug logpoints command group."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.data_catalog import search
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Search(base.Command):
  """Search Data Catalog for resources that match a query."""
  detailed_help = {
      'DESCRIPTION': """\
          Search Data Catalog for resources that match a query.
      """,
      'EXAMPLES': """\
          To search project 'my-project' for Data Catalog resources that
          match the simple predicate 'foo':
            $ {command} 'foo' --include-project-ids=my-project
          To search organization '1234' for Data Catalog resources that
          match entities whose names match the predicate 'foo':
            $ {command} 'name:foo' --include-organization-ids=1234
      """,
  }
  @staticmethod
  def Args(parser):
    # Positional: the Data Catalog search-syntax query string.
    parser.add_argument(
        'query',
        help="""\
        Query string in search query syntax in Data Catalog. For more
        information, see:
        https://cloud.google.com/data-catalog/docs/how-to/search-reference
        """)
    # Standard list-command pagination/ordering flags.
    parser.add_argument(
        '--limit',
        type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
        require_coverage_in_tests=False,
        category=base.LIST_COMMAND_FLAGS,
        help="""\
        Maximum number of resources to list. The default is *unlimited*.
        """)
    parser.add_argument(
        '--page-size',
        type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
        require_coverage_in_tests=False,
        category=base.LIST_COMMAND_FLAGS,
        help="""\
        Some services group resource list output into pages. This flag specifies
        the maximum number of resources per page.
        """)
    parser.add_argument(
        '--order-by',
        require_coverage_in_tests=False,
        category=base.LIST_COMMAND_FLAGS,
        help="""\
        Specifies the ordering of results. Defaults to 'relevance'.
        Currently supported case-sensitive choices are:
        *  relevance
        *  last_access_timestamp [asc|desc]: defaults to descending.
        *  last_modified_timestamp [asc|desc]: defaults to descending.
        To order by last modified timestamp ascending, specify:
        `--order-by="last_modified_timestamp desc"`.
        """)
    # At least one scoping option is required so the search has a target.
    scope_group = parser.add_argument_group(
        'Scope. Control the scope of the search.',
        required=True)
    scope_group.add_argument(
        '--include-gcp-public-datasets',
        action='store_true',
        help="""\
        If True, include Google Cloud Platform public datasets in the search
        results.
        """)
    scope_group.add_argument(
        '--include-project-ids',
        type=arg_parsers.ArgList(),
        metavar='PROJECT',
        help="""\
        List of Cloud Project IDs to include in the search.
        """)
    scope_group.add_argument(
        '--include-organization-ids',
        type=arg_parsers.ArgList(),
        metavar='ORGANIZATION',
        help="""\
        List of Cloud Organization IDs to include in the search.
        """)
    scope_group.add_argument(
        '--restricted-locations',
        type=arg_parsers.ArgList(),
        metavar='LOCATION',
        help="""\
        List of locations to search within.
        """)
  def Run(self, args):
    """Run the search command."""
    # The GA surface talks to the v1 API.
    version_label = 'v1'
    return search.Search(args, version_label)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class SearchBeta(Search):
  __doc__ = Search.__doc__

  def Run(self, args):
    """Run the search command against the v1beta1 API surface."""
    return search.Search(args, 'v1beta1')
| StarcoderdataPython |
4981735 | import json
import unittest
from unittest.mock import MagicMock
from conjur_api.models import CreateTokenData
from conjur.errors import MissingRequiredParameterException
from conjur.logic.hostfactory_logic import HostFactoryLogic
from unittest.mock import patch
class HostfactoryLogicTest(unittest.TestCase):
    """Unit tests for HostFactoryLogic.create_token."""

    def test_empty_token_data_raises_correct_error(self):
        logic = HostFactoryLogic(None)
        with self.assertRaises(MissingRequiredParameterException):
            logic.create_token(create_token_data=None)

    @patch('conjur_api.Client')
    def test_hostfactory_logic_call_passes_object(self, client):
        client.create_token.return_value = '{"name": "value"}'
        logic = HostFactoryLogic(client)
        token_data = CreateTokenData(host_factory="some_host_factory_id", days=1)
        logic.create_token(create_token_data=token_data)
        logic.client.create_token.assert_called_once_with(token_data)
| StarcoderdataPython |
6492404 | <gh_stars>0
#!/usr/bin/env python2
import sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
filename = sys.argv[1]
print filename
data = pd.read_csv(filename, sep='\t', index_col=False)
df = pd.DataFrame()
for mnode in data['mnode'].unique() :
sub = data[(data.mnode==mnode)]
pyscpu = sub['pyscpu']
#if not 'pyscpu' in df.columns :
# df['pyscpu'] = pyscpu
#print pyscpu
sub = sub.drop(['mnode', 'pyscpu'], 1)
cycl = sub.mean(axis=1)
cycl.index = pyscpu
df[str(mnode)] = pd.Series(cycl)
print df
diagram = df.plot(kind='bar', title="Memory lateny per core and numa domain")
diagram.set_ylabel("latency in CPU cycles")
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
plt.show()
| StarcoderdataPython |
1634672 | <filename>arkane/encorr/bac.py<gh_stars>100-1000
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2021 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module provides classes for deriving and applying two types of
bond additivity corrections (BACs).
The first type, Petersson-type BACs, are described in:
Petersson et al., J. Chem. Phys. 1998, 109, 10570-10579
The second type, Melius-type BACs, are described in:
Anantharaman and Melius, J. Phys. Chem. A 2005, 109, 1734-1747
"""
import csv
import importlib
import json
import logging
import os
import re
from collections import Counter, defaultdict
from typing import Dict, Iterable, List, Sequence, Set, Tuple, Union
import numpy as np
import scipy.optimize as optimize
from rmgpy.quantity import ScalarQuantity
import arkane.encorr.data as data
from arkane.encorr.data import Molecule, BACDatapoint, BACDataset, extract_dataset, geo_to_mol
from arkane.encorr.reference import ReferenceSpecies, ReferenceDatabase
from arkane.exceptions import BondAdditivityCorrectionError
from arkane.modelchem import LevelOfTheory, CompositeLevelOfTheory
class BACJob:
    """
    A representation of an Arkane BAC job. This job is used to fit and
    save bond additivity corrections.
    """

    def __init__(self,
                 level_of_theory: Union[LevelOfTheory, CompositeLevelOfTheory],
                 bac_type: str = 'p',
                 write_to_database: bool = False,
                 overwrite: bool = False,
                 **kwargs):
        """
        Initialize a BACJob instance.

        Args:
            level_of_theory: The level of theory that will be used to get training data from the RMG database.
            bac_type: 'p' for Petersson-style BACs, 'm' for Melius-style BACs.
            write_to_database: Save the fitted BACs directly to the RMG database.
            overwrite: Overwrite BACs in the RMG database if they already exist.
            kwargs: Additional parameters passed to BAC.fit.
        """
        self.level_of_theory = level_of_theory
        self.bac_type = bac_type
        self.write_to_database = write_to_database
        self.overwrite = overwrite
        self.kwargs = kwargs
        self.bac = BAC(level_of_theory, bac_type=bac_type)

    def execute(self, output_directory: str = None, plot: bool = False, jobnum: int = 1):
        """
        Execute the BAC job: fit the BACs, then optionally save results,
        plots, and database entries.

        Args:
            output_directory: Save the results in this directory.
            plot: Save plots of results.
            jobnum: Job number.
        """
        logging.info(f'Running BAC job {jobnum}')
        self.bac.fit(**self.kwargs)

        if output_directory is not None:
            os.makedirs(output_directory, exist_ok=True)
            self.write_output(output_directory, jobnum=jobnum)

            if plot:
                self.plot(output_directory, jobnum=jobnum)

        if self.write_to_database:
            try:
                self.bac.write_to_database(overwrite=self.overwrite)
            except IOError as e:
                # Deliberately non-fatal: the fit succeeded even if the
                # database write did not.
                logging.warning('Could not write BACs to database. Captured error:')
                logging.warning(str(e))

    def write_output(self, output_directory: str, jobnum: int = 1):
        """
        Save the BACs to the `output.py` file located in
        `output_directory` and save a CSV file of the results.

        Args:
            output_directory: Save the results in this directory.
            jobnum: Job number.
        """
        model_chemistry_formatted = self.level_of_theory.to_model_chem().replace('//', '__').replace('/', '_')
        output_file1 = os.path.join(output_directory, 'output.py')
        output_file2 = os.path.join(output_directory, f'{jobnum}_{model_chemistry_formatted}.csv')

        logging.info(f'Saving results for {self.level_of_theory}...')
        # Append a summary plus the formatted BAC parameters to output.py.
        with open(output_file1, 'a') as f:
            stats_before = self.bac.dataset.calculate_stats()
            stats_after = self.bac.dataset.calculate_stats(for_bac_data=True)
            f.write(f'# BAC job {jobnum}: {"Melius" if self.bac.bac_type == "m" else "Petersson"}-type BACs:\n')
            f.write(f'# RMSE/MAE before fitting: {stats_before.rmse:.2f}/{stats_before.mae:.2f} kcal/mol\n')
            f.write(f'# RMSE/MAE after fitting: {stats_after.rmse:.2f}/{stats_after.mae:.2f} kcal/mol\n')
            f.writelines(self.bac.format_bacs())
            f.write('\n')

        # BUG FIX: csv.writer requires the underlying file to be opened with
        # newline='' (per the csv module docs); without it the output gains
        # spurious blank rows on Windows.
        with open(output_file2, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([
                'Smiles',
                'InChI',
                'Formula',
                'Multiplicity',
                'Charge',
                'Reference Enthalpy',
                'Calculated Enthalpy',
                'Corrected Enthalpy',
                'Source'
            ])
            for d in self.bac.dataset:
                writer.writerow([
                    d.spc.smiles,
                    d.spc.inchi,
                    d.spc.formula,
                    d.spc.multiplicity,
                    d.spc.charge,
                    f'{d.ref_data:.3f}',
                    f'{d.calc_data:.3f}',
                    f'{d.bac_data:.3f}',
                    d.spc.get_preferred_source()
                ])

    def plot(self, output_directory: str, jobnum: int = 1):
        """
        Plot the distribution of errors before and after fitting BACs
        and plot the parameter correlation matrix.

        Args:
            output_directory: Save the plots in this directory.
            jobnum: Job number
        """
        # Plotting is optional: silently skip when matplotlib is unavailable.
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            return

        model_chemistry_formatted = self.level_of_theory.to_model_chem().replace('//', '__').replace('/', '_')
        correlation_path = os.path.join(output_directory, f'{jobnum}_{model_chemistry_formatted}_correlation.pdf')
        self.bac.save_correlation_mat(correlation_path)

        plt.rcParams.update({'font.size': 16})
        fig_path = os.path.join(output_directory, f'{jobnum}_{model_chemistry_formatted}_errors.pdf')

        fig = plt.figure(figsize=(10, 7))
        ax = fig.gca()

        # Histogram of the signed errors (calc - ref) before and after BACs.
        error_before = self.bac.dataset.calc_data - self.bac.dataset.ref_data
        error_after = self.bac.dataset.bac_data - self.bac.dataset.ref_data
        _, _, patches = ax.hist(
            (error_before, error_after),
            bins=50,
            label=('before fitting', 'after fitting'),
            edgecolor='black',
            linewidth=0.5
        )
        ax.set_xlabel('Error (kcal/mol)')
        ax.set_ylabel('Count')

        # Distinguish the two histograms with hatch patterns as well as color.
        hatches = ('////', '----')
        for patch_set, hatch in zip(patches, hatches):
            plt.setp(patch_set, hatch=hatch)
        ax.tick_params(bottom=False)
        ax.set_axisbelow(True)
        ax.grid()
        ax.legend()

        fig.savefig(fig_path, bbox_inches='tight', pad_inches=0)
class BAC:
"""
A class for deriving and applying bond additivity corrections.
"""
ref_databases = {}
atom_spins = {
'H': 0.5, 'C': 1.0, 'N': 1.5, 'O': 1.0, 'F': 0.5,
'Si': 1.0, 'P': 1.5, 'S': 1.0, 'Cl': 0.5, 'Br': 0.5, 'I': 0.5
}
exp_coeff = 3.0 # Melius-type parameter (Angstrom^-1)
def __init__(self, level_of_theory: Union[LevelOfTheory, CompositeLevelOfTheory], bac_type: str = 'p'):
"""
Initialize a BAC instance.
There are two implemented BAC types:
Petersson-type: Petersson et al., J. Chem. Phys. 1998, 109, 10570-10579
Melius-type: Anantharaman and Melius, J. Phys. Chem. A 2005, 109, 1734-1747
Args:
level_of_theory: Level of theory to get preexisting BACs or data from reference database.
bac_type: Type of BACs to get/fit ('p' for Petersson and 'm' for Melius).
"""
self._level_of_theory = self._bac_type = None # Set these first to avoid errors in setters
self.level_of_theory = level_of_theory
self.bac_type = bac_type
# Attributes related to fitting BACs for a given model chemistry
self.database_key = None # Dictionary key to access reference database
self.dataset = None # Collection of BACDatapoints in BACDataset
self.correlation = None # Correlation matrix for BAC parameters
# Define attributes for memoization during fitting
self._reset_memoization()
@property
def bac_type(self) -> str:
    """The active BAC scheme: 'p' (Petersson) or 'm' (Melius)."""
    return self._bac_type

@bac_type.setter
def bac_type(self, val: str):
    """Validate the new type and refresh the stored BAC parameters."""
    if val in ('m', 'p'):
        self._bac_type = val
        self._update_bacs()
    else:
        raise BondAdditivityCorrectionError(f'Invalid BAC type: {val}')
@property
def level_of_theory(self) -> Union[LevelOfTheory, CompositeLevelOfTheory]:
    # Level of theory whose BAC parameters are currently loaded.
    return self._level_of_theory

@level_of_theory.setter
def level_of_theory(self, val: Union[LevelOfTheory, CompositeLevelOfTheory]):
    """Update BACs every time the level of theory is changed."""
    self._level_of_theory = val
    self._update_bacs()
def _update_bacs(self):
    """Reload self.bacs from the data module for the current type and level of theory.

    Leaves self.bacs as None when no parameters exist for this combination
    (or when either attribute has not been set yet).
    """
    lookup = {'m': data.mbac, 'p': data.pbac}
    self.bacs = None
    try:
        self.bacs = lookup[self.bac_type][self.level_of_theory]
    except KeyError:
        # Unset/unknown type or no stored parameters for this level of theory.
        pass
@classmethod
def load_database(cls,
                  paths: Union[str, List[str]] = None,
                  names: Union[str, List[str]] = None,
                  reload: bool = False) -> str:
    """
    Load a reference database into the class-level cache (no-op if cached).

    Args:
        paths: Paths to database folders.
        names: Names of database folders in RMG database.
        reload: Force reload of database.

    Returns:
        Key to access just loaded database.
    """
    paths = ReferenceDatabase.get_database_paths(paths=paths, names=names)
    key = cls.get_database_key(paths)
    if reload or key not in cls.ref_databases:
        logging.info(f'Loading reference database from {paths}')
        cls.ref_databases[key] = ReferenceDatabase()
        cls.ref_databases[key].load(paths=paths)
    return key
@staticmethod
def get_database_key(paths: Union[str, List[str]]) -> Union[str, Tuple[str, ...]]:
"""Get a key to access a stored reference database based on the database paths."""
if not (isinstance(paths, str) or (isinstance(paths, list) and all(isinstance(p, str) for p in paths))):
raise ValueError(f'{paths} paths is invalid')
return tuple(sorted(paths)) if isinstance(paths, list) else paths
def _reset_memoization(self):
    """Clear the per-molecule coefficient caches (alpha/beta/gamma/K) used while fitting."""
    self._alpha_coeffs, self._beta_coeffs, self._gamma_coeffs, self._k_coeffs = {}, {}, {}, {}
def get_correction(self,
                   bonds: Dict[str, int] = None,
                   coords: np.ndarray = None,
                   nums: Iterable[int] = None,
                   datapoint: BACDatapoint = None,
                   spc: ReferenceSpecies = None,
                   multiplicity: int = None) -> ScalarQuantity:
    """
    Returns the bond additivity correction.

    There are two bond additivity corrections currently supported.
    Petersson-type corrections can be specified by setting
    `self.bac_type` to 'p'. This will use the `bonds` variable,
    which is a dictionary associating bond types with the number of
    that bond in the molecule.

    The Melius-type BAC is specified with 'm' and utilizes the atom
    coordinates in `coords` and the structure's multiplicity.

    Args:
        bonds: A dictionary of bond types (e.g., 'C=O') with their associated counts.
        coords: A Numpy array of Cartesian molecular coordinates.
        nums: A sequence of atomic numbers.
        datapoint: If not using bonds, coords, nums, use BACDatapoint.
        spc: Alternatively, use ReferenceSpecies.
        multiplicity: The spin multiplicity of the molecule.

    Returns:
        The bond correction to the electronic energy.
    """
    # Parameters must have been loaded (or fitted) for the current combination.
    if self.bacs is None:
        bac_type_str = 'Melius' if self.bac_type == 'm' else 'Petersson'
        raise BondAdditivityCorrectionError(
            f'Missing {bac_type_str}-type BAC parameters for {self.level_of_theory}'
        )

    # A ReferenceSpecies can stand in for an explicit BACDatapoint.
    if datapoint is None and spc is not None:
        datapoint = BACDatapoint(spc, level_of_theory=self.level_of_theory)

    # Dispatch on the configured BAC scheme.
    if self.bac_type == 'm':
        return self._get_melius_correction(coords=coords, nums=nums, datapoint=datapoint, multiplicity=multiplicity)
    elif self.bac_type == 'p':
        return self._get_petersson_correction(bonds=bonds, datapoint=datapoint)
def _get_petersson_correction(self, bonds: Dict[str, int] = None, datapoint: BACDatapoint = None) -> ScalarQuantity:
    """
    Given the level of theory and a dictionary of bonds, return the
    total BAC.

    Args:
        bonds: Dictionary of bonds with the following format:
            bonds = {
                'C-H': C-H_bond_count,
                'C-C': C-C_bond_count,
                'C=C': C=C_bond_count,
                ...
            }
        datapoint: BACDatapoint instead of bonds.

    Returns:
        Petersson-type bond additivity correction.
    """
    # Explicit `bonds` wins over a provided datapoint.
    if datapoint is not None:
        if bonds is None:
            bonds = datapoint.bonds
        else:
            logging.warning(f'Species {datapoint.spc.label} will not be used because `bonds` was specified')

    # Sum up corrections for all bonds
    bac = 0.0
    for symbol, count in bonds.items():
        if symbol in self.bacs:
            bac += count * self.bacs[symbol]
        else:
            # Bond symbols are order-sensitive (e.g. 'C=O' vs 'O=C'): reverse the
            # alternating letter/non-letter runs and retry the lookup.
            symbol_flipped = ''.join(re.findall('[a-zA-Z]+|[^a-zA-Z]+', symbol)[::-1])  # Check reversed symbol
            if symbol_flipped in self.bacs:
                bac += count * self.bacs[symbol_flipped]
            else:
                logging.warning(f'Bond correction not applied for unknown bond type {symbol}.')

    return ScalarQuantity(bac, 'kcal/mol')
def _get_melius_correction(self,
                           coords: np.ndarray = None,
                           nums: Iterable[int] = None,
                           datapoint: BACDatapoint = None,
                           multiplicity: int = None,
                           params: Dict[str, Union[float, Dict[str, float]]] = None) -> ScalarQuantity:
    """
    Given the level of theory, molecular coordinates, atomic numbers,
    and dictionaries of BAC parameters, return the total BAC.

    Notes:
        A molecular correction term other than 0 destroys the size
        consistency of the quantum chemistry method. This correction
        also requires the multiplicity of the molecule.

        The negative of the total correction described in
        Anantharaman and Melius (JPCA 2005) is returned so that it
        can be added to the energy.

    Args:
        coords: Numpy array of Cartesian atomic coordinates.
        nums: Sequence of atomic numbers.
        datapoint: BACDatapoint instead of molecule.
        multiplicity: Multiplicity of the molecule (not necessary if using datapoint).
        params: Optionally provide parameters other than those stored in self.

    Returns:
        Melius-type bond additivity correction.
    """
    # Default to the parameters stored on this instance.
    if params is None:
        params = self.bacs
    atom_corr = params['atom_corr']
    bond_corr_length = params['bond_corr_length']
    bond_corr_neighbor = params['bond_corr_neighbor']
    mol_corr = params.get('mol_corr', 0.0)

    # Get single-bonded RMG molecule
    mol = None
    if datapoint is not None:
        if nums is None or coords is None:
            mol = datapoint.to_mol(from_geo=True)
            multiplicity = datapoint.spc.multiplicity  # Use species multiplicity instead
        else:
            logging.warning(
                f'Species {datapoint.spc.label} will not be used because `nums` and `coords` were specified'
            )
    if mol is None:
        mol = geo_to_mol(coords, nums=nums)

    # Molecular correction
    if mol_corr != 0 and multiplicity is None:
        raise BondAdditivityCorrectionError(f'Missing multiplicity for {mol}')
    bac_mol = mol_corr * self._get_mol_coeff(mol, multiplicity=multiplicity)

    # Atomic correction
    bac_atom = sum(count * atom_corr[symbol] for symbol, count in self._get_atom_counts(mol).items())

    # Bond correction
    # Heteronuclear bonds (tuple keys) use the geometric mean of the two
    # elements' length parameters; homonuclear bonds use the parameter directly.
    bac_length = sum(
        coeff * (bond_corr_length[symbol[0]] * bond_corr_length[symbol[1]]) ** 0.5 if isinstance(symbol, tuple)
        else coeff * bond_corr_length[symbol]
        for symbol, coeff in self._get_length_coeffs(mol).items()
    )
    bac_neighbor = sum(count * bond_corr_neighbor[symbol] for
                       symbol, count in self._get_neighbor_coeffs(mol).items())
    bac_bond = bac_length + bac_neighbor

    # Note the minus sign
    return ScalarQuantity(-(bac_mol + bac_atom + bac_bond), 'kcal/mol')
def _get_atom_counts(self, mol: Molecule) -> Counter:
    """
    Count the atoms of each element present in the molecule.

    Results are memoized by ``mol.id`` (when set) in ``self._alpha_coeffs``.

    Args:
        mol: RMG-Py molecule.

    Returns:
        Counter mapping element symbol to atom count.
    """
    if getattr(mol, 'id', None) is not None and mol.id in self._alpha_coeffs:
        return self._alpha_coeffs[mol.id]

    atom_counts = Counter(atom.element.symbol for atom in mol.atoms)

    if hasattr(mol, 'id'):
        self._alpha_coeffs[mol.id] = atom_counts
    return atom_counts
def _get_length_coeffs(self, mol: Molecule) -> defaultdict:
    """
    Build the coefficients for the beta (bond_corr_length) variables.

    Each bond contributes exp(-exp_coeff * bond_length) to the key for its
    element pair: the bare symbol for homonuclear bonds, a sorted symbol
    tuple otherwise.  Results are memoized by ``mol.id`` when set.

    Example: If the atoms are H, C, and O, there are (at most)
    coefficients for H, C, O, (C, H), (H, O), and (C, O).

    Args:
        mol: RMG-Py molecule.

    Returns:
        Defaultdict containing beta coefficients.
    """
    if getattr(mol, 'id', None) is not None and mol.id in self._beta_coeffs:
        return self._beta_coeffs[mol.id]

    coeffs = defaultdict(float)
    for bond in mol.get_all_edges():
        symbol1 = bond.atom1.element.symbol
        symbol2 = bond.atom2.element.symbol
        dist = np.linalg.norm(bond.atom1.coords - bond.atom2.coords)
        key = symbol1 if symbol1 == symbol2 else tuple(sorted([symbol1, symbol2]))
        coeffs[key] += np.exp(-self.exp_coeff * dist)

    if hasattr(mol, 'id'):
        self._beta_coeffs[mol.id] = coeffs
    return coeffs
def _get_neighbor_coeffs(self, mol: Molecule) -> Counter:
    """
    Build the coefficients for the gamma (bond_corr_neighbor) variables.

    For each bond, each endpoint contributes the element symbols of its
    other neighbors plus its own symbol weighted by (degree - 1).
    Results are memoized by ``mol.id`` when set.

    Args:
        mol: RMG-Py molecule.

    Returns:
        Counter containing gamma coefficients.
    """
    if getattr(mol, 'id', None) is not None and mol.id in self._gamma_coeffs:
        return self._gamma_coeffs[mol.id]

    coeffs = Counter()
    for bond in mol.get_all_edges():
        for atom in (bond.atom1, bond.atom2):
            # Symbols of this endpoint's neighbors, excluding the current bond.
            neighbors = Counter(a.element.symbol for a, b in atom.bonds.items() if b is not bond)
            neighbors[atom.element.symbol] += max(0, len(atom.bonds) - 1)
            coeffs += neighbors

    if hasattr(mol, 'id'):
        self._gamma_coeffs[mol.id] = coeffs
    return coeffs
def _get_mol_coeff(self, mol: Molecule, multiplicity: int = 1) -> float:
    """
    Compute the coefficient for the K (mol_corr) variable: the molecule's
    spin minus the summed ground-state atomic spins.

    Results are memoized by ``mol.id`` when set.

    Args:
        mol: RMG-Py molecule.
        multiplicity: Multiplicity of the molecule.

    Returns:
        K coefficient.
    """
    if getattr(mol, 'id', None) is not None and mol.id in self._k_coeffs:
        return self._k_coeffs[mol.id]

    mol_spin = 0.5 * (multiplicity - 1)
    coeff = mol_spin - sum(self.atom_spins[atom.element.symbol] for atom in mol.atoms)

    if hasattr(mol, 'id'):
        self._k_coeffs[mol.id] = coeff
    return coeff
def fit(self,
        weighted: bool = False,
        db_names: Union[str, List[str]] = 'main',
        exclude_elements: Union[Sequence[str], Set[str], str] = None,
        charge: Union[Sequence[Union[str, int]], Set[Union[str, int]], str, int] = 'all',
        multiplicity: Union[Sequence[int], Set[int], int, str] = 'all',
        **kwargs):
    """
    Fits bond additivity corrections using calculated and reference
    data available in the RMG database. The resulting BACs stored
    in self.bacs will be based on kcal/mol.

    Args:
        weighted: Perform weighted least squares by balancing training data.
        db_names: Optionally specify database names to train on (defaults to main).
        exclude_elements: Molecules with any of the elements in this sequence are excluded from training data.
        charge: Allowable charges for molecules in training data.
        multiplicity: Allowable multiplicites for molecules in training data.
        kwargs: Keyword arguments for fitting Melius-type BACs (see self._fit_melius).
    """
    # Stale per-molecule caches from a previous fit must not leak into this one.
    self._reset_memoization()
    self.database_key = self.load_database(names=db_names)

    self.dataset = extract_dataset(self.ref_databases[self.database_key], self.level_of_theory,
                                   exclude_elements=exclude_elements, charge=charge, multiplicity=multiplicity)
    if len(self.dataset) == 0:
        raise BondAdditivityCorrectionError(f'No species available for {self.level_of_theory}')

    if weighted:
        self.dataset.compute_weights()

    # Dispatch to the fitter for the configured BAC scheme.
    if self.bac_type == 'm':
        logging.info(f'Fitting Melius-type BACs for {self.level_of_theory}...')
        self._fit_melius(**kwargs)
    elif self.bac_type == 'p':
        logging.info(f'Fitting Petersson-type BACs for {self.level_of_theory}...')
        self._fit_petersson()

    # Report improvement achieved by the fit.
    stats_before = self.dataset.calculate_stats()
    stats_after = self.dataset.calculate_stats(for_bac_data=True)
    logging.info(f'RMSE/MAE before fitting: {stats_before.rmse:.2f}/{stats_before.mae:.2f} kcal/mol')
    logging.info(f'RMSE/MAE after fitting: {stats_after.rmse:.2f}/{stats_after.mae:.2f} kcal/mol')
def test(self,
         species: List[ReferenceSpecies] = None,
         dataset: BACDataset = None,
         db_names: Union[str, List[str]] = None) -> BACDataset:
    """
    Test on data.

    Note:
        Only one of `species`, `dataset`, or `db_names` can be specified.

    Args:
        species: Species to test on.
        dataset: BACDataset to test on.
        db_names: Database names to test on.

    Returns:
        BACDataset containing the calculated BAC enthalpies in `bac_data`.
    """
    if sum(1 for arg in (species, dataset, db_names) if arg is not None) > 1:
        raise BondAdditivityCorrectionError('Cannot specify several data sources')

    if species is not None:
        dataset = BACDataset([BACDatapoint(spc, level_of_theory=self.level_of_theory) for spc in species])
    elif db_names is not None:
        database_key = self.load_database(names=db_names)
        dataset = extract_dataset(self.ref_databases[database_key], self.level_of_theory)

    if dataset is None or len(dataset) == 0:
        raise BondAdditivityCorrectionError('No data available for evaluation')

    # value_si / 4184 converts from J/mol to kcal/mol.
    corr = np.array([self.get_correction(datapoint=d).value_si / 4184 for d in dataset])
    dataset.bac_data = dataset.calc_data + corr
    return dataset
def _fit_petersson(self):
    """
    Fit Petersson-type BACs via weighted linear least squares.

    Builds a (n_samples x n_bond_types) bond-count matrix, solves the
    weighted normal equations for the per-bond-type corrections, and stores
    the fitted parameters (kcal/mol) in ``self.bacs``, the parameter
    correlation matrix in ``self.correlation``, and the corrected enthalpies
    in ``self.dataset.bac_data``.
    """
    features = self.dataset.bonds
    # Sorted union of all bond types seen across the dataset.
    feature_keys = sorted({k for f in features for k in f})

    def make_feature_mat(_features: List[Dict[str, int]]) -> np.ndarray:
        """Build the bond-count design matrix (missing bond types count as 0)."""
        _x = np.zeros((len(_features), len(feature_keys)))
        for idx, f in enumerate(_features):
            # dict.get with a default replaces the previous try/except-KeyError loop.
            _x[idx] = [f.get(k, 0.0) for k in feature_keys]
        return _x

    # Assume that variance of observations is unity. This is clearly
    # not true because we know the uncertainties but we often care
    # more about less certain molecules.
    x = make_feature_mat(features)
    y = self.dataset.ref_data - self.dataset.calc_data
    weights = np.diag(self.dataset.weight)
    # Weighted normal equations: (X'WX) w = X'Wy
    w = np.linalg.solve(x.T @ weights @ x, x.T @ weights @ y)
    ypred = x @ w

    covariance = np.linalg.inv(x.T @ weights @ x)
    self.correlation = _covariance_to_correlation(covariance)

    self.dataset.bac_data = self.dataset.calc_data + ypred
    self.bacs = dict(zip(feature_keys, w))
def _fit_melius(self,
                fit_mol_corr: bool = True,
                global_opt: bool = True,
                global_opt_iter: int = 10,
                lsq_max_nfev: int = 500):
    """
    Fit Melius-type BACs.

    Args:
        fit_mol_corr: Also fit molecular correction term.
        global_opt: Perform a global optimization.
        global_opt_iter: Number of global opt iterations.
        lsq_max_nfev: Maximum function evaluations in least squares optimizer.
    """
    mols = self.dataset.get_mols(from_geo=True)
    # Tag molecules so the per-molecule coefficient caches can key on them.
    for i, mol in enumerate(mols):
        mol.id = i

    all_atom_symbols = list({atom.element.symbol for mol in mols for atom in mol.atoms})
    all_atom_symbols.sort()
    nelements = len(all_atom_symbols)

    # The order of parameters is
    #   atom_corr (alpha)
    #   bond_corr_length (beta)
    #   bond_corr_neighbor (gamma)
    #   optional: mol_corr (k)
    # where atom_corr are the atomic corrections, bond_corr_length are the bondwise corrections
    # due to bond lengths (bounded by 0 below), bond_corr_neighbor are the bondwise corrections
    # due to neighboring atoms, and mol_corr (optional) is a molecular correction.

    # Choose reasonable bounds depending on the parameter
    lim_alpha = (-5.0, 5.0)
    lim_beta = (0.0, 1e4)
    lim_gamma = (-1.0, 1.0)
    lim_k = (-5.0, 5.0)

    wmin = [lim_alpha[0]] * nelements + [lim_beta[0]] * nelements + [lim_gamma[0]] * nelements
    wmax = [lim_alpha[1]] * nelements + [lim_beta[1]] * nelements + [lim_gamma[1]] * nelements
    if fit_mol_corr:
        wmin.append(lim_k[0])
        wmax.append(lim_k[1])

    def get_params(_w: np.ndarray) -> Dict[str, Union[float, Dict[str, float]]]:
        # Unpack the flat parameter vector into the named BAC dictionaries.
        _atom_corr = dict(zip(all_atom_symbols, _w[:nelements]))
        _bond_corr_length = dict(zip(all_atom_symbols, _w[nelements:2*nelements]))
        _bond_corr_neighbor = dict(zip(all_atom_symbols, _w[2*nelements:3*nelements]))
        _mol_corr = _w[3*nelements] if fit_mol_corr else 0.0
        return dict(
            atom_corr=_atom_corr,
            bond_corr_length=_bond_corr_length,
            bond_corr_neighbor=_bond_corr_neighbor,
            mol_corr=_mol_corr
        )

    def get_bac_data(_w: np.ndarray) -> np.ndarray:
        # Corrected enthalpies for the current parameters; value_si / 4184
        # converts from J/mol to kcal/mol.
        corr = np.array(
            [self._get_melius_correction(datapoint=d, params=get_params(_w)).value_si / 4184 for d in self.dataset]
        )
        return self.dataset.calc_data + corr

    # Construct weight matrix
    weights = np.diag(self.dataset.weight)

    def residuals(_w: np.ndarray) -> Union[float, np.ndarray]:
        """Calculate residuals"""
        bac_data = get_bac_data(_w)
        return np.sqrt(weights) @ (self.dataset.ref_data - bac_data)

    # A single local optimization is just the global loop with one iteration.
    global_opt_iter = global_opt_iter if global_opt else 1
    results = []
    for it in range(global_opt_iter):
        if global_opt:
            logging.info(f'Global opt iteration {it}')

        # Get random initial guess
        w_alpha = np.random.uniform(*lim_alpha, nelements)
        w_beta = np.exp(np.random.uniform(-5, np.log(lim_beta[1]), nelements))
        w_gamma = np.random.uniform(*lim_gamma, nelements)
        w = np.concatenate((w_alpha, w_beta, w_gamma))
        if fit_mol_corr:
            w_k = np.random.uniform(*lim_k, 1)
            w = np.concatenate((w, w_k))

        res = optimize.least_squares(residuals, w, jac='3-point', bounds=(wmin, wmax),
                                     max_nfev=lsq_max_nfev, verbose=1)
        results.append(res)

    # Keep the best local optimum found across restarts.
    res = min(results, key=lambda r: r.cost)
    w = res.x

    # Estimate parameter covariance matrix using Jacobian
    covariance = np.linalg.inv(res.jac.T @ weights @ res.jac)
    self.correlation = _covariance_to_correlation(covariance)

    self.dataset.bac_data = get_bac_data(w)
    self.bacs = get_params(w)
def write_to_database(self, overwrite: bool = False, alternate_path: str = None):
    """
    Write BACs to database.

    Args:
        overwrite: Overwrite existing BACs.
        alternate_path: Write BACs to this path instead.
    """
    if self.bacs is None:
        raise BondAdditivityCorrectionError('No BACs available for writing')

    data_path = data.quantum_corrections_path
    with open(data_path) as f:
        lines = f.readlines()

    bacs_formatted = self.format_bacs(indent=True)

    # Select the dict/keyword matching the active BAC scheme.
    bac_dict = data.mbac if self.bac_type == 'm' else data.pbac
    keyword = 'mbac' if self.bac_type == 'm' else 'pbac'
    has_entries = bool(data.mbac) if self.bac_type == 'm' else bool(data.pbac)

    # Add new BACs to file without changing existing formatting
    # First: find the BACs dict in the file
    for i, line in enumerate(lines):
        if keyword in line:
            break
    else:
        # 'pbac' and 'mbac' should both be found at `data_path`
        raise RuntimeError(f'Keyword "{keyword} is not found in the data file. '
                           f'Please check the database file at {data_path} and '
                           f'make sure an up-to-date RMG-database branch is used.')

    # Second: Write the BACs block into the BACs dict
    # Does not overwrite comments
    if self.level_of_theory in bac_dict and overwrite:
        del_idx_start = del_idx_end = None
        lot_repr = repr(self.level_of_theory)
        for j, line2 in enumerate(lines[i:]):
            # Match the entry for this level of theory; composite levels are
            # distinguished so a composite repr does not match a plain one.
            if lot_repr in line2 and 'Composite' not in lot_repr and 'Composite' not in line2:
                del_idx_start = i + j
            elif lot_repr in line2 and 'Composite' in lot_repr:
                del_idx_start = i + j
            # NOTE(review): leading whitespace inside this string literal may have
            # been lost in formatting; upstream RMG compares against an indented
            # closing brace -- verify before relying on this comparison.
            if del_idx_start is not None and line2.rstrip() == ' },':  # Can't have comment after final brace
                del_idx_end = i + j + 1
                if (lines[del_idx_start - 1].lstrip().startswith('#')
                        or lines[del_idx_end + 1].lstrip().startswith('#')):
                    logging.warning('There may be left over comments from previous BACs')
                lines[del_idx_start:del_idx_end] = bacs_formatted
                break
        # Check if the entry is successfully inserted to the `lines`
        if del_idx_start is None or del_idx_end is None:
            raise RuntimeError(f'The script cannot identify the corresponding block for the given BACs. '
                               f'It is possible that the database file at {data_path} is not correctly '
                               f'formatted. Please check the file.')
    elif self.level_of_theory in bac_dict and not overwrite:
        raise IOError(
            f'{self.level_of_theory} already exists. Set `overwrite` to True.'
        )
    else:
        # Either empty BACs dict or adding BACs for a new level of theory
        if not has_entries and '}' in lines[i]:  # Empty BACs dict
            lines[i] = f'{keyword} = {{\n'
            lines[(i+1):(i+1)] = ['\n}\n']
        lines[(i+1):(i+1)] = ['\n'] + bacs_formatted

    with open(data_path if alternate_path is None else alternate_path, 'w') as f:
        f.writelines(lines)

    # Reload data to update BAC dictionaries
    if alternate_path is None:
        importlib.reload(data)
def format_bacs(self, indent: bool = False) -> List[str]:
    """
    Obtain a list of nicely formatted BACs suitable for writelines.

    Args:
        indent: Indent each line for printing in database.

    Returns:
        Formatted list of BACs.
    """
    # Render as JSON, then swap double for single quotes to get
    # Python-literal style output.
    bacs_formatted = json.dumps(self.bacs, indent=4).replace('"', "'").split('\n')
    # Key the block by the (double-quoted) repr of the level of theory.
    bacs_formatted[0] = f'"{self.level_of_theory!r}": ' + bacs_formatted[0]
    bacs_formatted[-1] += ','
    bacs_formatted = [e + '\n' for e in bacs_formatted]
    if indent:
        # NOTE(review): this indent string may have lost leading whitespace in
        # formatting; upstream RMG indents by four spaces -- verify.
        bacs_formatted = [' ' + e for e in bacs_formatted]
    return bacs_formatted
def save_correlation_mat(self, path: str, labels: List[str] = None):
    """
    Save a visual representation of the parameter correlation matrix.

    Args:
        path: Path to save figure to.
        labels: Parameter labels.
    """
    # Plotting is optional; silently skip when matplotlib is unavailable.
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        return

    if self.correlation is None:
        raise BondAdditivityCorrectionError('Fit BACs before saving correlation matrix!')

    if labels is None:
        if self.bac_type == 'm':
            param_types = list(self.bacs.keys())
            atom_symbols = list(self.bacs[param_types[0]])
            labels = [r'$\alpha_{' + s + r'}$' for s in atom_symbols]  # atom_corr is alpha
            labels.extend(r'$\beta_{' + s + r'}$' for s in atom_symbols)  # bond_corr_length is beta
            labels.extend(r'$\gamma_{' + s + r'}$' for s in atom_symbols)  # bond_corr_neighbor is gamma
            if len(self.correlation) == 3 * len(atom_symbols) + 1:
                labels.append('K')  # mol_corr is K
        elif self.bac_type == 'p':
            labels = list(self.bacs.keys())

    fig, ax = plt.subplots(figsize=(11, 11) if self.bac_type == 'm' else (18, 18))
    ax.matshow(self.correlation, cmap=plt.cm.PiYG)

    # Superimpose values as text
    for i in range(len(self.correlation)):
        for j in range(len(self.correlation)):
            c = self.correlation[j, i]
            ax.text(i, j, f'{c: .2f}', va='center', ha='center', fontsize=8)

    # Save lims because they get changed when modifying labels
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()

    ax.set_xticks(list(range(len(self.correlation))))
    ax.set_yticks(list(range(len(self.correlation))))
    ax.set_xticklabels(labels, fontsize=14, rotation='vertical' if self.bac_type == 'p' else None)
    ax.set_yticklabels(labels, fontsize=14)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.tick_params(bottom=False, top=False, left=False, right=False)

    fig.savefig(path, dpi=600, bbox_inches='tight', pad_inches=0)
def _covariance_to_correlation(cov: np.ndarray) -> np.ndarray:
"""Convert (unscaled) covariance matrix to correlation matrix"""
v = np.sqrt(np.diag(cov))
corr = cov / np.outer(v, v)
corr[cov == 0] = 0
return corr
| StarcoderdataPython |
8107069 | <reponame>peng-data-minimization/minimizer-poc
from collections import deque
import json
from kafka import KafkaProducer, KafkaConsumer
from anonymizer import Anonymizer
# Consume JSON-encoded records from the local broker's "unanon" topic.
consumer = KafkaConsumer(bootstrap_servers="localhost:9092", value_deserializer=json.loads)
consumer.subscribe(["unanon"])
# Producer used to publish the anonymized records (payloads must be bytes).
producer = KafkaProducer(bootstrap_servers="localhost:9092")
def anonymize(consumer, window_size=None):
    """Yield anonymized records computed over a bounded sliding window of messages.

    Messages are buffered until the window exceeds ``window_size``; each further
    message then triggers anonymization of the buffered window, after which the
    oldest message is evicted.  (The previous version never evicted, so the
    buffer grew without bound and the entire growing history was re-anonymized
    on every message.)

    Args:
        consumer: Iterable of Kafka messages whose ``.value`` is a dict.
        window_size: Maximum number of buffered messages; defaults to the
            module-level ``CACHE_SIZE`` for backward compatibility.

    Yields:
        Records returned by ``anonymizer.process`` (one per input message when
        it returns a list, otherwise the single aggregate result).
    """
    if window_size is None:
        window_size = CACHE_SIZE
    cache = deque()
    for msg in consumer:
        print(msg)
        cache.append(msg)
        if len(cache) > window_size:
            output = anonymizer.process([{**m.value} for m in cache])
            if isinstance(output, list):
                yield from output
            else:
                yield output
            cache.popleft()  # evict the oldest message so memory stays bounded
CACHE_SIZE = 5  # number of messages buffered before anonymization kicks in

# Anonymization spec: drop one key entirely, replace another with the mean.
anonymizer = Anonymizer({
    "drop": {"keys": ["something-unimportant"]},
    "mean": {"keys": ["some-number"]}
})

for anon_msg in anonymize(consumer):
    # NOTE(review): items yielded by anonymize come from Anonymizer.process and
    # appear to be plain dicts, so the hasattr branch looks like dead code and
    # the payload sent is str(dict).encode() -- confirm downstream consumers
    # expect that format.
    producer.send("anon", anon_msg.value if hasattr(anon_msg, "value") else str(anon_msg).encode())
| StarcoderdataPython |
1822449 | <reponame>globocom/globomap-api-client
"""
Copyright 2018 Globo.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from unittest.mock import Mock
from unittest.mock import patch
import unittest2
from globomap_api_client.collection import Collection
class CollectionTest(unittest2.TestCase):
    """Verifies that Collection methods build the expected HTTP requests."""

    def tearDown(self):
        # Stop any patches started during a test to avoid cross-test leakage.
        patch.stopall()

    def test_post(self):
        """POST /collections forwards the document payload unchanged."""
        collection = Collection(Mock())
        collection.make_request = Mock()
        collection.post({'doc': 1})
        collection.make_request.assert_called_once_with(
            method='POST', uri='collections', data={'doc': 1})

    def test_list(self):
        """GET /collections defaults to page 1 with 10 items per page."""
        collection = Collection(Mock())
        collection.make_request = Mock()
        collection.list()
        collection.make_request.assert_called_once_with(
            method='GET', params={'per_page': 10, 'page': 1}, uri='collections')

    def test_list_with_pagination(self):
        """GET /collections passes through custom pagination values."""
        collection = Collection(Mock())
        collection.make_request = Mock()
        collection.list(page=2, per_page=20)
        collection.make_request.assert_called_once_with(
            method='GET', params={'per_page': 20, 'page': 2}, uri='collections')

    def test_search(self):
        """Search serializes the query to JSON and uses default pagination."""
        collection = Collection(Mock())
        collection.make_request = Mock()
        query = [[{'field': 'name', 'operator': 'LIKE', 'value': 'test'}]]
        collection.search(query=query)
        params = {
            'query': json.dumps(query),
            'per_page': 10,
            'page': 1
        }
        collection.make_request.assert_called_once_with(
            method='GET', uri='collections',
            params=params
        )

    def test_search_with_pagination(self):
        """Search accepts positional per_page and page arguments."""
        collection = Collection(Mock())
        collection.make_request = Mock()
        query = [[{'field': 'name', 'operator': 'LIKE', 'value': 'test'}]]
        collection.search(query, 20, 2)
        params = {
            'query': json.dumps(query),
            'per_page': 20,
            'page': 2
        }
        collection.make_request.assert_called_once_with(
            method='GET', uri='collections',
            params=params
        )
| StarcoderdataPython |
258701 | <filename>eucaconsole/forms/groups.py
# -*- coding: utf-8 -*-
# Copyright 2013-2016 Hewlett Packard Enterprise Development LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Forms for Groups
"""
import wtforms
from wtforms import validators
from ..i18n import _
from . import BaseSecureForm, TextEscapedField
class GroupForm(BaseSecureForm):
    """Group form
    """
    group_name_error_msg = 'Group name is required'
    group_name = TextEscapedField(
        id=u'group-name',
        label=_(u'Name'),
        validators=[validators.InputRequired(message=group_name_error_msg), validators.Length(min=1, max=255)],
    )
    path_error_msg = ''
    path = TextEscapedField(
        id=u'group-path',
        label=_(u'Path'),
        default="/",
        validators=[validators.Length(min=1, max=255)],
    )

    def __init__(self, request, group=None, **kwargs):
        """Populate the form fields from an existing IAM group when one is given."""
        super(GroupForm, self).__init__(request, **kwargs)
        self.request = request
        self.group_name.error_msg = self.group_name_error_msg  # Used for Foundation Abide error message
        # NOTE(review): the next line is a self-assignment no-op; it was
        # presumably meant to be `self.path.error_msg = self.path_error_msg`
        # (mirroring the line above) -- confirm before changing.
        self.path_error_msg = self.path_error_msg
        if group is not None:
            self.group_name.data = group.group_name
            self.path.data = group.path
class GroupUpdateForm(BaseSecureForm):
    """Group update form
    """
    group_name_error_msg = ''
    group_name = wtforms.TextField(
        id=u'group-name',
        label=_(u'Name'),
        validators=[validators.Length(min=1, max=255)],
    )
    path_error_msg = ''
    path = TextEscapedField(
        id=u'group-path',
        label=_(u'Path'),
        default="/",
        validators=[validators.Length(min=1, max=255)],
    )
    users_error_msg = ''
    # Auxiliary field carrying the group's member list; no label, no validation.
    users = wtforms.TextField(
        id=u'group-users',
        label=(u''),
        validators=[],
    )

    def __init__(self, request, group=None, **kwargs):
        """Populate name and path from an existing IAM group when one is given."""
        super(GroupUpdateForm, self).__init__(request, **kwargs)
        self.request = request
        self.group_name.error_msg = self.group_name_error_msg  # Used for Foundation Abide error message
        # NOTE(review): self-assignment no-op; possibly intended
        # `self.path.error_msg = self.path_error_msg` -- confirm before changing.
        self.path_error_msg = self.path_error_msg
        if group is not None:
            self.group_name.data = group.group_name
            self.path.data = group.path
class DeleteGroupForm(BaseSecureForm):
    """CSRF-protected form to delete a group."""
| StarcoderdataPython |
9742971 | import json
import matplotlib.pyplot as plt
public_name = ['hsemem', 'msuofmems']

for n in public_name:
    # Reset the counters for every community: previously they were initialized
    # once before the loop, so each community after the first reported
    # cumulative totals instead of its own gender split.
    man = 0
    woman = 0
    with open(f'data/{n}.json', 'r') as f:
        data = json.load(f)
    for member in data.values():
        # VK API convention: sex == 2 is male, 1 is female; 0 (unspecified)
        # is counted as female here, matching the original behaviour.
        if int(member['user']['sex']) == 2:
            man += 1
        else:
            woman += 1
    total = man + woman
    if total == 0:
        # Empty member list: skip to avoid ZeroDivisionError.
        continue
    print(f'{round((woman / total) * 100)}% - {n}')
    fig1, ax1 = plt.subplots()
    plt.title(f'{n}')
    ax1.pie([man, woman], labels=['М', 'Ж'], autopct='%1.2f%%')
    plt.savefig(f'data/{n}/{n}_sex.png')
    plt.show()
5141482 | """
Handles multidimensional huge data.
since the data requires a huge amount of memory:
- we use the mean from different cell types instead of just using samples.
- we use PCA to reduce the number of cell types
There are two approaches:
1. Discrete - discretization for words for each sequence, and then building words by combining them
2. Continuous - Use the real values of the channel with multi dimensional gaussian and covariance matrix to evaluate
Assumptions:
Just as an estimation for the size: 242 cells x 2492506 chromosome 1 size (bins of size 100)
requires 4.5Gb
see also:
multichannel_classify - script for multichannel classifications
"""
import numpy as np
from models.ClassifierStrategy import ClassifierStrategy
from models.PcaTransformer import PcaTransformer
from hmm.HMMModel import GaussianHMM, DiscreteHMM
from hmm.bwiter import bw_iter, IteratorCondition, DiffCondition
__author__ = 'eranroz'
def continuous_state_selection(data, num_states):
"""
Heuristic creation of emission for states/selecting number of stats.
Instead of random selection of the emission matrix we find clusters of co-occurring values,
and use those clusters as means for states and the close values as estimation for covariance matrix
Nubmer of clusters/states is subject to pruning if not pre-selected
@param num_states: number of states in model
@param data: dense data for specific chromosome
@return: initial emission for gaussian mixture model HMM (array of (mean, covariance)
"""
def soft_k_means_step(clustered_data, clusters_means):
"""
Soft k means
@param clustered_data: data to cluster
@param clusters_means: number of clusters
@return: new clusters means
"""
w = np.array([np.sum(np.power(clustered_data - c, 2), axis=1) for c in clusters_means])
w /= ((np.max(w) + np.mean(w)) / 1000) # scale w
w = np.minimum(w, 500) # 500 is enough (to eliminate underflow)
w = np.exp(-w)
w = w / np.sum(w, 0) # normalize for each point
w = w / np.sum(w, 1)[:, None] # normalize for all cluster
return np.dot(w, clustered_data)
data = data.T
num_sub_samples = 2
sub_indics = np.random.permutation(np.arange(data.shape[0] - data.shape[0] % num_sub_samples))
n_clusters = num_states or data.shape[1] * 2 # number of clustering will be subject to pruning
clusters = np.random.random((n_clusters, data.shape[1])) * np.max(data, 0)
# once we have assumption for clusters work with real sub batches of the data
sub_indics = sub_indics.reshape(num_sub_samples, -1)
different_clusters = False
step = 0
while not different_clusters:
diff = np.ones(1)
iter_count = 0
while np.any(diff > 1e-1) and iter_count < 10:
sub_data = data[sub_indics[step % num_sub_samples], :]
new_clusters = soft_k_means_step(sub_data, clusters)
diff = np.sum((new_clusters - clusters) ** 2, axis=1)
clusters = new_clusters
iter_count += 1
step += 1
if num_states:
different_clusters = True
else:
dist_matrix = np.array([np.sum(np.power(clusters - c, 2), axis=1) for c in clusters])
np.fill_diagonal(dist_matrix, 1000)
closest_cluster = np.min(dist_matrix)
threshold = 2 * np.mean(dist_matrix) / np.var(dist_matrix) # or to just assign 0.1?
if closest_cluster < threshold:
# pruning the closest point and add random to close points
subject_to_next_prune = list(set(np.where(dist_matrix < threshold)[0]))
clusters[subject_to_next_prune, :] += 0.5 * clusters[subject_to_next_prune, :] * np.random.random(
(len(subject_to_next_prune), data.shape[1]))
clusters = clusters[np.arange(n_clusters) != np.where(dist_matrix == closest_cluster)[0][0], :]
n_clusters -= 1
else:
different_clusters = True
# now assign points to clusters
# and add some random
clusters += clusters * np.random.random(clusters.shape) * 0.1
clusters = clusters[np.argsort(np.sum(clusters ** 2, 1))] # to give some meaning
weight = np.array([np.sum(np.power(data - c, 2), axis=1) for c in clusters])
weight /= (np.mean(weight) / 500) # scale w
weight = np.minimum(weight, 500)
weight = np.exp(-weight)
weight /= np.sum(weight, 0) # normalize for each point
weight /= np.sum(weight, 1)[:, None] # normalize for all cluster
means = np.dot(weight, data)
covs = []
min_std = 10 * np.finfo(float).tiny
for mu, p in zip(means, weight):
seq_min_mean = data - mu
new_cov = np.dot((seq_min_mean.T * p), seq_min_mean)
new_cov = np.maximum(new_cov, min_std)
covs.append(new_cov)
means_covs = list(zip(means, covs))
return means_covs
class GMMClassifier(ClassifierStrategy):
    """
    multivariate version of HMMClassifier for multichannel data
    * It uses PCA to reduce number of learned channels
    * It adds some functions for smart selection of the initial state
    """
    def __init__(self, model=None, pca_reduction=None, train_chromosome='chr1', study_diff=True):
        """
        @type model: GaussianHMM
        @param model: GaussianHMM to model the multichannel data
        @param pca_reduction: fitted PcaTransformer (or None to fit during fit())
        @param train_chromosome: chromosome key used for training
        @param study_diff: whether data_transform subtracts the per-position mean before PCA
        """
        self.model = model
        self.pca_reduction = pca_reduction
        self.train_chromosome = train_chromosome
        self.study_diff = study_diff  # whether we should reduce the mean from each location before PCA
    def pca_ndims(self):
        """
        number of dimensions
        @return: number of dimensions (shape of the PCA projection matrix)
        """
        return self.pca_reduction.w.shape
    def training_chr(self, chromosome):
        """
        Specifies on which chromosome we want to train or fit the model
        @param chromosome: chromosome name for training
        @return: None
        """
        self.train_chromosome = chromosome
    def fit(self, data, iterations=None, energy=0.9, pca_components=None):
        """
        fits the classifiers to training sequences and returns the log likelihood after fitting
        @param pca_components: number of dimensions to use for PCA (set energy to None)
        @param energy: cumulative energy to use for pca (set pca_components to None)
        @param data: data to use for PCA reduction matrix selection
        @param iterations: number of iterations number of iteration
        @return: likelihood for the model based on the model
        """
        old_model = self.model
        print("Starting fitting")
        training_seqs = data[self.train_chromosome]
        if self.pca_reduction is None:
            print('Fitting PCA')
            self.pca_reduction = PcaTransformer()
            # PCA is fitted on the first training sequence only
            self.pca_reduction.fit(training_seqs[0], min_energy=energy, ndim=pca_components)
        transformer = self.pca_reduction
        training_seqs = transformer(training_seqs)
        # TODO: use different sequences?
        bw_stop_condition = IteratorCondition(iterations) if iterations is not None else DiffCondition()
        # Baum-Welch iterations until the stop condition is met
        self.model, p = bw_iter(training_seqs, self.model, bw_stop_condition)
        print("Model fitting finished. likelihood", p)
        print("Old model")
        print(old_model)
        print("New model")
        print(self.model)
        fit_params = {
            'likelihoods': bw_stop_condition.prev_likelihoods
        }
        return p, fit_params
    def classify(self, sequence_dict):
        """
        Classifies chromosomes across samples (such as different tissues)
        @param sequence_dict: dict like object with keys as chromosomes and values as matrix
        @return: viterbi state assignment for the genome
        """
        classified = dict()
        transformer = self.pca_reduction
        for chromosome, sequence in sequence_dict.items():
            print('Classifying chromosome', chromosome)
            # reduce dimensions
            sequence = transformer(sequence)
            # fit
            classified[chromosome] = self.model.viterbi(sequence)
        return classified
    def data_transform(self):
        """
        get associated data transformation pre-processing
        @return: log(x+1), optionally mean-centred per position when study_diff is set
        """
        def log_diff(data):
            """log(x+1) followed by subtraction of the per-column mean"""
            log_data = np.log(np.array(data) + 1)
            return log_data - np.mean(log_data, 0)
        if self.study_diff:
            return log_diff
        else:
            return lambda x: np.log(np.array(x) + 1)
    def init_pca_clustering(self, data, train_chromosome='chr8', num_states=10, pca_energy=None):
        """
        Default initialization for GMM classifier with PCA and then clustering (before actual training)
        * "training" for PCA (based on train chromosome covar)
        * heuristic selection of number of state and their emission (soft k means)
        * state transition - random initialization with some prior assumptions
        @param pca_energy: minimum energy for PCA (to select number of dimensions).
        @type train_chromosome: str
        @type num_states: int
        @param data: data (or partial data) to use for selection of pca transformation, and k-means for states
                     (initial guess). dictionary like object
        @param train_chromosome: chromosome to use for training (must be in data. eg data[train_chromosome]
        @param num_states: number of states in HMM
        """
        chrom_data = data[train_chromosome]
        transformer = PcaTransformer()
        transformer.fit(chrom_data, min_energy=pca_energy)
        chrom_data = transformer(chrom_data)
        self.init_by_clustering({train_chromosome: chrom_data}, train_chromosome, num_states)
        self.pca_reduction = transformer  # override the empty PCA reduction with the trained PCA
    def init_by_clustering(self, data, train_chromosome='chr8', num_states=10):
        """
        Default initialization for GMM classifier with clustering (before actual training)
        @param data: data (or partial data) to use for selection of pca transformation, and k-means for states
                     (initial guess). dictionary like object
        @param train_chromosome: chromosome to use for training (must be in data. eg data[train_chromosome]
        @param num_states: number of states in HMM
        """
        chrom_data = data[train_chromosome]
        emission = continuous_state_selection(chrom_data, num_states=num_states)
        n_states = len(emission) + 1  # number of states plus begin state
        print('Number of states selected %i' % (n_states - 1))
        state_transition = np.random.random((n_states, n_states))
        # fill diagonal with higher values
        np.fill_diagonal(state_transition, np.sum(state_transition, 1))
        state_transition[:, 0] = 0  # set transition to begin state to zero
        # normalize
        state_transition /= np.sum(state_transition, 1)[:, np.newaxis]
        # initial guess
        initial_model = GaussianHMM(state_transition, emission)
        self.model = initial_model
        self.pca_reduction = PcaTransformer.empty()
        self.train_chromosome = train_chromosome
    @staticmethod
    def default_strategy(data, train_chromosome='chr8', num_states=10):
        """
        Creates a default GMM classifier with heuristic guess (see default)
        @type train_chromosome: str
        @type num_states: int
        @param data: data (or partial data) to use for selection of pca transformation, and k-means for states
                     (initial guess). dictionary like object
        @param train_chromosome: chromosome to use for training (must be in data. eg data[train_chromosome]
        @param num_states: number of states in HMM
        @return: a GMM classifier
        """
        classifier = GMMClassifier()
        classifier.init_pca_clustering(data, train_chromosome, num_states)
        return classifier
    def __str__(self):
        return str(self.model)
    def states_html(self, input_labels=None, column_title='Data/State'):
        """
        Creates a nice html table with some description/meaning for the states
        @param column_title: title for the columns
        @param input_labels: labels for the input (original dimensions before PCA)
        @return: table with html representation of the states
        """
        import matplotlib as mpl
        import matplotlib.cm as cm
        mean_vars_states = [state[0] for state in self.model.emission.mean_vars]
        mean_states = np.array([mean[0] for mean, var in mean_vars_states])
        # recover means in the original (pre-PCA) channel space
        mean_states = self.pca_reduction.recover(mean_states)
        n_states = mean_states.shape[0]
        norm = mpl.colors.Normalize(vmin=0, vmax=n_states + 1)
        # NOTE(review): cm.spectral was removed in newer matplotlib (use 'nipy_spectral') - confirm pinned version
        cmap = cm.spectral
        m = cm.ScalarMappable(norm=norm, cmap=cmap)
        color_schema = dict()
        for i in range(0, n_states + 1):
            rgb = list(m.to_rgba(i)[:3])
            for j in range(0, 3):
                rgb[j] = str("%i" % (255 * rgb[j]))
            color_schema[i] = ','.join(rgb)
        states_ths = ''.join(
            ['<th style=\"color:rgb(%s)\">%i</th>' % (color_schema[i], i) for i in np.arange(1, n_states + 1)])
        states_trs = []
        # NOTE(review): the triple-quoted block below is dead code kept as an in-source comment (old implementation)
        """
        max_v = np.max(mean_states)
        backgrounds = cm.ScalarMappable(norm=mpl.colors.Normalize(vmin=np.min(mean_states), vmax=np.max(mean_states)), cmap=cm.Blues)
        mean_to_color = lambda x: 'rgb(%i, %i, %i)' % backgrounds.to_rgba(x, bytes=True)[:3]
        for cell_i, cell_means in enumerate(mean_states.T):
            cell_description = "<td>%s</td>" % (str(cell_i+1) if input_labels is None else input_labels[cell_i])
            # add mean values
            cell_description += ''.join(['<td style="font-size: %i%%;color:#fff;background:%s">%.2f</td>' % (mean/max_v * 100, mean_to_color(mean), mean) for mean in cell_means])
            # wrap in tr
            cell_description = '<tr>%s</tr>' % cell_description
            states_trs.append(cell_description)
        """
        # NOTE(review): this template assignment is shadowed by the identical one below and has no effect
        template = """
        <table style="font-size:85%;text-align:center;border-collapse:collapse;border:1px solid #aaa;" cellpadding="5" border="1">
            <tr style="font-size:larger; font-weight: bold;">
                <th>{column_title}</th>
                {states_ths}
            </tr>
            {states_trs}
        </table>
        """
        # rewrite
        # one colour scale per input channel (row), so each row is normalised independently
        backgrounds = [
            cm.ScalarMappable(norm=mpl.colors.Normalize(vmin=np.min(data_type),
                                                        vmax=np.max(data_type)), cmap=cm.Blues)
            for data_type in mean_states.T]
        mean_to_color = lambda x, y: 'rgb(%i, %i, %i)' % backgrounds[y].to_rgba(x, bytes=True)[:3]
        for cell_i, data_type_means in enumerate(mean_states.T):
            cell_description = "<td>%s</td>" % (str(cell_i + 1) if input_labels is None else input_labels[cell_i])
            # add mean values
            cell_description += ''.join(['<td style="font-size: 85%%;color:#fff;background:%s">%.2f</td>' %
                                         (mean_to_color(mean, cell_i), mean)
                                         for mean in data_type_means])
            # wrap in tr
            cell_description = '<tr>%s</tr>' % cell_description
            states_trs.append(cell_description)
        template = """
        <table style="font-size:85%;text-align:center;border-collapse:collapse;border:1px solid #aaa;" cellpadding="5" border="1">
            <tr style="font-size:larger; font-weight: bold;">
                <th>{column_title}</th>
                {states_ths}
            </tr>
            {states_trs}
        </table>
        """
        return template.format(**({'states_ths': states_ths,
                                   'states_trs': '\n'.join(states_trs),
                                   'column_title': column_title
                                   }))
class DiscreteMultichannelHMM(ClassifierStrategy):
    """
    A model for discrete multichannel HMM:
    data [position x tissue] =(PCA)> data [position x tissue combination] => discretization => word encoding => HMM
    """
    def __init__(self):
        # model: DiscreteHMM once fit() is implemented; pca_reduction: optional PcaTransformer
        self.model = None
        self.pca_reduction = None
    def classify(self, sequence):
        raise NotImplementedError
    def fit(self, data):
        # TODO: only partially implemented here not tested...
        # NOTE(review): everything after the raise below is dead code kept as a sketch;
        # it contains bugs (1-D zeros indexed as 2-D, np.random.rand called with a tuple)
        # and must be fixed before removing the raise.
        raise NotImplementedError
        from scipy.stats import norm as gaussian
        min_alpha = 0
        n_words = np.max(data)
        # init hmm model
        n_states = 5
        state_transition = np.zeros(n_states + 1)
        # begin state
        state_transition[0, 1:] = np.random.rand(n_states)
        # real states - random with some constraints. state 1 is most closed, and n is most opened
        real_states = np.random.rand((n_states, n_states))
        # set strong diagonal
        diagonal_selector = np.eye(n_states, dtype='bool')
        real_states[diagonal_selector] = np.sum(real_states, 1) * 9
        real_states /= np.sum(real_states, 1)[:, None]
        state_transition[1:, 1:] = real_states
        # normalize
        # emission
        emission = np.zeros((n_states + 1, n_words))
        real_emission = np.random.random((n_states, n_words))
        for i in np.arange(0, n_states):
            mean = i * (n_words / n_states)
            variance = (n_words / n_states)
            real_emission[i, :] = gaussian(mean, variance).pdf(np.arange(n_words))
        real_emission /= np.sum(real_emission, 1)[:, None]
        emission[1:, 1:] = real_emission
        # init hmm
        print('Creating model')
        self.model = DiscreteHMM(state_transition, emission, min_alpha=min_alpha)
        print('Training model')
    def data_transform(self):
        """
        get associated data transformation prepossessing
        """
        if self.pca_reduction is None:
            return lambda x: x
        else:
            return lambda x: DiscreteMultichannelHMM.preprocess(self.pca_reduction(x))
    @staticmethod
    def preprocess(data):
        # discretize each channel, then encode the channel combination as a single word
        discrete = DiscreteMultichannelHMM.multichannel_discrete_transform(data)
        multichannel_data = DiscreteMultichannelHMM.encode_discrete_words(discrete)
        return multichannel_data
    @staticmethod
    def encode_discrete_words(data):
        """
        Transforms a discrete matrix to one dimensional words
        @param data: discrete matrix
        @return: words array
        """
        # NOTE(review): this looks broken - new_data is float (|= will fail) and
        # next(alphbet_assign) yields an (index, value) tuple, not a scalar. Verify before use.
        new_data = np.zeros(data.shape[1])
        alphbet = np.power(2, np.arange(data.shape[0] * np.max(data)))
        alphbet_assign = enumerate(alphbet)
        # transform to powers of 2
        for i in np.arange(0, np.max(data) + 1):
            for j in np.arange(0, new_data.shape[0]):
                selector = (data[j, :] == i)
                data[j, selector] = next(alphbet_assign)
        for cell in data:
            # bitwise or
            new_data |= cell
        return new_data
    @staticmethod
    def multichannel_discrete_transform(data, percentiles=[60, 75, 90]):
        """
        Transforms a matrix from continuous values to discrete values
        @param percentiles: percentiles used for discretization
        @param data: continuous values matrix
        @return: discrete values
        """
        data = np.log(data + 1)
        # NOTE(review): np.percentile returns an ndarray here, so `[min_val] + prec_values`
        # below is elementwise addition, not list concatenation - confirm intended bin edges.
        prec_values = np.percentile(data, q=percentiles)
        max_val = np.max(data) + 1
        min_val = np.min(data) - 1
        new_chrom_data = np.zeros_like(data)
        for i, vals in enumerate(zip([min_val] + prec_values, prec_values + [max_val])):
            new_chrom_data[(data >= vals[0]) & (data < vals[1])] = i
        return new_chrom_data
class PCAClassifier(ClassifierStrategy):
    """
    this is not a real classifier but a PCA transform

    Wraps a PcaTransformer plus a GaussianHMM trained on the PCA-reduced data.
    """
    def __init__(self, model=None, pca_reduction=None, train_chromosome='chr1'):
        """
        @type model: GaussianHMM
        @param model: GaussianHMM to model the multichannel data
        @param pca_reduction: fitted PcaTransformer (or None to fit during fit())
        @param train_chromosome: chromosome key used for training
        """
        self.model = model
        self.pca_reduction = pca_reduction
        self.train_chromosome = train_chromosome
    def pca_ndims(self):
        """
        Dimension of PCA matrix
        """
        return self.pca_reduction.w.shape
    def training_chr(self, chromosome):
        """
        Specifies on which chromosome we want to train or fit the model
        @param chromosome: chromosome name for training
        @return: None
        """
        self.train_chromosome = chromosome
    def fit(self, data, iterations=None, energy=0.9, pca_components=None):
        """
        fits the classifiers to training sequences and returns the log likelihood after fitting
        @param pca_components: number of dimensions to use for PCA (set energy to None)
        @param energy: cumulative energy to use for pca (set pca_components to None)
        @param data: data to use for PCA reduction matrix selection
        @param iterations: number of iterations number of iteration
        @return: likelihood for the model based on the model
        """
        old_model = self.model
        print("Starting fitting")
        training_seqs = data[self.train_chromosome]
        if self.pca_reduction is None:
            self.pca_reduction = PcaTransformer()
            # PCA is fitted on the first training sequence only
            self.pca_reduction.fit(training_seqs[0], min_energy=energy, ndim=pca_components)
        # BUG FIX: `transformer` was only assigned in the `else:` branch, so the first
        # fit (pca_reduction is None) raised NameError on the next line. Always bind it.
        transformer = self.pca_reduction
        training_seqs = transformer(training_seqs)
        # TODO: use different sequences?
        bw_stop_condition = IteratorCondition(iterations) if iterations is not None else DiffCondition()
        # Baum-Welch iterations until the stop condition is met
        self.model, p = bw_iter(training_seqs, self.model, bw_stop_condition)
        print("Model fitting finished. likelihood", p)
        print("Old model")
        print(old_model)
        print("New model")
        print(self.model)
        fit_params = {
            'likelihoods': bw_stop_condition.prev_likelihoods
        }
        return p, fit_params
    def classify(self, sequence_dict):
        """
        Classifies chromosomes across samples (such as different tissues)
        @param sequence_dict: dict like object with keys as chromosomes and values as matrix
        @return: viterbi state assignment for the genome
        """
        classified = dict()
        transformer = self.pca_reduction
        for chromosome, sequence in sequence_dict.items():
            print('Classifying chromosome', chromosome)
            # reduce dimensions
            sequence = transformer(sequence)
            # fit
            classified[chromosome] = self.model.viterbi(sequence)
        return classified
    def data_transform(self):
        """
        get associated data transformation pre-processing
        @return: log(x+1)
        """
        return lambda x: np.log(np.array(x) + 1)
    def default(self, data, train_chromosome='chr8', num_states=10, pca_energy=None):
        """
        Default initialization for GMM classifier with:
        * "training" for PCA (based on train chromosome covar
        * heuristic selection of number of state and their emission (soft k means)
        * state transition - random initialization with some prior assumptions
        @param pca_energy: minimum energy for PCA (to select number of dimensions)
        @type train_chromosome: str
        @type num_states: int
        @param data: data (or partial data) to use for selection of pca transformation, and k-means for states
                     (initial guess). dictionary like object
        @param train_chromosome: chromosome to use for training (must be in data. eg data[train_chromosome]
        @param num_states: number of states in HMM
        """
        chrom_data = data[train_chromosome]
        transformer = PcaTransformer()
        transformer.fit(chrom_data, min_energy=pca_energy)
        chrom_data = transformer(chrom_data)
        emission = continuous_state_selection(chrom_data, num_states=num_states)
        n_states = len(emission) + 1  # number of states plus begin state
        print('Number of states selected %i' % (n_states - 1))
        state_transition = np.random.random((n_states, n_states))
        # fill diagonal with higher values
        np.fill_diagonal(state_transition, np.sum(state_transition, 1))
        state_transition[:, 0] = 0  # set transition to begin state to zero
        # normalize
        state_transition /= np.sum(state_transition, 1)[:, np.newaxis]
        # initial guess
        initial_model = GaussianHMM(state_transition, emission)
        self.model = initial_model
        self.pca_reduction = transformer
        self.train_chromosome = train_chromosome
    @staticmethod
    def default_strategy(data, train_chromosome='chr8', num_states=10):
        """
        Creates a default GMM classifier with heuristic guess (see default)
        @type train_chromosome: str
        @type num_states: int
        @param data: data (or partial data) to use for selection of pca transformation, and k-means for states
                     (initial guess). dictionary like object
        @param train_chromosome: chromosome to use for training (must be in data. eg data[train_chromosome]
        @param num_states: number of states in HMM
        @return: a GMM classifier
        """
        # NOTE(review): intentionally returns a GMMClassifier (PCAClassifier has no
        # init_pca_clustering); kept as-is to preserve existing caller behaviour.
        classifier = GMMClassifier()
        classifier.init_pca_clustering(data, train_chromosome, num_states)
        return classifier
    def __str__(self):
        return str(self.model)
    def states_html(self):
        """
        Creates a nice html table with some description/meaning for the states
        @return: table with html representation of the states
        """
        import matplotlib as mpl
        import matplotlib.cm as cm
        mean_vars_states = [state[0] for state in self.model.emission.mean_vars]
        mean_states = np.array([mean[0] for mean, var in mean_vars_states])
        # recover means in the original (pre-PCA) channel space
        mean_states = self.pca_reduction.recover(mean_states)
        n_states = mean_states.shape[0]
        n_cells = mean_states.shape[1]
        norm = mpl.colors.Normalize(vmin=0, vmax=n_states + 1)
        # NOTE(review): cm.spectral was removed in newer matplotlib (use 'nipy_spectral') - confirm pinned version
        cmap = cm.spectral
        m = cm.ScalarMappable(norm=norm, cmap=cmap)
        color_schema = dict()
        for i in range(0, n_states + 1):
            rgb = list(m.to_rgba(i)[:3])
            for j in range(0, 3):
                rgb[j] = str("%i" % (255 * rgb[j]))
            color_schema[i] = ','.join(rgb)
        cells_ths = ''.join(['<th>%i</th>' % i for i in np.arange(1, n_cells + 1)])
        states_trs = []
        max_v = np.max(mean_states)
        backgrounds = cm.ScalarMappable(norm=mpl.colors.Normalize(vmin=np.min(mean_states), vmax=np.max(mean_states)),
                                        cmap=cm.Blues)
        mean_to_color = lambda x: 'rgb(%i, %i, %i)' % backgrounds.to_rgba(x, bytes=True)[:3]
        for state_i, state_means in enumerate(mean_states):
            state_description = "<td style=\"color:rgb(%s)\">%i</td>" % (color_schema[state_i], state_i + 1)
            # add mean values
            state_description += ''.join(['<td style="font-size: %i%%;color:#fff;background:%s">%.2f</td>' % (
                mean / max_v * 100, mean_to_color(mean), mean) for mean in state_means])
            # wrap in tr
            state_description = '<tr>%s</tr>' % state_description
            states_trs.append(state_description)
        template = """
        <table style="font-size:85%;text-align:center;border-collapse:collapse;border:1px solid #aaa;" cellpadding="5" border="1">
            <tr style="font-size:larger; font-weight: bold;">
                <th>State/Cell</th>
                {cells_ths}
            </tr>
            {states_trs}
        </table>
        """
        return template.format(**({'cells_ths': cells_ths,
                                   'states_trs': '\n'.join(states_trs)
                                   }))
| StarcoderdataPython |
8057467 | <reponame>meetps/rhea
from __future__ import absolute_import
from .iceriver import IceRiver | StarcoderdataPython |
8097739 | from aiosparql.syntax import Node, RDFTerm
from tests.integration.helpers import IntegrationTestCase, unittest_run_loop
class DeltaTestCase(IntegrationTestCase):
    """Integration tests: inserting/deleting triples triggers websocket notifications."""
    @unittest_run_loop
    async def test_push_notification(self):
        """A single inserted resource yields one 'push' message with its id/type."""
        ws = await self.client.ws_connect('/')
        test_id = self.uuid4()
        test_iri = self.resource("resource1", test_id)
        await self.insert_node(Node(test_iri, {
            "rdf:type": RDFTerm("push:Resource1"),
            "mu:uuid": test_id,
            "dct:title": "test",
            "push:number": 1,
        }))
        data = await ws.receive_json()
        self.assertIn('push', data)
        self.assertEqual(data['push']['data']['id'], test_id)
        self.assertEqual(data['push']['data']['type'], "resource1")
    @unittest_run_loop
    async def test_push_many_notifications(self):
        """Two resources inserted in one batch yield two 'push' messages (order not guaranteed)."""
        ws = await self.client.ws_connect('/')
        test1_id = self.uuid4()
        test1_iri = self.resource("resource1", test1_id)
        test2_id = self.uuid4()
        test2_iri = self.resource("resource1", test2_id)
        await self.insert_triples([
            Node(test1_iri, {
                "rdf:type": RDFTerm("push:Resource1"),
                "mu:uuid": test1_id,
                "dct:title": "test",
                "push:number": 1,
            }),
            Node(test2_iri, {
                "rdf:type": RDFTerm("push:Resource1"),
                "mu:uuid": test2_id,
                "dct:title": "test",
                "push:number": 2,
            }),
        ])
        # delivery order is unspecified, so each message only has to match one of the two ids
        data1 = await ws.receive_json()
        self.assertIn('push', data1)
        self.assertIn(data1['push']['data']['id'], (test1_id, test2_id))
        self.assertEqual(data1['push']['data']['type'], "resource1")
        data2 = await ws.receive_json()
        self.assertIn('push', data2)
        self.assertIn(data2['push']['data']['id'], (test1_id, test2_id))
        self.assertEqual(data2['push']['data']['type'], "resource1")
    @unittest_run_loop
    async def test_push_push_and_delete_notifications(self):
        """Insert then delete: a 'push' message is followed by a 'delete' message."""
        ws = await self.client.ws_connect('/')
        test_id = self.uuid4()
        test_iri = self.resource("resource1", test_id)
        await self.insert_node(Node(test_iri, {
            "rdf:type": RDFTerm("push:Resource1"),
            "mu:uuid": test_id,
            "dct:title": "test",
            "push:number": 1,
        }))
        data1 = await ws.receive_json()
        self.assertIn('push', data1)
        self.assertEqual(data1['push']['data']['id'], test_id)
        self.assertEqual(data1['push']['data']['type'], "resource1")
        await self.delete_node(test_iri)
        data2 = await ws.receive_json()
        self.assertIn('delete', data2)
        self.assertEqual(data2['delete']['id'], test_id)
        self.assertEqual(data2['delete']['type'], "resource1")
| StarcoderdataPython |
11215491 | <filename>app/ext/api/controller/users_controller.py<gh_stars>0
from app.ext.api.controller import recipe_controller
from app.ext.api.exceptions import (
EmailAlreadyExist,
InvalidToken,
InvalidUser,
UserNotFound,
)
from app.ext.api.services import token_services, users_services, util_services
from dynaconf import settings
from flask import session
def create_user(new_user):
    """Register a new user, audit the action and (optionally) e-mail a confirmation token.

    @param new_user: dict with "name", "email", "password" and "admin" keys
    @return: the created user record (dict with at least "id", "name", "email")
    @raise EmailAlreadyExist: if a user with the same e-mail already exists
    """
    name = new_user["name"]
    email = new_user["email"]
    # BUG FIX: this line was corrupted (dataset anonymization artifact produced
    # invalid syntax); restored to read the password from the request payload.
    password = new_user["password"]
    admin = new_user["admin"]
    user = users_services.find_by_email(email)
    if user:
        raise EmailAlreadyExist
    user = users_services.create_user(name, email, password, admin)
    token = token_services.generate_token(user["id"], user["email"])  # noqa
    if settings.SEND_MAIL:
        util_services.send_mail(
            user["email"], "Access your account", "mail/confirm.html", token=token
        )
    session["audit_log"] = {
        "object_type": "USER",
        "object_id": user.get("id"),
        "object_name": user.get("name"),
        "action": "CREATE",
    }
    return user
def confirm_user(token):
    """Validate an account-confirmation token and mark the matching user as confirmed.

    @param token: the confirmation token e-mailed to the user
    @raise InvalidToken: if the token cannot be verified
    @raise InvalidUser: if the account was already confirmed
    @raise UserNotFound: if no user matches the token payload
    """
    try:
        payload = token_services.verify_token(token)
    except Exception:
        raise InvalidToken
    user_id = payload.get("user_id")
    if users_services.is_confirmed(user_id):
        raise InvalidUser
    confirmed = users_services.confirm_user(user_id)
    if not confirmed:
        raise UserNotFound
    return confirmed
def list_user():
    """Return all registered users wrapped for the API response."""
    return {"users": users_services.list_user()}
def get_user(user_id):
    """Return the public profile fields of a single user.

    @raise UserNotFound: if no user exists with the given id
    """
    user = users_services.find_by_id(user_id)
    if not user:
        raise UserNotFound
    return dict(
        user_id=user.id,
        name=user.name,
        email=user.email,
        is_admin=user.is_admin,
    )
def update_user(user_id, user_data):
    """Update a user's profile fields and audit the change.

    @param user_id: id of the user being updated
    @param user_data: dict possibly containing "email", "password", "name", "admin"
    @raise UserNotFound: if no user exists with the given id
    @raise EmailAlreadyExist: if the new e-mail belongs to a different account
    """
    user = users_services.find_by_id(user_id)
    if not user:
        raise UserNotFound
    email = user_data.get("email")
    # BUG FIX: previously any existing match raised EmailAlreadyExist, so a user
    # re-submitting their own current e-mail could never save. Only a *changed*
    # e-mail that is already taken is a conflict.
    if email and email != user.email and users_services.find_by_email(email):
        raise EmailAlreadyExist
    password = user_data.get("password")
    name = user_data.get("name")
    admin = user_data.get("admin")
    user = users_services.update_user(user_id, email, password, name, admin)
    session["audit_log"] = {
        "object_type": "USER",
        "object_id": user.id,
        "object_name": user.name,
        "action": "UPDATE",
    }
    return {
        "user_id": user.id,
        "name": user.name,
        "email": user.email,
        "is_admin": user.is_admin,
    }
def delete_user(user_id):
    """Delete a user together with all of their recipes, auditing the action.

    @param user_id: id of the user to delete
    @raise UserNotFound: if no user exists with the given id
    """
    user = users_services.find_by_id(user_id)
    if not user:
        # BUG FIX: the original line was the bare expression `UserNotFound`
        # (a no-op), so a missing user fell through and crashed on user.recipes.
        raise UserNotFound
    for recipe in user.recipes:
        recipe_controller.delete_recipe(recipe.id, user_id)
    session["audit_log"] = {
        "object_type": "USER",
        "object_id": user.id,
        "object_name": user.name,
        "action": "DELETE",
    }
    users_services.delete_user(user_id)
| StarcoderdataPython |
3440005 | <filename>_site/tomat/apps/ideas/views.py
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, render
from django.core.paginator import PageNotAnInteger, InvalidPage, Paginator
from django.core.urlresolvers import reverse
from ideas.models import Idea, Category
from products.models import Product
def index(request):
    """Paginated overview of all visible gift ideas (6 per page)."""
    ideas = Idea.objects.filter(is_visible=True).order_by('-id')
    paginator = Paginator(ideas, 6)
    try:
        page = paginator.page(request.GET.get('page'))
    except (InvalidPage, PageNotAnInteger):
        # non-numeric or out-of-range page parameter falls back to the first page
        page = paginator.page(1)
    return render(request, 'ideas/index.html', {
        'categories': Category.objects.all().order_by('id'),
        'objects': page,
    })
def category(request, category_slug):
    """Paginated list of visible gift ideas belonging to one category."""
    cat = get_object_or_404(Category, slug=category_slug)
    ideas = Idea.objects.filter(category=cat, is_visible=True).order_by('-id')
    paginator = Paginator(ideas, 6)
    try:
        page = paginator.page(request.GET.get('page'))
    except (InvalidPage, PageNotAnInteger):
        # non-numeric or out-of-range page parameter falls back to the first page
        page = paginator.page(1)
    request.breadcrumbs.add(u'Идеи подарков', reverse('ideas.views.index'))
    return render(request, 'ideas/index.html', {
        'categories': Category.objects.all().order_by('id'),
        'objects': page,
    })
def read(request, category_slug, idea_id):
    """Detail page for a single gift idea with its related products."""
    cat = get_object_or_404(Category, slug=category_slug)
    idea = get_object_or_404(Idea, pk=idea_id, is_visible=True, category=cat)
    # reuse the already-fetched category object instead of triggering the FK lookup again
    idea.category = cat
    products = Product.objects.for_user(request.user).filter(ideas=idea)
    request.breadcrumbs.add(u'Идеи подарков', reverse('ideas.views.index'))
    request.breadcrumbs.add(cat.title, cat.get_absolute_url())
    return render(request, 'ideas/read.html', {
        'object': idea,
        'products': products,
    })
| StarcoderdataPython |
5181823 | #!/usr/bin/env python3
'''This NetworkTables client demonstrates the use of classes to access values.'''
import time
from networktables import NetworkTables
from networktables.util import ntproperty
import logging
# To see messages from networktables, you must setup logging
logging.basicConfig(level=logging.DEBUG)
# NOTE(review): server address is hard-coded; point it at your robot/roboRIO
NetworkTables.initialize(server='192.168.1.21')
class SomeClient(object):
    """Exposes NetworkTables entries as plain attributes via ntproperty."""
    # second argument (0) is the default value until the server publishes one
    robotTime = ntproperty("/SmartDashboard/robotTime", 0)
    dsTime = ntproperty("/SmartDashboard/dsTime", 0)
c = SomeClient()
# poll the bound NetworkTables values forever, once per second
while True:
    print("robotTime:", c.robotTime)
    print("dsTime:", c.dsTime)
    time.sleep(1)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.