text stringlengths 38 1.54M |
|---|
# Python 2 working script: for each even perimeter x, count integer right
# triangles a + b + c == x (presumably a Project Euler singular-triangle
# search -- TODO confirm which problem) and tally perimeters with exactly
# one solution.
import sys,os,math,itertools, operator
sys.path.append('../pythonlib')
import basic
MAX = 10**6
MAX = 1500000  # NOTE(review): immediately overrides the previous MAX
result = 0
# Only even perimeters can yield integer right triangles; scan descending.
tgt = [x for x in xrange(1, MAX) if x%2 == 0]
tgt.reverse()
for x in tgt:#[120]:
    res = 0
    for c in xrange(1, x/2):
        # With a + b = x - c, the identity a^2 + b^2 = (a+b)^2 - 2ab gives
        # ab = x*(x - 2c)/2 when c is the hypotenuse of a perimeter-x triangle.
        ab = x*(x -2*c)/2
        if c**2 != (x - c)**2 - 2*ab:
            continue
        for a in xrange(1, ab):
            b = ab/a
            if a**2 + b**2 == c**2:
                #print 'match','x',x,'ab',ab,'c',c, '[ab]', ab, 'a', a, 'b', b
                res += 1
            if a > b:
                # Past a == b every solution is a mirror of one already seen.
                break
    print x, res
    if res == 1:
        result +=1
print 'ans = ', result
|
import os
import tensorflow as tf
import signal
from helpers import *
#from fc_2 import setup_model
# from cnn_2_16 import setup_model
#from cnn_2_8 import setup_model
from cnn_3_12 import setup_model
# from cnn_3 import setup_model
# Flag polled by the training loop in main(); flipped by the SIGINT handler
# so training can stop gracefully and still save the model.
IS_TRAINING = True
MODEL_STORAGE_PATH = '/etc/bot/predictor/model/'
os.makedirs(MODEL_STORAGE_PATH, exist_ok=True)
def handle_sig_done(*args):
    """SIGINT handler: first Ctrl-C requests a graceful stop, second exits."""
    global IS_TRAINING
    if not IS_TRAINING:
        # Already stopping -- a second interrupt aborts immediately.
        print("Cancelled")
        exit(0)
    IS_TRAINING = False
    print("Ending training")
signal.signal(signal.SIGINT, handle_sig_done)
def main():
    """Build the TF1 graph, train on minibatches, then serialize the weights.

    Relies on helpers.filenames_labels / minibatch / print_stats /
    serialize_matrix and on setup_model() from the selected architecture
    module.  Stops early (but still saves) when IS_TRAINING is cleared by
    the SIGINT handler.
    """
    # Get the set of all the labels and file paths, pre shuffled
    full_set = filenames_labels()
    # x: flattened input patch, y: one-hot label over 3 classes.
    x = tf.placeholder(tf.float32, [None, PATCH_SIZE])
    y = tf.placeholder(tf.float32, [None, 3])
    p, h = setup_model(x)  # p: dict of parameter tensors, h: logits
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=h)
    loss = tf.reduce_mean(cross_entropy)# + tf.nn.l2_loss(p['fc1_w']) + tf.nn.l2_loss(p['fc0_w'])
    train_step = tf.train.AdamOptimizer().minimize(loss)
    correct_prediction = tf.equal(tf.argmax(h, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    import random
    last_accuracy = 0
    minibatch_size = 100
    # Roughly 20 passes over the full data set.
    epochs = (len(full_set) // minibatch_size) * 20
    for e in range(0, epochs):
        # sub_ts_x, sub_ts_y = minibatch(full_set, random.randint(0, len(full_set) // 100), size=100)
        sub_ts_x, sub_ts_y = minibatch(full_set, e % (len(full_set) // minibatch_size), size=100)
        sess.run(train_step, feed_dict={x: sub_ts_x, y: sub_ts_y})
        if e % 100 == 0:
            # Accuracy is measured on the current *training* minibatch only.
            train_accuracy = sess.run(accuracy, feed_dict={x: sub_ts_x, y: sub_ts_y})
            print_stats(
                {
                    'accuracy': last_accuracy
                }, {
                    'accuracy': train_accuracy,
                    'epoch': e,
                    'epoch_total': epochs
                })
            last_accuracy = train_accuracy
        if not IS_TRAINING:
            # SIGINT requested a stop: leave the loop but still save below.
            break
    # Save the learned parameters
    for key in p:
        file_name = key.replace('_', '.')
        with open(MODEL_STORAGE_PATH + file_name, mode='wb') as fp:
            serialize_matrix(sess.run(p[key]), fp)
if __name__ == '__main__':
    main()
|
# Github Login
* Navigate to github login page "https://github.com/login"
* Verify page heading to be "Sign in to GitHub"
## SignIn to github account
* Enter user account credentials
* Click to SignIn
* Verify landing page after signIn |
from pathlib import Path
import unittest
import numpy as np
from ibllib.pipes import histology
import ibllib.atlas as atlas
class TestHistology(unittest.TestCase):
    """Integration tests for histology track loading and region lookup.

    Requires the 25-um Allen atlas download and the CSV track fixtures under
    fixtures/histology/tracks, so these are not pure unit tests.
    """
    def setUp(self) -> None:
        # Loading the atlas is the expensive part; done once per test.
        self.brain_atlas = atlas.AllenAtlas(res_um=25)
        self.path_tracks = Path(__file__).parent.joinpath('fixtures', 'histology', 'tracks')
    def test_histology_get_brain_regions(self):
        # first part of the test is to check on an actual track file
        for file_track in self.path_tracks.rglob("*_pts.csv"):
            xyz = histology.load_track_csv(file_track)
            channels, ins = histology.get_brain_regions(xyz=xyz, brain_atlas=self.brain_atlas)
            # also check that it works from an insertion
            channels, ins2 = histology.get_brain_regions(xyz=ins.xyz, brain_atlas=self.brain_atlas)
            # Expected acronyms are specific to the fixture track file.
            self.assertTrue(channels.acronym[-1] == 'VISpm1')
            self.assertTrue(channels.acronym[0] == 'APN')
            # The insertion recovered from its own xyz should round-trip.
            a = np.array([ins.x, ins.y, ins.z, ins.phi, ins.theta, ins.depth])
            b = np.array([ins2.x, ins2.y, ins2.z, ins2.phi, ins2.theta, ins2.depth])
            self.assertTrue(np.all(np.isclose(a, b)))
    def test_histology_insertion_from_track(self):
        for file_track in self.path_tracks.rglob("*_pts.csv"):
            xyz = histology.load_track_csv(file_track)
            insertion = atlas.Insertion.from_track(xyz, brain_atlas=self.brain_atlas)
            # checks that the tip coordinate is not the deepest point but its projection
            self.assertFalse(np.all(np.isclose(insertion.tip, xyz[-1])))
    def test_get_brain_exit_entry(self):
        # Vertical trajectory through the origin of the atlas coordinates.
        traj = atlas.Trajectory.fit(np.array([[0, 0, 0], [0, 0, 0.005]]))
        top = atlas.Insertion.get_brain_entry(traj, brain_atlas=self.brain_atlas)
        bottom = atlas.Insertion.get_brain_exit(traj, brain_atlas=self.brain_atlas)
        self.brain_atlas.bc.nx  # NOTE(review): no-op expression, kept as-is
        ix, iy = (self.brain_atlas.bc.x2i(0), self.brain_atlas.bc.y2i(0))
        # Entry/exit z must match the atlas top/bottom surfaces at (0, 0).
        self.assertTrue(np.isclose(self.brain_atlas.top[iy, ix], top[2]))
        self.assertTrue(np.isclose(self.brain_atlas.bottom[iy, ix], bottom[2]))
    def test_filename_parser(self):
        # Each case maps a raw track-file path to its parsed metadata dict.
        tdata = [
            {'input': Path("/gna/electrode_tracks_SWC_014/2019-12-12_SWC_014_001_probe01_fit.csv"),
             'output': {'date': '2019-12-12', 'experiment_number': 1, 'name': 'probe01',
                        'subject': 'SWC_014'}},
            {'input': Path("/gna/datadisk/Data/Histology/"
                           "tracks/ZM_2407/2019-11-06_ZM_2407_001_probe_00_pts.csv"),
             'output': {'date': '2019-11-06', 'experiment_number': 1, 'name': 'probe_00',
                        'subject': 'ZM_2407'}},
            {'input': Path("/gna/2019-12-06_KS023_001_probe01_pts.csv"),
             'output': {'date': '2019-12-06', 'experiment_number': 1, 'name': 'probe01',
                        'subject': 'KS023'}},
        ]
        for t in tdata:
            track_file = t['input']
            assert t['output'] == histology._parse_filename(track_file)
|
def euler635():
    """Project Euler 635 working solution (Python 2).

    Computes S(10**8) = sum over primes p < 10**8 of s(p), everything modulo
    1000000009.  Prints the result from inside the function and returns None,
    so the trailing ``print euler635()`` also prints None.
    """
    # Earlier brute-force / roots-of-unity attempts, kept for reference:
    # def A3(n):
    #     import itertools
    #     B = range(1,3*n+1)
    #     out = 0
    #     for i in itertools.combinations(B,n):
    #         if sum(i)%n==0:
    #             out+=1
    #     return out
    # import cmath
    # import mpmath
    # def f(n):
    #     temp3 = 0
    #     for i in range(n):
    #         temp2 = 0
    #         for j in range(n):
    #             temp = 1
    #             for k in range(1,3*n):
    #                 temp*=1+cmath.exp(2*cmath.pi*1j*(i+k*j)/n)
    #             temp2+=temp
    #         temp3+=temp2
    #     return temp3/(n*n)-1
    def a2(n):
        # Divisor-sum formula with C(2d, d); only used for small checks.
        from sympy.ntheory import totient, divisors
        from math import factorial
        binomial = lambda n,k: factorial(n)/factorial(k)/factorial(n-k)
        out = 0
        for d in [1,n]:
            out+=(totient(n/d)*binomial(2*d,d))%1000000009
        # pow(n, 10**9+7, 10**9+9) is n^(p-2) mod p, i.e. the modular
        # inverse of n modulo the prime 10**9+9 (Fermat's little theorem).
        return out*pow(n,10**9+7,10**9+9)
    def a3(n):
        # Same as a2 but with C(3d, d).
        from sympy.ntheory import totient, divisors
        from math import factorial
        binomial = lambda n,k: factorial(n)/factorial(k)/factorial(n-k)
        out = 0
        for d in [1,n]:
            out+=(totient(n/d)*binomial(3*d,d))%1000000009
        return out*pow(n,10**9+7,10**9+9)
    # Incrementally-built tables of C(2n, n) and C(3n, n) mod 10**9+9.
    choose2n_n = [1]
    choose3n_n = [1]
    def mod_inv(x,p):
        # Modular inverse via Fermat: x^(p-2) mod p (p must be prime).
        return pow(x,p-2,p)
    def load_binomial_coefficients(n):
        import time
        from fractions import Fraction
        start = time.time()
        i = 0
        while len(choose2n_n)<n:
            if i%10000==0: print i, time.time() - start
            # C(2(i+1), i+1) = C(2i, i) * 2(2i+1)/(i+1)
            factor1 = 2*(2*i+1)
            factor1 %= 10**9+9
            factor1 *= mod_inv(i+1,10**9+9)
            factor1 %= 10**9+9
            # C(3(i+1), i+1) = C(3i, i) * 3(3i+1)(3i+2) / (2(i+1)(2i+1))
            factor2 = 3*(3*i+1)
            factor2 %= 10**9+9
            factor2 *= 3*i+2
            factor2 %= 10**9+9
            factor2 *= mod_inv(2*(i+1)*(2*i+1),10**9+9)
            factor2 %= 10**9+9
            choose2n_n.append((choose2n_n[-1]*factor1)%(10**9+9))
            choose3n_n.append((choose3n_n[-1]*factor2)%(10**9+9))
            i+=1
    load_binomial_coefficients(10**8)
    def s(n):
        from sympy.ntheory import totient
        from math import factorial
        phi = n-1  # totient of n, valid because s() is only called on primes
        binomial_term = choose2n_n[n]%(10**9+9)+choose3n_n[n]%(10**9+9)
        out = (phi*(binomial_term)+5)
        return (out*pow(n,10**9+7,10**9+9))%(10**9+9)
    def S(L):
        from sympy.ntheory.generate import primerange
        import time
        start = time.time()
        primes = primerange(1,L)
        out = 0
        for p in primes:
            if p%100==1: print p, time.time()-start  # progress log
            out = (out+s(p))%1000000009
        return out
    print S(10**8)
print euler635()
# For each of n triples, add the two smallest values to the total.  If the
# total ends up divisible by 9, bump it by the smallest available upgrade
# (replace a chosen value by the triple's maximum) whose difference breaks
# the divisibility.  Reads everything from stdin, prints the answer.
n = int(input())
ans = 0
minDistModNot9 = float('inf')          # smallest c - b with (c - b) % 9 != 0
minDistModNot9Reserve = float('inf')   # fallback: smallest c - a with (c - a) % 9 != 0
for _ in range(n):
    a, b, c = sorted([int(x) for x in input().split()])
    ans += a + b
    # BUG FIX: the original ``c - b % 9 != 0`` parses as ``c - (b % 9)``;
    # the intended test is on the difference itself.
    if (c - b) % 9 != 0:
        minDistModNot9 = min(minDistModNot9, c - b)
    if (c - a) % 9 != 0:
        # BUG FIX: track c - a here; the original stored c - b even though
        # the condition tests c - a.
        minDistModNot9Reserve = min(minDistModNot9Reserve, c - a)
if ans % 9 != 0:
    print(ans)
elif minDistModNot9 != float('inf'):
    print(ans + minDistModNot9)
elif minDistModNot9Reserve != float('inf'):
    print(ans + minDistModNot9Reserve)
else:
    # No divisibility-breaking swap exists anywhere; print the sum as-is.
    print(ans)
def margeSort (list):
    """Merge sort that also counts the inversions in the input.

    Recursively splits the input in two, sorts each half, then merges them
    while counting how many pairs (i, j) with i < j have list[i] > list[j].

    BUG FIX: the original merge used ``<`` instead of ``<=``, which took the
    right element on ties and wrongly counted equal pairs as inversions.

    :param list: the unsorted list of mutually comparable items
    :return: tuple (sorted list, number of inversions)
    """
    items = list  # local alias; avoid using the shadowed builtin name below
    n = len(items)
    if n <= 1:
        # Already sorted; zero inversions.
        return items, 0
    middle = n // 2
    # Sort the two halves recursively; each call also reports its inversions.
    left_sorted, left_inv = margeSort(items[:middle])
    right_sorted, right_inv = margeSort(items[middle:])
    # Merge the two sorted halves, counting cross inversions: whenever an
    # element of the right half is emitted before remaining left elements,
    # it is inverted with every one of them.
    merged = []
    inv_count = left_inv + right_inv
    i = 0
    j = 0
    while i < len(left_sorted) and j < len(right_sorted):
        if left_sorted[i] <= right_sorted[j]:
            merged.append(left_sorted[i])
            i = i + 1
        else:
            merged.append(right_sorted[j])
            inv_count = inv_count + (len(left_sorted) - i)
            j = j + 1
    # At most one of these extends with anything.
    merged.extend(left_sorted[i:])
    merged.extend(right_sorted[j:])
    return merged, inv_count
# Demo run (Python 2): sort a sample list and report the inversion count.
unsorted = [5,12,6,8,2,45,67,3,9,-12,4,24,35]
sorted, inversions = margeSort(unsorted)  # NOTE(review): 'sorted' shadows the builtin
print 'final list = ', sorted, 'and inversions count = ', inversions
import collections
import json
import numpy as np
import typing
from smac.configspace import Configuration
from smac.tae.execute_ta_run import StatusType
from smac.utils.constants import MAXINT
__author__ = "Marius Lindauer"
__copyright__ = "Copyright 2015, ML4AAD"
__license__ = "3-clause BSD"
__maintainer__ = "Marius Lindauer"
__email__ = "lindauer@cs.uni-freiburg.de"
__version__ = "0.0.1"
# Record types used by RunHistory.data: RunKey uniquely identifies one run
# of a configuration, RunValue holds its outcome; InstSeedKey indexes the
# per-configuration (instance, seed) list.
RunKey = collections.namedtuple(
    'RunKey', ['config_id', 'instance_id', 'seed'])
InstSeedKey = collections.namedtuple(
    'InstSeedKey', ['instance', 'seed'])
RunValue = collections.namedtuple(
    'RunValue', ['cost', 'time', 'status', 'additional_info'])
class RunHistory(object):
    '''Container for target algorithm run information.
    Guaranteed to be picklable.
    Arguments
    ---------
    aggregate_func: callable
        function to aggregate perf across instances
    '''
    def __init__(self, aggregate_func):
        # By having the data in a deterministic order we can do useful tests
        # when we serialize the data and can assume it's still in the same
        # order as it was added.
        self.data = collections.OrderedDict()  # RunKey -> RunValue
        # for fast access, we have also an unordered data structure
        # to get all instance seed pairs of a configuration
        self._configid_to_inst_seed = {}
        self.config_ids = {}  # config -> id
        self.ids_config = {}  # id -> config
        self._n_id = 0  # highest config id handed out so far
        self.cost_per_config = {}  # config_id -> cost
        # runs_per_config is necessary for computing the moving average
        self.runs_per_config = {}  # config_id -> number of runs
        self.aggregate_func = aggregate_func
    def add(self, config, cost, time,
            status, instance_id=None,
            seed=None,
            additional_info=None,
            external_data: bool = False):
        '''
        adds a data of a new target algorithm (TA) run;
        it will update data if the same key values are used
        (config, instance_id, seed)
        Attributes
        ----------
        config : dict (or other type -- depending on config space module)
            parameter configuration
        cost: float
            cost of TA run (will be minimized)
        time: float
            runtime of TA run
        status: str
            status in {SUCCESS, TIMEOUT, CRASHED, ABORT, MEMOUT}
        instance_id: str
            str representing an instance (default: None)
        seed: int
            random seed used by TA (default: None)
        additional_info: dict
            additional run infos (could include further returned
            information from TA or fields such as start time and host_id)
        external_data: bool
            if True, run will not be added to self._configid_to_inst_seed
            and not available through get_runs_for_config();
            essentially, intensification will not see this run,
            but the EPM still gets it
        '''
        # Assign a fresh id the first time we see this configuration.
        config_id = self.config_ids.get(config)
        if config_id is None:
            self._n_id += 1
            self.config_ids[config] = self._n_id
            config_id = self.config_ids.get(config)
            self.ids_config[self._n_id] = config
        k = RunKey(config_id, instance_id, seed)
        v = RunValue(cost, time, status, additional_info)
        # Each runkey is supposed to be used only once. Repeated tries to add
        # the same runkey will be ignored silently if not capped.
        if self.data.get(k) is None:
            self._add(k, v, status, external_data)
        elif status != StatusType.CAPPED and self.data[k].status == StatusType.CAPPED:
            # overwrite capped runs with uncapped runs
            self._add(k, v, status, external_data)
        elif status == StatusType.CAPPED and self.data[k].status == StatusType.CAPPED and cost > self.data[k].cost:
            # overwrite if censored with a larger cutoff
            self._add(k, v, status, external_data)
    def _add(self, k, v, status, external_data):
        '''
        actual function to add new entry to data structures
        '''
        self.data[k] = v
        if not external_data and status != StatusType.CAPPED:
            # also add to fast data structure
            is_k = InstSeedKey(k.instance_id, k.seed)
            self._configid_to_inst_seed[
                k.config_id] = self._configid_to_inst_seed.get(k.config_id, [])
            self._configid_to_inst_seed[k.config_id].append(is_k)
            # assumes an average across runs as cost function aggregation
            self.incremental_update_cost(self.ids_config[k.config_id], v.cost)
    def update_cost(self, config):
        '''
        store the performance of a configuration across the instances in self.cost_per_config
        and also updates self.runs_per_config;
        uses self.aggregate_func
        Arguments
        --------
        config: Configuration
            configuration to update cost based on all runs in runhistory
        '''
        inst_seeds = set(self.get_runs_for_config(config))
        perf = self.aggregate_func(config, self, inst_seeds)
        config_id = self.config_ids[config]
        self.cost_per_config[config_id] = perf
        self.runs_per_config[config_id] = len(inst_seeds)
    def compute_all_costs(self, instances: typing.List[str] = None):
        '''
        computes the cost of all configurations from scratch
        and overwrites self.cost_per_config and self.runs_per_config accordingly;
        Arguments
        ---------
        instances: typing.List[str]
            list of instances; if given, cost is only computed wrt to this instance set
        '''
        self.cost_per_config = {}
        self.runs_per_config = {}
        for config, config_id in self.config_ids.items():
            inst_seeds = set(self.get_runs_for_config(config))
            if instances is not None:
                inst_seeds = list(
                    filter(lambda x: x.instance in instances, inst_seeds))
            if inst_seeds:  # can be empty if never saw any runs on <instances>
                perf = self.aggregate_func(config, self, inst_seeds)
                self.cost_per_config[config_id] = perf
                self.runs_per_config[config_id] = len(inst_seeds)
    def incremental_update_cost(self, config: Configuration, cost: float):
        '''
        incrementally updates the performance of a configuration by using a moving average;
        Arguments
        --------
        config: Configuration
            configuration to update cost based on all runs in runhistory
        cost: float
            cost of new run of config
        '''
        config_id = self.config_ids[config]
        n_runs = self.runs_per_config.get(config_id, 0)
        old_cost = self.cost_per_config.get(config_id, 0.)
        # Running mean: new_mean = (old_mean * n + cost) / (n + 1)
        self.cost_per_config[config_id] = (
            (old_cost * n_runs) + cost) / (n_runs + 1)
        self.runs_per_config[config_id] = n_runs + 1
    def get_cost(self, config):
        '''
        returns empirical cost for a configuration;
        uses self.cost_per_config;
        returns np.nan for configurations without a recorded cost
        '''
        config_id = self.config_ids[config]
        return self.cost_per_config.get(config_id, np.nan)
    def get_runs_for_config(self, config):
        """Return all runs (instance seed pairs) for a configuration.
        Parameters
        ----------
        config : Configuration from ConfigSpace
            parameter configuration
        Returns
        ----------
        list: tuples of instance, seed
        """
        config_id = self.config_ids.get(config)
        return self._configid_to_inst_seed.get(config_id, [])
    def get_all_configs(self):
        """ Return all configurations in this RunHistory object
        Returns
        -------
        list: parameter configurations
        """
        return list(self.config_ids.keys())
    def empty(self):
        """
        Check whether or not the RunHistory is empty.
        Returns
        ----------
        bool: True if no runs have been added to the RunHistory,
              False otherwise
        """
        return len(self.data) == 0
    def save_json(self, fn="runhistory.json"):
        '''
        saves runhistory on disk
        Parameters
        ----------
        fn : str
            file name
        '''
        class EnumEncoder(json.JSONEncoder):
            """
            custom encoder for enum-serialization
            (implemented for StatusType from tae/execute_ta_run)
            locally defined because only ever needed here.
            using encoder implied using object_hook defined in StatusType
            to deserialize from json.
            """
            def default(self, obj):
                if isinstance(obj, StatusType):
                    return {"__enum__": str(obj)}
                return json.JSONEncoder.default(self, obj)
        configs = {id_: conf.get_dictionary()
                   for id_, conf in self.ids_config.items()}
        # NOTE(review): int(k.seed) assumes seed is never None here -- confirm
        # that runs saved to disk always carry a seed.
        data = [([int(k.config_id),
                  str(k.instance_id) if k.instance_id is not None else None,
                  int(k.seed)], list(v))
                for k, v in self.data.items()]
        with open(fn, "w") as fp:
            json.dump({"data": data,
                       "configs": configs}, fp, cls=EnumEncoder)
    def load_json(self, fn, cs):
        """Load a runhistory in json representation from disk.
        Overwrites the current runhistory!
        Parameters
        ----------
        fn : str
            file name to load from
        cs : ConfigSpace
            instance of configuration space
        """
        with open(fn) as fp:
            all_data = json.load(fp, object_hook=StatusType.enum_hook)
        self.ids_config = {int(id_): Configuration(cs, values=values)
                           for id_, values in all_data["configs"].items()}
        self.config_ids = {config: id_ for id_, config in self.ids_config.items()}
        self._n_id = len(self.config_ids)
        # important to use add method to use all data structure correctly
        for k, v in all_data["data"]:
            self.add(config=self.ids_config[int(k[0])],
                     cost=float(v[0]),
                     time=float(v[1]),
                     status=StatusType(v[2]),
                     instance_id=k[1],
                     seed=int(k[2]),
                     additional_info=v[3])
    def update_from_json(self, fn, cs):
        """Update the current runhistory by adding new runs from a json file.
        Parameters
        ----------
        fn : str
            file name to load from
        cs : ConfigSpace
            instance of configuration space
        """
        new_runhistory = RunHistory(self.aggregate_func)
        new_runhistory.load_json(fn, cs)
        self.update(runhistory=new_runhistory)
    def update(self, runhistory, external_data: bool = False):
        """Update the current runhistory by adding new runs from another runhistory.
        Parameters
        ----------
        runhistory: RunHistory
            runhistory with additional data to be added to self
        external_data: bool
            if True, run will not be added to self._configid_to_inst_seed
            and not available through get_runs_for_config()
        """
        # Configurations might be already known, but by a different ID. This
        # does not matter here because the add() method handles this
        # correctly by assigning an ID to unknown configurations and re-using
        # the ID
        for key, value in runhistory.data.items():
            config_id, instance_id, seed = key
            cost, time, status, additional_info = value
            config = runhistory.ids_config[config_id]
            self.add(config=config, cost=cost, time=time,
                     status=status, instance_id=instance_id,
                     seed=seed, additional_info=additional_info,
                     external_data=external_data)
|
# Generated by Django 3.1.1 on 2020-09-23 04:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the Customer table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cus_ID', models.CharField(max_length=100)),
                ('cus_PW', models.CharField(max_length=100)),  # NOTE(review): stores a password as plain CharField -- confirm it is hashed upstream
                ('cus_name', models.CharField(max_length=100)),
                ('cus_address', models.CharField(max_length=500)),
                ('cus_RRN', models.CharField(max_length=6)),
                ('cus_phone_number', models.CharField(max_length=13)),
            ],
        ),
    ]
|
"""
Minion's bored game
===================
There you have it. Yet another pointless "bored" game created by the bored minions of Professor Boolean.
The game is a single player game, played on a board with n squares in a horizontal row. The minion places a token on the left-most square and rolls a special three-sided die.
If the die rolls a "Left", the minion moves the token to a square one space to the left of where it is currently. If there is no square to the left, the game is invalid, and you start again.
If the die rolls a "Stay", the token stays where it is.
If the die rolls a "Right", the minion moves the token to a square, one space to the right of where it is currently. If there is no square to the right, the game is invalid and you start again.
The aim is to roll the dice exactly t times, and be at the rightmost square on the last roll. If you land on the rightmost square before t rolls are done then the only valid dice roll is to roll a "Stay". If you roll anything else, the game is invalid (i.e., you cannot move left or right from the rightmost square).
To make it more interesting, the minions have leaderboards (one for each n,t pair) where each minion submits the game he just played: the sequence of dice rolls. If some minion has already submitted the exact same sequence, they cannot submit a new entry, so the entries in the leader-board correspond to unique games playable.
Since the minions refresh the leaderboards frequently on their mobile devices, as an infiltrating hacker, you are interested in knowing the maximum possible size a leaderboard can have.
Write a function answer(t, n), which given the number of dice rolls t, and the number of squares in the board n, returns the possible number of unique games modulo 123454321. i.e. if the total number is S, then return the remainder upon dividing S by 123454321, the remainder should be an integer between 0 and 123454320 (inclusive).
n and t will be positive integers, no more than 1000. n will be at least 2.
Languages
=========
To provide a Python solution, edit solution.py
To provide a Java solution, edit solution.java
Test cases
==========
Inputs:
(int) t = 1
(int) n = 2
Output:
(int) 1
Inputs:
(int) t = 3
(int) n = 2
Output:
(int) 3
"""
import sys
sys.setrecursionlimit(5000)
def answer(t, n):
    """Count the distinct valid games of exactly t rolls on an n-square board.

    The token starts on square 1 (leftmost) and must sit on square n
    (rightmost) after exactly t rolls; once it reaches square n, only
    "Stay" rolls are valid, and moving off either end invalidates the game.

    Replaces the original memoized recursion (which needed the module-level
    sys.setrecursionlimit hack, since recursion depth grows with t) with a
    bottom-up DP over (steps remaining, position): O(t*n) time, O(n) space.

    :param t: number of dice rolls (positive int, <= 1000)
    :param n: number of squares on the board (int >= 2, <= 1000)
    :return: number of unique games modulo 123454321
    """
    MOD = 123454321
    # ways[p] = number of valid completions with the current number of steps
    # remaining and the token on square p (1-based; indices 0 and n+1 are
    # always-zero sentinels standing for "fell off the board").
    ways = [0] * (n + 2)
    ways[n] = 1  # 0 steps left: valid only if already on the last square
    for _ in range(t):
        nxt = [0] * (n + 2)
        # On the rightmost square only "Stay" is allowed.
        nxt[n] = ways[n]
        for p in range(1, n):
            # From p the roll moves to p-1 (Left), p (Stay) or p+1 (Right);
            # the sentinel at index 0 kills the invalid move off the left edge.
            nxt[p] = (ways[p - 1] + ways[p] + ways[p + 1]) % MOD
        ways = nxt
    # The game starts on square 1 with all t rolls remaining.
    return ways[1]
|
import math
x=raw_input('enter the coordinate x\n')
y=raw_input('enter the coordinate y\n')
x=float(x)
y=float(y)
s=math.atan(y/x)/math.pi*180
print 'the angle in degrees is', s
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-11 20:17
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the geo-enabled Parada table."""
    initial = True
    dependencies = [
        ('bike_auth', '0001_initial'),
        ('estacionamento', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Parada',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('nome', models.CharField(blank=True, max_length=100, null=True)),
                ('avaliacao', models.IntegerField(default=0)),
                # GeoDjango point in WGS84 (srid=4326).
                ('local', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('comentario', models.TextField(blank=True, null=True)),
                ('foto', models.TextField(blank=True, null=True)),
                ('token', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paradas', to='bike_auth.Token')),
            ],
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 4 11:26:57 2019
@author: oghinde
"""
import sys
from pathlib import Path  # BUG FIX: Path.home() was called below but Path was never imported
home = str(Path.home())
sys.path.append(home + '/Git/Clusterwise_Linear_Model/')
sys.path.append(home + '/Git/Utilities/')
import numpy as np
from normalization.time_series_normalizer import TimeSeriesNormalizer
import matplotlib
import matplotlib.pyplot as plt
# Demo: build N noisy, scaled and shifted sine waves, normalize them into
# [new_min, new_max] with percentile clipping, then denormalize and plot
# each stage for visual inspection.
N = 5          # number of series
L = 100        # samples per series
new_min = 0
new_max = 1
perc_low = 5   # clipping percentiles used by the normalizer
perc_high = 90
amps = 100*np.random.rand(N, 1)
means = 100*np.random.randn(N, 1)
noise = 20*np.random.randn(N, L)
X = np.sin(np.linspace(-10, 10, L))[np.newaxis, :]
X_tr = np.dot(amps, X) + means + noise
y_tr = np.random.randn(5, 1)
tr_normalizer = TimeSeriesNormalizer(new_min=new_min,
                                     new_max=new_max,
                                     perc_low=perc_low,
                                     perc_high=perc_high)
X_norm, y_norm = tr_normalizer.normalize(X_tr, y_tr)
X_denorm = tr_normalizer.denormalize(X_norm)
plt.plot(X_tr.T)
plt.title('Original')
plt.show()
plt.plot(X_norm.T)
plt.title('Normalized')
plt.show()
plt.plot(X_denorm.T)
plt.title('Denormalized')
plt.show()
|
from django.db import models
# Create your models here.
class Patient(models.Model):
    """Patient contact record."""
    name = models.CharField(max_length=200, null=True)
    # CONSISTENCY FIX: Doctor and Opthalmologist use EmailField; a plain
    # CharField here skipped email validation. (Requires a migration.)
    email = models.EmailField(max_length=254, null=True)
    address = models.CharField(max_length=128)
    dob = models.DateField(auto_now=False, auto_now_add=False)
    phone_number = models.CharField(max_length=200, null=True)
    def __str__(self):
        return self.name
class Doctor(models.Model):
    """Doctor contact record."""
    name = models.CharField(max_length=200, null=True)
    email = models.EmailField(max_length=254)
    address = models.CharField(max_length=128)
    phone_number = models.CharField(max_length=200, null=True)
    def __str__(self):
        return self.name
class Opthalmologist(models.Model):
    """Ophthalmologist contact record.

    NOTE(review): class name is a misspelling of "Ophthalmologist"; renaming
    would require a migration and updates at every call site.
    """
    name = models.CharField(max_length=200, null=True)
    email = models.EmailField(max_length=254)
    address = models.CharField(max_length=128)
    phone_number = models.CharField(max_length=200, null=True)
    def __str__(self):
        return self.name
class Patienthistory(models.Model):
    """One past illness entry for a patient.

    NOTE(review): the patient and doctor are referenced by free-text name
    rather than ForeignKey -- confirm this is intentional.
    """
    STATUS = (
        ('Recovered', 'Recovered'),
        ('Not Recovered', 'Not Recovered')
    )
    name = models.CharField(max_length=200, null=True)
    disease = models.CharField(max_length=200, null=True)
    disease_doctor = models.CharField(max_length=200, null=True)
    recovery_status = models.CharField(
        max_length=200, null=True, choices=STATUS)
    prescriptions = models.CharField(max_length=200)
    def __str__(self):
        return self.name
|
#coding=utf-8
import os
from mysite.iclock.models import USER_SPEDAY,USER_SPEDAY_DETAILS
from django.conf import settings
from mysite.utils import getJSResponse
from django.utils.translation import ugettext_lazy as _
def fileDelete(request,ModelName):
    """Dispatch a file-deletion POST by model name.

    Only 'USER_SPEDAY' is handled; any other name implicitly returns None --
    presumably callers never pass anything else (TODO confirm).
    """
    if ModelName=='USER_SPEDAY':
        keys = request.POST.getlist("K")  # primary keys selected in the UI
        return del_spedy_file(keys)
def del_spedy_file(keys):
    """Delete the files attached to the given USER_SPEDAY primary keys.

    Removes each referenced file from disk (if present), then blanks the
    ``file`` field on the matching USER_SPEDAY_DETAILS rows.  Returns a JSON
    response with ret=0 on success, ret=1 on any failure.
    """
    path = '%s%s/'%(settings.ADDITION_FILE_ROOT,'userSpredyFile')
    files = USER_SPEDAY_DETAILS.objects.filter(USER_SPEDAY_ID__pk__in=keys).values_list('file',flat=True)
    try:
        for file in files:
            tmp_path = path + file
            if os.path.exists(tmp_path):
                os.remove(tmp_path)
        USER_SPEDAY_DETAILS.objects.filter(USER_SPEDAY_ID__pk__in=keys).update(file='')
    except Exception:
        # FIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort failure response is preserved.
        return getJSResponse({"ret":1,"message":u'%s'%_(u'删除失败')})
    return getJSResponse({"ret":0,"message":u'%s'%_(u'删除成功')})
|
from bs4 import BeautifulSoup
import requests
import sqlite3
import datetime
# Scrape today's exchange rates from the Credit Agricole page and append
# them to a local SQLite table (one row per currency per run).
location = 'atm_numbers.sqlite'
table_name = 'currency'
conn = sqlite3.connect(location)
c = conn.cursor()
sql_create = 'create table if not exists ' + table_name + ' (date text, currency text, buy text, sell text, nbu text)'
# sql_drop = 'drop table ' + table_name
c.execute(sql_create)
conn.commit()
result = requests.get("https://credit-agricole.ua/press/exchange-rates")
data = result.content
soup = BeautifulSoup(data, "html.parser")
now = datetime.datetime.now()
date = str(now.strftime("%d%m%Y"))  # ddmmyyyy stamp shared by all rows of this run
# print(date)
table = soup.find('tbody')  # first table body on the page holds the rates
# print(table)
for row in table.find_all('tr')[1:]:
    # Create a variable of all the <td> tag pairs in each <tr> tag pair,
    col = row.find_all('td')
    currency = col[0].string.strip()
    buy = col[1].string.strip()
    sell = col[2].string.strip()
    nbu = col[3].string.strip()
    # Parameterized insert keeps the scraped strings safe to store.
    c.execute('insert into ' + table_name + ' (date, currency, buy, sell, nbu) values (?, ?, ?, ?, ?);',
              (date, currency, buy, sell, nbu))
conn.commit()
c.close()
conn.close()
|
import queue
from collections import deque
# Read an undirected graph (n nodes, m edges) from stdin and print how many
# nodes are reachable from node 1 via BFS, excluding node 1 itself.
n = int(input())   # number of nodes (1-based ids)
m = int(input())   # number of edges
graph = [[] for i in range(n+1)]
for _ in range(m):
    node1, node2 = map(int, input().split())
    graph[node1].append(node2)
    graph[node2].append(node1)
visited = [False] * (n+1)
# deque gives O(1) popleft without the locking overhead of queue.Queue,
# which is meant for inter-thread communication.
q = deque([1])
visited[1] = True
count = 0
while q:
    currentNode = q.popleft()
    # FIX: loop variable renamed -- the original reused 'n', silently
    # shadowing the node count read above.
    for neighbor in graph[currentNode]:
        if not visited[neighbor]:
            visited[neighbor] = True
            q.append(neighbor)
            count += 1
print(count)
from django.shortcuts import render
# NOTE(review): Flask's thread-local request imported inside a Django app --
# Django views receive the request as a function argument instead.
from flask import request
# Create your views here.
# NOTE(review): this block runs once at import time, not per request; the
# check belongs inside a view function.
if request.method == 'POST' and 'run_script' in request.POST:
    # import function to run
    # NOTE(review): '.py_code.hello.py' is not a valid module path (the
    # trailing '.py' is a file suffix, not a package component).
    from .py_code.hello.py import *
    # call function
# NOTE(review): HttpResponseRedirect and reverse are never imported, so this
# view cannot run as written.
def print_some ():
    # return user to required page
    return HttpResponseRedirect(reverse("adi"))
from django import forms
from ticketingApps.models import *
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from datetime import datetime, timedelta
from django.utils import timezone
class AddMovieForm(ModelForm):
    """Form for creating a Movie, with a year picker spanning 1900..now+2."""
    class Meta:
        model=Movie
        fields=['movietitle','movieruntime','movierating','moviereleasedate','moviegenre','moviedescription','poster']
        labels={
            'movietitle':'Title',
            'movieruntime':"Runtime (minutes)",
            'movierating':'MPAA Rating',
            'moviereleasedate':"Release date",
            'moviegenre':"Genre",
            'moviedescription':"Description"
        }
        # Offer the current year and the next two first, then 1900..last year.
        # NOTE(review): evaluated once at import time, so a long-running
        # process spanning New Year keeps the old year list.
        yearNow = datetime.now().year
        yearsT = (str(yearNow), str(yearNow+1),str(yearNow+2))+(tuple(map(str, range(1900,yearNow))))
        widgets = {'moviereleasedate':forms.SelectDateWidget(years=yearsT)}
class AddRoomForm(ModelForm):
    """Form for creating a Room with its seat-grid dimensions."""
    class Meta:
        model=Room
        fields=['roomnumber','rows','columns']
        labels={
            'roomnumber':'Room Number',
            'rows':'Number of Rows of Seats',
            'columns':"Number of Seats per Row"
        }
class RoomForShowingField(forms.ModelChoiceField):
    """Room choice field labelled "Room #<number>"."""
    def label_from_instance(self,obj):
        return "Room #%i" % obj.roomnumber
class MovieForShowingField(forms.ModelChoiceField):
    """Movie choice field labelled with the movie title."""
    def label_from_instance(self,obj):
        return obj.movietitle
class PricingForShowingField(forms.ModelChoiceField):
    """Pricing-group choice field labelled with the group name."""
    def label_from_instance(self,obj):
        return obj.name
class AddShowingForm(ModelForm):
    """Form for scheduling a Movieshowing.

    room and pricing start with empty querysets; presumably the view narrows
    them to the current theater before rendering -- confirm at call sites.
    """
    room = RoomForShowingField(queryset=Room.objects.none())
    movie = MovieForShowingField(queryset=Movie.objects.order_by('-moviereleasedate'))
    pricing = PricingForShowingField(queryset=PricingGroup.objects.none())
    class Meta:
        model=Movieshowing
        fields=['room','movie','time','pricing']
        labels={
            'time':'Date and time (mm/dd/yyyy hh:mm)'
        }
class GroupForPriceField(forms.ModelChoiceField):
    """Pricing-group choice field labelled with the group name."""
    def label_from_instance(self,obj):
        return obj.name
class AddPricePointForm(ModelForm):
    """Form for creating a PricePoint.

    group starts with an empty queryset; presumably populated by the view.
    """
    group = GroupForPriceField(queryset=PricingGroup.objects.none())
    class Meta:
        model=PricePoint
        fields='__all__'
class TheaterForPromoField(forms.ModelChoiceField):
    """Theater choice field labelled with the theater name."""
    def label_from_instance(self,obj):
        return obj.theatername
class CreatePromoCodeForm(ModelForm):
    """Form for creating a Promocode.

    theater starts with an empty queryset; presumably populated by the view.
    """
    theater = TheaterForPromoField(queryset=Theater.objects.none())
    class Meta:
        model=Promocode
        fields='__all__'
class TheaterForm(ModelForm):
    """Form for creating/editing a Theater's name and address."""
    class Meta:
        model=Theater
        fields=['theatername','theaterstreet','theatercity','theaterstate','theaterzip']
        labels={
            'theaterstreet':'Street',
            'theatername':'Name',
            'theatercity':'City',
            'theaterstate':"State",
            "theaterzip":"Zip code",
            # NOTE(review): 'price' is not in fields above, so this label is dead.
            'price':"Price per Ticket"
        }
class SignupForm(UserCreationForm):
    """User registration form that also fills the related Profile record."""
    isemployee=forms.BooleanField(required=False,label="I am registering as a theater employee")
    userphone=forms.CharField(label="Phone Number")
    class Meta:
        model=User
        fields=['email','username','password1','password2','isemployee','first_name','last_name','userphone']
        labels={
            'email':'Email',
            'username':'Username',
            'password1':'Password',
            'password2':'Confirm Password',
            #'isemployee':"I am registering as a theater employee",
            'first_name':"First Name",
            'last_name':"Last Name",
            #'userphone':'Phone Number',
        }
    def save(self, commit=True):
        """Save the User, then copy the extra fields onto user.profile.

        Always persists (commit=False is rejected) because the Profile row --
        presumably created by a post_save signal, TODO confirm -- must exist
        before it can be updated.  Returns (user, profile).
        """
        if not commit:
            raise NotImplementedError("Can't create User and UserProfile without database save")
        user = super(SignupForm, self).save(commit=True)
        profile = user.profile
        profile.isemployee=self.cleaned_data['isemployee']
        profile.userphone=self.cleaned_data['userphone']
        user.save()
        profile.save()
        return user, profile
class TicketTypeForm(forms.Form):
    """Asks how many tickets of each pricing tier the buyer wants.

    One 0-10 ChoiceField is added per pricing tier; validation checks that
    the tier counts add up to the number of seats already selected.
    """
    promocode = forms.CharField(label="Promo Code", required=False)
    def __init__(self,pricingList,numberTix,*args,**kwargs):
        super(TicketTypeForm, self).__init__(*args, **kwargs)
        self.pList = pricingList
        self.numTix=numberTix
        # One dynamic count field per pricing tier, keyed by the tier name.
        for price in self.pList:
            keyF = price.name
            self.fields[keyF] = forms.ChoiceField(choices=[(i, i) for i in range(11)])
    def clean(self):
        cleaned_data = super().clean()
        numTix = 0
        for price in self.pList:
            numTix = numTix + int(cleaned_data.get(price.name))
        # BUG FIX: was ``numTix is not self.numTix`` -- identity comparison
        # on ints only works by accident for small CPython-cached values;
        # counts above the cache range would wrongly fail validation.
        if numTix != self.numTix:
            raise forms.ValidationError(
                "The number of seats chosen does not match number of seats assigned to a ticket type."
            )
        return cleaned_data
class AssociateEmployeeForm(forms.Form):
    """Looks up an employee Profile by username so they can be associated."""
    username = forms.CharField(label="Username of New Employee")

    def clean(self):
        cleaned_data = super().clean()
        matching = Profile.objects.filter(user__username=cleaned_data.get('username'))
        # BUG FIX: `is not 1` compared object identity, not value — only safe
        # by accident for CPython small-int caching. Use != for the count.
        if matching.count() != 1:
            raise forms.ValidationError("No matching user was found.")
        if not matching.first().isemployee:
            raise forms.ValidationError("The matching user is not an employee")
        return cleaned_data
|
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from torchtext import data
from torchtext.vocab import pretrained_aliases, Vocab
from transformers import (BertConfig, BertForSequenceClassification, BertTokenizer)
from trainer import LSTMTrainer
from utils import set_seed, load_data, spacy_tokenizer
class MultiChannelEmbedding(nn.Module):
    """Token embedding followed by parallel 1-D convolutions ("channels").

    Each token is embedded, convolved with one Conv1d per kernel size in
    `filters`, and the channel outputs are concatenated and passed through
    ReLU.

    Input:  (seq_len, batch) int64 token ids.
    Output: (seq_len', batch, len(filters) * filters_size); with the default
            even kernel sizes and padding f//2, seq_len' == seq_len + 1.
    """

    def __init__(self, vocab_size, embed_size, filters_size=64, filters=(2, 4, 6), dropout_rate=0.0):
        # FIX: default `filters` is now a tuple — the mutable list default was
        # shared across instances (harmless here but a classic footgun).
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.filters_size = filters_size
        self.filters = filters
        self.dropout_rate = dropout_rate
        self.embedding = nn.Embedding(self.vocab_size, self.embed_size)
        # One convolution per kernel size; padding f//2 keeps output lengths
        # aligned across branches so they can be concatenated channel-wise.
        self.conv1 = nn.ModuleList([
            nn.Conv1d(self.embed_size, filters_size, kernel_size=f, padding=f // 2)
            for f in filters
        ])
        self.act = nn.Sequential(
            nn.ReLU(inplace=True),
            # nn.Dropout(p=dropout_rate)
        )

    def init_embedding(self, weight):
        """Replace the embedding weights (e.g. with pretrained vectors)."""
        self.embedding.weight = nn.Parameter(weight.to(self.embedding.weight.device))

    def forward(self, x):
        # (seq, batch) -> (batch, seq)
        x = x.transpose(0, 1)
        # (batch, seq, embed) -> (batch, embed, seq) as Conv1d expects
        x = self.embedding(x).transpose(1, 2)
        channels = [conv(x) for conv in self.conv1]
        # BUG FIX: `self.act` was constructed but never used — forward
        # duplicated it with F.relu. Apply the module instead (identical ReLU
        # behavior, and re-enabling the dropout line above now takes effect).
        x = self.act(torch.cat(channels, 1))
        # (batch, channels, seq) -> (seq, batch, channels)
        x = x.transpose(1, 2).transpose(0, 1)
        return x
class BiLSTMClassifier(nn.Module):
    """Bidirectional-LSTM sentence classifier.

    Embeds the token sequence, runs a BiLSTM over packed sequences, then
    concatenates the forward state at each sequence's last real token with the
    backward state at the first token, and classifies with a 2-layer MLP.
    """
    def __init__(self, num_classes, vocab_size, embed_size, lstm_hidden_size, classif_hidden_size,
                 lstm_layers=1, dropout_rate=0.0, use_multichannel_embedding=False):
        super().__init__()
        self.vocab_size = vocab_size
        self.lstm_hidden_size = lstm_hidden_size
        self.use_multichannel_embedding = use_multichannel_embedding
        if self.use_multichannel_embedding:
            self.embedding = MultiChannelEmbedding(self.vocab_size, embed_size, dropout_rate=dropout_rate)
            # Effective width is the concatenation of all conv channels.
            self.embed_size = len(self.embedding.filters) * self.embedding.filters_size
        else:
            self.embedding = nn.Embedding(self.vocab_size, embed_size)
            self.embed_size = embed_size
        self.lstm = nn.LSTM(self.embed_size, self.lstm_hidden_size, lstm_layers, bidirectional=True, dropout=dropout_rate)
        # *2 because the LSTM is bidirectional (forward + backward features).
        self.classifier = nn.Sequential(
            nn.Linear(lstm_hidden_size*2, classif_hidden_size),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout_rate),
            nn.Linear(classif_hidden_size, num_classes)
        )
    def init_embedding(self, weight):
        """Load pretrained vectors into whichever embedding variant is in use."""
        if self.use_multichannel_embedding:
            self.embedding.init_embedding(weight)
        else:
            self.embedding.weight = nn.Parameter(weight.to(self.embedding.weight.device))
    def forward(self, seq, length):
        # TODO use sort_within_batch?
        # Sort batch by descending length, as required by pack_padded_sequence;
        # length_perm_inv undoes the permutation at the end.
        seq_size, batch_size = seq.size(0), seq.size(1)
        length_perm = (-length).argsort()
        length_perm_inv = length_perm.argsort()
        seq = torch.gather(seq, 1, length_perm[None, :].expand(seq_size, batch_size))
        length = torch.gather(length, 0, length_perm)
        # Pack sequence
        seq = self.embedding(seq)
        seq = pack_padded_sequence(seq, length)
        # Send through LSTM
        features, hidden_states = self.lstm(seq)
        # Unpack sequence
        features = pad_packed_sequence(features)[0]
        # Separate last dimension into forward/backward features
        # NOTE(review): assumes the longest sequence spans the full seq_size so
        # the padded length equals seq_size — confirm for the embedding used.
        features = features.view(seq_size, batch_size, 2, -1)
        # Index to get forward and backward features and concatenate
        # Gather last word for each sequence
        last_indexes = (length - 1)[None, :, None, None].expand((1, batch_size, 2, features.size(-1)))
        forward_features = torch.gather(features, 0, last_indexes)
        # Squeeze seq dimension, take forward features
        forward_features = forward_features[0, :, 0]
        # Take first word, backward features
        backward_features = features[0, :, 1]
        features = torch.cat((forward_features, backward_features), -1)
        # Send through classifier
        logits = self.classifier(features)
        # Invert batch permutation
        logits = torch.gather(logits, 0, length_perm_inv[:, None].expand((batch_size, logits.size(-1))))
        return logits, hidden_states
def save_bilstm(model, output_dir):
    """Save `model`'s state_dict to <output_dir>/weights.pth.

    FIX: uses os.makedirs(..., exist_ok=True) — the original os.mkdir raised
    FileNotFoundError when an intermediate path component was missing (e.g.
    the checkpoint subdirectories created by the trainer callback).
    """
    os.makedirs(output_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(output_dir, "weights.pth"))
if __name__ == "__main__":
    # Train/evaluate the BiLSTM classifier, optionally distilling from BERT
    # scores (MSE loss) when --augmented or --use_teacher is given.
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, required=True, help="Directory containing the dataset.")
    parser.add_argument("--output_dir", type=str, required=True, help="Directory where to save the model.")
    parser.add_argument("--augmented", action="store_true", help="Wether to use the augmented dataset for knowledge distillation")
    parser.add_argument("--use_teacher", action="store_true", help="Use scores from BERT as labels")
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--lr", type=float, default=5e-5, help="Learning rate.")
    parser.add_argument("--lr_schedule", type=str, choices=["constant", "warmup", "cyclic"],
                        help="Schedule to use for the learning rate. Choices are: constant, linear warmup & decay, cyclic.")
    parser.add_argument("--warmup_steps", type=int, default=0,
                        help="Warmup steps for the 'warmup' learning rate schedule. Ignored otherwise.")
    parser.add_argument("--epochs_per_cycle", type=int, default=1,
                        help="Epochs per cycle for the 'cyclic' learning rate schedule. Ignored otherwise.")
    parser.add_argument("--do_train", action="store_true")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--checkpoint_interval", type=int, default=-1)
    parser.add_argument("--no_cuda", action="store_true")
    args = parser.parse_args()
    if not os.path.isdir(args.output_dir):
        os.mkdir(args.output_dir)
    device = torch.device("cuda" if not args.no_cuda and torch.cuda.is_available() else "cpu")
    set_seed(args.seed)
    train_dataset, valid_dataset, text_field = load_data(args.data_dir, spacy_tokenizer, augmented=args.augmented, use_teacher=args.use_teacher)
    vocab = text_field.vocab
    # Binary classifier sized to the vocabulary and pretrained vector width.
    model = BiLSTMClassifier(2, len(vocab.itos), vocab.vectors.shape[-1],
                             lstm_hidden_size=300, classif_hidden_size=400, dropout_rate=0.15).to(device)
    # Initialize word embeddings to fasttext
    model.init_embedding(vocab.vectors.to(device))
    trainer = LSTMTrainer(model, device,
                          loss="mse" if args.augmented or args.use_teacher else "cross_entropy",
                          train_dataset=train_dataset, val_dataset=valid_dataset, val_interval=250,
                          checkpt_interval=args.checkpoint_interval,
                          checkpt_callback=lambda m, step: save_bilstm(m, os.path.join(args.output_dir, "checkpt_%d" % step)),
                          batch_size=args.batch_size, gradient_accumulation_steps=args.gradient_accumulation_steps,
                          lr=args.lr)
    if args.do_train:
        trainer.train(args.epochs, schedule=args.lr_schedule,
                      warmup_steps=args.warmup_steps, epochs_per_cycle=args.epochs_per_cycle)
    print("Evaluating model:")
    print(trainer.evaluate())
    save_bilstm(model, args.output_dir)
|
""" Generic test data """
from datetime import datetime
from uuid import UUID
import sqlalchemy
from sqlalchemy.orm import Session
from sqlalchemy.types import CHAR
from pydantic import BaseModel, PositiveInt, constr
from fastapi_sqlalchemy import models
# Deterministic fixture rows inserted by load_people(); `name` and `order`
# are unique, matching the constraints on the Person table below.
PEOPLE_DATA = [
    {"name": "alice", "order": 1, "gender": "F", "age": 32},
    {"name": "bob", "order": 2, "gender": "M", "age": 22},
    {"name": "charlie", "order": 3, "gender": "M", "age": 60},
    {"name": "david", "order": 4, "gender": "M", "age": 32},
]
class Person(models.BASE, models.GuidMixin, models.TimestampMixin):
    """Test table: a person with unique name/order plus gender and age.

    GuidMixin supplies the GUID primary key and TimestampMixin the
    created_at/updated_at columns (see fastapi_sqlalchemy.models).
    """
    __tablename__ = "people"
    # Unique display name.
    name = sqlalchemy.Column(
        sqlalchemy.String(255),
        nullable=False,
        unique=True
    )
    # Unique ordering index — gives tests a deterministic sort key.
    order = sqlalchemy.Column(
        sqlalchemy.Integer,
        nullable=False,
        unique=True
    )
    # Single-character gender code (e.g. 'F'/'M').
    gender = sqlalchemy.Column(
        CHAR(1),
        nullable=False
    )
    age = sqlalchemy.Column(
        sqlalchemy.Integer,
        nullable=False
    )
class PersonRequestModel(BaseModel):
    """Request schema mirroring the user-supplied columns of the Person table."""
    name: constr(max_length=255)
    order: int
    gender: constr(min_length=1, max_length=1)
    age: PositiveInt
class PersonResponseModel(PersonRequestModel):
    """Response schema: the request fields plus server-generated id/timestamps."""
    id: UUID
    created_at: datetime
    updated_at: datetime
def load_people(session: Session):
    """Insert the PEOPLE_DATA fixtures and return the persisted Person rows."""
    people = [Person(**row) for row in PEOPLE_DATA]
    session.add_all(people)
    session.commit()
    # Sanity check: one ORM object per fixture row.
    assert len(people) == len(PEOPLE_DATA)
    return people
|
#!/usr/bin/env python
"""Script to generate NIRISS SIAF content and files using pysiaf and flight-like SIAF reference files
Authors
-------
Johannes Sahlmann
References
----------
Parts of the code were adapted from Colin Cox' makeSIAF.py
For a detailed description of the NIRISS SIAF, the underlying reference files, and the
transformations, see Goudfrooij & Cox, 2018: The Pre-Flight SI Aperture File, Part 5: NIRISS
(JWST-STScI-006317).
"""
from collections import OrderedDict
import os
import numpy as np
import pysiaf
from pysiaf.utils import tools, compare
from pysiaf.constants import JWST_SOURCE_DATA_ROOT, JWST_TEMPORARY_DATA_ROOT, \
JWST_DELIVERY_DATA_ROOT
from pysiaf import iando
import generate_reference_files
#############################
instrument = 'NIRISS'
test_dir = os.path.join(JWST_TEMPORARY_DATA_ROOT, instrument, 'generate_test')
# regenerate SIAF reference files if needed
regenerate_basic_reference_files = False
if regenerate_basic_reference_files:
    # generate_siaf_detector_layout()
    # generate_reference_files.generate_initial_siaf_aperture_definitions(instrument)
    # generate_siaf_detector_reference_file(instrument)
    # generate_siaf_ddc_mapping_reference_file(instrument)
    distortion_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,
                                        'niriss_astrometric_coefficients_august_2016_with_header.txt')
    generate_reference_files.generate_siaf_pre_flight_reference_files_niriss(distortion_file_name)
# DDC name mapping
_ddc_apername_mapping = iando.read.read_siaf_ddc_mapping_reference_file(instrument)
# NIRISS detector parameters, e.g. XDetSize
siaf_detector_parameters = iando.read.read_siaf_detector_reference_file(instrument)
detector_layout = iando.read.read_siaf_detector_layout()
siaf_alignment_parameters = iando.read.read_siaf_alignment_parameters(instrument)
# Fundamental aperture definitions: names, types, reference positions, dependencies
siaf_aperture_definitions = iando.read.read_siaf_aperture_definitions(instrument)
# Apertures are built into this dict and later sorted/written as a collection.
aperture_dict = {}
aperture_name_list = siaf_aperture_definitions['AperName'].tolist()
# First pass: construct every aperture and fill in the attributes that can be
# derived directly from the reference files.
for AperName in aperture_name_list:
    # child aperture to be constructed
    aperture = pysiaf.JwstAperture()
    aperture.AperName = AperName
    aperture.InstrName = siaf_detector_parameters['InstrName'][0].upper()  # all capitals. OK
    aperture.XDetSize = siaf_detector_parameters['XDetSize'][0]
    aperture.YDetSize = siaf_detector_parameters['YDetSize'][0]
    aperture.AperShape = siaf_detector_parameters['AperShape'][0]
    aperture.DetSciParity = 1
    aperture_definitions_index = siaf_aperture_definitions['AperName'].tolist().index(AperName)
    # Retrieve basic aperture parameters from definition files
    for attribute in 'XDetRef YDetRef AperType XSciSize YSciSize XSciRef YSciRef'.split():
        setattr(aperture, attribute, siaf_aperture_definitions[attribute][aperture_definitions_index])
    if siaf_aperture_definitions['AperType'][aperture_definitions_index] == 'OSS':
        # OSS apertures use detector-frame orientation/parity.
        aperture.DetSciYAngle = 0
        aperture.DetSciParity = 1
        aperture.VIdlParity = 1  # -> move to NIS_CEN aperture
    if AperName in ['NIS_CEN', 'NIS_CEN_OSS']:
        # Master apertures: alignment and distortion come from reference files.
        if AperName in detector_layout['AperName']:
            detector_layout_index = detector_layout['AperName'].tolist().index(AperName)
            for attribute in 'DetSciYAngle DetSciParity VIdlParity'.split():
                setattr(aperture, attribute, detector_layout[attribute][detector_layout_index])
        index = siaf_alignment_parameters['AperName'].tolist().index(AperName)
        aperture.V3SciYAngle = siaf_alignment_parameters['V3SciYAngle'][index]
        aperture.V3SciXAngle = siaf_alignment_parameters['V3SciXAngle'][index]
        aperture.V3IdlYAngle = siaf_alignment_parameters['V3IdlYAngle'][index]
        # aperture.V3IdlYAngle = tools.v3sciyangle_to_v3idlyangle(aperture.V3SciYAngle)
        for attribute_name in 'V2Ref V3Ref'.split():
            setattr(aperture, attribute_name, siaf_alignment_parameters[attribute_name][index])
        polynomial_coefficients = iando.read.read_siaf_distortion_coefficients(instrument, AperName)
        number_of_coefficients = len(polynomial_coefficients)
        # Invert N = (deg+1)(deg+2)/2 to recover the polynomial degree.
        polynomial_degree = int((np.sqrt(8 * number_of_coefficients + 1) - 3) / 2)
        # set polynomial coefficients
        siaf_indices = ['{:02d}'.format(d) for d in polynomial_coefficients['siaf_index'].tolist()]
        for i in range(polynomial_degree + 1):
            for j in np.arange(i + 1):
                row_index = siaf_indices.index('{:d}{:d}'.format(i, j))
                for colname in 'Sci2IdlX Sci2IdlY Idl2SciX Idl2SciY'.split():
                    setattr(aperture, '{}{:d}{:d}'.format(colname, i, j), polynomial_coefficients[colname][row_index])
    else:
        # Non-master apertures: standard NIRISS orientation/parity; distortion
        # is inherited from the parent aperture in the second pass below.
        aperture.DetSciYAngle = 180
        aperture.VIdlParity = -1
    # NOTE(review): polynomial_degree is only (re)computed for the master
    # apertures — this relies on NIS_CEN appearing first in the list; confirm.
    aperture.Sci2IdlDeg = polynomial_degree
    aperture_dict[AperName] = aperture
# second pass to set parameters for apertures that depend on other apertures
# calculations emulate the Cox' Excel worksheets as described in JWST-01550
# NIRISS is the same as FGS
for AperName in aperture_name_list:
    index = siaf_aperture_definitions['AperName'].tolist().index(AperName)
    aperture = aperture_dict[AperName]
    if (siaf_aperture_definitions['parent_apertures'][index] is not None) and (siaf_aperture_definitions['dependency_type'][index] == 'default'):
        aperture._parent_apertures = siaf_aperture_definitions['parent_apertures'][index]
        parent_aperture = aperture_dict[aperture._parent_apertures]
        # Inherit the parent's orientation, then derive the ideal-frame angle.
        aperture.V3SciYAngle = parent_aperture.V3SciYAngle
        aperture.V3SciXAngle = parent_aperture.V3SciXAngle
        aperture.V3IdlYAngle = tools.v3sciyangle_to_v3idlyangle(aperture.V3SciYAngle)
        aperture = tools.set_reference_point_and_distortion(instrument, aperture, parent_aperture)
    # Fill in the remaining derived attributes (corners, etc.).
    aperture.complement()
    aperture.Comment = None
    aperture.UseAfterDate = '2014-01-01'
    aperture_dict[AperName] = aperture
# sort SIAF entries in the order of the aperture definition file
aperture_dict = OrderedDict(sorted(aperture_dict.items(), key=lambda t: aperture_name_list.index(t[0])))
# third pass to set DDCNames apertures, which depend on other apertures
ddc_siaf_aperture_names = np.array([key for key in _ddc_apername_mapping.keys()])
ddc_v2 = np.array([aperture_dict[aperture_name].V2Ref for aperture_name in ddc_siaf_aperture_names])
ddc_v3 = np.array([aperture_dict[aperture_name].V3Ref for aperture_name in ddc_siaf_aperture_names])
for AperName in aperture_name_list:
    # Assign each aperture the DDC name of the nearest DDC aperture in (V2, V3).
    separation_tel_from_ddc_aperture = np.sqrt((aperture_dict[AperName].V2Ref - ddc_v2)**2 +
                                               (aperture_dict[AperName].V3Ref - ddc_v3)**2)
    aperture_dict[AperName].DDCName = _ddc_apername_mapping[ddc_siaf_aperture_names[np.argmin(
        separation_tel_from_ddc_aperture)]]
######################################
# SIAF content generation finished
######################################
aperture_collection = pysiaf.ApertureCollection(aperture_dict)
emulate_delivery = True
if emulate_delivery:
    # Delivery mode: write XML+XLSX to the delivery area, then compare the new
    # SIAF against the reference (PRD) version and run aperture sanity tests.
    pre_delivery_dir = os.path.join(JWST_DELIVERY_DATA_ROOT, instrument)
    if not os.path.isdir(pre_delivery_dir):
        os.makedirs(pre_delivery_dir)
    # write the SIAF files to disk
    filenames = pysiaf.iando.write.write_jwst_siaf(aperture_collection, basepath=pre_delivery_dir,
                                                   file_format=['xml', 'xlsx'])
    pre_delivery_siaf = pysiaf.Siaf(instrument, basepath=pre_delivery_dir)
    compare_against_prd = True
    compare_against_cdp7b = True
    # print('\nRunning regression test of pre_delivery_siaf against test_data:')
    # None yet
    for compare_to in [pysiaf.JWST_PRD_VERSION]:
        if compare_to == 'cdp7b':
            ref_siaf = pysiaf.Siaf(instrument,
                                   filename=os.path.join(pre_delivery_dir, 'NIRISS_SIAF_cdp7b.xml'))
        else:
            # compare new SIAF with PRD version
            ref_siaf = pysiaf.Siaf(instrument)
        tags = {'reference': compare_to, 'comparison': 'pre_delivery'}
        compare.compare_siaf(pre_delivery_siaf, reference_siaf_input=ref_siaf,
                             fractional_tolerance=1e-6, report_dir=pre_delivery_dir, tags=tags)
        compare.compare_transformation_roundtrip(pre_delivery_siaf,
                                                 reference_siaf_input=ref_siaf, tags=tags,
                                                 report_dir=pre_delivery_dir, )
        compare.compare_inspection_figures(pre_delivery_siaf, reference_siaf_input=ref_siaf,
                                           report_dir=pre_delivery_dir, tags=tags,
                                           xlimits=(-210, -370), ylimits=(-780, -615))
    # run some tests on the new SIAF
    from pysiaf.tests import test_aperture
    print('\nRunning aperture_transforms test for pre_delivery_siaf')
    test_aperture.test_jwst_aperture_transforms([pre_delivery_siaf], verbose=False, threshold=0.04)
    print('\nRunning aperture_vertices test for pre_delivery_siaf')
    test_aperture.test_jwst_aperture_vertices([pre_delivery_siaf])
else:
    # Test mode: just write the XML into the temporary test directory.
    test_dir = os.path.join(JWST_TEMPORARY_DATA_ROOT, instrument, 'generate_test')
    if not os.path.isdir(test_dir):
        os.makedirs(test_dir)
    # write the SIAFXML to disk
    [filename] = pysiaf.iando.write.write_jwst_siaf(aperture_collection, basepath=test_dir,
                                                    file_format=['xml'])
    print('SIAFXML written in {}'.format(filename))
|
#Programmer: Daiwei Li
#Date: 2019-04-28
import matplotlib.pyplot as plt
import numpy as np
#import pandas as pd
import csv
def read_csv(file_name):
    """Read a CSV file and return its rows as a list of header->value dicts.

    BUG FIX: the original ignored `file_name` and always opened 'data.csv';
    it now opens the file it was asked for. Also removed the no-op
    `[x for x in r]` copy.
    """
    with open(file_name) as f:
        reader = csv.reader(f)
        # eliminate blank rows if they exist
        rows = [row for row in reader if row]
    headings = rows[0]
    # Pair each data row with the header row (zip drops mismatched tails).
    return [dict(zip(headings, row)) for row in rows[1:]]
def find_data(data, bullet):
    """Extract per-player (attribute value, remaining potential) pairs.

    Empty strings in the `bullet` column count as 0.

    BUG FIX: the original wrote "0" back into the caller's rows while
    iterating, silently mutating the shared dataset between calls; the input
    is now left untouched.

    Returns:
        (x, y): x is float(row[bullet]) per row; y is Potential - Overall
        (how much the player can still improve) per row.
    """
    x = []
    y = []
    for row in data:
        value = row[bullet]
        x.append(float(value) if value != "" else 0.0)
        y.append(float(row["Potential"]) - float(row["Overall"]))
    return x, y
def scattering_visualization(x, y, bullet):
    """Scatter-plot `bullet` against remaining potential, save <bullet>.pdf, and show it."""
    plt.xlabel(bullet)
    plt.ylabel("Real_Potential")
    plt.scatter(np.array(x), np.array(y), c='g', alpha=0.25)
    plt.savefig(bullet + ".pdf")
    plt.show()
def main():
    """Drive the analysis: load data.csv and plot each attribute against
    remaining potential (Potential - Overall)."""
    print("Let analyze soccer player informaiton in FIFA-2019")
    print("We are trying to find all cases could effect a player to improve himself")
    print("In this way, we will see some charts about potential with age, overall rate, sprintspeed, drippling, pass, and shoot")
    print("Please input file name:(/ --> data.csv) ")
    data = read_csv("data.csv")
    # One scatter plot per attribute, in the same order as before.
    for bullet in ("Age", "Overall", "SprintSpeed", "Dribbling",
                   "LongPassing", "ShortPassing", "LongShots"):
        print("Analyze with potential:(/ --> " + bullet + ") ")
        x, y = find_data(data, bullet)
        scattering_visualization(x, y, bullet)
    print("All graphs are saved as pdf. Thank you~")
main()
|
from insurance import Client
import os
def main():
    """Fetch and print the insured value of a sample phone from the Root sandbox API."""
    base_url = "https://sandbox.root.co.za/v1/insurance"
    app_id = os.environ.get('ROOT_APP_ID')
    app_secret = os.environ.get('ROOT_APP_SECRET')
    api = Client(base_url, app_id, app_secret)
    print(api.gadgets.get_phone_value("iPhone 6 Plus 128GB LTE"))
if __name__ == "__main__":
    main()
|
import sys
sys.stdin = open("test.txt", "r")
from collections import deque
# Conveyor-belt / robot simulation: `a` holds per-cell durability for a belt of
# length n; the loop runs until at least k cells have worn out (durability 0).
# (Korean comments translated to English.)
n, k = map(int, input().split())
a = deque(map(int,input().split()))
step = 0
# Robot positions, front of the deque = the robot that boarded earliest.
robots = deque([])
# count of worn-out (zero-durability) cells
while a.count(0) < k:
    step += 1
    # Rotate the belt: every robot is carried one position forward
    for i in range(len(robots)):
        robots[i] += 1
    # a.rotate(1)
    temp = a.pop()
    a.appendleft(temp)
    # If the lead robot reached position N, it gets off
    if robots:
        if robots[0] >= n-1:
            robots.popleft()
    # Robots move under their own power, most advanced first
    for i in range(len(robots)):
        if i != 0 and robots[i-1] == robots[i] + 1:
            # the next cell is occupied by the robot ahead — can't move
            continue
        if a[robots[i]+1] != 0:
            a[robots[i]+1] -= 1
            robots[i] += 1
    # A robot that reached position N gets off
    if robots:
        if robots[0] >= n-1:
            robots.popleft()
    # Board a new robot at position 0 if that cell still has durability
    if a[0] != 0:
        robots.append(0)
        a[0] -= 1
print(step) |
import math
class LZ77(object):
    """Bit-level LZ77 compressor.

    compress() emits five parallel streams: literal characters, a control
    bit-stream (1 = back reference, 0 = literal), the high and low parts of
    each back-reference offset, and the match lengths. Bit-streams are packed
    into characters, 8 bits per chr.
    """

    def __init__(self, min_sequence, sequence_length_bits, window_size_bits):
        self.min_sequence = min_sequence
        self.sequence_length_bits = sequence_length_bits
        # Longest match encodable in `sequence_length_bits` bits.
        self.max_sequence = pow(2, sequence_length_bits) + self.min_sequence - 1
        self.window_size_bits = int(window_size_bits)
        self.window_size = pow(2, window_size_bits) - 1
        self.length_format = '{:0%sb}' % sequence_length_bits
        self.offset_format = '{:0%sb}' % window_size_bits

    def compress(self, data, debug=None):
        """Compress `data`; returns (literals, control, offset_high, offset_low, lengths)."""
        compressed_data = ''
        compressed_control = ''
        compressed_offset_high = ''
        compressed_offset_low = ''
        compressed_length = ''
        window = ''
        i = 0
        while i < len(data):
            # Grow the candidate match while it still occurs in the window.
            seq_len = 1
            while i + seq_len <= len(data) and seq_len <= self.max_sequence and data[i:i + seq_len] in window:
                seq_len += 1
            seq_len -= 1
            if seq_len >= self.min_sequence and data[i:i + seq_len] in window:
                # Back reference: offset measured back from the end of the window.
                offset = len(window) - window.rfind(data[i:i + seq_len])
                compressed_control += '1'
                compressed_offset_high += self.offset_format.format(offset)[0:8]
                compressed_offset_low += self.offset_format.format(offset)[8:]
                compressed_length += self.length_format.format(seq_len)
                window += data[i:i + seq_len]
                i += seq_len
            else:
                # Literal character.
                compressed_control += '0'
                compressed_data += data[i]
                window += data[i]
                i += 1
            window = window[-self.window_size:]
        compressed_control = self.decode_binary_string(compressed_control)
        compressed_length = self.decode_binary_string(compressed_length)
        compressed_offset_high = self.decode_binary_string(compressed_offset_high)
        compressed_offset_low = self.decode_binary_string(compressed_offset_low)
        return compressed_data, compressed_control, compressed_offset_high, compressed_offset_low, compressed_length

    # convert binary ('0'/'1') stream to a packed string, 8 bits per chr
    def decode_binary_string(self, s):
        """Pack a bit string into characters; leftover bits become a final chr.

        BUG FIX: the original used `len(s) / 8`, which is a float under
        Python 3 and makes range() raise TypeError — use integer division.
        Also renamed the local so it no longer shadows builtin `str`.
        """
        packed = ''.join(chr(int(s[i * 8:i * 8 + 8], 2)) for i in range(len(s) // 8))
        return packed if (len(s) % 8 == 0) else packed + (chr(int(s[-(len(s) % 8):], 2)))
from googleapiclient.discovery import build
from google.auth.transport.requests import Request
import Sheets.credBuilder as cb
import psycopg2
sheetId = 'YOUR-SHEET-ID'
dataRange = 'Sheet1!a:z'
creds = cb.creds()
conn = psycopg2.connect(host='localhost',database='postgres',user='postgres',password='password')
cur = conn.cursor()
values = []
def colList(col):
    """Return a cursor-description column's name (builds the sheet header row)."""
    return col.name
def strList(value):
    """Convert a database cell to a string for the Sheets API; NULL becomes ''.

    BUG FIX: the original tested truthiness, so 0, 0.0, and False were also
    collapsed to '' and lost from the sheet; only None (SQL NULL) should be
    blanked.
    """
    return '' if value is None else str(value)
def getData():
    """Fetch weekday commute rows from Postgres into the module-level `values`
    list: a header row first, then one stringified row per record."""
    global values
    # dow 1..5 = Monday..Friday (Postgres extract(dow) is 0=Sunday).
    cur.execute(''' SELECT *
                    FROM commute_datasource
                    WHERE extract(dow from departure_ts) between 1 and 5
                ''')
    queryData = cur.fetchall()
    values = []
    # First row: column headers taken from the cursor description.
    values.append(list(map(colList,cur.description)))
    for i in queryData:
        values.append(list(map(strList,i)))
def update():
    """Push `values` to the Google Sheet, clearing the target range first."""
    global values
    service = build('sheets','v4',credentials=creds)
    sheet = service.spreadsheets()
    body = {'values':values}
    # Only write when there is at least one data row beyond the header,
    # so an empty query result doesn't wipe the sheet.
    if len(values) > 1:
        clear = sheet.values().clear(spreadsheetId=sheetId,range=dataRange).execute()
        result = sheet.values().update(spreadsheetId=sheetId,range=dataRange,body=body,valueInputOption='RAW').execute()
def run():
    """Refresh the spreadsheet: pull from Postgres, then push to Sheets."""
    getData()
    update()
# 2018年8月14日 14:36:18
# 作为客户端与HTTP服务交互
# 通过HTTP协议访问多种服务,如下载数据或者与基于REST的API进行交互
# 对于简单的事情,使用urllib.request模块就够了,比如发送一个简单的HTTP GET请求到远程的服务上
'''
from urllib import request,parse
# base URL being accessed
url = 'http://httpbin.org/get'
# Dictionary of query parameters(if any)
parms={
'name1':'value1',
'name2':'value2'
}
# Encode the query string
querystring = parse.urlencode(parms)
# Make a POST request and read the response
u=request.urlopen(url+'?'+querystring)
resp=u.read()
'''
# 如果需要使用POST方法在请求主体中发送查询参数,可以将参数编码后作为可选参数提供给urlopen()函数
'''
from urllib import request,parse
url='http://httpbin.org/post'
parms={
    'name1':'value1',
    'name2':'value2'
}
querystring=parse.urlencode(parms)
u=request.urlopen(url,querystring.encode('ascii'))
resp=u.read()
'''
# 如果需要在发出的请求中提供一些自定义的HTTP头,例如修改user-agent字段,可以创建一个包含字段值的字典,并创建一个request实例然后将其传给urlopen().
'''
from urllib import request,parse
headers = {
'User-agent':'none/ofyourbusiness',
'Spam':'Eggs'
}
req = request.Request(url,querystring.encode('ascii'),headers=headers)
u = request.urlopen(req)
resp = u.read()
'''
# 如果需要交互的服务比上面的例子都要复杂,也许应该去看看 requests 库(https://pypi.python.org/pypi/requests)。
# |
# def isim(adi="",soyadi=""):
# print("Merhaba",adi,soyadi)
# isim(input("Adı:"),input("Soyadı:"))
def tip(*args):
    """Count how many of the positional arguments are strings and print the
    count (message text kept in Turkish, as user-facing output)."""
    sayim = 0
    for item in args:
        # FIX (idiom): use isinstance instead of string-comparing
        # repr(type(item)), which is fragile and non-idiomatic.
        if isinstance(item, str):
            sayim += 1
    print("Bu parametre de {} kadar str deger vardır".format(sayim))
tip(1,2,3,"deneme",["deneme",2.3],2.3)
def yazdir() |
from django import template
from django.shortcuts import reverse
from django.utils.html import format_html
from content.models import Participant, Entry, ParticipantAnswer
register = template.Library()
@register.filter(is_safe=True)
def get_challenge_participants(challenge):
    """Render an HTML report of a challenge's non-staff participants.

    Each participant gets a table with their details, status checkboxes
    (read / shortlisted / winner) and their question/answer rows.

    SECURITY FIX: user-controlled values (participant names, answers,
    challenge fields) were concatenated into the markup unescaped even though
    the filter output is marked safe — an XSS vector. Every dynamic value now
    goes through format_html, which escapes its arguments.
    """
    participants = Participant.objects.filter(user__is_staff=False, challenge=challenge)
    output = format_html('<h1>{}</h1>', challenge.name)
    for participant in participants:
        output += '<table style="background-repeat:no-repeat; width:100%;margin:0;" border="1">'
        entry = Entry.objects.filter(participant=participant).first()
        participant_answers = ParticipantAnswer.objects.filter(entry=entry)
        # Header row
        output += '<tr>'
        output += '<th>Participant Name</th>'
        output += '<th>Mobile</th>'
        output += '<th>Challenge</th>'
        output += '<th>Created On</th>'
        output += '<th>Completed On</th>'
        output += '<th>Read</th>'
        output += '<th>Shortlisted</th>'
        output += '<th>Winner</th>'
        output += '</tr>'
        # Data row — all dynamic values escaped via format_html.
        output += '<tr>'
        output += format_html('<td>{}</td>', participant.user)
        output += format_html('<td>{}</td>', participant.get_participant_mobile())
        output += format_html('<td>{}</td>', challenge.name)
        output += format_html('<td>{}</td>', challenge.activation_date)
        output += format_html('<td>{}</td>', participant.date_completed)
        output += '<td>'
        if participant.is_read:
            output += format_html("<input type='checkbox' id='{}' class='mark-is-read' value='{}' checked='checked' />",
                                  'participant-is-read-%d' % participant.id, participant.id)
        else:
            output += format_html("<input type='checkbox' id='{}' class='mark-is-read' value='{}' />",
                                  'participant-is-read-%d' % participant.id, participant.id)
        output += '</td>'
        output += '<td>'
        if participant.is_shortlisted:
            output += format_html("<input type='checkbox' id='{}' class='mark-is-shortlisted' value='{}' checked='checked' />",
                                  'participant-is-shortlisted-%d' % participant.id, participant.id)
        else:
            output += format_html("<input type='checkbox' id='{}' class='mark-is-shortlisted' value='{}' />",
                                  'participant-is-shortlisted-%d' % participant.id, participant.id)
        output += '</td>'
        output += '<td>'
        if participant.is_winner:
            output += format_html("<input type='checkbox' id='{}' class='mark-is-winner' value='{}' checked='checked' />",
                                  'participant-is-winner-%d' % participant.id, participant.id)
        else:
            output += format_html("<input type='checkbox' id='{}' class='mark-is-winner' value='{}' />",
                                  'participant-is-winner-%d' % participant.id, participant.id)
        output += '</td>'
        output += '</tr>'
        # Q&A header + rows
        output += '<tr>'
        output += '<th>Question</th>'
        output += '<th>Selected Option</th>'
        output += '</tr>'
        for participant_answer in participant_answers:
            output += '<tr>'
            output += format_html('<td>{}</td>', participant_answer.question)
            output += format_html('<td>{}</td>', participant_answer.selected_option)
            output += '</tr>'
        output += '</table><br/>'
    return output
|
# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution(object):
    def bstFromPreorder(self, preorder):
        """
        Build a BST from its preorder traversal (values assumed distinct).

        :type preorder: List[int]
        :rtype: TreeNode

        Fixes over the original: empty input returns None instead of raising
        IndexError; the top-level duplicate of the recursion now delegates to
        the helper; leftover debug prints were removed; and the split uses
        enumerate instead of preorder.index(i), which was O(n) and wrong when
        a value happened to repeat.
        """
        return self.bstFromPreorder_Util(preorder)

    def bstFromPreorder_Util(self, preorder):
        # Recursive worker: the first value is the root; the remaining values
        # split into the left (< root) and right (> root) subtree preorders.
        if not preorder:
            return None
        node = TreeNode(preorder[0])
        left = []
        right = []
        for index, value in enumerate(preorder[1:], start=1):
            if value > node.val:
                right = preorder[index:]
                break
            left.append(value)
        node.left = self.bstFromPreorder_Util(left)
        node.right = self.bstFromPreorder_Util(right)
        return node


s = Solution()
s.bstFromPreorder([8, 5, 1, 7, 10, 12])
|
#!/usr/bin/python3.4
# coding: utf-8
"""
Programme (classe) : CCompteEpargne.py version 1.3
Date : 11-03-2018
Auteur : Hervé Dugast
------- affichage console -----------------------------------------------------------------
*** Création de comptes épargnes
... type compte : EPAR
Saisir le solde minimal (supérieur ou égal à 0) ? 10
solde du compte ? 0
Erreur ! Le solde (0.00 €) doit être supérieur ou égal à 10.00 € !
Saisir un solde valide ? 150
* Récapitulatif *
- Compte EPAR n° 200-1
solde compte = 150.00, solde minimal = 10.00, intérêts = 0.00
... type compte : EPAR
Saisir le solde minimal (supérieur ou égal à 0) ? 10
solde compte = 150.63
* Récapitulatif *
- Compte EPAR n° 200-2
solde compte = 150.63, solde minimal = 10.00, intérêts = 0.00
... type compte : EPAR
Solde minimal = 10.00
solde compte = 0.00
Erreur ! Le solde (0.00 €) doit être supérieur ou égal à 10.00 € !
Saisir un solde valide ? 111.27
* Récapitulatif *
- Compte EPAR n° 200-3
solde compte = 111.27, solde minimal = 10.00, intérêts = 0.00
... type compte : EPAR
Solde minimal = 10.00
solde compte = 1251.24
* Récapitulatif *
- Compte EPAR n° 200-4
solde compte = 1251.24, solde minimal = 10.00, intérêts = 0.00
***Affichage compte
Compte n° 200-1 : solde compte = 150.0, solde minimal = 10.0, intérêts = 0
compte1.__dict__ = {'_numero': '200-1', '_CCompteBanque__solde': 150.0,
'_CCompteEpargne__soldeMinimal': 10.0, '_CCompteEpargne__interets': 0,
'_CCompteEpargne__type': 'EPAR'}
- Compte EPAR n° 200-1
solde compte = 150.00, solde minimal = 10.00, intérêts = 0.00
- Compte EPAR n° 200-2
solde compte = 150.63, solde minimal = 10.00, intérêts = 0.00
- Compte EPAR n° 200-3
solde compte = 111.27, solde minimal = 10.00, intérêts = 0.00
- Compte EPAR n° 200-4
solde compte = 1251.24, solde minimal = 10.00, intérêts = 0.00
Nombre total de comptes bancaires : 4
-------------------------------------------------------------------------------------------
"""
from CCompteBanque import CCompteBanque
class CCompteEpargne(CCompteBanque):
    """Manage the day-to-day operations of a savings account.

    (Comments translated from French; user-facing console strings are kept in
    French, as they are program output.)
    """
    # Account-type tag used by the base class and in console output.
    TYPE = "EPAR"
    def __init__(self, solde=-1, soldeMinimal=-1):
        """Constructor: prompts on the console for any value not supplied
        (the -1 defaults mean "ask the user")."""
        print("... type compte : {}".format(CCompteEpargne.TYPE))
        self.__type = CCompteEpargne.TYPE
        self.set_soldeMinimal(soldeMinimal)
        self.__interets = 0
        # The minimum allowed balance must be known before the balance itself
        # can be validated by the base-class constructor.
        CCompteBanque.__init__(self, solde, self.__soldeMinimal, self.__type)
        print(" * Récapitulatif *")
        self.afficherInformations()
    def __str__(self):
        """Default textual representation of the account."""
        return "Compte n° {} : solde compte = {}, solde minimal = {}, intérêts = {}" \
            .format(self._numero, self.get_solde(), self.__soldeMinimal, self.__interets)
    def afficherInformations(self):
        """Print the bank account's details to the console."""
        print(" - Compte {} n° {}".format(self.__type, self._numero))
        print(" solde compte = {:.02f}, solde minimal = {:.02f}, intérêts = {:.02f}" \
            .format(self.get_solde(), self.__soldeMinimal, self.__interets))
    def set_soldeMinimal(self, soldeMinimal):
        """Set the minimum allowed balance, from the argument or keyboard input.

        A savings account's minimum balance can never be negative.
        """
        if soldeMinimal >= 0:
            # Valid value passed as a parameter: just display it.
            print("Solde minimal = {:.02f}".format(soldeMinimal))
        else:
            # Invalid, or not provided yet: keep prompting until non-negative.
            while soldeMinimal < 0:
                if soldeMinimal != -1:
                    print("Erreur ! Le solde minimal ne peut pas être négatif ({:.02f} €) ! " \
                        .format(soldeMinimal))
                soldeMinimal = float(input("Saisir le solde minimal (supérieur ou égal à 0) ? "))
        self.__soldeMinimal = soldeMinimal
    def get_type(self):
        # Accessor for the (name-mangled) account-type tag.
        return self.__type
if __name__ == "__main__":
    print("*** Création de comptes épargnes")
    # create a savings account, prompting for both the minimum balance
    # and the account balance
    compte1 = CCompteEpargne() ; print("")
    # create a savings account, prompting only for the minimum balance
    compte2 = CCompteEpargne(150.63) ; print("")
    # create a savings account without any prompt
    compte3 = CCompteEpargne(0, 10) ; print("")
    # create a savings account without prompting, but with an inconsistency
    # (balance < minimum balance): a valid balance is requested interactively
    compte4 = CCompteEpargne(1251.24, 10)
    print("\n***Affichage compte")
    print(compte1)
    print("compte1.__dict__ = {}".format(compte1.__dict__)) ; print("")
    compte1.afficherInformations()
    compte2.afficherInformations()
    compte3.afficherInformations()
    compte4.afficherInformations() ; print("")
    # class-level account counter maintained by the base class
    CCompteBanque.afficherNombreComptesExistant()
|
#!/usr/bin/env python
from cosymlib import Cosymlib, __version__
from cosymlib import file_io
from cosymlib.file_io.tools import print_header, print_footer, print_input_info
from cosymlib.shape import tools
import argparse
import os
import sys
import yaml
def write_reference_structures(vertices, central_atom, directory):
    """Write the reference polyhedra for *vertices* vertices to an XYZ file.

    :param vertices: number of vertices of the reference polyhedra
    :param central_atom: position of the central atom; 0 means no central atom
    :param directory: directory in which the reference file is created

    The file is named L{vertices}_refs.xyz (no central atom) or
    ML{vertices}_refs.xyz (central atom present).  Mode 'x' is kept so an
    existing file is never overwritten (raises FileExistsError).
    """
    prefix = 'L' if central_atom == 0 else 'ML'
    file_path = directory + '/{}{}_refs.xyz'.format(prefix, vertices)
    # Use a context manager so the handle is closed even if a write fails
    # (the original version leaked the open file object).
    with open(file_path, 'x') as output_references:
        print("\nReference structures in file {}\n".format(output_references.name))
        for label in tools.get_structure_references(vertices):
            ref_structure = tools.get_reference_structure(label, central_atom=central_atom)
            output_references.write(file_io.get_file_xyz_txt(ref_structure))
# positional arguments
parser = argparse.ArgumentParser(description='shape', allow_abbrev=False)
parser.add_argument(type=str,
                    dest='input_file',
                    nargs='?',
                    default=None,
                    help='input file with structures')
parser.add_argument(type=str,
                    dest="yaml_input",
                    nargs='?',
                    default=None,
                    help='Input file with keywords')
# Main options
parser.add_argument('-m', '--measure',
                    dest='measure',
                    metavar='SH',
                    default=None,
                    # fixed: help text previously had an unclosed parenthesis
                    help='compute the SH measure of the input structures '
                         '(use "custom" to use a custom structure)')
parser.add_argument('-s', '--structure',
                    dest='structure',
                    action='store_true',
                    default=False,
                    help='return the nearest structure to the reference shape')
parser.add_argument('-o', '--output_name',
                    dest='output_name',
                    metavar='filename',
                    default=None,
                    help='store the output into a file')
parser.add_argument('-c', '--central_atom',
                    dest='central_atom',
                    metavar='N',
                    type=int,
                    default=0,
                    help='define central atom as the atom in position N in the input structure')
parser.add_argument('-r', '--references',
                    dest='references',
                    action='store_true',
                    default=False,
                    help='store the coordinates of the reference polyhedra in a file')
parser.add_argument('-m_custom', '--measure_custom',
                    dest='measure_custom',
                    metavar='filename',
                    default=None,
                    help='define filename containing the structure/s to be used as reference')
# Extra options
parser.add_argument('-l', '--labels', action='store_true',
                    dest='labels',
                    default=False,
                    help='show the reference shape labels')
parser.add_argument('--info',
                    action='store_true',
                    default=False,
                    help='print information about the input structures')
parser.add_argument('-v', '--version',
                    dest='version',
                    action='store_true',
                    default=False,
                    # fixed: help text was copy-pasted from --info
                    help='print the cosymlib version and exit')
parser.add_argument('--labels_n',
                    dest='labels_n',
                    default=False,
                    help='show the reference shape labels of n vertices')
parser.add_argument('--references_n',
                    dest='references_n',
                    default=False,
                    help='store the coordinates of the reference polyhedra of n vertices in a file')
# Modifiers
parser.add_argument('--fix_permutation',
                    dest='fix_permutation',
                    action='store_true',
                    default=False,
                    help='do not permute atoms')
# Parse CLI arguments; a YAML keyword file may override/extend them below.
args = parser.parse_args()
if args.yaml_input:
    with open(args.yaml_input, 'r') as stream:
        input_parameters = yaml.safe_load(stream)
    # Every YAML key must match an existing argparse destination.
    for key, value in input_parameters.items():
        if key.lower() in args:
            setattr(args, key.lower(), value)
        else:
            raise KeyError("Key %s is not valid" % key)
if args.version:
    print('Cosymlib version = {}'.format(__version__))
    exit()
# All printed results go either to the requested output file or to stdout.
common_output = open(args.output_name, 'w') if args.output_name is not None else sys.stdout
print_header(common_output)
if args.labels_n:
    common_output.write(tools.get_shape_label_info(int(args.labels_n), with_central_atom=args.central_atom))
    exit()
if args.references_n:
    input_dir = os.getcwd()
    write_reference_structures(int(args.references_n), args.central_atom, input_dir)
    exit()
if args.input_file is None:
    parser.error('No input file selected! An existing file must be provide')
structures = file_io.read_generic_structure_file(args.input_file, read_multiple=True)
structure_set = Cosymlib(structures)
n_atoms = structure_set.get_n_atoms()
# With a central atom the polyhedron has one vertex fewer than atoms.
vertices = n_atoms if args.central_atom == 0 else n_atoms - 1
if args.references:
    input_dir = os.path.dirname(args.input_file)
    write_reference_structures(vertices, args.central_atom, input_dir)
if args.info:
    print_input_info(structure_set.get_geometries(), output=common_output)
    exit()
# Shape's commands
if args.labels:
    common_output.write(tools.get_shape_label_info(n_atoms, with_central_atom=args.central_atom))
    exit()
# Resolve which reference shapes to measure against.
if args.measure_custom:
    reference = file_io.read_generic_structure_file(args.measure_custom, read_multiple=True)
    args.measure = 'custom'
elif args.measure == 'all':
    reference = tools.get_structure_references(vertices)
else:
    reference = [args.measure]
if args.structure:
    # Without -o, nearest-structure geometries go to <input>_near.xyz.
    if common_output is sys.stdout:
        file_name, file_extension = os.path.splitext(args.input_file)
        output_str = open(file_name + '_near.xyz', 'w')
    else:
        output_str = common_output
    structure_set.print_shape_structure(reference,
                                        central_atom=args.central_atom,
                                        fix_permutation=args.fix_permutation,
                                        output=output_str)
if args.measure:
    structure_set.print_shape_measure(reference,
                                      central_atom=args.central_atom,
                                      fix_permutation=args.fix_permutation,
                                      output=common_output)
print_footer(common_output)
|
# Slow. Horrible. Ugly. Don't try this at home.
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    @staticmethod
    def computeHeight(root, curHeight=0):
        """Return the height of *root* (node count on the longest path)
        plus *curHeight*.  Kept for backward compatibility with callers;
        a None node contributes curHeight."""
        if root is None:
            return curHeight
        return max(
            Solution.computeHeight(root.left, 1 + curHeight),
            Solution.computeHeight(root.right, 1 + curHeight)
        )

    @staticmethod
    def _balanced_height(node):
        """Return (is_balanced, height) for *node* in one traversal.

        Short-circuits as soon as an unbalanced subtree is found; the
        height in that case is meaningless and ignored by the caller.
        """
        if node is None:
            return True, 0
        left_ok, left_height = Solution._balanced_height(node.left)
        if not left_ok:
            return False, 0
        right_ok, right_height = Solution._balanced_height(node.right)
        if not right_ok:
            return False, 0
        return abs(left_height - right_height) < 2, 1 + max(left_height, right_height)

    def isBalanced(self, root):
        """
        @param root: The root of binary tree.
        @return: True if this Binary tree is Balanced, or false.
        """
        # Single O(n) pass instead of recomputing subtree heights at every
        # node, which made the original O(n^2) on skewed trees.
        return Solution._balanced_height(root)[0]
|
-X FMLP -Q 0 -L 1 52 175
-X FMLP -Q 0 -L 1 49 150
-X FMLP -Q 0 -L 1 43 250
-X FMLP -Q 0 -L 1 41 400
-X FMLP -Q 0 -L 1 41 125
-X FMLP -Q 1 -L 1 39 125
-X FMLP -Q 1 -L 1 36 125
-X FMLP -Q 1 -L 1 35 250
-X FMLP -Q 1 -L 1 30 125
-X FMLP -Q 1 -L 1 30 300
-X FMLP -Q 2 -L 1 27 200
-X FMLP -Q 2 -L 1 26 175
-X FMLP -Q 2 -L 1 23 100
-X FMLP -Q 2 -L 1 23 250
-X FMLP -Q 2 -L 1 21 175
-X FMLP -Q 3 -L 1 19 150
-X FMLP -Q 3 -L 1 17 125
-X FMLP -Q 3 -L 1 16 125
-X FMLP -Q 3 -L 1 13 175
|
def sol_print(value):
    """Print *value* in Code Jam output format ("Case #k: value"),
    numbering the cases automatically via a function attribute."""
    sol_print.line_number += 1;
    print "Case #%d: %s"%(sol_print.line_number, value)
# Function attribute used as the running case counter (Python 2 idiom).
sol_print.line_number = 0
# Number of test cases, then one pancake stack ('+'/'-' string) per line.
T = int(raw_input())
inputs = []
for i in range(T):
    inputs.append(raw_input())
# Greedily count prefix flips needed to turn every '-' into '+'
# (appears to solve Code Jam 2016 "Revenge of the Pancakes" -- TODO confirm).
for stack in inputs:
    idx = 0
    operation = 0
    while '-' in stack:
        countminus = 0
        if stack[idx] == '+':
            # '+' run on top: skip it, then measure the '-' run below it.
            while idx < len(stack) and stack[idx] == '+':
                idx += 1
            while idx < len(stack) and stack[idx] == '-':
                idx += 1
                countminus += 1
            # Only chars before idx are already '+', so replacing the
            # leftmost countminus '-' chars rewrites exactly the run found.
            stack = stack.replace('-', '+', countminus)
            # Two flips: expose the '-' run, then flip it face-up.
            operation += 2
        else:
            # '-' run already on top: a single flip fixes it.
            while idx < len(stack) and stack[idx] == '-':
                idx += 1
                countminus += 1
            while idx < len(stack) and stack[idx] == '+':
                idx += 1
            stack = stack.replace('-', '+', countminus)
            operation += 1
        # Rescan from the top of the (partially fixed) stack.
        idx = 0
    sol_print(operation)
|
# Demonstration of Python's basic scalar types: str, int, and float.
stringVariable = "Hello"
integerVariable = 123
decimalVariable = 1.23
# Show each sample value on its own line, in declaration order.
for _sample in (stringVariable, integerVariable, decimalVariable):
    print(_sample)
|
import pandas
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
# loading data (url kept as a reference to the original dataset source)
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
filename = '../data/pima-indians-diabetes.data'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = pandas.read_csv(filename, names=names)
# loading to arrays: first 8 columns are features, last column is the class
data = dataframe.values
print(data.shape)
X = data[:, 0:8]
Y = data[:, 8]
# hyper parameter: num_instances
num_instances = len(X)
seed = 7
# executing k fold cross validation on data
# NOTE: shuffle=True is required for random_state to have any effect;
# recent scikit-learn raises a ValueError for random_state without shuffle.
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
# loading model
model = LogisticRegression()
# checking score (mean/std accuracy across the 10 folds)
results = model_selection.cross_val_score(model, X, Y, cv=kfold)
# displaying
print("Accuracy: {:.3f}% ({:.3f}%)".format(results.mean()*100, results.std()*100))
#!/usr/bin/python
# Copyright: (c) 2019-2021, DellEMC
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Standard Ansible module metadata: community-supported module in preview.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'
                    }
DOCUMENTATION = r'''
---
module: dellemc_powermax_gatherfacts
version_added: '1.0.0'
short_description: Gathers information about PowerMax/VMAX Storage entities
description:
- Gathers the list of specified PowerMax/VMAX storage system entities, such as
the list of registered arrays, storage groups, hosts, host groups, storage
groups, storage resource pools, port groups, masking views, array health
    status, alerts and metro DR environments, and so on.
extends_documentation_fragment:
- dellemc.powermax.dellemc_powermax.powermax
- dellemc.powermax.dellemc_powermax.powermax_serial_no
author:
- Arindam Datta (@dattaarindam) <ansible.team@dell.com>
- Rajshree Khare (@khareRajshree) <ansible.team@dell.com>
options:
serial_no:
description:
- The serial number of the PowerMax/VMAX array. It is not required for
getting the list of arrays.
type: str
required: False
tdev_volumes:
description:
- Boolean variable to filter the volume list.
This will have a small performance impact.
By default it is set to true, only TDEV volumes will be returned.
- True - Will return only the TDEV volumes.
- False - Will return all the volumes.
required: False
type: bool
choices: [True, False]
default: True
gather_subset:
description:
- List of string variables to specify the PowerMax/VMAX entities for which
information is required.
- Required only if the serial_no is present
- List of all PowerMax/VMAX entities supported by the module
- alert - gets alert summary information
- health - health status of a specific PowerMax array
- vol - volumes
- srp - storage resource pools
- sg - storage groups
- pg - port groups
- host - hosts
- hg - host groups
- port - ports
- mv - masking views
- rdf - rdf groups
- metro_dr_env - metro DR environments
- snapshot_policies - snapshot policies
required: False
type: list
elements: str
choices: [alert, health, vol, srp, sg, pg , host, hg, port, mv, rdf,
metro_dr_env, snapshot_policies]
filters:
description:
- List of filters to support filtered output for storage entities.
- Each filter is a tuple of {filter_key, filter_operator, filter_value}.
- Supports passing of multiple filters.
- The storage entities, 'rdf', 'health', 'snapshot_policies' and
'metro_dr_env', does not support filters. Filters will be ignored
if passed.
required: False
type: list
elements: dict
suboptions:
filter_key:
description:
- Name identifier of the filter.
type: str
required: True
filter_operator:
description:
- Operation to be performed on filter key.
type: str
choices: [equal, greater, lesser, like]
required: True
filter_value:
description:
- Value of the filter key.
type: str
required: True
notes:
- Filter functionality will be supported only for the following
'filter_key' against specific 'gather_subset'.
- vol - allocated_percent, associated, available_thin_volumes, bound_tdev,
cap_cyl, cap_gb, cap_mb, cap_tb, cu_image_num, cu_image_ssid,
data_volume, dld, drv, effective_wwn, emulation, encapsulated,
encapsulated_wwn, gatekeeper, has_effective_wwn, mapped,
mobility_id_enabled, num_of_front_end_paths, num_of_masking_views,
num_of_storage_groups, oracle_instance_name, physical_name, pinned,
private_volumes, rdf_group_number, reserved, split_name, status,
storageGroupId, symmlun, tdev, thin_bcv, type, vdev, virtual_volumes,
volume_identifier, wwn
- srp - compression_state, description, effective_used_capacity_percent,
emulation, num_of_disk_groups, num_of_srp_sg_demands,
num_of_srp_slo_demands, rdfa_dse, reserved_cap_percent,
total_allocated_cap_gb, total_srdf_dse_allocated_cap_gb,
total_subscribed_cap_gb, total_usable_cap_gb
- sg - base_slo_name, cap_gb, child, child_sg_name, ckd, compression,
compression_ratio_to_one, fba, num_of_child_sgs, num_of_masking_views,
num_of_parent_sgs, num_of_snapshots, num_of_vols, parent,
parent_sg_name, slo_compliance, slo_name, srp_name, storageGroupId,
tag, volumeId
- pg - dir_port, fibre, iscsi, num_of_masking_views, num_of_ports
- host - host_group_name, num_of_host_groups, num_of_initiators,
num_of_masking_views, num_of_powerpath_hosts, powerPathHostId
- hg - host_name, num_of_hosts, num_of_masking_views
- port - aclx, avoid_reset_broadcast, common_serial_number, director_status,
disable_q_reset_on_ua, enable_auto_negotive, environ_set, hp_3000_mode,
identifier, init_point_to_point, ip_list, ipv4_address, ipv6_address,
iscsi_target, max_speed, negotiated_speed, neqotiate_reset,
no_participating, node_wwn, num_of_cores, num_of_hypers,
num_of_mapped_vols, num_of_masking_views, num_of_port_groups,
port_interface, port_status, rdf_hardware_compression,
rdf_hardware_compression_supported, rdf_software_compression,
rdf_software_compression_supported, scsi_3, scsi_support1, siemens,
soft_reset, spc2_protocol_version, sunapee, type, unique_wwn, vcm_state,
vnx_attached, volume_set_addressing, wwn_node
- mv - host_or_host_group_name, port_group_name,
protocol_endpoint_masking_view, storage_group_name
- alert - acknowledged, array, created_date, created_date_milliseconds,
description, object, object_type, severity, state, type
'''
EXAMPLES = r'''
- name: Get list of volumes with filter -- all TDEV volumes of size equal
to 5GB
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- vol
filters:
- filter_key: "tdev"
filter_operator: "equal"
filter_value: "True"
- filter_key: "cap_gb"
filter_operator: "equal"
filter_value: "5"
- name: Get list of volumes and storage groups with filter
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- vol
- sg
filters:
- filter_key: "tdev"
filter_operator: "equal"
filter_value: "True"
- filter_key: "cap_gb"
filter_operator: "equal"
filter_value: "5"
- name: Get list of storage groups with capacity between 2GB to 10GB
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- sg
filters:
- filter_key: "cap_gb"
filter_operator: "greater"
filter_value: "2"
- filter_key: "cap_gb"
filter_operator: "lesser"
filter_value: "10"
- name: Get the list of arrays for a given Unisphere host
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
register: array_list
- debug:
var: array_list
- name: Get list of tdev-volumes
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
tdev_volumes: True
gather_subset:
- vol
- name: Get the list of arrays for a given Unisphere host
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
- name: Get array health status
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- health
- name: Get array alerts summary
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- alert
- name: Get the list of metro DR environments for a given Unisphere host
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- metro_dr_env
- name: Get list of Storage groups
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- sg
- name: Get list of Storage Resource Pools
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- srp
- name: Get list of Ports
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- port
- name: Get list of Port Groups
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- pg
- name: Get list of Hosts
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- host
- name: Get list of Host Groups
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- hg
- name: Get list of Masking Views
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- mv
- name: Get list of RDF Groups
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- rdf
- name: Get list of snapshot policies
dellemc_powermax_gatherfacts:
unispherehost: "{{unispherehost}}"
universion: "{{universion}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
serial_no: "{{serial_no}}"
gather_subset:
- snapshot_policies
'''
RETURN = r'''
Arrays:
description: List of arrays in the Unisphere.
returned: When the Unisphere exist.
type: list
Health:
description: Health status of the array.
returned: When the array exist.
type: complex
contains:
health_score_metric:
description: Overall health score for the specified Symmetrix.
type: list
contains:
cached_date:
description: Date Time stamp in epoch format when it was
cached.
type: int
data_date:
description: Date Time stamp in epoch format when it was
collected.
type: int
expired:
description: Flag to indicate the expiry of the score.
type: bool
health_score:
description: Overall health score in numbers.
type: int
instance_metrics:
description: Metrics about a specific instance.
type: list
contains:
health_score_instance_metric:
description: Health score of a specific instance.
type: int
metric:
description: Information about which sub system , such as
SYSTEM_UTILIZATION, CONFIGURATION,CAPACITY,
and so on.
type: str
num_failed_disks:
description: Numbers of the disk failure in this system.
type: int
Alerts:
description: Alert summary of the array.
returned: When the alert exists.
type: list
contains:
acknowledged:
description: Whether or not this alert is acknowledged.
type: str
alertId:
description: Unique ID of alert.
type: str
array:
description: Array serial no.
type: str
created_date:
description: Creation Date.
type: str
created_date_milliseconds:
description: Creation Date in milliseconds.
type: str
description:
description: Description about the alert
type: str
object:
description: Object description
type: str
object_type:
description: Resource class
type: str
severity:
description: Severity of the alert
type: str
state:
description: State of the alert
type: str
type:
description: Type of the alert
type: str
HostGroups:
description: List of host groups present on the array.
returned: When the hostgroups exist.
type: list
Hosts:
description: List of hosts present on the array.
returned: When the hosts exist.
type: list
MaskingViews:
description: List of masking views present on the array.
returned: When the masking views exist.
type: list
PortGroups:
description: List of port groups on the array.
returned: When the port groups exist.
type: list
Ports:
description: List of ports on the array.
returned: When the ports exist.
type: complex
contains:
directorId:
description: Director ID of the port.
type: str
portId:
description: Port number of the port.
type: str
RDFGroups:
description: List of RDF groups on the array.
returned: When the RDF groups exist.
type: complex
contains:
label:
description: Name of the RDF group.
type: str
rdfgNumber:
description: Unique identifier of the RDF group.
type: int
StorageGroups:
description: List of storage groups on the array.
returned: When the storage groups exist.
type: list
StorageResourcePools:
description: List of storage pools on the array.
returned: When the storage pools exist.
type: complex
contains:
diskGroupId:
description: ID of the disk group.
type: list
emulation:
description: Type of volume emulation.
type: str
num_of_disk_groups:
description: Number of disk groups.
type: int
rdfa_dse:
description: Flag for RDFA Delta Set Extension.
type: bool
reserved_cap_percent:
description: Reserved capacity percentage.
type: int
srpId:
description: Unique Identifier for SRP.
type: str
srp_capacity:
description: Different entities to measure SRP capacity.
type: dict
contains:
effective_used_capacity_percent:
description: Percentage of effectively used capacity.
type: int
snapshot_modified_tb:
description: Snapshot modified in TB.
type: int
snapshot_total_tb:
description: Total snapshot size in TB.
type: int
subscribed_allocated_tb:
description: Subscribed allocated size in TB.
type: int
subscribed_total_tb:
description: Subscribed total size in TB.
type: int
usable_total_tb:
description: Usable total size in TB.
type: int
usable_used_tb:
description: Usable used size in TB.
type: int
srp_efficiency:
description: Different entities to measure SRP efficiency.
type: dict
contains:
compression_state:
description: Depicts the compression state of the SRP.
type: str
data_reduction_enabled_percent:
description: Percentage of data reduction enabled in the
SRP.
type: int
data_reduction_ratio_to_one:
description: Data reduction ratio of SRP.
type: int
overall_efficiency_ratio_to_one:
description: Overall effectively ratio of SRP.
type: int
snapshot_savings_ratio_to_one:
description: Snapshot savings ratio of SRP.
type: int
virtual_provisioning_savings_ratio_to_one:
description: Virtual provisioning savings ratio of SRP.
type: int
total_srdf_dse_allocated_cap_gb:
description: Total srdf dse allocated capacity in GB.
type: int
Volumes:
description: List of volumes on the array.
returned: When the volumes exist.
type: list
MetroDREnvironments:
description: List of metro DR environments on the array.
returned: When environment exists.
type: list
SnapshotPolicies:
description: List of snapshot policies on the array.
returned: When snapshot policy exists.
type: list
'''
from ansible_collections.dellemc.powermax.plugins.module_utils.storage.dell \
import dellemc_ansible_powermax_utils as utils
from ansible.module_utils.basic import AnsibleModule
# Module-level logger shared by all operations below.
LOG = utils.get_logger('dellemc_powermax_gatherfacts')
# True when the PyU4V SDK could be imported; checked before any array access.
HAS_PYU4V = utils.has_pyu4v_sdk()
# None when the installed PyU4V version is supported, else an error message.
PYU4V_VERSION_CHECK = utils.pyu4v_version_check()
# Application Type tag reported to Unisphere for this module version.
APPLICATION_TYPE = 'ansible_v1.5.0'
class PowerMaxGatherFacts(object):
"""Class with Gather Fact operations"""
u4v_conn = None
    def __init__(self):
        """Define all the parameters required by this module and open the
        appropriate Unisphere/array connection.

        An empty serial_no selects a Unisphere-wide connection (used only to
        list the registered arrays); otherwise an array-scoped connection
        with provisioning and replication handles is created.
        """
        self.module_params = get_powermax_gatherfacts_parameters()
        self.module = AnsibleModule(
            argument_spec=self.module_params,
            supports_check_mode=False)
        serial_no = self.module.params['serial_no']
        if HAS_PYU4V is False:
            self.show_error_exit(msg="Ansible modules for PowerMax require "
                                     "the PyU4V python library to be "
                                     "installed. Please install the library "
                                     "before using these modules.")
        if PYU4V_VERSION_CHECK is not None:
            self.show_error_exit(msg=PYU4V_VERSION_CHECK)
        if self.module.params['universion'] is not None:
            universion_details = utils.universion_check(
                self.module.params['universion'])
            LOG.info("universion_details: %s", universion_details)
            if not universion_details['is_valid_universion']:
                self.show_error_exit(msg=universion_details['user_message'])
        try:
            if serial_no == '':
                # No array selected: connect to Unisphere only (array listing).
                self.u4v_unisphere_con = utils.get_u4v_unisphere_connection(
                    self.module.params, APPLICATION_TYPE)
                self.common = self.u4v_unisphere_con.common
                LOG.info("Got PyU4V Unisphere instance for common lib method "
                         "access on Powermax")
            else:
                # Array-scoped connection with provisioning/replication handles.
                self.module_params.update(
                    utils.get_powermax_management_host_parameters())
                self.u4v_conn = utils.get_U4V_connection(
                    self.module.params, application_type=APPLICATION_TYPE)
                self.provisioning = self.u4v_conn.provisioning
                self.u4v_conn.set_array_id(serial_no)
                LOG.info('Got PyU4V instance for provisioning on to PowerMax')
                self.replication = self.u4v_conn.replication
                LOG.info('Got PyU4V instance for replication on to PowerMax')
        except Exception as e:
            self.show_error_exit(msg=str(e))
def pre_check_for_PyU4V_version(self):
""" Performs pre-check for PyU4V version"""
curr_version = utils.PyU4V.__version__
supp_version = "9.2"
is_supported_version = utils.pkg_resources.parse_version(
curr_version) >= utils.pkg_resources.parse_version(supp_version)
if not is_supported_version:
msg = "Listing of 'MetroDR Environments' and 'Alerts' are " \
"not supported currently by PyU4V version " \
"{0}".format(curr_version)
self.show_error_exit(msg)
def get_system_health(self):
"""Get the System Health information PowerMax/VMAX storage system"""
try:
LOG.info('Getting System Health information ')
health_check = self.u4v_conn.system.get_system_health()
LOG.info('Successfully listed System Health information ')
return health_check
except Exception as e:
self.show_error_exit(msg=str(e))
def get_system_alerts(self, filters_dict=None):
"""Get the alerts information PowerMax/VMAX storage system"""
try:
self.pre_check_for_PyU4V_version()
alerts = []
supported_filters = ['type', 'severity', 'state',
'created_date', 'object',
'object_type', 'acknowledged',
'description']
LOG.info('Getting System alerts summary')
filter_to_apply = {}
if filters_dict:
for key, value in filters_dict.items():
if key in supported_filters:
if key == "object":
filter_to_apply.update({"_object": value})
elif key == "type":
filter_to_apply.update({"_type": value})
else:
filter_to_apply.update({key: value})
alerts_ids = self.u4v_conn.system.get_alert_ids(**filter_to_apply)
for alert_id in alerts_ids:
alerts.append(
self.u4v_conn.system.get_alert_details(
alert_id=alert_id))
LOG.info('Successfully listed %d alerts', len(alerts))
return alerts
except Exception as e:
msg = "Failed to get the alerts with error %s" % str(e)
LOG.error(msg)
self.show_error_exit(msg=msg)
def get_filters(self, filters=None):
"""Get the filters to be applied"""
filters_dict = {}
for item in filters:
if 'filter_key' in item and 'filter_operator' in item\
and 'filter_value' in item:
if item["filter_key"] is None \
or item["filter_operator"] is None \
or item["filter_value"] is None:
error_msg = "Please provide input for filter sub-options."
self.show_error_exit(msg=error_msg)
else:
f_key = item["filter_key"]
if item["filter_operator"] == "equal":
f_operator = ""
elif item["filter_operator"] == "greater":
f_operator = ">"
elif item["filter_operator"] == "lesser":
f_operator = "<"
elif item["filter_operator"] == "like":
f_operator = "<like>"
else:
msg = "The filter operator is not supported -- only" \
" 'equal', 'greater', 'lesser' and 'like' " \
"are supported."
self.show_error_exit(msg=msg)
val = item["filter_value"]
if val == "True" or val == "False":
f_value = val
else:
f_value = f_operator + val
if f_key in filters_dict:
# multiple filters on same key
if isinstance(filters_dict[f_key], list):
# prev_val is list, so append new f_val
filters_dict[f_key].append(f_value)
else:
# prev_val is not list,
# so create list with prev_val & f_val
filters_dict[f_key] = [filters_dict[f_key],
f_value]
else:
filters_dict[f_key] = f_value
else:
msg = 'filter_key and filter_operator and filter_value is ' \
'expected, "%s" given.' % list(item.keys())
self.show_error_exit(msg=msg)
return filters_dict
def get_volume_list(self, tdev_volumes=False, filters_dict=None):
"""Get the list of volumes of a given PowerMax/Vmax storage system"""
try:
LOG.info('Getting Volume List ')
array_serial_no = self.module.params['serial_no']
if tdev_volumes:
if filters_dict:
if "tdev" not in filters_dict.keys():
filters_dict["tdev"] = True
vol_list = self.provisioning.get_volume_list(
filters=filters_dict)
else:
vol_list = self.provisioning.get_volume_list(
filters={"tdev": True})
elif filters_dict:
vol_list = self.provisioning.get_volume_list(
filters=filters_dict)
else:
vol_list = self.provisioning.get_volume_list()
LOG.info('Successfully listed %d volumes from array %s',
len(vol_list), array_serial_no)
return vol_list
except Exception as e:
msg = 'Get Volumes for array %s failed with error %s '\
% (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def get_storage_group_list(self, filters_dict=None):
"""Get the list of storage groups of a given PowerMax/Vmax storage
system"""
try:
LOG.info('Getting Storage Group List ')
array_serial_no = self.module.params['serial_no']
if filters_dict:
sg_list = self.provisioning.get_storage_group_list(
filters=filters_dict)
else:
sg_list = self.provisioning.get_storage_group_list()
LOG.info('Successfully listed %d Storage Group from array %s',
len(sg_list), array_serial_no)
return sg_list
except Exception as e:
msg = 'Get Storage Group for array %s failed with error %s' \
% (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
    def get_array_list(self):
        """Get the list of arrays of a given PowerMax/Vmax Unisphere host"""
        try:
            LOG.info('Getting Array List ')
            array_list = self.common.get_array_list()
            # NOTE(review): every sibling method reads
            # self.module.params[...]; this one uses self.module_params.
            # Presumably a separate attribute set during init — confirm it
            # exists, otherwise these two lines raise AttributeError.
            LOG.info('Got %s Arrays from Unisphere Host %s',
                     len(array_list), self.module_params['unispherehost'])
            return array_list
        except Exception as e:
            msg = 'Get Array List for Unisphere host %s failed with error ' \
                  '%s' % (self.module_params['unispherehost'], str(e))
            self.show_error_exit(msg=msg)
def get_srp_list(self, filters_dict=None):
"""Get the list of Storage Resource Pools of a given PowerMax/Vmax
storage system"""
try:
LOG.info('Getting Storage Resource Pool List')
array_serial_no = self.module.params['serial_no']
if filters_dict:
srp_list \
= self.provisioning.get_srp_list(filters=filters_dict)
else:
srp_list = self.provisioning.get_srp_list()
LOG.info('Got %d Storage Resource Pool from array %s',
len(srp_list), array_serial_no)
srp_detail_list = []
for srp in srp_list:
srp_details = self.provisioning.get_srp(srp)
srp_detail_list.append(srp_details)
LOG.info('Successfully listed %d Storage Resource Pool details '
'from array %s', len(srp_detail_list), array_serial_no)
return srp_detail_list
except Exception as e:
msg = 'Get Storage Resource Pool details for array %s failed ' \
'with error %s' % (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def get_portgroup_list(self, filters_dict=None):
"""Get the list of port groups of a given PowerMax/Vmax storage
system"""
try:
LOG.info('Getting Port Group List ')
array_serial_no = self.module.params['serial_no']
if filters_dict:
pg_list = self.provisioning.get_port_group_list(
filters=filters_dict)
else:
pg_list = self.provisioning.get_port_group_list()
LOG.info('Got %d Port Groups from array %s',
len(pg_list), array_serial_no)
return pg_list
except Exception as e:
msg = 'Get Port Group for array %s failed with error %s' \
% (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def get_host_list(self, filters_dict=None):
"""Get the list of hosts of a given PowerMax/Vmax storage system"""
try:
LOG.info('Getting Host List ')
array_serial_no = self.module.params['serial_no']
if filters_dict:
host_list = self.provisioning.get_host_list(
filters=filters_dict)
else:
host_list = self.provisioning.get_host_list()
LOG.info('Got %d Hosts from array %s',
len(host_list), array_serial_no)
return host_list
except Exception as e:
msg = 'Get Host for array %s failed with error %s' \
% (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def get_hostgroup_list(self, filters_dict=None):
"""Get the list of host groups of a given PowerMax/Vmax storage
system"""
try:
LOG.info('Getting Host Group List ')
array_serial_no = self.module.params['serial_no']
if filters_dict:
hostgroup_list = self.provisioning.get_host_group_list(
filters=filters_dict)
else:
hostgroup_list = self.provisioning.get_host_group_list()
LOG.info('Got %d Host Groups from array %s',
len(hostgroup_list), array_serial_no)
return hostgroup_list
except Exception as e:
msg = 'Get Host Group for array %s failed with error %s ' \
% (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def get_port_list(self, filters_dict=None):
"""Get the list of ports of a given PowerMax/Vmax storage system"""
try:
LOG.info('Getting Port List ')
array_serial_no = self.module.params['serial_no']
if filters_dict:
port_list = self.provisioning.get_port_list(
filters=filters_dict)
else:
port_list = self.provisioning.get_port_list()
LOG.info('Got %d Ports from array %s',
len(port_list), array_serial_no)
return port_list
except Exception as e:
msg = 'Get Port for array %s failed with error %s ' \
% (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def get_masking_view_list(self, filters_dict=None):
"""Get the list of masking views of a given PowerMax/Vmax storage
system"""
try:
LOG.info('Getting Masking View List')
array_serial_no = self.module.params['serial_no']
if filters_dict:
mv_list = self.provisioning.\
get_masking_view_list(filters=filters_dict)
else:
mv_list = self.provisioning.get_masking_view_list()
LOG.info('Got %d Getting Masking Views from array %s',
len(mv_list), array_serial_no)
return mv_list
except Exception as e:
msg = 'Get Masking View for array %s failed with error %s' \
% (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def get_rdfgroup_list(self):
"""Get the list of rdf group of a given PowerMax/Vmax storage system
"""
try:
LOG.info('Getting rdf group List ')
array_serial_no = self.module.params['serial_no']
rdf_list = self.replication.get_rdf_group_list()
LOG.info('Successfully listed %d rdf groups from array %s',
len(rdf_list), array_serial_no)
return rdf_list
except Exception as e:
msg = 'Get rdf group for array %s failed with error %s ' \
% (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def get_metro_dr_env_list(self):
"""Get the list of metro DR environments of a given PowerMax/Vmax
storage system"""
try:
self.pre_check_for_PyU4V_version()
self.metro = self.u4v_conn.metro_dr
LOG.info("Got PyU4V instance for metro DR on to PowerMax")
LOG.info('Getting metro DR environment list ')
array_serial_no = self.module.params['serial_no']
metro_dr_env_list = self.metro.get_metrodr_environment_list()
LOG.info('Successfully listed %d metro DR environments from array'
' %s', len(metro_dr_env_list), array_serial_no)
return metro_dr_env_list
except Exception as e:
msg = 'Get metro DR environment for array %s failed with error ' \
'%s ' % (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def get_snapshot_policies_list(self):
"""Get the list of snapshot policies of a given PowerMax/Vmax
storage system"""
try:
self.pre_check_for_PyU4V_version()
self.snapshot_policy = self.u4v_conn.snapshot_policy
LOG.info("Got PyU4V instance for snapshot policy on to PowerMax")
LOG.info('Getting snapshot policies list ')
array_serial_no = self.module.params['serial_no']
snapshot_policy_list \
= self.snapshot_policy.get_snapshot_policy_list()
LOG.info('Successfully listed %d snapshot policies from array'
' %s', len(snapshot_policy_list), array_serial_no)
return snapshot_policy_list
except Exception as e:
msg = 'Get snapshot policies for array %s failed with error ' \
'%s ' % (self.module.params['serial_no'], str(e))
self.show_error_exit(msg=msg)
def show_error_exit(self, msg):
if self.u4v_conn is not None:
try:
LOG.info("Closing unisphere connection %s", self.u4v_conn)
utils.close_connection(self.u4v_conn)
LOG.info("Connection closed successfully")
except Exception as e:
err_msg = "Failed to close unisphere connection with " \
"error: %s", str(e)
LOG.error(err_msg)
LOG.error(msg)
self.module.fail_json(msg=msg)
def perform_module_operation(self):
""" Perform different actions on Gatherfacts based on user parameters
chosen in playbook """
serial_no = self.module.params['serial_no']
if serial_no == '':
array_list = self.get_array_list()
self.module.exit_json(Arrays=array_list)
else:
subset = self.module.params['gather_subset']
tdev_volumes = self.module.params['tdev_volumes']
filters = []
filters = self.module.params['filters']
if len(subset) == 0:
self.show_error_exit(msg="Please specify gather_subset")
filters_dict = {}
if filters:
filters_dict = self.get_filters(filters=filters)
health_check = []
vol = []
srp = []
sg = []
pg = []
host = []
hg = []
port = []
mv = []
rdf = []
alert = []
metro_dr_env = []
snapshot_policies = []
if 'alert' in str(subset):
alert = self.get_system_alerts(filters_dict=filters_dict)
if 'health' in str(subset):
health_check = self.get_system_health()
if 'vol' in str(subset):
vol = self.get_volume_list(
tdev_volumes=tdev_volumes, filters_dict=filters_dict)
if 'sg' in str(subset):
sg = self.get_storage_group_list(filters_dict=filters_dict)
if 'srp' in str(subset):
srp = self.get_srp_list(filters_dict=filters_dict)
if 'pg' in str(subset):
pg = self.get_portgroup_list(filters_dict=filters_dict)
if 'host' in str(subset):
host = self.get_host_list(filters_dict=filters_dict)
if 'hg' in str(subset):
hg = self.get_hostgroup_list(filters_dict=filters_dict)
if 'port' in str(subset):
port = self.get_port_list(filters_dict=filters_dict)
if 'mv' in str(subset):
mv = self.get_masking_view_list(filters_dict=filters_dict)
if 'rdf' in str(subset):
rdf = self.get_rdfgroup_list()
if 'metro_dr_env' in str(subset):
metro_dr_env = \
self.get_metro_dr_env_list()
if 'snapshot_policies' in str(subset):
snapshot_policies = \
self.get_snapshot_policies_list()
LOG.info("Closing unisphere connection %s", self.u4v_conn)
utils.close_connection(self.u4v_conn)
LOG.info("Connection closed successfully")
self.module.exit_json(
Alerts=alert,
Health=health_check,
Volumes=vol,
StorageGroups=sg,
StorageResourcePools=srp,
PortGroups=pg,
Hosts=host,
HostGroups=hg,
Ports=port,
MaskingViews=mv,
RDFGroups=rdf,
MetroDREnvironments=metro_dr_env,
SnapshotPolicies=snapshot_policies)
def get_powermax_gatherfacts_parameters():
    """Return the Ansible argument spec for the PowerMax gatherfacts
    module."""
    gather_subset_choices = ['alert', 'health', 'vol', 'sg', 'srp', 'pg',
                             'host', 'hg', 'port', 'mv', 'rdf',
                             'metro_dr_env', 'snapshot_policies']
    # Sub-spec for each entry of the `filters` list.
    filters_options = {
        'filter_key': {'type': 'str', 'required': True, 'no_log': False},
        'filter_operator': {'type': 'str', 'required': True,
                            'choices': ['equal', 'greater', 'lesser',
                                        'like']},
        'filter_value': {'type': 'str', 'required': True},
    }
    return {
        'unispherehost': {'type': 'str', 'required': True},
        'universion': {'type': 'int', 'required': False,
                       'choices': [91, 92]},
        'verifycert': {'type': 'bool', 'required': True,
                       'choices': [True, False]},
        'user': {'type': 'str', 'required': True},
        'password': {'type': 'str', 'required': True, 'no_log': True},
        'serial_no': {'type': 'str', 'required': False, 'default': ''},
        'tdev_volumes': {'type': 'bool', 'required': False,
                         'default': True, 'choices': [True, False]},
        'gather_subset': {'type': 'list', 'required': False,
                          'elements': 'str',
                          'choices': gather_subset_choices},
        'filters': {'type': 'list', 'required': False, 'elements': 'dict',
                    'options': filters_options},
    }
def main():
    """Module entry point: build the gatherfacts object and run the
    requested operation based on playbook input."""
    PowerMaxGatherFacts().perform_module_operation()
# Ansible executes modules as scripts, so run main() when invoked directly.
if __name__ == '__main__':
    main()
|
from django.db import models
from django.utils import timezone
# Create your models here.
class Mainpage(models.Model):
    """A page/post with a title, body text and an optional publish
    timestamp."""
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Null/blank until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True)

    def publish(self):
        """Stamp the page with the current time and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title
class CoordMetro(models.Model):
    """A metro station with its coordinates and the bars linked to it at
    three distance bands (300/500/1000)."""
    metroname = models.CharField(max_length=200)
    # Coordinates are stored as free text, not numeric fields.
    longitude = models.TextField()
    latitude = models.TextField()
    color = models.CharField(max_length=50, null=True)
    # Bars associated with this station per distance band; the reverse
    # accessors on CoordBar are metro_300 / metro_500 / metro_1000.
    distance_300 = models.ManyToManyField('CoordBar', related_name='metro_300')
    distance_500 = models.ManyToManyField('CoordBar', related_name='metro_500')
    distance_1000 = models.ManyToManyField('CoordBar',related_name='metro_1000')

    def __str__(self):
        return self.metroname
class CoordBar(models.Model):
    """A bar with its coordinates and contact/location details."""
    barname = models.CharField(max_length=200)
    # Coordinates are stored as free text, not numeric fields.
    longitude = models.TextField()
    latitude = models.TextField()
    address = models.TextField()
    district = models.TextField()
    area = models.TextField()
    phone_number = models.TextField()

    def __str__(self):
        return self.barname
from game import player, resources, map, util
import pygame
import random
import math
import sys
import os
# Resolution variables. The game renders to a fixed-size Display surface
# which is stretched to the user-configurable Screen resolution.
DISPLAY_WIDTH = 640
DISPLAY_HEIGHT = 360
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 360
if os.path.isfile("data/settings.txt"):
    print("Settings file found!")
    # Fix: use a context manager so the settings file handle is closed
    # (the original leaked the file object returned by open()).
    with open("data/settings.txt") as settings_file:
        video_settings = settings_file.read().splitlines()
    for line in video_settings:
        if line.startswith("resolution="):
            SCREEN_WIDTH = int(line[line.index("=") + 1:line.index("x")])
            SCREEN_HEIGHT = int(line[line.index("x") + 1:])
            # Pick the display height matching the screen's aspect ratio.
            aspect_ratio = SCREEN_WIDTH / SCREEN_HEIGHT
            if aspect_ratio == 4 / 3:
                DISPLAY_HEIGHT = 480
            elif aspect_ratio == 16 / 10:
                DISPLAY_HEIGHT = 420
            elif aspect_ratio == 16 / 9:
                DISPLAY_HEIGHT = 360
else:
    print("No settings file found!")
print("Resolution set to " + str(SCREEN_WIDTH) + "x" + str(SCREEN_HEIGHT) + ".")
SCALE = SCREEN_WIDTH / DISPLAY_WIDTH
display_rect = (0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT)
# Timing variables
TARGET_FPS = 60
SECOND = 1000  # milliseconds per second
UPDATE_TIME = SECOND / 60.0  # ms per logic step; dt is measured in steps
fps = 0
frames = 0
dt = 0
before_time = 0
before_sec = 0
# Handle cli flags
debug_mode = "--debug" in sys.argv
show_fps = "--showfps" in sys.argv  # show fps even outside debug mode
# Init pygame
os.environ["SDL_VIDEO_CENTERED"] = '1'
pygame.init()
# (A no-op module-level "global screen" statement was removed here.)
# Debug runs in a window; normal runs use a hardware fullscreen surface.
if debug_mode:
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
else:
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.FULLSCREEN)
display = pygame.Surface((DISPLAY_WIDTH, DISPLAY_HEIGHT))
clock = pygame.time.Clock()
# Input variables
input_queue = []
input_states = {"player left": False, "player right": False, "player up": False, "player down": False}
mouse_x = 0
mouse_y = 0
# Color variables
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
# Font variables
font_small = pygame.font.SysFont("Serif", 11)
# Game states
EXIT = -1
MAIN_LOOP = 0
DEATH_SCREEN = 1
def game():
    """Main gameplay loop: update the player, spells and enemies, then
    render the world and UI each frame until the player dies or quits.

    Falls through to death_screen() when the player's health reaches 0.
    """
    player_obj = player.Player(DISPLAY_WIDTH, DISPLAY_HEIGHT)
    level = map.Map()
    player_obj.x, player_obj.y = level.player_spawn
    running = True
    next_state = EXIT
    while running:
        handle_input()
        player_obj.handle_input(input_queue, input_states, mouse_x, mouse_y)
        """
        BEGIN UPDATING
        """
        player_obj.update(dt)
        player_interaction = player_obj.click_interaction
        # While aiming a room-targeted spell, check the aim spot is free.
        if player_obj.ui_substate == player_obj.AIM_SPELL:
            if player_obj.pending_spell.NEEDS_ROOM_AIM:
                pending_spell_rect = (player_obj.mouse_x + player_obj.get_camera_x(), player_obj.mouse_y + player_obj.get_camera_y(), player_obj.pending_spell.width, player_obj.pending_spell.height)
                player_obj.has_room_aim = level.valid_entity_rect(pending_spell_rect)
        # Expire spells that left the active rooms; feed live-enemy
        # positions to spells that request them.
        for spell in player_obj.active_spells:
            if not level.rect_in_current_rooms(spell.get_rect()):
                spell.end_spell()
                continue
            if spell.requests_enemies:
                spell.enemies = []
                for room in level.current_rooms:
                    for enemy in room.enemies:
                        if enemy.health > 0:
                            spell.enemies.append((enemy.x, enemy.y))
        # World simulation runs only when no modal UI (besides the
        # inventory) is open.
        if player_obj.ui_state == player_obj.NONE or player_obj.ui_state == player_obj.INVENTORY:
            level.update(player_obj)
            for room in level.current_rooms:
                for collider in room.colliders:
                    player_obj.check_collision(dt, collider)
                for enemy in room.enemies:
                    enemy.update(dt, player_obj.get_rect())
                    if enemy.health > 0:
                        for collider in room.colliders:
                            enemy.check_collision(dt, collider)
                        if enemy.check_entity_collisions:
                            for other_enemy in room.enemies:
                                if other_enemy != enemy and other_enemy.health > 0:
                                    enemy.check_collision(dt, other_enemy.get_rect())
                            enemy.check_collision(dt, player_obj.get_rect())
                            player_obj.check_collision(dt, enemy.get_rect())
                        # Enemy attacks that touch the player become
                        # interactions (damage etc.) on the player.
                        for hurtbox in enemy.get_hurtboxes():
                            if player_obj.collides(hurtbox[0]):
                                player_obj.add_interaction(hurtbox[1])
                    if enemy.health > 0:
                        # Apply active-spell hits to this enemy.
                        for spell in player_obj.active_spells:
                            if spell.handles_collisions:
                                if spell.collides(enemy.get_rect()):
                                    if spell.interact:
                                        for interaction in spell.get_interactions():
                                            enemy.add_interaction(interaction)
                                    spell.handle_collision()
                room.enemies = [enemy for enemy in room.enemies if not enemy.delete_me]
                for spell in player_obj.active_spells:
                    if spell.handles_collisions:
                        for collider in room.colliders:
                            if spell.collides(collider):
                                spell.handle_collision()
                # Chest opening: a click near enough to a closed chest
                # marks it open and scatters its items around it.
                for chest in room.chests:
                    player_obj.check_collision(dt, chest[0])
                    if not chest[1]:
                        if player_interaction is not None:
                            if util.point_in_rect((mouse_x + player_obj.get_camera_x(), mouse_y + player_obj.get_camera_y()), chest[0]):
                                if util.get_distance(player_obj.get_center(), util.get_center(chest[0])) <= 50:
                                    chest[1] = True
                                    old_spawn_degrees = []
                                    for item in chest[2]:
                                        spawn_degree = 0
                                        distance = 70
                                        x_dist = 0
                                        y_dist = 0
                                        spawn_degree_okay = False
                                        # Retry until the angle is >= 10
                                        # degrees from all previous drops
                                        # and doesn't land on the player.
                                        while not spawn_degree_okay:
                                            spawn_degree = random.randint(0, 360)
                                            spawn_degree_okay = True
                                            for degree in old_spawn_degrees:
                                                if abs(degree - spawn_degree) < 10:
                                                    spawn_degree_okay = False
                                            if spawn_degree_okay:
                                                x_dist = int(distance * math.cos(math.radians(spawn_degree)))
                                                y_dist = int(distance * math.sin(math.radians(spawn_degree)))
                                                if player_obj.collides((x_dist + util.get_center(chest[0])[0], y_dist + util.get_center(chest[0])[1], 20, 20)):
                                                    spawn_degree_okay = False
                                        old_spawn_degrees.append(spawn_degree)
                                        room.items.append([item[0], item[1], (x_dist + util.get_center(chest[0])[0], y_dist + util.get_center(chest[0])[1])])
                # Item pickup on touch; a zeroed count marks the item for
                # removal in the rebuild below.
                for item in room.items:
                    if player_obj.collides((item[2][0], item[2][1], 20, 20)):
                        player_obj.add_item(item[0], item[1])
                        item[1] = 0
                room.items = [item for item in room.items if item[1] != 0]
        player_obj.update_camera()
        """
        BEGIN RENDERING
        """
        clear_display()
        """
        RENDER ROOMS
        """
        for room in level.rooms:
            for tile in room.render_points:
                tile_img = tile[0]
                tile_x = tile[1] - player_obj.get_camera_x()
                tile_y = tile[2] - player_obj.get_camera_y()
                if rect_in_screen((tile_x, tile_y, 50, 50)):
                    if isinstance(tile_img, str):
                        display.blit(resources.get_image(tile_img, False), (tile_x, tile_y))
                    else:
                        # Tile index 16 is deferred to a later pass that
                        # draws above the player.
                        if tile_img[1] != 16:
                            display.blit(resources.get_tile(tile_img[0], tile_img[1]), (tile_x, tile_y))
            for chest in room.chests:
                if chest[1]:
                    display.blit(resources.get_image("chest-open", True), (chest[0][0] - player_obj.get_camera_x(), chest[0][1] - player_obj.get_camera_y()))
                else:
                    display.blit(resources.get_image("chest", True), (chest[0][0] - player_obj.get_camera_x(), chest[0][1] - player_obj.get_camera_y()))
            for item in room.items:
                display.blit(resources.get_image(item[0], True), (item[2][0] - player_obj.get_camera_x(), item[2][1] - player_obj.get_camera_y()))
        """
        RENDER SPELLS
        """
        for spell in player_obj.active_spells:
            spell_x = spell.get_x() - player_obj.get_camera_x()
            spell_y = spell.get_y() - player_obj.get_camera_y()
            display.blit(spell.get_image(), (spell_x, spell_y))
            for subrenderable in spell.get_subrenderables():
                subrender_img = subrenderable[0]
                for subrender_coords in subrenderable[1:]:
                    coords = (subrender_coords[0] - player_obj.get_camera_x(), subrender_coords[1] - player_obj.get_camera_y())
                    display.blit(subrender_img, coords)
        # Aim overlay: range circle plus a preview of the pending spell.
        if player_obj.ui_substate == player_obj.AIM_SPELL:
            pygame.draw.circle(display, WHITE, (player_obj.get_x() - player_obj.get_camera_x() + player_obj.width // 2, player_obj.get_y() - player_obj.get_camera_y() + player_obj.height // 2), player_obj.pending_spell.AIM_RADIUS, 3)
            if player_obj.pending_spell.is_aim_valid(player_obj.get_center(), player_obj.get_aim()):
                aim_coords = player_obj.pending_spell.get_aim_coords(player_obj.get_center(), player_obj.get_aim())
                aim_coords = (int(aim_coords[0] - player_obj.get_camera_x()), int(aim_coords[1] - player_obj.get_camera_y()))
                display.blit(player_obj.pending_spell.get_image(), aim_coords)
        """
        RENDER ENEMIES
        """
        for room in level.rooms:
            for enemy in room.enemies:
                for subrenderable in enemy.get_subrenderables():
                    coords = (int(subrenderable[1][0]) - player_obj.get_camera_x(), int(subrenderable[1][1]) - player_obj.get_camera_y())
                    display.blit(resources.get_image(subrenderable[0], True), coords)
                if enemy.health > 0:
                    enemy_x = enemy.get_x() - player_obj.get_camera_x()
                    enemy_y = enemy.get_y() - player_obj.get_camera_y()
                    if rect_in_screen((enemy_x, enemy_y, enemy.width, enemy.height)):
                        if enemy.attacking:
                            pygame.draw.rect(display, RED, (enemy_x, enemy_y, enemy.width, enemy.height), False)
                        else:
                            display.blit(enemy.get_image(), (enemy_x, enemy_y))
                        hitbox_rects = enemy.get_hurtboxes()
                        for rect in hitbox_rects:
                            to_draw = (rect[0][0] - player_obj.get_camera_x(), rect[0][1] - player_obj.get_camera_y(), rect[0][2], rect[0][3])
                            pygame.draw.rect(display, RED, to_draw, False)
                        # Bosses use the large top-of-screen bar instead.
                        if not enemy.is_boss:
                            healthbar_rect = (enemy_x - 5, enemy_y - 5, int((10 + enemy.width) * (enemy.health / enemy.max_health)), 2)
                            pygame.draw.rect(display, RED, healthbar_rect, False)
                            if enemy.get_plague_meter_percent() is not None:
                                plaguebar_rect = (enemy_x - 5, enemy_y - 2, int((10 + enemy.width) * (1 - enemy.get_plague_meter_percent())), 2)
                                pygame.draw.rect(display, YELLOW, plaguebar_rect, False)
        """
        RENDER PLAYER
        """
        display.blit(player_obj.get_image(), (player_obj.get_x() - player_obj.get_camera_x(), player_obj.get_y() - player_obj.get_camera_y()))
        if player_obj.get_chargebar_percentage() > 0:
            chargebar_rect = (player_obj.get_x() - player_obj.get_camera_x() - 2, player_obj.get_y() - player_obj.get_camera_y() - 5, int(round(30 * player_obj.get_chargebar_percentage())), 5)
            pygame.draw.rect(display, YELLOW, chargebar_rect, False)
        # Second tile pass: index-16 tiles are drawn over the player.
        for room in level.rooms:
            for tile in room.render_points:
                tile_img = tile[0]
                tile_img = tile[0]  # (duplicate of the line above — redundant but harmless)
                tile_x = tile[1] - player_obj.get_camera_x()
                tile_y = tile[2] - player_obj.get_camera_y()
                if rect_in_screen((tile_x, tile_y, 50, 50)):
                    if not isinstance(tile_img, str):
                        if tile_img[1] == 16:
                            display.blit(resources.get_tile(tile_img[0], tile_img[1]), (tile_x, tile_y))
        # Hearts HUD: full, half or empty heart per point of max health.
        for i in range(0, player_obj.max_health):
            x_coord = 5 + (30 * i)
            y_coord = 5
            if player_obj.health == i + 0.5:
                display.blit(resources.get_subimage("heart", True, (0, 0, 10, 20)), (x_coord, y_coord))
                display.blit(resources.get_subimage("heart-empty", True, (10, 0, 10, 20)), (x_coord + 10, y_coord))
            elif player_obj.health > i:
                display.blit(resources.get_image("heart", True), (x_coord, y_coord))
            else:
                display.blit(resources.get_image("heart-empty", True), (x_coord, y_coord))
        # Quick-slot HUD: most recent spell and item with their counts.
        if player_obj.recent_spell is not None:
            display.blit(resources.get_image(player_obj.recent_spell, True), (DISPLAY_WIDTH - 36 - 5, 5))
            spell_count = 0
            if player_obj.recent_spell == "needle":
                spell_count = int(player_obj.health)
            else:
                spell_count = player_obj.inventory["spellbook-" + player_obj.recent_spell]
            count_surface = font_small.render(str(spell_count), False, WHITE)
            display.blit(count_surface, (DISPLAY_WIDTH - 36 - 5 + int(36 * 0.8), 5 + int(36 * 0.8)))
        if player_obj.recent_item is not None:
            display.blit(pygame.transform.scale(resources.get_image(player_obj.recent_item, True), (36, 36)), (DISPLAY_WIDTH - 36 - 5 - 36 - 5, 5))
            item_count = player_obj.inventory[player_obj.recent_item]
            count_surface = font_small.render(str(item_count), False, WHITE)
            display.blit(count_surface, (DISPLAY_WIDTH - 36 - 5 - 36 - 5 + int(36 * 0.8), 5 + int(36 * 0.8)))
        # Render boss UI
        for room in level.current_rooms:
            for enemy in room.enemies:
                if enemy.is_boss:
                    healthbar_width = int(DISPLAY_WIDTH * 0.5 * (enemy.health / enemy.max_health))
                    healthbar_rect = ((DISPLAY_WIDTH - healthbar_width) // 2, 0, healthbar_width, 20)
                    pygame.draw.rect(display, RED, healthbar_rect, False)
        """
        RENDER SPELLWHEEL UI
        """
        if player_obj.ui_state == player_obj.SPELLWHEEL and player_obj.ui_substate == player_obj.CHOOSE_SPELL:
            fade_surface = pygame.Surface((DISPLAY_WIDTH, DISPLAY_HEIGHT), pygame.SRCALPHA)
            fade_surface.fill((0, 0, 0, player_obj.fade_alpha))
            display.blit(fade_surface, (0, 0))
            # The wheel itself appears once the fade-in completes.
            if player_obj.fade_alpha == 100:
                display.blit(resources.get_image("spellwheel", True), ((DISPLAY_WIDTH // 2) - 150, (DISPLAY_HEIGHT // 2) - 150))
                for item in player_obj.spellcircle_items:
                    display.blit(resources.get_image(item[0][item[0].index("-") + 1:], True), (item[2][0], item[2][1]))
                    count_surface = font_small.render(str(item[1]), False, BLUE)
                    display.blit(count_surface, ((item[2][0] + int(item[2][2] * 0.8), item[2][1] + int(item[2][3] * 0.8))))
        """
        RENDER INVENTORY UI
        """
        if player_obj.ui_state == player_obj.INVENTORY:
            ICON_SIZE = 40
            ICON_RENDER_SIZE = 36
            RENDER_OFFSET = (ICON_SIZE - ICON_RENDER_SIZE) // 2
            INVENTORY_ROWS = 3
            INVENTORY_COLUMNS = 4
            INVENTORY_WIDTH = ICON_SIZE * INVENTORY_COLUMNS
            INVENTORY_HEIGHT = ICON_SIZE * INVENTORY_ROWS
            inventory_rect = ((640 // 2) - (INVENTORY_WIDTH // 2), (360 // 2) - (INVENTORY_HEIGHT // 2), INVENTORY_WIDTH, INVENTORY_HEIGHT)
            pygame.draw.rect(display, WHITE, inventory_rect, True)
            # Grid lines between the inventory cells.
            for i in range(1, INVENTORY_ROWS):
                pygame.draw.line(display, WHITE, (inventory_rect[0], inventory_rect[1] + (i * ICON_SIZE)), (inventory_rect[0] + inventory_rect[2] - 1, inventory_rect[1] + (i * ICON_SIZE)))
            for i in range(1, INVENTORY_COLUMNS):
                pygame.draw.line(display, WHITE, (inventory_rect[0] + (i * ICON_SIZE), inventory_rect[1]), (inventory_rect[0] + (i * ICON_SIZE), inventory_rect[1] + inventory_rect[3] - 1))
            item_coords = (0, 0)
            for name in player_obj.inventory.keys():
                # Highlight equipped spellbooks and the quick item.
                if name in player_obj.equipped_spellbooks or name == player_obj.recent_item:
                    pygame.draw.rect(display, YELLOW, (inventory_rect[0] + item_coords[0], inventory_rect[1] + item_coords[1], ICON_SIZE, ICON_SIZE), True)
                display.blit(pygame.transform.scale(resources.get_image(name, True), (ICON_RENDER_SIZE, ICON_RENDER_SIZE)), (inventory_rect[0] + item_coords[0] + RENDER_OFFSET, inventory_rect[1] + item_coords[1] + RENDER_OFFSET))
                count_surface = font_small.render(str(player_obj.inventory[name]), False, WHITE)
                display.blit(count_surface, (inventory_rect[0] + item_coords[0] + RENDER_OFFSET + int(ICON_RENDER_SIZE * 0.8), inventory_rect[1] + item_coords[1] + RENDER_OFFSET + int(ICON_RENDER_SIZE * 0.8)))
                item_coords = (item_coords[0] + ICON_SIZE, item_coords[1])
                if item_coords[0] >= INVENTORY_WIDTH:
                    item_coords = (0, item_coords[1] + ICON_SIZE)
        if debug_mode or show_fps:
            render_fps()
        flip_display()
        # pygame.display.flip()
        tick()
        if player_obj.health <= 0:
            running = False
            next_state = DEATH_SCREEN
    if next_state == DEATH_SCREEN:
        death_screen()
def death_screen():
    """Game-over menu: 'Play Again' restarts game(), 'Yeet' returns."""
    def _centered(surface, rect):
        # Top-left position that centers `surface` within `rect`.
        return (rect[0] + (rect[2] // 2) - (surface.get_width() // 2),
                rect[1] + (rect[3] // 2) - (surface.get_height() // 2))

    continue_button_rect = ((DISPLAY_WIDTH // 2) - 50, (DISPLAY_HEIGHT // 2) + 40, 100, 20)
    exit_button_rect = ((DISPLAY_WIDTH // 2) - 50, (DISPLAY_HEIGHT // 2) + 70, 100, 20)
    death_message = font_small.render("You Heckin Died", False, WHITE)
    continue_text = font_small.render("Play Again", False, WHITE)
    continue_text_pos = _centered(continue_text, continue_button_rect)
    exit_text = font_small.render("Yeet", False, WHITE)
    exit_text_pos = _centered(exit_text, exit_button_rect)
    active = True
    next_state = EXIT
    while active:
        # Hover is computed from the mouse position of the previous frame
        # (before handle_input runs), as in the original.
        hovered_button = 0
        if util.point_in_rect((mouse_x, mouse_y), continue_button_rect):
            hovered_button = 1
        elif util.point_in_rect((mouse_x, mouse_y), exit_button_rect):
            hovered_button = 2
        handle_input()
        while input_queue:
            event = input_queue.pop()
            if event == ("left click", True):
                if hovered_button == 1:
                    next_state = MAIN_LOOP
                    active = False
                elif hovered_button == 2:
                    next_state = EXIT
                    active = False
        clear_display()
        display.blit(death_message, ((DISPLAY_WIDTH // 2) - (death_message.get_width() // 2), (DISPLAY_HEIGHT // 2) - (death_message.get_height() // 2) - 100))
        # Hovered buttons are drawn filled (width 0), others outlined.
        pygame.draw.rect(display, WHITE, continue_button_rect, hovered_button != 1)
        display.blit(continue_text, continue_text_pos)
        pygame.draw.rect(display, WHITE, exit_button_rect, hovered_button != 2)
        display.blit(exit_text, exit_text_pos)
        if debug_mode or show_fps:
            render_fps()
        flip_display()
        tick()
    if next_state == MAIN_LOOP:
        game()
def handle_input():
    """Drain the pygame event queue into input_queue / input_states and
    track the mouse position in display (unscaled) coordinates."""
    global input_states, mouse_x, mouse_y
    movement_keys = {pygame.K_w: "player up", pygame.K_s: "player down",
                     pygame.K_d: "player right", pygame.K_a: "player left"}
    action_keys = {pygame.K_SPACE: "spellwheel", pygame.K_q: "quickcast",
                   pygame.K_i: "inventory", pygame.K_e: "quickitem"}
    for event in pygame.event.get():
        if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
            pygame.quit()
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            if event.key in movement_keys:
                action = movement_keys[event.key]
                input_queue.append((action, True))
                input_states[action] = True
            elif event.key in action_keys:
                # One-shot actions: queued but not tracked as held state.
                input_queue.append((action_keys[event.key], True))
        elif event.type == pygame.KEYUP:
            if event.key in movement_keys:
                action = movement_keys[event.key]
                input_queue.append((action, False))
                input_states[action] = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            if event.button == pygame.BUTTON_LEFT:
                input_queue.append(("left click", True))
            elif event.button == pygame.BUTTON_RIGHT:
                input_queue.append(("right click", True))
        elif event.type == pygame.MOUSEMOTION:
            # Convert window coordinates back to display coordinates.
            mouse_pos = pygame.mouse.get_pos()
            mouse_x = int(mouse_pos[0] / SCALE)
            mouse_y = int(mouse_pos[1] / SCALE)
def rect_in_screen(rect):
    """Return True when rect (x, y, w, h) overlaps the display area."""
    # Two axis-aligned rects overlap when, on each axis, the distance
    # between their centers is less than half the sum of their extents.
    dx = abs(DISPLAY_WIDTH // 2 - (rect[0] + (rect[2] // 2)))
    dy = abs(DISPLAY_HEIGHT // 2 - (rect[1] + (rect[3] // 2)))
    return dx * 2 < rect[2] + DISPLAY_WIDTH and dy * 2 < rect[3] + DISPLAY_HEIGHT
def clear_display():
    """Clear the backbuffer to solid black before drawing a frame.

    The original passed width=False (a bool coerced to 0, i.e. filled) to
    pygame.draw.rect over the whole surface; Surface.fill is the idiomatic
    full-surface clear with the same result.
    """
    display.fill(BLACK)
def flip_display():
    """Scale the fixed-size backbuffer up to the window and present it."""
    global frames
    # Scale directly into the screen surface (stretch display to window).
    pygame.transform.scale(display, (SCREEN_WIDTH, SCREEN_HEIGHT), screen)
    pygame.display.flip()
    frames += 1  # counted for the once-per-second FPS readout in tick()
def render_fps():
    """Draw the current FPS counter in the top-left corner."""
    fps_surface = font_small.render("FPS: " + str(fps), False, YELLOW)
    display.blit(fps_surface, (0, 0))
def tick():
    """Advance frame timing: recompute dt, roll the FPS counter once per
    second, and sleep to hold TARGET_FPS."""
    global before_time, before_sec, fps, frames, dt
    now = pygame.time.get_ticks()
    # dt is the elapsed time expressed in logic-update units.
    dt = (now - before_time) / UPDATE_TIME
    if now - before_sec >= SECOND:
        fps, frames = frames, 0
        before_sec += SECOND
    before_time = pygame.time.get_ticks()
    clock.tick(TARGET_FPS)
if __name__ == "__main__":
before_time = pygame.time.get_ticks()
before_sec = before_time
game()
pygame.quit()
|
'''
Напишите программу, которая предлагает ввести пароль и не переходит к выполнению основной части, пока не введён правильный пароль. Основная часть – вывод на экран «секретных сведений».
Sample Input 1:
1501
Sample Output 1:
Введите пароль:
Пароль верный!
Секретные сведения: я учусь в IT-классе.
Sample Input 2:
0115
1501
Sample Output 2:
Введите пароль:
Неверный пароль!
Введите пароль:
Пароль верный!
Секретные сведения: я учусь в IT-классе.
'''
# Keep prompting until the correct password is entered, then print the
# "secret" message (prompts/messages are part of the expected output).
while True:
    print('Введите пароль:')
    s = input()
    if s == '1501':
        break
    print('Неверный пароль!')
print('Пароль верный!')
print('Секретные сведения: я учусь в IT-классе.')
|
from flask import Flask
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import exc
from sqlalchemy import event
from sqlalchemy.pool import Pool
from flask_cors import CORS
app = Flask(__name__)
# Package defaults first; HOMEPERFORMANCE_SETTINGS names an override
# settings file and must be set in the environment.
app.config.from_object('chartingperformance.default_settings')
app.config.from_envvar('HOMEPERFORMANCE_SETTINGS')
engine = create_engine(app.config['DATABASE_URI'])
# Thread-local session registry; shutdown_session() below removes the
# session at the end of each app context.
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    """Pessimistically ping a pooled connection on checkout.

    Runs ``SELECT 1`` on every checkout; if it fails, raising
    DisconnectionError makes the pool invalidate the connection and retry
    (up to three times) before surfacing the error.
    """
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
    except Exception:
        # Fix: was a bare `except:` (which also swallows SystemExit /
        # KeyboardInterrupt).
        # optional - dispose the whole pool
        # instead of invalidating one at a time
        # connection_proxy._pool.dispose()
        # raise DisconnectionError - pool will try
        # connecting again up to three times before raising.
        raise exc.DisconnectionError()
    finally:
        # Fix: close the cursor on the failure path too, not only on
        # success.
        cursor.close()
# Allow cross-origin requests (Content-Type header) on the API routes.
CORS(app, resources=r'/api/*', allow_headers='Content-Type')
# Imported for its side effects: registers route handlers on `app`.
# Kept after app/db setup, presumably to avoid a circular import — verify.
import chartingperformance.routes
@app.teardown_appcontext
def shutdown_session(exception=None):
    # Return the scoped session to the registry at the end of each app
    # context so connections are not leaked across requests.
    db_session.remove()
if __name__ == '__main__':
    # Run the built-in development server when executed directly.
    app.run(host=app.config['HOST'], port=app.config['PORT'], debug=app.config['DEBUG'])
|
import socket
# Create a UDP/IP socket
# Interactive UDP sender: read messages from the console and push each one as
# a datagram to the chosen destination until the user submits an empty line.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
IP = input('Enter a destination IP: ')
port = int(input('Enter a port number: '))
while True:
    print('Enter a message or click Enter to exit:')
    message = input()
    if not message:
        break
    print("UDP target IP:", IP)
    print("UDP target port:", port)
    print("message:", message)
    sock.sendto(message.encode("utf-8"), (IP, port))
import cv2
import numpy as np
import time
def existence_mask(frame, threshhold):
    """Return 1 if the fraction of target-colored pixels in *frame* reaches
    *threshhold*, else 0.

    The frame is shrunk to 60x60 and thresholded in HSV around the hue of the
    hard-coded BGR target color (25, 77, 249).
    """
    resized = cv2.resize(frame, (60,60))
    #lur = cv2.GaussianBlur(resized,(21, 21),0)
    # NOTE(review): the frame is converted with COLOR_RGB2HSV while the target
    # uses COLOR_BGR2HSV — confirm the camera frames really are RGB (OpenCV
    # captures are BGR by default).
    hsv = cv2.cvtColor(resized, cv2.COLOR_RGB2HSV)
    target = np.uint8([[[25,77,249]]])
    hsv_target = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
    #print(hsv_target)
    # Accept hues within +/-10 of the target hue, with tight S/V windows.
    low = hsv_target[0][0][0] - 10
    upper = hsv_target[0][0][0] + 10
    lower_bound = np.array([low, 219, 239])
    upper_bound = np.array([upper, 239, 255])
    mask = cv2.inRange(hsv, lower_bound, upper_bound)
    blue_points = np.count_nonzero(mask)
    # NOTE(review): the 60x60 mask has 3600 pixels but the ratio divides by
    # 24300 — looks like a leftover from an earlier resolution; confirm.
    if blue_points/24300 >= threshhold:
        return 1
    return 0
#Alternative, using contour
def existence_contour(frame, threshhold):
    """Return 1 if at least *threshhold* contours of the target color are
    found in *frame*, else 0.  Same HSV windowing as existence_mask.
    """
    resized = cv2.resize(frame, (60,60))
    # blur = cv2.GaussianBlur(resized,(21, 21),0)
    hsv = cv2.cvtColor(resized, cv2.COLOR_RGB2HSV)
    target = np.uint8([[[25,77,249]]])
    hsv_target = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
    #print(hsv_target)
    low = hsv_target[0][0][0] - 10
    upper = hsv_target[0][0][0] + 10
    lower_bound = np.array([low, 219, 239])
    upper_bound = np.array([upper, 239, 255])
    mask = cv2.inRange(hsv, lower_bound, upper_bound)
    # NOTE(review): the two-value unpacking assumes OpenCV 4.x;
    # OpenCV 3.x returns three values from findContours.
    contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if len(contours) >= threshhold:
        return 1
    return 0
# Open the five camera devices and request 30 fps from each.
cap1 = cv2.VideoCapture(0) #camera 1
cap2 = cv2.VideoCapture(1) #camera 2
cap3 = cv2.VideoCapture(2) #camera 3
cap4 = cv2.VideoCapture(3) #camera 4
cap5 = cv2.VideoCapture(4) #camera 5
cap1.set(cv2.CAP_PROP_FPS, 30) #camera1's fps
cap2.set(cv2.CAP_PROP_FPS, 30) #camera2's fps
cap3.set(cv2.CAP_PROP_FPS, 30) #camera3's fps
cap4.set(cv2.CAP_PROP_FPS, 30) #camera4's fps
cap5.set(cv2.CAP_PROP_FPS, 30) #camera5's fps
# Frames seen in the current detection window.  Was initialised to 1, which
# made the first window only 9 frames long.
n = 0
running_sum = [0,0,0,0,0]   # per-camera detection hits in the current window
result = [0,0,0,0,0]        # per-camera vote over the last completed window
try:
    while(True):
        # Capture camera1 frame-by-frame
        ret1, frame1 = cap1.read()
        ret2, frame2 = cap2.read()
        ret3, frame3 = cap3.read()
        ret4, frame4 = cap4.read()
        ret5, frame5 = cap5.read()
        n += 1
        running_sum[0] += existence_contour(frame1, 4)
        running_sum[1] += existence_contour(frame2, 4)
        running_sum[2] += existence_contour(frame3, 4)
        running_sum[3] += existence_contour(frame4, 4)
        running_sum[4] += existence_contour(frame5, 4)
        if n == 10:
            for i in range(5):
                if running_sum[i] >= 6:
                    print("Camera%d has detected an enemy"%i)
                else:
                    print("Camera%d is safe"%i)
                result[i] = (running_sum[i] + 5)//10
            #write result to txt if needed
            # Start a new window.  Previously n/running_sum were never reset,
            # so the report above could only ever fire once.
            n = 0
            running_sum = [0,0,0,0,0]
        # Display the resulting frame
        # cv2.imshow('frame1',frame)
        # if cv2.waitKey(10) & 0xFF == ord('q'):
        #     break
finally:
    # When everything done, release the captures.  The original called
    # ``cap.release()`` on an undefined name (NameError) and released only
    # one device.
    for c in (cap1, cap2, cap3, cap4, cap5):
        c.release()
    cv2.destroyAllWindows()
def verify_arrays_have_same_content(res, expected):
    """Assert that *res* and *expected* hold the same elements, in any order,
    with matching multiplicities.

    Raises AssertionError when the collections differ.  The previous
    implementation collapsed *expected* into a set, so inputs containing
    duplicates (e.g. res == expected == [1, 1]) failed incorrectly.
    Elements must be hashable (same requirement as before).
    """
    from collections import Counter
    assert len(res) == len(expected)
    remaining = Counter(expected)
    for el in res:
        # Each element of res must consume one matching occurrence.
        assert remaining[el] > 0
        remaining[el] -= 1
    # Every expected occurrence was consumed.
    assert all(count == 0 for count in remaining.values())
from django.forms import ModelForm, HiddenInput, NumberInput
from user_stats.models import UserStats
class UserStatisticForm(ModelForm):
    """ModelForm for a single UserStats entry.

    The owning user is supplied via a hidden field; activity/period render
    with placeholder hints.
    """
    class Meta:
        model = UserStats
        fields = ('activity', 'period', 'method', 'user')
        widgets = {
            # Filled in by the view, not by the visitor.
            'user': HiddenInput,
            'activity': NumberInput(
                attrs={
                    'placeholder':'Enter time in hours'
                }),
            'period': NumberInput(
                attrs={
                    'placeholder':'Enter day number'
                })
        }
# Generated by Django 3.1.1 on 2020-10-17 05:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes the board/village primary keys explicit
    # AutoFields.  Applied migrations are conventionally left untouched.
    dependencies = [
        ('tobacco', '0005_auto_20201017_1050'),
    ]
    operations = [
        migrations.AlterField(
            model_name='board',
            name='boardid',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='village',
            name='villageid',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False),
        ),
    ]
|
import string
def countFileWords(file_path):
    """Return the number of whitespace-separated words in the file at
    *file_path*.

    The file is opened read-only and closed even if reading fails (the
    previous version never closed its handle).
    """
    with open(file_path, 'r') as f:
        return len(f.read().split())
countFileWords('C:\\Users\\emenya\\Desktop\\smile.txt') |
import asyncio
import traceback
import logging
from aiosmb import logger
from aiosmb._version import __banner__
from aiosmb.commons.connection.factory import SMBConnectionFactory
from aiosmb.dcerpc.v5.interfaces.even6 import Even6RPC
"""
Query example:
"*[System/EventID=5312]"
"""
async def amain(url, src = "Security", query = '*', max_entries = 100):
    """Connect to *url* over SMB, bind the EventLog6 RPC interface and print
    up to *max_entries* events from log source *src* matching *query*.

    Returns (True, None) on success or (False, err) on the first failure.
    aiosmb style: calls return (result, err) tuples instead of raising.
    """
    su = SMBConnectionFactory.from_url(url)
    conn = su.get_connection()
    _, err = await conn.login()
    if err is not None:
        print('Failed to connect to server! %s' % err)
        return False, err
    else:
        logger.debug('SMB Connected!')
    ei, err = await Even6RPC.from_smbconnection(conn)
    if err is not None:
        print('Error during DCE setup! %s' % err)
        return False, err
    logger.debug('DCE Connected!')
    sec_handle, err = await ei.register_query(src, query=query)
    if err is not None:
        print(err)
        return False, err
    async for res, err in ei.query_next(sec_handle, max_entries, as_xml=True):
        if err is not None:
            print(err)
            break
        try:
            print(res)
        except Exception as e:
            # Keep streaming even if a single event fails to print
            # (e.g. console encoding issues).
            print(e)
            pass
    await ei.close()
    await conn.disconnect()
    return True, None
def main():
    """CLI entry point: parse arguments and run the async event query."""
    import argparse
    parser = argparse.ArgumentParser(description='Event query example')
    parser.add_argument('-v', '--verbose', action='count', default=0)
    parser.add_argument('--src', default="Security", help = 'log source to query')
    parser.add_argument('-q', '--query', default="*", help = 'query string')
    parser.add_argument('-m', '--max_entries', type=int, default=100, help = 'max element count to retrieve')
    parser.add_argument('smb_url', help = 'Connection string that describes the authentication and target. Example: smb+ntlm-password://TEST\\Administrator:password@10.10.10.2')
    args = parser.parse_args()
    print(__banner__)
    # -v (any count >= 1) enables debug logging.
    if args.verbose >=1:
        logger.setLevel(logging.DEBUG)
    asyncio.run(amain(args.smb_url, src = args.src, query = args.query, max_entries = args.max_entries))
if __name__ == '__main__':
    main()
# Web flask library url, file upload, bootstrap, csv
import os
from flask import Flask, flash, render_template, url_for, request, redirect
from werkzeug.utils import secure_filename
from flask_bootstrap import Bootstrap
import csv
# machine learning import lib
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
import numpy as np
from keras.models import model_from_json
from tfidf import TFIDF
from random import shuffle
from keras import backend as keras
# setup dir
UPLOAD_FOLDER = '/flask/aplikasi/data'
ALLOWED_EXTENTIONS = set(['csv'])  # only CSV uploads are accepted
folder = "data/"
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
Bootstrap(app)
# Create a directory in a known location to save files to.
# uploads_dir = os.path.join(app.instance_path, 'data')
# os.makedirs(uploads_dir)
# Module-level state shared by the routes: training sentences (xdata) and
# their labels (ydata), filled in by preproses().
xdata = []
ydata = []
clasification = []
#ML function
def preproses(filepath='data/jokpra.csv'):
    """Load the training corpus from *filepath* into the module-level
    ``xdata`` (sentences) and ``ydata`` (integer labels) lists, shuffled.

    Each line is expected to be "<sentence>;<label>"; malformed lines are
    skipped.
    """
    global ydata
    global xdata
    # with-statement closes the file even on error (the old version leaked
    # the handle).
    with open(filepath) as f:
        sents = f.read().split('\n')
    shuffle(sents)
    for sent in sents:
        temp = sent.split(';')
        if len(temp) == 2:
            xdata.append(temp[0])
            ydata.append([int(temp[1])])
def getBinaryResult(x):
    """Map a model score to its sentiment label: "POSITIF" when x >= 0.5,
    otherwise "NEGATIF"."""
    if x >= 0.5:
        return "POSITIF"
    return "NEGATIF"
def testFromTrained(x):
    """Classify feature vector(s) *x* with the persisted Keras model and
    return the label string from getBinaryResult.

    The architecture is read from models/model.json and the weights from
    models/model_trainb1.h5 on every call — inefficient but stateless.
    """
    # load json and create model.  (The old dead ``model = Sequential()``
    # placeholder was removed; it was immediately overwritten, and the json
    # file handle is now closed via the with-statement.)
    with open('models/model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    # load weights into new self.model
    model.load_weights("models/model_trainb1.h5")
    # print("Loaded model from disk")
    sgd = SGD(lr=0.01)
    model.compile(loss='binary_crossentropy', optimizer=sgd)
    # NOTE(review): predict_proba was removed from recent Keras — confirm the
    # pinned Keras version still provides it.
    return getBinaryResult(model.predict_proba(np.array(x)))
def allowed_file(filename):
    """True when *filename* contains a dot and its final extension
    (lowercased) is listed in ALLOWED_EXTENTIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENTIONS
# End of ML helper functions; Flask routes follow.
@app.route('/')
def index():
    """Landing page."""
    return render_template('index.html')
@app.route('/parsing')
def parsing():
    """Classify every line of the bundled data/test.csv and render the result
    page with a pie-chart breakdown of the labels."""
    with open('data/test.csv', 'r') as csv_par:
        preproses()
        # TF-IDF transformer fitted on the training corpus loaded above.
        td = TFIDF([xdata, ydata])
        rowdata = []
        clasification = []
        csv_reader = csv_par.read().split('\n')
        for row in csv_reader:
            rowdata.append(row)
            clasification.append(testFromTrained([td.transform(row)]))
        # Release the backend session used by the per-row model loads.
        keras.clear_session()
        labels, values = np.unique(clasification, return_counts=True)
        lbls, vals = np.unique(clasification, return_counts=True)
        pie_labels = labels
        pie_values = values
        colors = ["#F7464A", "#46BFBD"]
        return render_template('hasil.html', set=zip(values, labels, colors), clasification=zip(csv_reader, clasification), legenda=zip(lbls, vals))
@app.route('/predict', methods=['POST', 'GET'])
def predict():
    """Classify the comma-separated sentences posted in the ``namequery``
    form field and render the result page.

    On a plain GET the page renders with empty results.  Previously a GET
    crashed with NameError because ``spliter`` was only bound inside the
    POST branch but referenced in the final render_template call.
    """
    preproses()
    td = TFIDF([xdata, ydata])
    clasification = []
    spliter = []
    # Receives the input query from form
    if request.method == 'POST':
        namequery = request.form['namequery']
        spliter = namequery.split(',')
        for row in spliter:
            clasification.append(testFromTrained([td.transform(row)]))
        print (clasification)
    keras.clear_session()
    labels, values = np.unique(clasification, return_counts=True)
    lbls, vals = np.unique(clasification, return_counts=True)
    pie_labels = labels
    pie_values = values
    colors = ["#F7464A", "#46BFBD"]
    return render_template('hasil.html', set=zip(values, labels, colors), clasification=zip(spliter, clasification), legenda=zip(lbls, vals))
@app.route('/coba')
def coba():
    """Render the CSV upload page."""
    return render_template('upload.html')
@app.route('/upload_file', methods=['POST', 'GET'])
def upload_file():
    """Accept an uploaded CSV, classify each of its lines and render the
    result page."""
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('Not file part')
            # return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            # Fixed: this called the undefined name ``flask(...)``
            # (a NameError at runtime) instead of ``flash(...)``.
            flash('not select file')
            # return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # return redirect(url_for('upload_file', filename=filename))
            print (filename)
            fold = "data/"+filename
            print (fold)
            with open(fold, 'r') as csv_par:
                preproses()
                td = TFIDF([xdata, ydata])
                clasification = []
                csv_reader = csv_par.read().split('\n')
                for row in csv_reader:
                    clasification.append(testFromTrained([td.transform(row)]))
                keras.clear_session()
                labels, values = np.unique(clasification, return_counts=True)
                lbls, vals = np.unique(clasification, return_counts=True)
                pie_labels = labels
                pie_values = values
                colors = ["#F7464A", "#46BFBD"]
                return render_template('hasil.html', set=zip(values, labels, colors), clasification=zip(csv_reader, clasification), legenda=zip(lbls, vals))
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
from django.contrib import admin
from usuarios.models import *
from mensajes.models import *
from salas.models import *
from usu_salas.models import *
# Register the chat models with the Django admin.  The original passed
# ``Usuarios.usu_nombre`` (a model attribute); admin.site.register() takes
# the model class itself and raises otherwise.
admin.site.register(Usuarios)
admin.site.register(Mensajes)
admin.site.register(Salas)
admin.site.register(Usu_Salas)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class DtrackingConfig(AppConfig):
    """App configuration for the dtracking (appraisal tracking) app."""
    name = 'dtracking'
    verbose_name = 'Control de Avalúos'
|
from data_preprocessing import *
import torch
# Client #2 of the federated-learning setup: holds this client's data loader,
# model, and optimizer.
client_2 = {}
# users_split comes from data_preprocessing's star import; index 1 selects
# the rows assigned to this client — TODO confirm the split convention.
n = list(users_split[1])
Load_2 = Data_division(data_file,n)
print("***********************************")
print("Splitting Data between clients")
print("***********************************")
client_2['dataset'] = torch.utils.data.DataLoader(Load_2) #reading data file from the dataset
print("***********************************")
# Start from the weights of the shared global model.
model_2 = torch.load("Global_model.pt")
class Net(nn.Module):
    """Small fully-connected classifier: 7 input features -> 6 outputs.

    NOTE(review): forward() ends in sigmoid although training below uses
    CrossEntropyLoss, which expects raw logits — confirm this is intended.
    Field names fc1/fc2/fc3 must stay as-is: they are the state_dict keys
    used by load_state_dict/save.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(7, 10)
        self.fc2 = nn.Linear(10, 10)
        self.fc3 = nn.Linear(10, 6)
    def forward(self, x):
        x = F.relu( self.fc1 (x))
        x = F.relu( self.fc2 (x))
        x = torch.sigmoid(self.fc3 (x))
        return x
torch.manual_seed(args.torch_seed)
client_2['model'] = Net()
# Initialize local weights from the global model, then fine-tune locally.
client_2['model'].load_state_dict(model_2)
client_2['optim'] = optim.SGD(client_2['model'].parameters(), lr=args.lr)
client_2['model'].train()
# CrossEntropyLoss applies log-softmax internally.
loss_func = torch.nn.CrossEntropyLoss()
for epoch in range(1, args.epochs + 1):
    for (batch_idx, batch) in enumerate(client_2['dataset']):
        X = batch['predictors'] # inputs
        Y = batch['targets']
        client_2['optim'].zero_grad()
        output = client_2['model'](X)
        loss_val = loss_func(output, Y) # avg loss in batch
        loss_val.backward()
        client_2['optim'].step()
        print('client Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            epoch, batch_idx , len(client_2['dataset']) ,
            100. * batch_idx / len(client_2['dataset']), loss_val))
# Persist this client's fine-tuned weights for the FedAvg aggregation step.
client_2_model=client_2['model']
torch.save(client_2_model.state_dict(),"fedavg_2.pt")
|
class Contact:
    """Plain container for one address-book entry (name, email, phone)."""

    def __init__(self, name, email, phone):
        self.name = name
        self.email = email
        self.phone = phone

    def print_contact(self):
        """Print this contact on one line in the Nombre/Email/Phone format."""
        print(f"Nombre: {self.name} ; Email: {self.email} ; Phone: {self.phone} ")
|
# Generated by Django 2.2.6 on 2020-01-26 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds a required ``teacher`` CharField to the three
    # semester-subject models.  Applied migrations are left untouched.
    dependencies = [
        ('Notes', '0007_auto_20200126_0044'),
    ]
    operations = [
        migrations.AddField(
            model_name='semester1subject',
            name='teacher',
            field=models.CharField(default='pratyush', max_length=30),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='semester2subject',
            name='teacher',
            field=models.CharField(default='pratyush', max_length=30),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='semester3subject',
            name='teacher',
            field=models.CharField(default='pratyush', max_length=30),
            preserve_default=False,
        ),
    ]
|
import tensorflow as tf
from time import time
from tensorflow.keras.losses import binary_crossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.metrics import AUC
from model_tf2 import DIN
from utils_tf2 import *
import os
import pickle
from tensorflow.keras.utils import plot_model
from IPython.display import Image
# Silence TF info/warning logs and expose all eight GPUs to TF.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3,4,5,6,7'
if __name__ == '__main__':
    # ========================= Hyper Parameters =======================
    embed_dim = 8
    att_hidden_units = [80, 40]
    ffn_hidden_units = [256, 128, 64]
    dnn_dropout = 0.5
    att_activation = 'sigmoid'
    ffn_activation = 'prelu'
    learning_rate = 0.001
    batch_size = 4096
    epochs = 50
    data_file_tf1 = 'raw_data/ad_dataset_lookalike_11192021_10_faezeh.2compare.sort.pkl'
    lbl_file_tf1 = 'raw_data/label_lookalike_11192021_10_faezeh.2compare.sort.pkl'
    # Convert the TF1-era pickles once and cache the result; later runs load
    # the cache (sequential pickle.load calls mirror the dump order).
    if not os.path.exists('raw_data/train_test_lookalike.pkl'):
        feature_columns, behavior_list, train, test, maxlen = tf1_to_tf2_data_conversion(data_file_tf1, lbl_file_tf1, embed_dim)
        with open('raw_data/train_test_lookalike.pkl', 'wb') as f:
            pickle.dump(feature_columns, f, pickle.HIGHEST_PROTOCOL)
            pickle.dump(behavior_list, f, pickle.HIGHEST_PROTOCOL)
            pickle.dump(train, f, pickle.HIGHEST_PROTOCOL)
            pickle.dump(test, f, pickle.HIGHEST_PROTOCOL)
    else:
        with open('raw_data/train_test_lookalike.pkl', 'rb') as f:
            feature_columns = pickle.load(f)
            behavior_list = pickle.load(f)
            train = pickle.load(f)
            test = pickle.load(f)
    train_X, train_y = train
    # val_X, val_y = val
    test_X, test_y = test
    s = time()
    # ============================Build Model==========================
    # Sequence length of the behavior feature (third model input).
    maxlen = train_X[2].shape[1]
    print(f'maxlen: {maxlen}')
    model = DIN(feature_columns, behavior_list, att_hidden_units, ffn_hidden_units, att_activation,
                ffn_activation, maxlen, dnn_dropout)
    model.summary()
    # ============================model checkpoint======================
    # check_path = 'save/din_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
    # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True,
    #                                                 verbose=1, period=5)
    # =========================Compile============================
    model.compile(loss=binary_crossentropy, optimizer=Adam(learning_rate=learning_rate),
                  metrics=[AUC()])
    # ===========================Fit==============================
    # NOTE(review): validation_data reuses the test split, so the reported
    # "val" metrics are really test metrics.
    model.fit(
        train_X,
        train_y,
        epochs=epochs,
        # callbacks=[EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)],  # checkpoint
        # validation_data=(val_X, val_y),
        validation_data=(test_X, test_y),
        batch_size=batch_size,
    )
    # ===========================Test==============================
    print('test AUC: %f' % model.evaluate(test_X, test_y, batch_size=batch_size)[1])
    print(f'elapse: {time()-s} secs')
# Print text to the console/screen.
print("Hola")
print("Mundo")
print("!!")
# The triple-quoted block below is a bare string expression: it is evaluated
# and discarded, so nothing inside it is executed or printed.
"""
Esto no se muestra por las comillas
print("Hola")
print("Mundo")
print("!!")
"""
print("!!")
#%%
print('startup')
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Softmax, Flatten
#%%
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
print(X_train.shape)
#%%
# Select 4 indeces of images of X_train to display
images = [0,1,2,3]
print('Here are some example images from the MNIST dataset:')
plt.subplot(221)
plt.title('image number: {}'.format(images[0]))
plt.imshow(X_train[images[0]])
plt.axis('off')
plt.subplot(222)
plt.title('image number: {}'.format(images[1]))
plt.imshow(X_train[images[1]])
plt.axis('off')
plt.subplot(223)
plt.title('image number: {}'.format(images[2]))
plt.imshow(X_train[images[2]])
# NOTE(review): axis('off') for subplot 223 is issued after switching to
# subplot 224 below, so the third image keeps its axes — the two lines look
# swapped.
plt.subplot(224)
plt.axis('off')
plt.title('image number: {}'.format(images[3]))
plt.imshow(X_train[images[3]])
plt.axis('off')
plt.show()
#%%
# Standardise the images: scale pixel values from [0, 255] to [0, 1].
X_test = X_test / 255.0
X_train = X_train / 255.0
num_pixels = X_train.shape[1]*X_train.shape[2]
print('number of pixels per image = {}'.format(num_pixels))
#%%
# Labels are digit class ids, so the class count is max label + 1.
num_classes = np.max(np.max(Y_train))+1
print('number of classes = {}'.format(num_classes))
#%%
# making the model
model = Sequential()
model.add(Flatten())
model.add(Dense(64, activation = 'relu'))
model.add(Dense(num_classes,activation = 'softmax'))
# compiling the model
# sparse_categorical_crossentropy: labels are integer ids, not one-hot.
model.compile(optimizer = 'adam', loss='sparse_categorical_crossentropy',
              metrics = ['accuracy'])
#%%
# training the network
model.fit(x = X_train, y = Y_train,
          batch_size= 16, epochs = 5, )
#%%
print('Evaluation with test set:')
score = model.evaluate(x = X_test, y = Y_test, batch_size=16)
#%%
print('Loss = {:4.2f}'.format(score[0]))
print('Accuracy = {:4.2f}%'.format(score[1]*100))
#%%
# This first test_print is overwritten below before it is ever used.
test_print = [8737, 883, 937, 12]
# Class prediction = argmax over the softmax probabilities.
prediction_probs = model.predict(X_test)
prediction = np.argmax(prediction_probs, axis=1)
#%%
print('Here are examples of predictions:')
test_print = [0, 1, 2, 3]
plt.subplot(221)
plt.title('Prediction: {}'.format(prediction[test_print[0]]))
plt.imshow(X_test[test_print[0]])
plt.axis('off')
plt.subplot(222)
plt.title('Prediction: {}'.format(prediction[test_print[1]]))
plt.imshow(X_test[test_print[1]])
plt.axis('off')
plt.subplot(223)
plt.title('Prediction: {}'.format(prediction[test_print[2]]))
plt.imshow(X_test[test_print[2]])
plt.axis('off')
plt.subplot(224)
plt.title('Prediction: {}'.format(prediction[test_print[3]]))
plt.imshow(X_test[test_print[3]])
plt.axis('off')
plt.show()
#%%
|
from django.urls import path, include
# v2 API URL routing; each sub-app ships its own urls module.
urlpatterns = [
    # path('tst/', include('backend.api.v2.tst.urls')),
    path('forum/', include('backend.api.v2.forum.urls')),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
class UserWithProfile(AbstractUser):
    """Custom user model that attaches a UserProfile on first save."""
    def save(self, *args, **kwargs):
        # Only on the initial save (no primary key yet).
        if not self.pk:
            # NOTE(review): the profile is assigned but never .save()d here,
            # and it references a not-yet-saved user — confirm the profile row
            # is actually persisted somewhere (e.g. a post_save signal).
            self.profile = UserProfile(user=self)
        super(UserWithProfile, self).save(*args, **kwargs)
class UserProfile(models.Model):
    """Per-user profile data, one-to-one with the configured user model."""
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        related_name="profile",
        on_delete=models.CASCADE,
    )
|
#!/usr/bin/env python3
import unittest
from src.main.python.main import main
class TestStringMethods(unittest.TestCase):
    """Sanity checks for main()'s return value."""
    def test_main_output_value(self):
        self.assertEqual(main(), "Hello from main!")
    def test_main_output_type(self):
        self.assertIsInstance(main(), str)
if __name__ == '__main__':
    unittest.main()
|
# Generated by Django 3.2.6 on 2021-09-08 02:44
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: strips the ``_id``/``id_`` prefixes from FK field
    # names.  Applied migrations are left untouched.
    dependencies = [
        ('courses', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='course',
            old_name='teacher_id',
            new_name='teacher',
        ),
        migrations.RenameField(
            model_name='groupchat',
            old_name='id_course',
            new_name='course',
        ),
        migrations.RenameField(
            model_name='message',
            old_name='id_user',
            new_name='user',
        ),
        migrations.RenameField(
            model_name='teacher',
            old_name='user_id',
            new_name='user',
        ),
    ]
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api.urlfetch import fetch
from django.utils import simplejson
class MainHandler(webapp.RequestHandler):
    """Root handler; this service has no browsable front page."""
    def get(self):
        self.response.out.write('go away');
class ExpandHandler(webapp.RequestHandler):
    """Resolve a short URL to its redirect target via a HEAD request."""
    def __bail(self):
        # Uniform 500 response for any failure mode.
        self.response.status_code = 500
        self.response.out.write('Something went wrong')
    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        short_url = self.request.get("shortUrl")
        if not short_url:
            # Fixed: ``self.__bail`` was referenced without calling it, so
            # the error response was never written.
            self.__bail()
            return
        # HEAD with redirects disabled: we only want the Location header.
        response = fetch(short_url, method='HEAD', follow_redirects=False)
        # NOTE(review): indexing headers['Location'] may raise if a 301 ever
        # arrives without the header — consider headers.get('Location').
        if response.status_code != 301 or not response.headers['Location']:
            self.__bail()
            return
        self.response.out.write(response.headers['Location'])
def main():
    """Wire up the WSGI application and hand it to the App Engine runtime."""
    application = webapp.WSGIApplication([('/', MainHandler), ('/expand_shorturl', ExpandHandler)],
                                         debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
|
from esl.agent import Agent
from esl.interaction.header import MessageCode
from esl.interaction.message import Message
from esl.simulation.identity import Identity
from esl.simulation.time import TimePoint
class QuoteMessage(Message):
    """Message subclass for price quotes; adds no fields of its own and
    simply forwards construction to Message."""
    def __init__(self, sender: Identity[Agent], recipient: Identity[Agent], sent: TimePoint = TimePoint(),
                 received: TimePoint = TimePoint(), type: MessageCode = 0):
        # NOTE(review): the default TimePoint() instances are created once at
        # definition time and shared across calls — confirm TimePoint is
        # immutable.
        super().__init__(sender, recipient, sent, received, type)
import urllib
import requests
from app import consts
def vk_get_name(user_id, access_token):
    """Fetch a VK user's display name and small avatar.

    Returns {'vk_name': ..., 'vk_photo': ...} (values may be None), or {}
    when the user id is empty or the API call yields nothing.
    """
    if not user_id:
        return {}
    response = vk_method(
        'users.get',
        {'user_ids': str(user_id), 'fields': 'photo_50'},
        access_token,
    )
    if not response or not response[0]:
        return {}
    info = response[0]
    name_parts = [part for part in (info.get('first_name'), info.get('last_name')) if part]
    return {
        'vk_name': ' '.join(name_parts) or None,
        'vk_photo': info.get('photo_50') or None,
    }
def vk_get_friends(access_token):
    """Return the caller's VK friend ids as a list of ints ([] on failure)."""
    response = vk_method('friends.get', {}, access_token)
    if not response or not response.get('items'):
        return []
    return [int(friend_id) for friend_id in response['items']]
def vk_method(method_name, params, access_token):
    """Call a VK API method and return its 'response' payload, or None on
    any failure (missing token, HTTP error, empty body, API error reply).

    NOTE: mutates the caller's *params* dict (adds 'v' and 'access_token').
    """
    if not access_token:
        return None
    base_url = 'https://api.vk.com/method/'
    params['v'] = '5.103'
    params['access_token'] = access_token
    url = '{}{}?{}'.format(
        base_url,
        method_name,
        urllib.parse.urlencode(params)
    )
    response = requests.get(url)
    if not response.ok:
        return None
    data = response.json()
    if not data:
        return None
    # VK error replies carry an 'error' key instead of 'response'; .get()
    # avoids the KeyError that the old ``data['response']`` raised on them.
    return data.get('response') or None
def vk_auth(vk_code):
    """Exchange an OAuth *vk_code* for an access token.

    Returns the parsed token JSON, or None when the HTTP request fails.
    """
    params = {
        'client_id': consts.VK_APP_ID,
        'client_secret': consts.VK_SECRET,
        # Must match the redirect_uri used when the code was issued.
        'redirect_uri': 'http://{domen}{root}'.format(
            domen=consts.SPORT_DOMEN,
            root=('/' + consts.SPORT_ROOT_PATH if consts.SPORT_ROOT_PATH else '')
        ),
        'code': vk_code
    }
    url = 'https://oauth.vk.com/access_token?{}'.format(urllib.parse.urlencode(params))
    response = requests.get(url)
    if not response.ok:
        return
    return response.json()
|
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
import numpy as np
import tkinter as tk
from tkinter import ttk
# IMPORTS
import serial
import csv
import time
# Shared UI fonts and plot style.
HEADLINE= ("Verdana", 30)
LARGE_FONT = ("Verdana", 20)
style.use("ggplot")
# LOOP for continuous running
def animate(i):
    """FuncAnimation callback: poll the Arduino for a CSV line of readings,
    append them to the global series, and redraw the plot."""
    global t, T1, T2
    if measureOn:
        # #Brownian motion for testing
        #alpha = 0.1
        # t.append(t[-1]+1)
        # T1.append(T1[-1]+np.random.standard_normal()-alpha*(T1[-1]-getRefTemp()))
        # refTempList.append(getRefTemp())
        # updateGraph()
        if(arduinoSerial.inWaiting()>0):
            # # READ DATA FROM ARDUINO TO LIST OF FLOATS
            # # TODO: Check length of read line, if wrong throw error
            dataRaw = arduinoSerial.readline().decode().strip('\r\n')
            print(dataRaw)
            dataList = dataRaw.split(",")
            dataNum = [float(i) for i in dataList]
            # # STORE TIMESTAMP AND TEMPERATURES
            # The time axis advances one tick per received sample; the
            # Arduino's own timestamp (dataNum[0]) is ignored.
            t.append(t[-1]+1)
            # t.append(dataNum[0]/1000)
            T1.append(dataNum[1])
            T2.append(dataNum[2])
            refTempList.append(getRefTemp())
            updateGraph()
def getRefTemp():
    """Reference temperature for the current brew step."""
    return refTemp[brewStep]
# Manually setting reference temperature
def setRefTemp(newSetpoint):
    """Replace the reference-temperature table.

    NOTE(review): getRefTemp() indexes refTemp by brewStep, so passing a
    scalar here would break it — confirm callers pass a list.
    """
    global refTemp
    refTemp = newSetpoint
def getReceptStr(i):
    """Return line *i* of the recipe text shown on the Recipes page.

    Only the first four lines are populated (Christmas edition); the
    remaining three slots are empty strings.
    """
    lines = [
        "GLÖGG",
        "Heat with caution to 77\u00b0C",
        "Ethanol boils at 78\u00b0C",
        "Stop heating when a light smoke rises",
        "",
        "",
        "",
    ]
    return lines[i]
def getTimer():
    """Return the three step countdowns as "M:SS" strings.

    NOTE(review): the math mixes global elapsed time t[-1] with per-step
    offsets, and the local ``timer`` is unused — treat the values as
    approximate until verified against a real run.
    """
    timer = timers[brewStep]
    timeStr=['','','']
    for i in range(len(timers)):
        Seconds = 60-(t[-1]-timeInStep[i-1])%60
        if Seconds == 60:
            Seconds = "00"
        totMinPassed = np.floor(t[-1]/60+timeInStep[i])
        Minutes = int(timers[i] - totMinPassed)-1
        timeStr[i] = str(Minutes)+":"+str(Seconds)
    return timeStr
def updateGraph():
    """Redraw the temperature plot: sliding window of minWidth samples plus
    live T1/T2 readouts and the current step timer."""
    minWidth = 120
    a.clear()
    a.set_ylim(0,110)
    if t[-1]<minWidth:
        a.set_xlim(0,t[-1]+10)
        a.text(0,110,"T1: " + str(np.round_(T1[-1])),fontsize=16)
        a.text(0,115,"T2: " + str(np.round_(T2[-1])),fontsize=16)
        a.text(0,120,"Timer: " + getTimer()[brewStep],fontsize=16)
    else:
        a.set_xlim(t[-1]-minWidth,t[-1]+10)
        a.text(t[-1]-minWidth,110,"T1: " + str(np.round_(T1[-1])),fontsize=16)
        a.text(t[-1]-minWidth,115,"T2: " + str(np.round_(T2[-1])),fontsize=16)
        a.text(t[-1]-minWidth,120,"Timer: " + getTimer()[brewStep],fontsize=16)
    a.plot(t,T1,'r')
    a.plot(t,T2,'b')
    a.plot(t,refTempList,'k')
    # NOTE(review): set_axis_bgcolor was removed in newer Matplotlib (now
    # set_facecolor) — confirm the pinned version still provides it.
    a.set_axis_bgcolor('white')
def resetGraph():
    """Reset history and the brew step, keeping the latest temperature
    samples as the new starting points."""
    global t, T1, T2, refTempList, brewStep
    t = [0]
    T1 = [T1[-1]]
    T2 = [T2[-1]]
    brewStep = 0
    refTempList = [getRefTemp()]
    a.clear()
def switchMeasureMode():
    """Toggle data acquisition on/off (bound to the Pause button)."""
    global measureOn
    measureOn = not measureOn
def next():
    """Advance to the next brew step, wrapping to step 0 after the last.

    NOTE: shadows the builtin next(); kept because the Next button binds to
    this name.
    """
    global brewStep, timeInStep
    if brewStep == len(refTemp)-1:
        timeInStep = [0,0,0]
        brewStep = 0
        print('Brewsteps finished')
    else:
        timeInStep[brewStep] = t[-1]
        brewStep += 1
def writeToFile():
    """Dump the (time, T1) series to ./Brewdata/ESB<yymmdd>.csv."""
    writeTempList = [t, T1]
    writeTempList = list(map(list, zip(*writeTempList))) #Transpose list for readability
    date = (time.strftime("%y%m%d"))
    # newline='' is required when handing a text file to the csv module;
    # without it every row is followed by a blank line on Windows.
    with open("./Brewdata/ESB"+date+".csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(writeTempList)
class BryggansApp(tk.Tk):
    """Main Tk application: holds the StartPage (live plot and controls) and
    the Recipes page side by side."""
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        #tk.Tk.iconbitmap(self, default="clienticon.ico")
        tk.Tk.wm_title(self, "Bryggans Bryggeri")
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand = True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        self.frames = {}
        # for F in (StartPage, Recipes):
        #     frame = F(container, self)
        #     self.frames[F] = frame
        #     frame.grid(row=0, column=0, sticky="nsew")
        # The two pages occupy different grid columns, so both are visible
        # at once rather than stacked.
        frame = StartPage(container, self)
        self.frames[StartPage] = frame
        frame.grid(row=0, column=1, sticky="nswe")
        frame.configure(bg='white')
        self.show_frame(StartPage)
        frame = Recipes(container, self)
        self.frames[Recipes] = frame
        frame.grid(row=0, column=0, sticky="nswe")
        self.show_frame(Recipes)
    def show_frame(self, cont):
        """Raise the page registered under key *cont*."""
        frame = self.frames[cont]
        frame.tkraise()
class StartPage(tk.Frame):
    """Control page: Reset/Pause/Next/Save buttons plus the embedded
    Matplotlib canvas with the live temperature plot."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="Bryggning", font=LARGE_FONT)
        label.pack(side="top")
        resetButton = ttk.Button(self, text="Reset",command=resetGraph)
        resetButton.pack(side="top", anchor="w", fill="x")
        toggleMeasureButton = ttk.Button(self, text="Pause/Unpuase", command=switchMeasureMode)
        toggleMeasureButton.pack(side="top", anchor="w",fill="x")
        nextButton = ttk.Button(self, text="Next",command=next)
        nextButton.pack(side="top", anchor="w",fill="x")
        # NOTE(review): the Save button reuses the name nextButton.
        nextButton = ttk.Button(self, text="Save",command=writeToFile)
        nextButton.pack(side="top", anchor="w",fill="x")
        canvas = FigureCanvasTkAgg(f, self)
        # NOTE(review): canvas.show() was removed in newer Matplotlib (use
        # draw()) — confirm the pinned version.
        canvas.show()
        # canvas.get_tk_widget().gre
        canvas.get_tk_widget().pack(side=tk.LEFT,fill=tk.BOTH, expand=True)
        # toolbar = NavigationToolbar2TkAgg(canvas, self)
        # toolbar.update()
        # canvas._tkcanvas.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
class Recipes(tk.Frame):
    """Static page: the recipe text lines (Christmas edition) plus a
    decorative image on a borderless button."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # for i in range(6):
        #     if i==0:
        #         alignment = 'center'
        #         label = tk.Label(self, text=getReceptStr(i), font=HEADLINE,anchor=alignment)
        #         label.pack(pady=10,padx=10)
        #     else:
        #         alignment = 'w'
        #         label = tk.Label(self, text=getReceptStr(i), font=LARGE_FONT,anchor=alignment)
        #         label.pack(pady=10,padx=10)
        # # Christmas Edition
        for i in range(6):
            if i==0:
                # First line is the headline, centered.
                alignment = 'center'
                label = tk.Label(self, text=getReceptStr(i), font=HEADLINE,anchor=alignment)
                label.pack(pady=10,padx=10)
            else:
                alignment = 'w'
                label = tk.Label(self, text=getReceptStr(i), font=LARGE_FONT,anchor=alignment)
                label.pack(pady=10,padx=10)
        # Keep a reference on the widget so the PhotoImage is not garbage
        # collected.
        myButton = tk.Button(self,borderwidth=0)
        photoimage = tk.PhotoImage(file="Graphics/christmas1.png")
        photoimage.zoom(1)
        myButton.image = photoimage
        myButton.configure(image=photoimage)
        myButton.pack(pady=100,padx=100)
        # christmasCanvas = tk.Canvas(self)
        # christmas_image = tk.PhotoImage(file ="Graphics/christmas.png")
        # #Resizing
        # christmas_image = christmas_image.subsample(1, 1) #See below for more:
        # #Shrinks the image by a factor of 2 effectively
        # christmasCanvas.create_image(0, 0, image = christmas_image, anchor = "nw")
        # self.christmas_image = christmas_image
        # christmasCanvas.pack()
# # Configure arduino port communication. Make sure port adress is correct
#arduinoSerial = serial.Serial('/dev/ttyACM0',9600) #CHECK!
# Shared plotting state and brew-session globals used by the callbacks above.
f = Figure()
a = f.add_subplot(111)
t = [0]                  # sample timestamps (one tick per reading)
T1 = [15]                # temperature sensor 1 history
T2 = [50]                # temperature sensor 2 history
refTemp = [75,80,100]    # setpoint per brew step
timers = [60,10,60]      # minutes per brew step
timeInStep = [0,0,0]
brewStep = 0
refTempList = [getRefTemp()]
measureOn = False
beige = '#CABBA0'
green = '#008200'
red = '#FF0000'
# print(dir(a))
# TODO Database recipes, getLiters.
app = BryggansApp()
app.geometry("1280x780")
app.tk_setPalette(background='white', foreground=red,
                  activeBackground='white', activeForeground=red)
# Keep a module-level reference to the animation so it is not garbage
# collected while the main loop runs.
ani = animation.FuncAnimation(f, animate, interval=1000)
app.mainloop()
# "Recept: Epo IPA \n 1. Värm vatten till " + str(refTemp[0]) +"\u00b0C \n 2. Mäska i " + str(timers[0]) + "min \n 3. Värm till " + str(refTemp[1]) +"\u00b0C\n 4. Mäska i " + str(timers[1]) + " min \n 5. Värm till " + str(refTemp[2]) +"\u00b0C \n 6. Koka i " + str(timers[2]) +" min"
|
# -*- coding: utf-8 -*-
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
# Importing the dataset: rows from index 219 onward, first column is the
# predictor (year), second column is the target (CO2 production).
dataset = pd.read_csv('global_co2.csv')
X = dataset.iloc[219:, :1].values
Y = dataset.iloc[219:, 1:2].values

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 1/3, random_state = 0)

# Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, Y_train)

# Predicting the Test set results
Y_pred = regressor.predict(X_test)

# Fitting Polynomial Regression (degree 6) to the whole dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 6)
X_poly = poly_reg.fit_transform(X)
poly_reg.fit(X_poly, Y)  # redundant: fit_transform above already fitted poly_reg
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, Y)

# Visualising the Training set to see the relation between X and Y
plt.scatter(X_train, Y_train, color = 'pink')
plt.plot(X_train, regressor.predict(X_train), color = 'black')
plt.title(' CO2 production from 1970s (Training set)')
plt.xlabel('Years ')
plt.ylabel('CO2 Production')
plt.show()

# Visualising the Test set results
plt.scatter(X_test, Y_test, color = 'brown')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
# BUG FIX: this chart shows the Test set but was titled "(Training set)".
plt.title(' CO2 production from 1970s (Test set)')
plt.xlabel('Years ')
plt.ylabel('CO2 Production')
plt.show()

# Since the data is non-linear, Polynomial regression is applied.
# Visualising the Polynomial Regression results
plt.scatter(X, Y, color = 'orange')
plt.plot(X, lin_reg_2.predict(poly_reg.fit_transform(X)), color = 'blue')
plt.title('CO2 production from 1970s')
plt.xlabel('Years')
plt.ylabel('CO2 Production')
plt.show()

# NOTE(review): the forecasts below use the simple linear model even though
# the comment above argues the data is non-linear; consider
# lin_reg_2.predict(poly_reg.transform([[year]])) instead.
print('CO2 production in 2011 is')
print(regressor.predict([[2011]]))
print('CO2 production in 2012 is')
print(regressor.predict([[2012]]))
print('CO2 production in 2013 is')
print(regressor.predict([[2013]]))

################# checking accuracy
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(Y_test, Y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(Y_test, Y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(Y_test, Y_pred)))
|
import os.path
from pathlib import Path
from qtpy import QtCore, QtWidgets
from qtpy.QtCore import Qt
class FilenameModel(QtCore.QStringListModel):
    """
    Model used by QCompleter for file name completions.
    Constructor options:
    `filter_` (None, 'dirs') - include all entries or folders only
    `fs_engine` ('qt', 'pathlib') - enumerate files using `QDir` or `pathlib`
    `icon_provider` (func, 'internal', None) - a function which gets path
    and returns QIcon
    """

    def __init__(self, filter_=None, fs_engine='qt', icon_provider='internal'):
        super().__init__()
        self.current_path = None
        self.fs_engine = fs_engine
        self.filter = filter_
        if icon_provider != 'internal':
            self.icon_provider = icon_provider
        else:
            # Default to Qt's built-in per-file icons.
            self.icons = QtWidgets.QFileIconProvider()
            self.icon_provider = self.get_icon

    def data(self, index, role):
        """Return the short name or icon of a file entry."""
        value = super().data(index, role)
        if role == Qt.DecorationRole and self.icon_provider:
            # self.setData(index, dat, role)
            full_path = super().data(index, Qt.DisplayRole)
            return self.icon_provider(full_path)
        if role == Qt.DisplayRole:
            return Path(value).name
        return value

    def get_icon(self, path):
        """Internal icon provider backed by QFileIconProvider."""
        return self.icons.icon(QtCore.QFileInfo(path))

    def get_file_list(self, path):
        """List entries in `path` directory (None for an unknown engine)."""
        if self.fs_engine == 'pathlib':
            entries = [entry for entry in path.iterdir()
                       if self.filter != 'dirs' or entry.is_dir()]
            return self.sort_paths(entries)
        if self.fs_engine == 'qt':
            qdir = QtCore.QDir(str(path))
            kind = qdir.Dirs if self.filter == 'dirs' else qdir.AllEntries
            qdir.setFilter(qdir.NoDotAndDotDot | qdir.Hidden | kind)
            names = qdir.entryList(sort=QtCore.QDir.DirsFirst |
                                   QtCore.QDir.LocaleAware)
            return [str(path / name) for name in names]
        return None

    @staticmethod
    def sort_paths(paths):
        "Windows-Explorer-like filename sorting (for 'pathlib' engine)"
        dirs = [str(p) for p in paths if p.is_dir()]
        files = [str(p) for p in paths if not p.is_dir()]
        return sorted(dirs, key=str.lower) + sorted(files, key=str.lower)

    def setPathPrefix(self, prefix):
        """Refresh the completion list for the directory part of `prefix`."""
        path = Path(prefix)
        if not prefix.endswith(os.path.sep):
            path = path.parent
        if path == self.current_path:
            return  # already listed
        if not path.exists():
            return  # wrong path
        self.setStringList(self.get_file_list(path))
        self.current_path = path
class MenuListView(QtWidgets.QMenu):
    """
    QMenu with QListView.
    Supports `activated`, `clicked`, `setModel`.
    """
    # Cap on visible rows before the list starts scrolling.
    max_visible_items = 16

    def __init__(self, parent=None):
        super().__init__(parent)
        self.listview = lv = QtWidgets.QListView()
        lv.setFrameShape(lv.NoFrame)
        lv.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        # Blend the list background into the menu background.
        pal = lv.palette()
        pal.setColor(pal.Base, self.palette().color(pal.Window))
        lv.setPalette(pal)
        # Embed the list view in the menu via a QWidgetAction.
        act_wgt = QtWidgets.QWidgetAction(self)
        act_wgt.setDefaultWidget(lv)
        self.addAction(act_wgt)
        # Re-export the view's signals/slots on the menu object itself.
        self.activated = lv.activated
        self.clicked = lv.clicked
        self.setModel = lv.setModel
        # Monkey-patch the view so this menu controls sizing and input.
        lv.sizeHint = self.size_hint
        lv.minimumSizeHint = self.size_hint
        lv.mousePressEvent = self.mouse_press_event
        lv.mouseMoveEvent = self.update_current_index
        lv.setMouseTracking(True)  # receive mouse move events
        lv.leaveEvent = self.clear_selection
        lv.mouseReleaseEvent = self.mouse_release_event
        lv.keyPressEvent = self.key_press_event
        lv.setFocusPolicy(Qt.NoFocus)  # no focus rect
        lv.setFocus()
        self.last_index = QtCore.QModelIndex()  # selected index
        self.flag_mouse_l_pressed = False

    def key_press_event(self, event):
        """Keyboard handling: Enter activates, Esc closes, Up/Down move the
        selection with wrap-around at the ends."""
        key = event.key()
        if key in (Qt.Key_Return, Qt.Key_Enter):
            if self.last_index.isValid():
                self.activated.emit(self.last_index)
            self.close()
        elif key == Qt.Key_Escape:
            self.close()
        elif key in (Qt.Key_Down, Qt.Key_Up):
            model = self.listview.model()
            row_from, row_to = 0, model.rowCount()-1
            if key == Qt.Key_Down:
                row_from, row_to = row_to, row_from
            if self.last_index.row() in (-1, row_from):  # no index=-1
                index = model.index(row_to, 0)
            else:
                shift = 1 if key == Qt.Key_Down else -1
                index = model.index(self.last_index.row()+shift, 0)
            self.listview.setCurrentIndex(index)
            self.last_index = index

    def update_current_index(self, event):
        """Track and select the item currently under the mouse cursor."""
        self.last_index = self.listview.indexAt(event.pos())
        self.listview.setCurrentIndex(self.last_index)

    def clear_selection(self, event=None):
        """Drop selection when the pointer leaves the list."""
        self.listview.clearSelection()
        # selectionModel().clear() leaves selected item in Fusion theme
        self.listview.setCurrentIndex(QtCore.QModelIndex())
        self.last_index = QtCore.QModelIndex()

    def mouse_press_event(self, event):
        """Remember that a left press started inside the widget (checked in
        mouse_release_event) and move the selection there."""
        if event.button() == Qt.LeftButton:
            self.flag_mouse_l_pressed = True
            self.update_current_index(event)

    def mouse_release_event(self, event):
        """
        When item is clicked w/ left mouse button close menu, emit `clicked`.
        Check if there was left button press event inside this widget.
        """
        if event.button() == Qt.LeftButton and self.flag_mouse_l_pressed:
            self.flag_mouse_l_pressed = False
            if self.last_index.isValid():
                self.clicked.emit(self.last_index)
            self.close()

    def size_hint(self):
        """Size the popup to its widest row, at most max_visible_items rows."""
        lv = self.listview
        width = lv.sizeHintForColumn(0)
        width += lv.verticalScrollBar().sizeHint().width()
        if isinstance(self.parent(), QtWidgets.QToolButton):
            width = max(width, self.parent().width())
        visible_rows = min(self.max_visible_items, lv.model().rowCount())
        return QtCore.QSize(width, visible_rows * lv.sizeHintForRow(0))
|
from requests import Session
import requests
import io
# Browser-like headers so the NSE server accepts the request.
headers = {'Accept': '*/*',
           'Accept-Encoding': 'gzip, deflate, sdch, br',
           'Accept-Language': 'en-GB,en-US;q=0.8,en;q=0.6',
           'Connection': 'keep-alive',
           'Host': 'www1.nseindia.com',
           'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
           'X-Requested-With': 'XMLHttpRequest'}

webSession = requests.Session()
webSession.headers.update(headers)

import zipfile, urllib.request, shutil

# Download the daily bhavcopy archive; retry until the payload is a valid
# ZIP file, then extract it into the working directory.
url = 'https://www1.nseindia.com/archives/equities/bhavcopy/pr/PR200520.zip'
response = webSession.request('GET', url, stream=True)
while not zipfile.is_zipfile(io.BytesIO(response.content)):
    response = requests.get(url, stream=True)
archive = zipfile.ZipFile(io.BytesIO(response.content))
archive.extractall()
"""Mymood URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Mymood.controler import *
from django.conf.urls import include, url
urlpatterns = [
    # Legacy regex-based routes, kept for reference:
    # url(r'^admin/$', admin.site.urls, name='admin'),
    # url(r'^$', mood),
    # url(r'^sign_in/$', query_happiness),
    # url(r'^redirect_sign_up/$', redirect_sign_up),
    # url(r'^sign_up/$', sign_up),
    # url(r'^select_emoji/(.+)/$', select_emoji, name='select_emoji'),
    # url(r'^submit_emoji/$', submit_emoji, name='submit_emoji'),
    # url(r'^query_happiness/$', query_happiness),
    # Admin site and landing page.
    path('admin/', admin.site.urls),
    path('', mood),
    # Sign-in / registration flow.
    path('sign_in/', sign_in),
    path('redirect_sign_up/', redirect_sign_up),
    path('sign_up/', sign_up),
    # Mood submission.
    path('select_emoji/<int:user_id>/', select_emoji),
    path('submit_emoji/', submit_emoji),
    # Messenger registration.
    path('register_messenger/<int:user_id>/', register_messenger),
    path('submit_register/', submit_register),
    # Happiness reporting / export.
    path('query_happiness/', query_happiness),
    path('refresh_happiness/', refresh_happiness),
    path('export_csv/', export_csv),
    # Team membership management.
    path('jump_members_in_teams/', jump_members_in_teams),
    path('query_members_in_teams/', query_members_in_teams),
    path('switch_members/', switch_members),
    path('create_teams/', create_teams),
    path('check_team_name/', check_team_name),
    # Events.
    path('jump_events/', jump_events),
    path('save_event/', save_event),
    path('query_events/', query_events),
    path('query_teams/', query_teams),
    # Messenger webhook.
    path('get_webhook/<int:psid>/', get_webhook),
    # NOTE(review): no path converter and no trailing slash on
    # <happiness_level>, unlike every other route — confirm intentional.
    path('save_happiness_level/<happiness_level>', save_happiness_level),
    path('success/', success),
    # Team CRUD.
    path('jump_teams/', jump_teams),
    path('load_teams/', load_teams),
    path('get_team/', get_team),
    path('delete_team/', delete_team),
    # Event CRUD.
    path('get_event/', get_event),
    path('delete_event/', delete_event),
    path('event_detail/', event_detail),
    # path('query_happiness/', include('models_app.urls'))
]
# urlpatterns += staticfiles_urlpatterns()
|
#!/usr/bin/python
# -*- coding=utf-8 -*-
#************************************************************************
# $Id: conjugate.py,v 0.7 2009/06/02 01:10:00 Taha Zerrouki $
#
# ------------
# Description:
# ------------
# Copyright (c) 2009, Arabtechies, Arabeyes Taha Zerrouki
#
# This file is the main file to execute the application in the command line
#
# -----------------
# Revision Details: (Updated by Revision Control System)
# -----------------
# $Date: 2009/06/02 01:10:00 $
# $Author: Taha Zerrouki $
# $Revision: 0.7 $
# $Source: arabtechies.sourceforge.net
#
#***********************************************************************/
from __future__ import print_function
from .verb_const import *
# from ar_ctype import *
from .classverb import *
from .mosaref_main import *
import sys,re,string
import sys, getopt, os
# Script identity (derived from argv[0]) and metadata shown by usage() / -V.
scriptname = os.path.splitext(os.path.basename(sys.argv[0]))[0]
scriptversion = '0.1'
AuthorName="Taha Zerrouki"
def usage():
    """Print the banner, the usage line and the option summary."""
    help_lines = (
        "(C) CopyLeft 2009, %s" % AuthorName,
        "Usage: %s -f filename [OPTIONS]" % scriptname,
        "\t[-h | --help]\t\toutputs this usage message",
        "\t[-V | --version]\tprogram version",
        "\t[-f | --file= filename]\tinput file to %s" % scriptname,
        "\t[-d | --display= format]\t display format as html,csv, tex, xml",
        "\t[-a | --all ]\t\tConjugate in all tenses",
        "\t[-i | --imperative]\tConjugate in imperative",
        "\t[-F | --future]\t\tconjugate in the present and the future",
        "\t[-p | --past]\t\tconjugate in the past",
        "\t[-c | --confirmed]\t\tconjugate in confirmed ( future or imperative) ",
        "\t[-m | --moode]\t\tconjugate in future Subjunctive( mansoub) or Jussive (majzoom) ",
        "\t[-v | --passive]\tpassive form",
        "\r\nN.B. FILE FORMAT is descripted in README",
        "\r\nThis program is licensed under the GPL License\n",
    )
    for line in help_lines:
        print(line)
def grabargs():
    """Parse the command-line options.

    Returns the tuple
    (fname, all, future, past, passive, imperative, confirmed,
     future_moode, display_format) where `fname` is the input file,
    `display_format` defaults to 'csv' (upper-cased when given via -d),
    and the remaining entries are booleans.
    Prints usage and exits when no arguments or invalid options are given.
    """
    all_tenses = False   # was named `all`, shadowing the builtin
    future = False
    past = False
    passive = False
    imperative = False
    confirmed = False
    future_moode = False
    fname = ''
    display_format = 'csv'
    if not sys.argv[1:]:
        usage()
        sys.exit(0)
    try:
        # BUG FIX: the option string used to contain "i" twice — once as a
        # plain flag and once as "i:" (requiring an argument). "-i" is a
        # plain flag, so the stray "i:" has been removed.
        opts, args = getopt.getopt(
            sys.argv[1:], "hVvcmaiFpd:f:",
            ["help", "version", "imperative", "passive", "confirmed",
             "moode", "past", "all", "future", "file=", "display="])
    except getopt.GetoptError:
        usage()
        sys.exit(0)
    for o, val in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit(0)
        if o in ("-V", "--version"):
            print(scriptversion)
            sys.exit(0)
        if o in ("-v", "--passive"):
            passive = True
        if o in ("-f", "--file"):
            fname = val
        if o in ("-d", "--display"):
            display_format = val.upper()
        if o in ("-F", "--future"):
            future = True
        if o in ("-a", "--all"):
            all_tenses = True
        if o in ("-p", "--past"):
            past = True
        if o in ("-i", "--imperative"):
            imperative = True
        if o in ("-c", "--confirmed"):
            confirmed = True
        if o in ("-m", "--moode"):
            future_moode = True
    return (fname, all_tenses, future, past, passive, imperative,
            confirmed, future_moode, display_format)
def main():
    """Read a tab-separated verb file and print conjugation tables.

    NOTE(review): this body is Python-2 era — it calls .decode() on the
    result of readline() and prints .encode()d bytes; under Python 3 the
    decode calls fail on str. Confirm the target interpreter.
    """
    filename,all,future,past,passive,imperative,confirmed,future_moode,display_format= grabargs()
    try:
        fl=open(filename);
    except:
        print(" Error :No such file or directory: %s" % filename)
        sys.exit(0)
    print(filename,all,future,past,passive,imperative,confirmed,future_moode)
    # First line decoded with the "utf" alias (same codec as "utf8").
    line=fl.readline().decode("utf");
    text=u""
    verb_table=[];
    # Minimum number of tab-separated fields per usable entry.
    nb_field=2;
    while line :
        # Lines starting with '#' are comments in the input file.
        if not line.startswith("#"):
            text=text+" "+chomp(line)
            liste=line.split("\t");
            if len(liste)>=nb_field:
                verb_table.append(liste);
        line=fl.readline().decode("utf8");
    fl.close();
    for tuple_verb in verb_table:
        # Field 0: the verb; skip entries that are not valid infinitives.
        word=tuple_verb[0];
        if not is_valid_infinitive_verb(word):
            print(u"is invalid verb ", end=' ')
            print(word.encode("utf8"))
        else:
            # Field 1: future-vowel mark; field 2: transitivity flag.
            future_type=u""+tuple_verb[1];
            future_type=get_future_type_entree(future_type);
            transitive=u""+tuple_verb[2];
            # Accept both Arabic and Latin spellings of "transitive".
            if transitive in (u"متعدي",u"م",u"مشترك",u"ك","t","transitive"):
                transitive=True;
            else :
                transitive=False;
            text=do_sarf(word,future_type,all,past,future,passive,imperative,future_moode,confirmed,transitive,display_format);
            print(text.encode("utf8"))


if __name__ == "__main__":
    main()
|
import common

# Accumulates one JSON snippet per benchmarked program.
result_file = open("results.json", "a")


def _run_oh_benchmark(print_label, snippet_label, protect_cmd,
                      baseline_cmd, protected_cmd,
                      baseline_binary, protected_binary):
    """Protect one program with oblivious hashing (OH) and record its
    protection time plus runtime/size/memory overheads in results.json.

    The original script repeated this sequence verbatim for every program;
    it is factored out here with identical commands and identical output.
    """
    protection_time = common.measure_protection_time(protect_cmd)
    print('OH ' + print_label + ' protection time ' + str(protection_time))
    runtime_overhead = common.measure_runtime_overhead(baseline_cmd, protected_cmd)
    print('runtime overhead ' + str(runtime_overhead) + '%')
    size_overhead = common.measure_binary_overhead(baseline_binary, protected_binary)
    print('size overhead ' + str(size_overhead) + '%')
    memory_overhead = common.measure_memory_overhead(baseline_cmd, protected_cmd)
    print('memory overhead ' + str(memory_overhead) + '%')
    snippet = common.create_snippet(snippet_label, 'OH', protection_time,
                                    runtime_overhead, memory_overhead,
                                    size_overhead)
    result_file.write(snippet)


# micro-snake (interactive: driven through the pty pipe helper)
_run_oh_benchmark(
    'snake', 'micro-snake',
    ["../introspection-oblivious-hashing/run-oh.sh", "inputs/snake.bc", "inputs/micro-snake.in"],
    ["python", "inputs/ptypipe.py", "inputs/micro-snake.in", "input_programs/snake"],
    ["python", "inputs/ptypipe.py", "inputs/micro-snake.in", "./out"],
    "input_programs/snake", "./out")

# c-snake
_run_oh_benchmark(
    'csnake', 'c-snake',
    ["../introspection-oblivious-hashing/run-oh.sh", "inputs/csnake.bc", "inputs/c-snake.in"],
    ["python", "inputs/ptypipe.py", "inputs/c-snake.in", "input_programs/csnake"],
    ["python", "inputs/ptypipe.py", "inputs/c-snake.in", "./out"],
    "input_programs/csnake", "./out")

# tetris — currently disabled, kept for reference:
# _run_oh_benchmark(
#     'tetris', 'tetris',
#     ["../introspection-oblivious-hashing/run-oh.sh", "inputs/tetris.bc", "inputs/tetris.in"],
#     ["python", "inputs/ptypipe.py", "inputs/tetris.in", "input_programs/tetris"],
#     ["python", "inputs/ptypipe.py", "inputs/tetris.in", "./out"],
#     "input_programs/tetris", "./out")

# zopfli (batch program: binaries are run directly, no pty pipe)
_run_oh_benchmark(
    'zopfli', 'zopfli',
    ["./run-oh-zopfli.sh", "./zopfli_whole.bc", "inputs/zopfli.in"],
    ["input_programs/zopfli", "inputs/zopfli.in"],
    ["OH-build/protected", "inputs/zopfli.in"],
    "input_programs/zopfli", "OH-build/protected")

result_file.close()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 5 22:11:43 2015
@author: lucas
"""
from negocio.regras_de_negocio import *
# from coleta.coleta_tempo_real import ColetaTempoReal
from nltk.classify import NaiveBayesClassifier
from util.corpus_util import CorpusUtil
from datetime import datetime
from util.classificador_util import ClassificadorUtil
class Fachada(object):
    """Facade for the opinion-mining subsystem: document processing,
    classifier training and sentiment classification (Python 2 code)."""

    def __init__(self):
        # Available commands.
        # Business rules used to process the documents.
        # There is a correct processing order for the rules, so the
        # order of the rules in this list matters.
        #
        # At the moment, notations such as 'emoticon_feliz' and
        # 'simbolo_monetario' are also altered by the stemmer.
        # Running 'DivideCamelCaseCommand' before 'ProcessaNegacaoCommand'
        # avoids errors such as 'NAO_JKRowling'.
        self.regras_negocio = [CorrigeAbreviacaoContracaoCommand(),
                               RemoveRTCommand(),
                               CorrigeRisadaCommand(),
                               RemoveNumerosCommand(),
                               PadronizaSimboloMonetarioCommand(),
                               PadronizaLinksCommand(),
                               RemoveEmailCommand(),
                               ProcessaEmoticonsCommand(),
                               DivideCamelCaseCommand(),
                               ConverteLowerCaseCommand(),
                               # StemmerCommand(),
                               # ProcessaNegacaoCommand(),
                               RemovePontuacaoCommand(),
                               RemoveStopwordsCommand(),
                               RemoveEspacosExtraCommand()]
        '''self.regras_negocio = [StemmerCommand()]'''
        # Absolute paths to the pre-trained classifier, corpus root and
        # frequent-words pickle used by the classification flow.
        self._CLASSIFICADOR_PATH = "/home/lucas/Documents/mineracao_opiniao/classificadores_treinados/" \
                                   "naive_bayes_3860_resenhas_2015-09-25.00.52.23.944959.pickle"
        '''self._CLASSIFICADOR_PATH = "/home/lucas/Documents/mineracao_opiniao/classificadores_treinados/" \
            "naive_bayes_3860_resenhas_negacao_2015-10-10.16.54.27.113297.pickle"'''
        self._CORPUS_RAIZ = "/home/lucas/Documents/mineracao_opiniao/corpus_resenhas_livros_processadas"
        self._PATH_PALAVRAS_FREQUENTES_CORPUS = "/home/lucas/Documents/mineracao_opiniao/palavras_frequentes_corpus/" \
                                                "palavras_frequentes_2015-09-25.15.05.22.013713.pickle"
        # self._MODO_ANALISE_PROBABILISTICO = "modo_probabilistico"
        # self._MODO_ANALISE_DICIONARIO = "modo_dicionario"
        self._CORPUS = CorpusUtil(self._CORPUS_RAIZ)

    """
    def coletar_documentos(self, palavras_chave=['dilma', 'lula'],
                           qtde_tweets=50, dir_destino=""):
        '''Coleta uma porção de tweets que mencionam
        determinada entidade, salvando cada tweet coletado
        em um arquivo de texto, no direório especificado.
        '''
        coleta_tr = ColetaTempoReal(palavras_chave=palavras_chave,
                                    qtde_tweets=qtde_tweets,
                                    dir_destino=dir_destino,
                                    idiomas=['pt'])
        coleta_tr.coletar_tweets()
        return True
    """

    def analisar_sentimento_documento(self, texto):
        """Classify the sentiment of `texto`; return the Documento with
        its `categoria` attribute set to the predicted class."""
        # Create a document with the text received as a parameter.
        documento = Documento(texto, None, self._CORPUS, "nome")
        # Process the document before analysing its sentiment.
        documento = self.processar_documento(documento)
        # Open the pre-trained classifier.
        classificador = ClassificadorUtil().abrir_classificador_treinado(self._CLASSIFICADOR_PATH)
        # Open the file with the corpus' most frequent words.
        palavras_frequentes = self._CORPUS.abrir_arquivo_palavras_frequentes(self._PATH_PALAVRAS_FREQUENTES_CORPUS)
        # palavras_frequentes = None
        # Analyse the sentiment of the document's text and return it.
        sentimento = classificador.classify(Documento.get_features(documento, palavras_frequentes))
        documento.categoria = sentimento
        return documento

    @staticmethod
    def treinar_classificador(raiz_corpus):
        """Train a Naive Bayes classifier on the train set built from the
        corpus rooted at `raiz_corpus` and return it."""
        minuto_incio = datetime.now().minute
        corpus = CorpusUtil(raiz_corpus)
        train_set = corpus.get_train_set()
        '''print " ++ len(train_set): %d" % len(train_set)
        print " ++ type(train_set): %s" % type(train_set)
        test_set = corpus.get_test_set()
        print " ++ len(test_set): %d" % len(test_set)
        print " ++ type(test_set): %s" % type(test_set)
        # test_set
        qtde_pos = 0
        qtde_neg = 0
        for features in test_set:
            if features[1] == 'pos':
                qtde_pos += 1
            elif features[1] == 'neg':
                qtde_neg += 1
            # print " ++ features[1]: ", features[1]
        print "Quantidade de documentos positivos em 'test_set': %d" % qtde_pos
        print "Quantidade de documentos negativos em 'test_set': %d" % qtde_neg'''
        classificador = NaiveBayesClassifier.train(train_set)
        print " -- Classificador treinado com sucesso."
        # NOTE(review): elapsed time uses minute-of-hour only, so it is
        # wrong whenever training crosses an hour boundary.
        minuto_fim = datetime.now().minute
        tempo_decorrido = minuto_fim - minuto_incio
        print "-- Tempo decorrido: %s minutos." % str(tempo_decorrido)
        return classificador

    def processar_documentos_corpus(self, documentos, diretorio_destino):
        """Run every document of a corpus through all processing business
        rules and write each processed document to `diretorio_destino`."""
        qtde_docs_processados = 0
        porcentagem_processada = 0
        for documento in documentos:
            doc_processado = documento  # copy of the document to process
            # print "\n\n"
            # print doc_processado.nome
            # Apply every business rule, in order.
            for cmd in self.regras_negocio:
                # print " -- Command: %s" % cmd.__class__.__name__
                doc_processado = cmd.execute(doc_processado)
                # print doc_processado.texto
            ArquivoUtil().gravar_documento_processado(doc_processado, diretorio_destino)
            # Report progress only when the integer percentage changes.
            qtde_docs_processados += 1
            porcentagem_corrente = qtde_docs_processados * 100 / len(documentos)
            if porcentagem_processada != porcentagem_corrente:
                porcentagem_processada = porcentagem_corrente
                print "%s%% dos documentos foram processados." % str(porcentagem_processada)
            # documentos_processados.append(doc_processado)
        return True

    def processar_documento(self, documento):
        """Run a single document through all processing rules, printing
        each step, and return the processed document."""
        doc_processado = documento
        for cmd in self.regras_negocio:
            print " -- Command: %s" % cmd.__class__.__name__
            doc_processado = cmd.execute(doc_processado)
            print doc_processado.texto
            print "\n"
        return doc_processado
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import Physician
class PhysicianSerializer(serializers.ModelSerializer):
    """Full Physician representation exposed by the REST API."""
    class Meta:
        model = Physician
        fields = ['id', 'firstName', 'lastName', 'maxShiftLoad', 'phoneNumber', 'specialty']


class PhysicianMiniSerializer(serializers.ModelSerializer):
    """Compact Physician representation (id and last name only)."""
    class Meta:
        model = Physician
        fields = ['id', 'lastName']
|
#coding: utf-8
''' mbinary
#######################################################################
# File : isBipartGraph.py
# Author: mbinary
# Mail: zhuheqin1@gmail.com
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2018-12-21 15:00
# Description: Judge if a graph is bipartite
The theorical idea is to judge whether the graph has a odd-path circle. However, it's hard to implement.
The following codes show a method that colors the nodes by dfs.
#######################################################################
'''
def isBipartite(self, graph):
    """
    :type graph: List[List[int]]
    :rtype: bool
    Two-color the graph component by component; it is bipartite iff no
    coloring conflict is found.
    """
    total = len(graph)
    self.node = graph
    self.color = {v: None for v in range(total)}
    for v in range(total):
        # Start a fresh DFS only from vertices still uncolored — earlier
        # searches may already have colored them (check must stay lazy).
        if self.color[v] is None and not self.dfs(v, True):
            return False
    return True


def dfs(self, n, col=True):
    """Color node `n` with `col`, propagate the opposite color to its
    neighbours; False signals a color clash (odd cycle)."""
    if self.color[n] is not None:
        # Already colored: consistent only if it matches the request.
        return self.color[n] == col
    self.color[n] = col
    for neighbour in self.node[n]:
        if not self.dfs(neighbour, not col):
            return False
    return True
|
# By Hadil Alsudies
# !/usr/bin/env python3
import psycopg2
def query1(db):
    """Print the three most-viewed articles with their view counts."""
    cursor = db.cursor()
    cursor.execute("""SELECT A.title AS ArticleName, count(log.logSlug) AS views
        FROM articles A, (SELECT regexp_replace(log.path, '^.+[/\\\]','')
        AS logSlug FROM log) AS log
        WHERE A.slug = log.logSlug GROUP BY A.title ORDER BY views desc LIMIT 3
        """)
    print(" Top 3 popular articles are: "+"\n")
    # psycopg2 cursors are iterable over the result rows.
    for row in cursor:
        print("\""+row[0]+"\" -- "+str(row[1])+" views"+"\t\t")
def query2(db):
    """Print all article authors ranked by their total article views."""
    cursor = db.cursor()
    cursor.execute("""SELECT authors.name AS AuthorName,
        count(log.logSlug) AS views FROM articles A, authors,
        (SELECT regexp_replace(log.path, '^.+[/\\\]','') AS logSlug FROM log)
        AS log WHERE A.slug = log.logSlug AND A.author = authors.id
        GROUP BY authors.name ORDER BY views desc
        """)
    print("\n\n"+" The most popular article authors of all time are: "+"\n")
    # psycopg2 cursors are iterable over the result rows.
    for row in cursor:
        print(row[0]+" -- "+str(row[1])+" views"+"\t\t")
def query3(db):
    """Print the days on which more than 1% of requests were errors."""
    cursor = db.cursor()
    cursor.execute("""SELECT dates, ErrorStatus
        FROM (SELECT to_char(time,'MON DD,YYYY') AS dates,
        ROUND(SUM(CASE WHEN CAST(substring(status,1,1) AS INTEGER)
        IN (4,5) THEN 1 ELSE 0 END) * 100.0 / COUNT(to_char(time,'MON DD,YYYY')),2)
        AS ErrorStatus FROM log
        GROUP BY DATES) AS DateByError WHERE ErrorStatus >= 1
        """)
    print("\n\n"+" Day did more than 1% of requests lead to errors: "+"\n")
    # psycopg2 cursors are iterable over the result rows.
    for row in cursor:
        print(row[0]+" -- "+str(row[1])+" % errors"+"\t\t")
def myquestions():
    """Connect to the `news` database and run the three report queries.

    Fixes: the except branch used to print the literal placeholder
    "<error message>" instead of the actual error, and the connection
    was never closed.
    """
    db = None
    try:
        db = psycopg2.connect('dbname=news user=postgres password=123')
        query1(db)
        query2(db)
        query3(db)
    except psycopg2.DatabaseError as error:
        # Surface the real database error instead of a placeholder string.
        print(error)
    finally:
        if db is not None:
            db.close()
def main():
    """Entry point: print the answers to the three report questions."""
    # Runs a function that prints out the results of 3 questions: Q1, Q2 and Q3
    myquestions()


if __name__ == "__main__":
    main()
|
"""
For example, given minion ID n = 1211, k = 4, b = 10, then x = 2111, y = 1112 and z = 2111 - 1112 = 0999. Then the next minion ID will be n = 0999 and the algorithm iterates again: x = 9990, y = 0999 and z = 9990 - 0999 = 8991, and so on.
[210111, 122221, 102212]
"""
# NOTE(review): these module-level values are never used below.
number = 210111
count = 0
def answer(n, b):
    """Return the cycle length of the base-`b` Kaprekar-style ID sequence.

    Starting from minion ID `n` (fixed digit width, leading zeros kept),
    repeatedly compute z = x - y in base `b`, where x and y are the
    current ID's digits sorted in descending and ascending order. The
    sequence always enters a cycle; the length of that cycle is returned.

    Rewritten iteratively: the original recursive helper leaked results
    through closed-over lists, carried dead variables (`originalN`,
    `b=b`) and could exhaust the recursion limit on long tails.
    """
    seen = []            # IDs visited so far, in encounter order
    current = str(n)
    while current not in seen:
        seen.append(current)
        ascending = sorted(current)       # digits of y (smallest first)
        descending = ascending[::-1]      # digits of x (largest first)
        # Schoolbook base-`b` subtraction x - y, least significant first.
        borrow = 0
        digits = []
        for x_digit, y_digit in zip(reversed(descending), reversed(ascending)):
            d = int(x_digit) - borrow - int(y_digit)
            if d < 0:
                d += b
                borrow = 1
            else:
                borrow = 0
            digits.append(str(d))
        # Reassemble most-significant first; fixed width keeps leading zeros.
        current = "".join(reversed(digits))
    # Distance from first occurrence of `current` to the end = cycle length.
    return len(seen) - seen.index(current)


print(answer(210022,3))
|
import shutil
import sys
from flask import Flask, send_from_directory
from flask import jsonify
from flask import request
from flask_cors import CORS
from waitress import serve
# Clear stale webdriver temp data left over from a previous run.
# NOTE(review): shutil.rmtree raises FileNotFoundError when the directory
# is missing — presumably ModelHandler recreates it; confirm on first run.
shutil.rmtree('../models/metadata/webdriver/temp')
from ModelHandler import ModelHandler
from threading import Thread

app = Flask(__name__)
# use cors to accept Cross-Origin Requests
# this allows requests from a SPA served separately, for example from npm run serve
# makes development more streamlined
# cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
cors = CORS(app)

# Single shared model handler, driven on a background thread.
handler = ModelHandler()
t = Thread(target=handler)
t.start()
@app.route('/api/<token>')
def api_resp(token):
    """Return the handler's result for `token` as JSON.

    The sentinel token '-1' (issued for invalid submissions) maps to an
    empty JSON object instead of querying the handler.
    """
    if token == str(-1):
        return jsonify({})
    return jsonify(handler.ask(token))
@app.route('/api', methods=['POST'])
def new_request():
    """Submit a new analysis job and return its token as a string.

    Form fields: TYPE ('IMDB', 'YT' or 'BOTH'), IMDB (title id) and YT
    (video id). Returns '-1' for an unknown TYPE.

    Fix: uses form.get() so a missing field that the chosen TYPE does not
    use (e.g. no YT field on an IMDB-only job) no longer aborts the whole
    request with a KeyError/400.
    """
    # token = handler.submit('IMDB', {'IMDB': 'tt4154796'})
    # token = handler.submit('YT', {'YT': 'https://www.youtube.com/watch?v=2NwHpkEjn84'})
    # token = handler.submit('BOTH', {'IMDB': 'tt3480822', 'YT': 'https://www.youtube.com/watch?v=2NwHpkEjn84'})
    req_type = request.form.get('TYPE', '')
    imdb_id = request.form.get('IMDB', '')
    yt_id = request.form.get('YT', '')
    if req_type == 'IMDB':
        token = handler.submit('IMDB', {'IMDB': imdb_id})
    elif req_type == 'YT':
        token = handler.submit('YT', {'YT': 'https://www.youtube.com/watch?v=' + yt_id})
    elif req_type == 'BOTH':
        token = handler.submit('BOTH', {'IMDB': imdb_id,
                                        'YT': 'https://www.youtube.com/watch?v=' + yt_id})
    else:
        token = -1
    return str(token)
@app.route('/<path:path>', methods=['GET'])
def static_proxy(path):
    # Serve built frontend assets (JS/CSS/images) from the dist folder.
    return send_from_directory('../frontend2/dist', path)


@app.route('/')
def root():
    # SPA entry point.
    return send_from_directory('../frontend2/dist', 'index.html')


@app.errorhandler(500)
def server_error(e):
    # Report internal errors to the client with a 500 status code.
    return 'An internal error occurred [server.py] %s' % e, 500
if __name__ == '__main__':
    # This is used when running locally only. When deploying use a web-server process
    # such as waitress to serve the app.
    try:
        # NOTE(review): the port stays a string when taken from argv;
        # waitress accepts that, but int(sys.argv[1]) would reject bad
        # input earlier.
        set_port = (sys.argv[1])
    except IndexError:
        set_port = 80
    # app.run(host='0.0.0.0', port = set_port, debug=True, threaded=True)
    # Serve with waitress
    serve(app, host='0.0.0.0', port=set_port)
|
# -*- coding: UTF-8 -*-
# 排序冒泡法https://www.jianshu.com/p/e8ae3a0bc2e4
def main():
    """Demonstrate bubble sort step by step, then the built-in sort.

    Prints the index of every swap performed, the bubble-sorted list,
    and finally the same data sorted with list.sort().
    """
    items = [10, 8, 4, 7, 5]
    size = len(items)
    for completed in range(size - 1):
        # After each pass the largest remaining element has bubbled to the
        # end, so the inner scan stops `completed` slots earlier.
        for j in range(size - 1 - completed):
            if items[j] > items[j + 1]:
                # Tuple assignment swaps in place.
                items[j], items[j + 1] = items[j + 1], items[j]
                print(j)
    print(items)

    reference = [10, 8, 4, 7, 5]
    reference.sort()
    print(reference)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponsePermanentRedirect, Http404
from products.models import Category, Product
from shops.models import Delivery
from utils.views import handle404
def read(request, parent_slug, slug, product_id):
    """Product detail page (translated from: "Просмотр информации о товаре")."""
    category_parent = get_object_or_404(Category, slug=parent_slug)
    # Prefer a category nested one level deeper (grandchild of the
    # parent); fall back to a direct child of the parent.
    try:
        category = Category.objects.get(slug=slug, parent__in=category_parent.children.all(), is_visible=True)
    except Category.DoesNotExist:
        category = Category.objects.get(slug=slug, parent=category_parent, is_visible=True)
    product = get_object_or_404(Product, categories=category, id=product_id)
    # , is_visible=True, quantity__gt=0
    # Hidden or out-of-stock products render the custom 404 page.
    if not product.is_visible or product.quantity <= 0:
        return handle404(request, product, category)
    # Audience visibility: anonymous -> retail-only products; wholesale
    # accounts -> wholesale products; retail accounts -> retail products.
    if request.user.is_anonymous():
        if not product.is_retail:
            raise Http404()
    else:
        if request.user.show_wholesale() and not product.is_wholesale:
            raise Http404()
        elif not request.user.show_wholesale() and not product.is_retail:
            raise Http404()
    # Randomly ordered similar products from the same category; wholesale
    # viewers get a longer list.
    similar = Product.objects.for_user(request.user).filter(categories=category).exclude(id=product.id).order_by('?')
    if request.user.is_authenticated() and request.user.show_wholesale():
        similar = similar[:10]
    else:
        similar = similar[:6]
    # Delivery options filtered by the viewer's wholesale/retail tier.
    delivery = Delivery.objects.all().order_by('position')
    if request.user.is_authenticated() and request.user.show_wholesale():
        delivery = delivery.filter(is_wholesale=True)
    else:
        delivery = delivery.filter(is_retail=True)
    # Price/discount default to retail values; wholesale and franchisee
    # accounts override them with their own tiers.
    price = int(product.price or 0)
    discount = int(product.discount or 0)
    if request.user.is_authenticated():
        if request.user.is_wholesale:
            price = product.wholesale_price
            discount = product.wholesale_discount
        elif request.user.is_franchisee:
            price = product.franchisee_price
            discount = product.franchisee_discount
    # Complementary categories, retail viewers only
    # (translated from: "Сопутствующие категории, только для розницы").
    if request.user.is_anonymous() or (request.user.is_authenticated() and not request.user.show_wholesale()):
        complementary_categories = Category.objects.filter(is_complementary=True, is_visible=True)
    else:
        complementary_categories = ()
    context = {
        'category': category,
        'category_parent': category_parent,
        'product': product,
        # Main photo first.
        'photos': list(product.photos.all().order_by('-is_main')),
        'similar': similar,
        'complementary_categories': complementary_categories,
        'delivery': delivery,
        'price': price,
        'discount': discount,
    }
    request.breadcrumbs.add(category_parent.title, category_parent.get_absolute_url())
    request.breadcrumbs.add(category.title, category.get_absolute_url())
    return render(request, 'products/products/read.html', context)
def legacy_redirect(request, product_id):
    """Permanently redirect a legacy product link to the product's
    current URL (translated from: "Переадресация продуктов со старых ссылок")."""
    target = get_object_or_404(Product, id=product_id).get_absolute_url()
    if target is None:
        raise Http404()
    return HttpResponsePermanentRedirect(target)
def fast_view(request, product_id):
    """Render the quick-view popup for one product, using the viewer's
    price tier (retail by default, wholesale/franchisee when applicable)."""
    product = get_object_or_404(Product, id=product_id)
    user = request.user
    # Retail values are the fallback for anonymous and retail viewers.
    price = int(product.price or 0)
    discount = int(product.discount or 0)
    if user.is_authenticated():
        if user.is_wholesale:
            price, discount = product.wholesale_price, product.wholesale_discount
        elif user.is_franchisee:
            price, discount = product.franchisee_price, product.franchisee_discount
    context = {
        'product': product,
        'user': user,
        'price': price,
        'discount': discount,
    }
    return render(request, 'products/products/fast_view.html', context)
|
import torch
import torch.nn as nn
from torch.nn import LSTM
class BaseRNNBlock(nn.Module):
    """Single-path RNN block: two bidirectional LSTMs run over the last
    time-like axis, their outputs are gated element-wise, concatenated
    with the input, and projected back to the input feature size.

    (Original note: the odd block runs the RNN along the time dimension,
    i.e. over R chunks of size K.)
    """

    def __init__(self, Parameter=128, hidden_size=128, **kwargs):
        super(BaseRNNBlock, self).__init__()
        self.lstm1 = LSTM(input_size=Parameter, hidden_size=hidden_size,
                          batch_first=True, bidirectional=True, **kwargs)
        self.lstm2 = LSTM(input_size=Parameter, hidden_size=hidden_size,
                          batch_first=True, bidirectional=True, **kwargs)
        # Linear projection ("P" in the paper) back to the input width;
        # input is 2*hidden (bidirectional) plus the residual features.
        self.P = nn.Linear(hidden_size * 2 + Parameter, Parameter)

    def forward(self, x):
        gate_a, _ = self.lstm1(x)
        gate_b, _ = self.lstm2(x)
        # Element-wise gate the two LSTM streams, append the raw input,
        # then project back down so the block is shape-preserving.
        fused = torch.cat((gate_a * gate_b, x), dim=-1)
        return self.P(fused)
class RNNBlock(nn.Module):
    """One dual-path unit: an intra-chunk ("odd") pass followed by an
    inter-chunk ("even") pass, each with a residual skip connection.
    """

    def __init__(self, K, R, hidden_size=128, **kwargs):
        super(RNNBlock, self).__init__()
        self.oddBlock = BaseRNNBlock(Parameter=K, hidden_size=hidden_size, **kwargs)
        self.evenBlock = BaseRNNBlock(Parameter=R, hidden_size=hidden_size, **kwargs)

    def forward(self, x):
        # Intra-chunk pass plus skip connection.
        intra = self.oddBlock(x) + x
        # N x R x K -> N x K x R so the second pass runs across chunks.
        inter = torch.transpose(intra, 1, -1)
        inter = self.evenBlock(inter)
        # Back to N x R x K, then the second skip connection.
        inter = torch.transpose(inter, 1, -1)
        return inter + intra
class FacebookModel(nn.Module):
    '''
    Facebook dual-path model, v2 (translated from: "Modelo do facebook v2").

    Pipeline: Conv1d encoder -> chunking -> b stacked dual-path RNN blocks
    (each emitting a c*r-channel map via self.d) -> overlap-and-add ->
    2-D transposed-conv decoder.

    NOTE(review): the chunking/overlap constants (64, 89, 110, 178, 16198)
    are hard-coded for one specific channel count and input length --
    confirm before reusing with other signals.
    '''
    def __init__(self, n, k, r, c=2, l=8, b=1, **kwargs):
        # n: encoder channels; k: chunk length; r: chunk count;
        # c: number of sources to separate; l: encoder kernel size;
        # b: number of stacked RNN blocks.
        super(FacebookModel, self).__init__()
        self.c = c
        self.encoder = nn.Conv1d(1, n, l, int(l/2))
        self.rnnblocks = [RNNBlock(k, r, **kwargs) for _ in range(b)]
        # Register the blocks explicitly: a plain Python list is invisible
        # to nn.Module, so their parameters would otherwise be missed by
        # .to()/optimizers/state_dict.
        for i, block in enumerate(self.rnnblocks):
            nn.Module.add_module(self, 'rnn_block_%s' % i, block)
        self.d = nn.Conv1d(r, c*r, kernel_size=1)
        self.activation = torch.nn.PReLU(num_parameters=1, init=0.25)
        self.decoder = nn.ConvTranspose1d(n, 1, kernel_size=l, stride=int(l/2))
        # Tracked manually so chunk() can create padding on the right device.
        self.device= 'cpu'
        # Experimental decoder using a 2-D transposed convolution
        # (translated from: "teste de decode com uma convolucao 2d").
        self.decoder2d = nn.ConvTranspose2d(n, 1, kernel_size=(1, l), stride=(1, int(l/2)))
    def forward(self, x):
        # squeeze(0) drops the batch axis -- assumes batch size 1; TODO confirm.
        encoded = self.encoder(x).squeeze(0)
        chunks = self.chunk(encoded)
        outps = list()
        # Each block refines the chunks; every intermediate result is also
        # projected and decoded (deep supervision style output list).
        for block in self.rnnblocks:
            chunks = block(chunks.clone())
            res = self.d(self.activation(chunks))
            outps.append(res)
        outps = self.apply_overlap_and_add(outps)
        return self.decode2d(outps)
    def chunk(self, x):
        # Pad the time axis (110 at the end, 89 at the front) so unfold
        # yields whole chunks of length 178 with hop 89.
        # NOTE(review): the 64 pins the channel count -- presumably n == 64;
        # verify against the caller.
        x = torch.cat((x, torch.zeros((64, 110)).to(self.device)), dim=-1)
        x = torch.cat((torch.zeros((64, 89)).to(self.device), x), dim=-1)
        return x.unfold(-1, 178, 89)
    def decode2d(self, x):
        '''
        Decode with a 2-D convolution: the audio of the c sources stays
        together in one tensor (translated from the original Portuguese).
        '''
        restored = []
        for a in x:
            # Strip the front/back padding added in chunk() before decoding.
            a = a[...,89:-110].unsqueeze(0)
            d = self.decoder2d(a).squeeze(1)
            restored.append(d)
        return restored
    def decode(self, x):
        '''
        Decode with a 1-D convolution: the audio of the c different sources
        is separated beforehand (translated from the original Portuguese).
        '''
        restored = [[] for _ in range(self.c)]
        for a in x:
            for i in range(self.c):
                t = a[:,i,89:-110].unsqueeze(0)
                t = self.decoder(t)
                restored[i].append(t.squeeze(0))
        return restored
    def apply_overlap_and_add(self, x):
        # Run overlap_and_add over the per-block output list.
        overlapped_added = list()
        for el in x:
            result = self.overlap_and_add(el)
            overlapped_added.append(result)
        return overlapped_added
    def overlap_and_add(self, x):
        '''
        Overlap-and-add via torch fold (translated from the original).
        NOTE(review): the output length 16198 is hard-coded -- confirm it
        matches the padded encoder output length.
        '''
        x = torch.transpose(x, -2, -1)
        result = torch.nn.functional.fold(x, (self.c, 16198) ,kernel_size=(1,178), stride=(1,89))
        return result.squeeze(1)
    def to(self, device):
        # Override to() so the zero-padding tensors created in chunk()
        # follow the model onto the right device.
        model = super(FacebookModel,self).to(device)
        model.device = device
        return model
|
import random
# Number-baseball game (comments translated from Korean): a "strike" is a
# correct digit in the correct position, a "ball" is a correct digit in
# the wrong position. Play until all three digits are strikes.
# Generate the answer: three distinct digits from 1-9.
answer = random.sample(range(1,10),3)
print("정답은=", answer)
# State: total attempts, current guess, strikes in the current round.
cnt = 0
guess = []
strikecnt = 0
# Game loop: repeat until the whole answer is matched.
while strikecnt <3:
    strikecnt = 0
    ballcnt = 0
    guess = []
    # Read the three guessed digits one at a time.
    for i in range(3):
        num = int(input("{}, 1~9까지 숫자를 입력하세요:".format(i)))
        guess.append(num)
    print(guess)
    # Score the guess against the answer.
    for i in range(3):
        if guess[i] == answer[i]:
            strikecnt = strikecnt + 1
        elif guess[i] in answer:
            ballcnt +=1
    print("strike = {}, ball = {}".format(strikecnt, ballcnt))
    cnt +=1
print("{}번 만에 정답".format(cnt))
|
"""
CI tests for VMware API utility module
"""
from oslo.vmware import api
from oslo.vmware import vim_util
import unittest
import json
class VimUtilCITest(unittest.TestCase):
    """Test class for utility methods in vim_util.py.

    Connection parameters and expected inventory values are read from
    vim_util_test.json: an "environment" section plus one entry per test.
    """

    def setUp(self):
        """Run before each test method to initialize test environment."""
        with open("vim_util_test.json") as template:
            self.spec = json.load(template)
        env = self.spec.get("environment")
        self.session = api.VMwareAPISession(host=env.get("host"),
                                            port=env.get("port"),
                                            server_username=env.get("server_username"),
                                            server_password=env.get("server_password"),
                                            api_retry_count=env.get("api_retry_count"),
                                            task_poll_interval=env.get("task_poll_interval"),
                                            scheme=env.get("scheme"))
        self.vim = self.session.vim

    def test_get_objects_without_properties(self):
        """Get the host info from rootFolder and check the expected host IPs."""
        test_spec = self.spec.get("test_get_objects")
        expected_type = test_spec.get("_type")
        expected_host_ips = test_spec.get("host_ips")
        object_content = self.session.invoke_api(vim_util,
                                                 'get_objects',
                                                 self.vim,
                                                 'HostSystem',
                                                 100)
        host_ip_list = []
        if object_content:
            self.assertIsNotNone(object_content.objects)
            for one_object in object_content.objects:
                self.assertEqual(one_object.obj._type, expected_type)
                if hasattr(one_object, 'propSet'):
                    for prop in one_object.propSet:
                        host_ip_list.append(prop.val)
            for each_ip in expected_host_ips:
                self.assertIn(each_ip, host_ip_list)

    def test_get_objects_with_properties(self):
        """Get the datastore names and verify every expected one is present."""
        expected_result = self.spec.get("test_get_objects_with_properties")
        expected_type = expected_result.get("_type")
        expected_datastore_list = [each_datastore["name"]
                                   for each_datastore in expected_result.get("datastore_infos")]
        datastore_list = []
        object_content = self.session.invoke_api(vim_util,
                                                 'get_objects',
                                                 self.vim,
                                                 'Datastore',
                                                 100,
                                                 ['name'])
        for one_object in object_content.objects:
            self.assertEqual(one_object.obj._type, expected_type)
            if hasattr(one_object, 'propSet'):
                for prop in one_object.propSet:
                    if prop.name == "name":
                        datastore_list.append(prop.val)
        # BUG FIX: the original loop compared datastore_list against itself
        # (`for each_ds_name in datastore_list: ... in datastore_list`),
        # which could never fail. Check the *expected* names instead.
        for each_ds_name in expected_datastore_list:
            self.assertIn(each_ds_name, datastore_list)

    def test_get_object_properties(self):
        """Get host properties with properties specified."""
        test_spec = self.spec.get("test_get_object_properties")
        host_moref = vim_util.get_moref(test_spec.get("host_id"), 'HostSystem')
        objects = self.session.invoke_api(vim_util,
                                          'get_object_properties',
                                          self.vim,
                                          host_moref,
                                          ["summary.hardware.numCpuCores", "summary.hardware.numCpuThreads"])
        self.assertIsNotNone(objects)
        expected_numCpuCores = test_spec.get("numCpuCores")
        expected_numCpuThreads = test_spec.get("numCpuThreads")
        numCpuCores = 0
        numCpuThreads = 0
        if hasattr(objects[0], 'propSet'):
            # Only two properties were requested, so anything that is not
            # numCpuCores is taken to be numCpuThreads.
            for prop in objects[0].propSet:
                if prop.name == "summary.hardware.numCpuCores":
                    numCpuCores = prop.val
                else:
                    numCpuThreads = prop.val
        self.assertEqual(expected_numCpuCores, numCpuCores)
        self.assertEqual(expected_numCpuThreads, numCpuThreads)

    def test_cancel_retrievcal(self):
        """Cancelling a paged retrieval must succeed given a valid token.

        NOTE: the method-name typo ("retrievcal") is kept so existing
        test selections by name keep working.
        """
        object_content = self.session.invoke_api(vim_util, 'get_objects', self.vim, 'VirtualMachine', 1)
        token = vim_util._get_token(object_content)
        self.assertIsNotNone(token)
        vim_util.cancel_retrieval(self.vim, object_content)

    def test_continue_retrieval(self):
        """Continuing a paged retrieval must return further objects."""
        object_content = self.session.invoke_api(vim_util, 'get_objects', self.vim, 'VirtualMachine', 1)
        token = vim_util._get_token(object_content)
        self.assertIsNotNone(token)
        result = vim_util.continue_retrieval(self.vim, object_content)
        self.assertIsNotNone(result.objects)

    def test_register_extension(self):
        """Unregister an extension, confirm it is gone, then re-register it."""
        test_spec = self.spec.get("test_register_extension")
        key = test_spec.get("key")
        extension_manager = self.vim.service_content.extensionManager
        self.vim.client.service.UnregisterExtension(extension_manager, key)
        extention_object = vim_util.find_extension(self.vim, key)
        self.assertIsNone(extention_object)
        vim_util.register_extension(self.vim, key, None)
        extention_object = vim_util.find_extension(self.vim, key)
        self.assertIsNotNone(extention_object)

    def test_get_vc_version(self):
        """The reported vCenter version must match the spec."""
        test_spec = self.spec.get("test_get_vc_version")
        vc_version = vim_util.get_vc_version(self.session)
        expected_version = test_spec.get("vc_version")
        self.assertEqual(vc_version, expected_version)


if __name__ == "__main__":
    unittest.main()
|
### Script to be executed only in Flask shell to use proper App's ENV variables
from app import app, db, s3
from app.models import Paper, Tag, File
from werkzeug.utils import secure_filename
import re
import pandas as pd
REPO_FOLDER = '/Users/lakshmanan/Downloads/SEGarage final data'
excel_file = '/Users/lakshmanan/Downloads/april23_clean.xlsx'
def add_temp(paper, tags, filenames, db, s3):
if tags is not None:
delimiters = ",", ";"
regexPattern = '|'.join(map(re.escape, delimiters))
## Stripping whitespaces and making unique list
tags = [s.lower().strip() for s in re.split(regexPattern, tags)]
tags = [s for s in tags if (s is not '' and s is not None)]
tags = list(set(tags))
print(tags)
for tag in tags:
tag_obj = db.session.query(Tag).filter(Tag.tagname==tag).first()
if tag_obj is None:
tag_obj = Tag(tagname=tag)
paper.tags.append(tag_obj)
db.session.add(paper)
db.session.flush()
# if filenames is not None:
# new_filenames = []
# fileurls = []
# for filename in filenames.split(","):
# filename = secure_filename(filename)
# new_filenames.append(filename)
# new_filename = '{}/{}'.format(paper.id, filename) # updating the name with paper.id for easy access
# s3_url = "{0}/{1}/{2}".format(app.config['S3_ENDPOINT'], app.config['S3_BUCKET'], new_filename)
# s3.upload_file('{}/{}'.format(REPO_FOLDER, filename), 'segarage_test', new_filename)
# fileurls.append(s3_url)
# for filename, fileurl in zip(new_filenames, fileurls):
# paper.files.append(File(filename=filename, filetype='Other', fileurl=fileurl))
db.session.flush()
db.session.commit()
df = pd.read_excel(excel_file, sheet_name='Sheet1') ## Reading the excel file
dfs = df.where((pd.notnull(df)), None) ## Changing nan to None in the dataframe
# Build one Paper per spreadsheet row and import it together with its tags.
for i in range(len(dfs)):
    # print(dfs['filenames'][i])
    paper = Paper(paper_name=dfs['title'][i], author_name=dfs['NAME OF AUTHOR'][i], author_email=dfs['email-primary'][i], tool_name=dfs['File name for tool'][i].split('-')[0], link_to_pdf=dfs['acm'][i], link_to_archive='Not provided', link_to_tool_webpage=dfs['available(link)'][i], link_to_demo=dfs['video'][i], bibtex=dfs['bibtext'][i], description=dfs['intro'][i], view_count=0, conference=dfs['conference'][i], year=dfs['year'][i])
    # Merge both tag columns into one comma separated string.
    # NOTE(review): the second column name ('categories-descirption ')
    # contains a typo and a trailing space -- it must match the sheet.
    taglist_one = dfs['categories-key words'][i] or ''
    taglist_two = dfs['categories-descirption '][i] or ''
    tags = '{}, {}'.format(taglist_one, taglist_two)
    filenames = dfs['File name for tool'][i]
    add_temp(paper, tags, filenames, db, s3)
# Rebuild the search index once after the bulk import.
Paper.reindex();
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class pos_category(models.Model):
    """Extend the point-of-sale category with a configurable tile colour."""
    _inherit = 'pos.category'

    # Hex colour used for the category background in the POS UI;
    # defaults to white.
    background_color = fields.Char('Background Color', default='#FFFFFF',)
#!/usr/bin/env python3
# Fibonacci Series:
# Implementation (c) 2016,2017 Brig Young (github.com/Sonophoto)
# License: BSD-2c, i.e. Cite.
#
# Reference for this Series: https://oeis.org/search?q=fibonacci
# OEIS: Online Encyclopedia of Integer Sequences
# F(n) = (F(n-1) + F(n-2) with F(0) == 0 and F(1) == 1
# F(0) = 0, F(1) == 1, F(2) == 1, F(3) == 2, F(4) == 3, ...
# Thanks to http://gozips.uakron.edu/~crm23/fibonacci/fibonacci.htm
# for the closed form of the Fibonacci sequence used in the generator.
import unittest
import fibonacci as fm
class test_Fibonacci(unittest.TestCase):
    """Test all Fibonacci functions against a list of known Fibonacci numbers

    Values indexed 0 - 38 are from OEIS https://oeis.org/search?q=fibonacci
    39 - 50 manually calculated and verified by Brig Young
    """
    # A curated value of fibonacci(100) ref: M.L. Hetland pp. 177.
    # Note that Magnus is skipping the beginning zero so his indices are
    # actually (N-1). Also see:
    # http://www.maths.surrey.ac.uk/hosted-sites/R.Knott/Fibonacci/fibtable.html
    century_fib = 354224848179261915075
    # A curated list of Fibonacci numbers with correctly matched indexes.
    fibs = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377,
            610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657,
            46368, 75025, 121393, 196418, 317811, 514229, 832040,
            1346269, 2178309, 3524578, 5702887, 9227465, 14930352,
            24157817, 39088169, 63245986, 102334155, 165580141,
            267914296, 433494437, 701408733, 1134903170, 1836311903,
            2971215073, 4807526976, 7778742049, 12586269025]

    def test_Sequencer(self):
        """Create a list of fibonacci numbers via sequencing over count of
        known good values and compare to the list of known good values.
        """
        # IDIOM FIX: the original built these lists with side-effecting
        # `[list.append(...) for ...]` comprehensions; use the values directly.
        sequenced_list = [fm.fibonacciSequencer(x) for x in range(len(self.fibs))]
        self.assertEqual(sequenced_list, self.fibs,
                         "fibonacciSequencer() has calculated incorrect values")

    def test_Generator(self):
        """Create a list of fibonacci numbers via a python generator indexed
        by count of known good values and compare to known good values.
        """
        fibgen = fm.fibonacciGenerator()
        generated_list = [next(fibgen) for _ in range(len(self.fibs))]
        self.assertEqual(generated_list, self.fibs,
                         "fibonacciGenerator() has calculated incorrect values")

    def test_ClosedForm(self):
        """Create a list of fibonacci numbers via the closed form equation
        and compare to the list of known good values.
        """
        closedform_list = [fm.fibonacciClosedForm(x) for x in range(len(self.fibs))]
        self.assertEqual(closedform_list, self.fibs,
                         "fibonacciClosedForm() has calculated incorrect values")

    def test_NaiveRecursion(self):
        """Create a list of fibonacci numbers via naive recursion and compare
        to known good values. Only a short prefix: naive recursion is
        exponential in n.
        """
        fibs_short_list = self.fibs[:20]
        recursion_list = [fm.fibonacciNaiveRecursion(x) for x in range(len(fibs_short_list))]
        self.assertEqual(recursion_list, fibs_short_list,
                         "fibonacciRecursion() has calculated incorrect values")

    def test_MemoRecursion(self):
        """Create a list of fibonacci numbers via recursion with a memo-ized
        cache of previously calculated values and compare to known good values.
        """
        fibs_short_list = self.fibs[:20]
        memo_recursion_list = [fm.fibonacciMemoRecursion(x) for x in range(len(fibs_short_list))]
        self.assertEqual(memo_recursion_list, fibs_short_list,
                         "fibonacciMemoRecursion() has calculated incorrect values")

    def test_DeepRecursion(self):
        """Evaluate memo recursion at N=100 and compare to the known good result."""
        self.assertEqual(fm.fibonacciMemoRecursion(100), self.century_fib,
                         "fibonacciMemoRecursion() has calculated incorrect deep result")
if __name__ == "__main__":
    # BUG FIX: guard the runner so merely importing this module does not
    # immediately execute the whole test suite.
    unittest.main(verbosity=2)
|
# A sample string used to demonstrate length and slicing
# (comments translated from Portuguese).
our_text = 'Python is the best language'
# Show the length of the string.
print(len(our_text))
# Print a series of index/slice views of the string, in order:
# single characters (first, fourth, last), then prefix/suffix/full slices.
for segment in (our_text[0], our_text[3], our_text[-1],
                our_text[0:3], our_text[0:], our_text[:3], our_text[:]):
    print(segment)
|
import numpy as np
def generate_ma_part(length=100):
    """Draw *length* standard-normal innovations as a (length, 1) column."""
    return np.random.normal(size=(length, 1))
class TimeSeries():
    """ARMA-style time-series generator.

    ``a_coef`` holds the AR constant followed by the AR lag coefficients;
    ``b_coef`` holds the MA scale followed by the MA lag coefficients.
    Innovations can be supplied as a row vector (transposed internally)
    or are drawn fresh when omitted.
    """

    def __init__(self, a_coef: np.array, b_coef: np.array, ma_part=None, length=100):
        self.ar_coef = a_coef
        self.ma_coef = b_coef
        self.length = length
        # Use the supplied innovations (as a column) or draw new ones.
        self.ma_part = generate_ma_part(length) if ma_part is None else ma_part.T
        self.ar = len(a_coef) - 1
        self.ma = len(b_coef) - 1

    def time_series_generate(self):
        """Simulate the process and store it in ``self.time_series``."""
        series = np.zeros((self.length, 1))
        # Seed the first `ar` samples with the constant plus scaled noise.
        series[0:self.ar] = self.ar_coef[0] + self.ma_coef[0] * self.ma_part[0:self.ar].copy()
        for t in range(self.ar, self.length):
            ar_sum = sum(self.ar_coef[lag] * series[t - lag] for lag in range(1, self.ar + 1))
            ma_sum = sum(self.ma_coef[lag] * self.ma_part[t - lag] for lag in range(1, self.ma + 1))
            series[t] = self.ar_coef[0] + ar_sum + self.ma_coef[0] * self.ma_part[t] + ma_sum
        self.time_series = series

    def save_time_series(self, file):
        """Write the generated series to *file* as fixed-point text."""
        np.savetxt(file, self.time_series, fmt="%.5f")

    def save_ma_part(self, file):
        """Write the innovation sequence to *file* as fixed-point text."""
        np.savetxt(file, self.ma_part, fmt="%.5f")
"""
@file results.py
handler for saving and loading results.
"""
import os
import json
import numpy as np
class Results():
    """
    Persistence helper for pre-computed clustering results.<br>
    <br>
    Results live under <parent>/<dataset>/<algorithm>/<metric>/ and are
    stored as json files named after the algorithm parameters: either
    k_<k>.json or minpts_<minpts>_eps_<eps>.json.
    """
    def __init__(self, parentpath):
        """
        constructor. needs the filepath to the parent directory where the
        json files are supposed to be saved
        @param parentpath filepath to the parent directory
        """
        self.parent = parentpath

    def get_path(self, dataset, algorithm, metric, **kwargs):
        """
        builds and returns the filepath to the json file fitting the given parameters
        @param dataset string with the name of the dataset ("iris", "wine", "diabetes", "DBSCAN")
        @param algorithm string with the name of the algorithm ("kmeans", "kmedians", "kmedoids", "DBSCAN")
        @param metric string with the name of the distance measure ("euclidean", "cosine", "chebyshev", "manhattan")
        @param **kwargs algorithm specific parameters. Needs to be either "k" or "minpts" and "eps"
        @returns filepath to the correct json file
        """
        if "k" in kwargs:
            leaf = f'k_{kwargs["k"]}'
        else:
            leaf = f'minpts_{kwargs["minpts"]}_eps_{str(kwargs["eps"])}'
        return f"{self.parent}/{dataset}/{algorithm}/{metric}/{leaf}.json"

    def set_exists(self, dataset, algorithm, metric, **kwargs):
        """
        checks if a file for a result defined by the parameters exists
        @param dataset string with the name of the dataset
        @param algorithm string with the name of the algorithm
        @param metric string with the name of the distance measure
        @param **kwargs algorithm specific parameters ("k" or "minpts"/"eps")
        @returns True if the file exists, False if not
        """
        return os.path.exists(self.get_path(dataset, algorithm, metric, **kwargs))

    def load_set(self, dataset, algorithm, metric, **kwargs):
        """
        loads results fitting the given parameters from a json file
        @param dataset string with the name of the dataset
        @param algorithm string with the name of the algorithm
        @param metric string with the name of the distance measure
        @param **kwargs algorithm specific parameters ("k" or "minpts"/"eps")
        @returns loaded clustering results (clusters and centers)
        """
        with open(self.get_path(dataset, algorithm, metric, **kwargs), 'r') as handle:
            payload = json.load(handle)
        return payload["clusters"], payload["centers"]

    def save_set(self, dataset, algorithm, metric, clusters, centers, **kwargs):
        """
        saves cluster results in a json file
        @param dataset string with the name of the dataset
        @param algorithm string with the name of the algorithm
        @param metric string with the name of the distance measure
        @param clusters cluster assignments to store
        @param centers cluster centers (numpy arrays are converted to lists)
        @param **kwargs algorithm specific parameters ("k" or "minpts"/"eps")
        """
        target = self.get_path(dataset, algorithm, metric, **kwargs)
        if isinstance(centers, np.ndarray):
            centers = centers.tolist()
        with open(target, 'w') as handle:
            json.dump({"clusters": clusters, "centers": centers}, handle, ensure_ascii=False)
|
from django.contrib import admin
from .models import Book,Writer
# Register your models here.
# Expose Book and Writer in the Django admin with the default ModelAdmin.
admin.site.register(Book)
admin.site.register(Writer)
|
#!/usr/bin/env python
# calculate the DOS up to a particular band index, default is the last band calculated in vasp
from pv_anal_utils import vasp_anal_read_eigen
from argparse import ArgumentParser
import sys,math
import numpy as np
import subprocess as sp
def gaussian(x_array, bande, sigma):
    """Normalised Gaussian of width *sigma* centred at *bande*,
    evaluated element-wise on *x_array*."""
    norm = 1.0E0 / math.sqrt(2.0E0 * math.pi) / sigma
    z = (x_array - bande) / sigma
    return norm * np.exp(-np.power(z, 2) / 2.0E0)
def Main(ArgList):
    """Compute a Gaussian-broadened density of states from vasp
    eigenvalues, write it to dos.dat and optionally preview it.

    NOTE: Python 2 script (print statements, xrange, integer division
    in nelec/2).
    """
    # =================== Parser ==========================
    description = '''
    Calculate the DOS up to a particular band index. Default is the last band calculated in vasp.
    '''
    parser = ArgumentParser(description=description)
    parser.add_argument("-D",dest='debug',help="flag for debug mode",action='store_true')
    parser.add_argument("-p",dest='prev',help="flag for preview the DOS",action='store_true')
    parser.add_argument("-g",dest='vbm_zero',help="flag to set the VBM as energy zero. Default the Fermi energy",action='store_true')
    parser.add_argument("-n",dest='bandf',help="the last band till which you want to calculate the DOS",type=int,default=10000)
    parser.add_argument("-s",dest='sigma',help="the Gaussian smearing for DOS (in eV)",type=float,default=0.05)
    opts = parser.parse_args()
    # =================== Parameters ======================
    # number of DOS grid points
    ngrid = 3000
    # Gaussian smearing parameter
    sigma = opts.sigma
    # Fermi energy extracted from OUTCAR (last E-fermi line)
    efermi = float(sp.check_output("grep E-fermi OUTCAR | tail -1",shell=True).split()[2])
    # =====================================================
    band_struct = vasp_anal_read_eigen(debug=opts.debug)
    if opts.debug:
        print band_struct[0],len(band_struct)
    # band_struct[0] appears to carry (nelec, nkp, nbands); following
    # entries are per-k-point band energies, 1-indexed -- TODO confirm
    # against pv_anal_utils.vasp_anal_read_eigen.
    nelec = band_struct[0][0]
    nkp = band_struct[0][1]
    bandmax = band_struct[0][2]
    # Clamp the requested last band to what was actually computed
    # (10000 is the "not given" sentinel default).
    if opts.bandf == 10000 or opts.bandf > bandmax:
        nbands = bandmax
    else:
        nbands = opts.bandf
    if opts.debug:
        print nbands
    # VBM: maximum over k-points of the highest occupied band
    # (band index nelec/2, Python 2 integer division).
    VBM = max([band_struct[ikp+1][nelec/2] for ikp in xrange(nkp)])
    dos_min = min([band_struct[ikp+1][1] for ikp in xrange(nkp)])
    dos_max = max([band_struct[ikp+1][nbands] for ikp in xrange(nkp)])
    # Extend the energy window so edge Gaussians are not clipped.
    dos_min = dos_min - 5*sigma
    dos_max = dos_max + 5*sigma
    grid_array = np.linspace(dos_min,dos_max,ngrid)
    dos_array = np.zeros(ngrid)
    # Accumulate one broadened Gaussian per band energy per k-point.
    for ikp in xrange(1,nkp+1):
        for bande in band_struct[ikp][1:nbands+1]:
            dos_array = dos_array + gaussian(grid_array,bande,sigma)
    # Shift the energy zero to the VBM (-g) or to the Fermi level (default).
    if opts.vbm_zero:
        grid_array = grid_array - VBM
    else:
        grid_array = grid_array - efermi
    # Two-column output: energy, DOS value.
    with open('dos.dat','w') as o:
        for i in xrange(ngrid):
            o.write("%8.4f %15.6f\n" % (grid_array[i],dos_array[i]))
    # Optional on-screen preview of the computed DOS.
    if opts.prev:
        import pylab
        pylab.xlabel("Energy (eV)")
        pylab.ylabel("Density of States (arb. unit)")
        pylab.plot(grid_array,dos_array,color='black')
        pylab.xlim([-4,4])
        pylab.ylim([0,300])
        pylab.show()
    # =====================================================


if __name__ == "__main__":
    Main(sys.argv)
|
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
import re
import urllib
from urllib.request import urlopen
import http.client
import string
from ast import literal_eval
# pip install BeautifulSoup4
class Scraper:
    """Fetch one page with BeautifulSoup and extract keyword-matching text.

    The page is downloaded and parsed once in the constructor; every
    scrape method then works on the cached soup (``self.content``),
    which is None when the download or parse failed.
    """

    def __init__(self, link, keyword):
        self.link = link
        self.content = self.store(link)
        self.keyword = keyword

    def store(self, link):
        """Download and parse *link*; return the soup or None on failure."""
        try:
            return BeautifulSoup(urlopen(link), "html.parser")
        except urllib.error.HTTPError:
            return None
        except urllib.error.URLError:
            return None
        except ValueError:
            return None
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Any other parse/IO failure
            # still yields None (best-effort download is intentional).
            return None

    def scrape(self, tag):
        """Return the text lists of every <tag> node that contains one of
        the instance keywords inside a sentence longer than 100 chars."""
        tagList = []
        if self.content is not None:
            for node in self.content.findAll(tag):
                paragraph = node.findAll(text=True)
                for sentence in paragraph:
                    keywordList = self.keyword.split(" ")
                    for keyword in keywordList:
                        if keyword in sentence.lower() and len(sentence) > 100:
                            tagList.append(node.findAll(text=True))
                            break
        return tagList

    # TODO: also filter out entries that never become absolute http links.
    def scrapeLinks(self, absoluteLink):
        """Return all href targets on the page, prefixing relative ones
        with *absoluteLink*; None when the page failed to load."""
        if self.content is None:
            return None
        sanitisedLinkList = []
        for anchor in self.content.find_all('a'):
            item = anchor.get("href")
            if item is not None:
                if not "http" in item:
                    item = absoluteLink + item
                sanitisedLinkList.append(item)
        return sanitisedLinkList

    def scrapeBBCNewsArticle(self):
        """Extract title, keyword-matching paragraphs and date from a BBC
        article page.

        @returns dict with keys 'title', 'content', 'date_created', or
                 None when the page failed to load.
        """
        listOfKnownHeaders = ["ideas-page__header", "story-body__h1"]
        if self.content is None:
            return None
        title = ""
        contents = []
        date = ""
        for item in self.scrape('h1'):
            title = item.encode_contents()
            # BUG FIX: the original tested `listOfKnownHeaders in item`
            # (membership of the whole *list* in the item), which is never
            # what was intended; look for any known header marker instead.
            # NOTE(review): scrape() returns lists of strings, so
            # `item.encode_contents()` looks wrong too -- confirm intent.
            if any(header in str(item) for header in listOfKnownHeaders):
                title = item.encode_contents()
        for node in self.content.findAll('p'):
            paragraph = node.findAll(text=True)
            for sentence in paragraph:
                if self.checkIfContainsKeywordPerParagraph(sentence):
                    contents.append(node.findAll(text=True))
        # The publication date lives in a div's data-datetime attribute.
        for node in self.content.findAll('div'):
            if node.get("data-datetime") is not None:
                date = node.get("data-datetime")
                break
        # Renamed from `dict`, which shadowed the builtin.
        article = {}
        article["title"] = title
        article["content"] = contents
        article["date_created"] = date
        return article

    def checkIfContainsKeywordPerParagraph(self, sentence):
        """True when the instance keyword occurs verbatim in *sentence*."""
        return self.keyword in sentence
if __name__ == '__main__':
    # Smoke test: scrape a Business Insider article with an empty keyword
    # (matches every long sentence) and print the normalised paragraphs.
    scraper = Scraper("http://www.businessinsider.com/13-burning-personal-finance-questions-2013-3/?IR=T", "")
    dictionary = scraper.scrape('p')
    list = []  # NOTE(review): shadows the builtin `list`; rename if touched.
    # print(dictionary)
    for item in dictionary:
        for item2 in item:
            # Normalise whitespace in each text fragment.
            # NOTE(review): the first replace target looks like it was
            # meant to be a non-breaking space -- confirm the source byte.
            item2 = item2.replace(" ", "").replace("\n", " ").replace("\r", " ")
            list.append(item2)
    for item in list:
        print(item)
|
import os
import re
import sys
import textwrap
import time
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from . import resource_suite
from .. import lib
from .. import paths
from ..core_file import temporary_core_file
from ..configuration import IrodsConfig
class Test_Quotas(resource_suite.ResourceBase, unittest.TestCase):
plugin_name = IrodsConfig().default_rule_engine_plugin
class_name = 'Test_Quotas'
def setUp(self):
super(Test_Quotas, self).setUp()
def tearDown(self):
super(Test_Quotas, self).tearDown()
def test_iquota__3044(self):
pep_map = {
'irods_rule_engine_plugin-irods_rule_language': textwrap.dedent('''
acRescQuotaPolicy {
msiSetRescQuotaPolicy("on");
}
'''),
'irods_rule_engine_plugin-python': textwrap.dedent('''
def acRescQuotaPolicy(rule_args, callback, rei):
callback.msiSetRescQuotaPolicy('on')
''')
}
filename_1 = 'test_iquota__3044_1'
filename_2 = 'test_iquota__3044_2'
with temporary_core_file() as core:
core.add_rule(pep_map[self.plugin_name])
for quotatype in [['sgq', 'public']]: # group
for quotaresc in [self.testresc, 'total']: # resc and total
cmd = 'iadmin {0} {1} {2} 10000000'.format(quotatype[0], quotatype[1], quotaresc) # set high quota
self.admin.assert_icommand(cmd.split())
cmd = 'iquota'
self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'Nearing quota') # not over yet
lib.make_file(filename_1, 1024, contents='arbitrary')
cmd = 'iput -R {0} {1}'.format(self.testresc, filename_1) # should succeed
self.admin.assert_icommand(cmd.split())
cmd = 'iadmin cu' # calculate, update db
self.admin.assert_icommand(cmd.split())
cmd = 'iquota'
self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'Nearing quota') # not over yet
cmd = 'iadmin {0} {1} {2} 40'.format(quotatype[0], quotatype[1], quotaresc) # set low quota
self.admin.assert_icommand(cmd.split())
cmd = 'iquota'
self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'OVER QUOTA') # confirm it's over
lib.make_file(filename_2, 1024, contents='arbitrary')
cmd = 'iput -R {0} {1}'.format(self.testresc, filename_2) # should fail
self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'SYS_RESC_QUOTA_EXCEEDED')
cmd = 'istream write nopes'
self.admin.assert_icommand(cmd.split(), 'STDERR', 'Error: Cannot open data object.', input='some data')
cmd = 'iadmin {0} {1} {2} 0'.format(quotatype[0], quotatype[1], quotaresc) # remove quota
self.admin.assert_icommand(cmd.split())
cmd = 'iadmin cu' # update db
self.admin.assert_icommand(cmd.split())
cmd = 'iput -R {0} {1}'.format(self.testresc, filename_2) # should succeed again
self.admin.assert_icommand(cmd.split())
cmd = 'irm -rf {0}'.format(filename_1) # clean up
self.admin.assert_icommand(cmd.split())
cmd = 'irm -rf {0}'.format(filename_2) # clean up
self.admin.assert_icommand(cmd.split())
def test_iquota_empty__3048(self):
cmd = 'iadmin suq' # no arguments
self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'ERROR: missing username parameter') # usage information
cmd = 'iadmin sgq' # no arguments
self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'ERROR: missing group name parameter') # usage information
def test_filter_out_groups_when_selecting_user__issue_3507(self):
    """Setting a *user* quota with a group name must be rejected (issue 3507)."""
    group = 'test_group_3507'
    self.admin.assert_icommand(['igroupadmin', 'mkgroup', group])
    try:
        # 'suq' targets users only, so passing a group must fail with CAT_INVALID_USER.
        self.admin.assert_icommand(['iadmin', 'suq', group, 'demoResc', '0'],
                                   'STDERR_SINGLELINE', 'CAT_INVALID_USER')
    finally:
        # Always clean up the group, even if the assertion above fails.
        self.admin.assert_icommand(['iadmin', 'rmgroup', group])
|
from django.db import models
from users import models as user_model
from timezone_field import TimeZoneField
import pytz
from django.utils import timezone
class Clients(models.Model):
    """A client (customer) record with basic contact details."""

    client_name = models.CharField(max_length=50, blank=True, null=True)
    client_email_id = models.CharField(max_length=50, blank=False, null=False, unique=False)
    client_contact_mobile_number = models.CharField(max_length=50, blank=True, null=True)
    # Audit timestamps: set once on insert / refreshed on every save.
    created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False)
    last_updated_on = models.DateTimeField(auto_now=True, blank=False, null=False)

    class Meta:
        db_table = 'clients'

    def __str__(self):
        return "{}".format(self.client_name)
class ClientMeta(models.Model):
    """Arbitrary key/value metadata attached to a client."""

    client = models.ForeignKey(Clients, null=False, blank=False, on_delete=models.CASCADE)
    meta_key = models.CharField(max_length=255, blank=False, null=False)
    meta_value = models.CharField(max_length=255, blank=False, null=False)
    # Audit timestamps: set once on insert / refreshed on every save.
    created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False)
    last_updated_on = models.DateTimeField(auto_now=True, blank=False, null=False)

    class Meta:
        # At most one value per key per client.
        unique_together = ('client', 'meta_key')
        db_table = 'client_meta'
class ClientUserMapping(models.Model):
    """Many-to-many link between clients and users."""

    client = models.ForeignKey(Clients, null=False, blank=False, on_delete=models.CASCADE)
    user = models.ForeignKey(user_model.CustomUser, on_delete=models.CASCADE, null=False, blank=False)
    # Audit timestamps: set once on insert / refreshed on every save.
    created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False)
    last_updated_on = models.DateTimeField(auto_now=True, blank=False, null=False)

    class Meta:
        db_table = 'client_user_mapping'
        # Each (client, user) pair may be linked only once.
        unique_together = ('client', 'user')

    def __str__(self):
        return "{} - {}".format(self.client, self.user)
class Services(models.Model):
    """A bookable service, with audit columns tracking who created/changed it."""

    service_name = models.CharField(max_length=255, blank=False, null=False, unique=True)
    location = models.CharField(max_length=255, blank=True, null=True)
    # Audit timestamps: set once on insert / refreshed on every save.
    created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False)
    last_updated_on = models.DateTimeField(auto_now=True, blank=False, null=False)
    created_by = models.ForeignKey(
        user_model.CustomUser, related_name='service_created_by',
        null=False, blank=False, on_delete=models.CASCADE)
    last_updated_by = models.ForeignKey(
        user_model.CustomUser, related_name='service_last_updated_by',
        null=False, blank=False, on_delete=models.CASCADE)

    class Meta:
        db_table = 'services'

    def __str__(self):
        return "{}".format(self.service_name)
class ServicesMeta(models.Model):
    """Arbitrary key/value metadata attached to a service."""

    service = models.ForeignKey(Services, null=False, blank=False, on_delete=models.CASCADE)
    meta_key = models.CharField(max_length=255, blank=False, null=False)
    meta_value = models.CharField(max_length=255, blank=False, null=False)
    # Audit timestamps: set once on insert / refreshed on every save.
    created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False)
    last_updated_on = models.DateTimeField(auto_now=True, blank=False, null=False)
    created_by = models.ForeignKey(
        user_model.CustomUser, related_name='service_meta_created_by',
        null=False, blank=False, on_delete=models.CASCADE)
    last_updated_by = models.ForeignKey(
        user_model.CustomUser, related_name='service_meta_last_updated_by',
        null=False, blank=False, on_delete=models.CASCADE)

    class Meta:
        # At most one value per key per service.
        unique_together = ('service', 'meta_key')
        db_table = 'service_meta'
class UserServiceMapping(models.Model):
    """Many-to-many link between services and users, with audit columns."""

    service = models.ForeignKey(
        Services, null=False, blank=False, on_delete=models.CASCADE)
    user = models.ForeignKey(
        user_model.CustomUser, on_delete=models.CASCADE, null=False, blank=False)
    # Audit timestamps: set once on insert / refreshed on every save.
    created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False)
    last_updated_on = models.DateTimeField(auto_now=True, blank=False, null=False)
    created_by = models.ForeignKey(
        user_model.CustomUser, related_name='user_service_map_created_by',
        null=False, blank=False, on_delete=models.CASCADE)
    last_updated_by = models.ForeignKey(
        user_model.CustomUser, related_name='user_service_map_last_updated_by',
        null=False, blank=False, on_delete=models.CASCADE)

    class Meta:
        db_table = 'user_service_mapping'
        # Each (service, user) pair may be linked only once.
        unique_together = ('service', 'user')

    def __str__(self):
        # BUG FIX: the original referenced self.user_username and
        # self.service_name, neither of which exists on this model, so
        # str() raised AttributeError.  Format the related objects instead,
        # mirroring ClientUserMapping.__str__.
        return "{} - {}".format(self.user, self.service)
class Appointments(models.Model):
    """A scheduled appointment between a user and a client for a service."""

    # NOTE(review): date_created duplicates created_on below (both
    # auto_now_add) — confirm whether one of the two can be retired.
    date_created = models.DateTimeField(
        auto_now_add=True, blank=False, null=False)
    user = models.ForeignKey(
        user_model.CustomUser, on_delete=models.CASCADE, null=False, blank=False)
    client = models.ForeignKey(
        Clients, null=False, blank=False, on_delete=models.CASCADE)
    start_time = models.DateTimeField(blank=False, null=False)
    end_time = models.DateTimeField(blank=False, null=False)
    # Soft-cancel flag plus optional free-text reason.
    cancelled = models.BooleanField(default=False)
    cancellation_reason = models.CharField(
        max_length=255, blank=True, null=True)
    service = models.ForeignKey(
        Services, null=False, blank=False, on_delete=models.CASCADE)
    # Timezone the appointment was booked in (any pytz zone; defaults to UTC).
    timezone_field = TimeZoneField(default='UTC', choices=[(tz, tz) for tz in pytz.all_timezones])
    notes = models.TextField(
        max_length=255, blank=True, null=True)
    # Sales-pipeline state of the appointment; restricted to the three choices.
    lead_status = models.CharField(
        max_length=50, blank=False, null=False, default='Pending', choices = [
            ('Pending', 'Pending'),
            ('Closed', 'Closed'),
            ('Not Interested', 'Not Interested')
        ])
    # Presumably an external calendar/booking event id — TODO confirm with callers.
    event_identifier = models.CharField(
        max_length=255, blank=True, null=True, unique=False)
    # Audit timestamps: set once on insert / refreshed on every save.
    created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False)
    last_updated_on = models.DateTimeField(auto_now=True, blank=False, null=False)

    class Meta:
        db_table = 'appointments'
        ordering = ['start_time']

    def __str__(self):
        return "{} - {}".format(self.start_time, self.end_time)
'''
implements Caesar substitution cipher
Author: James Lyons
Created: 2012-04-28
'''
from .base import Cipher
class Caesar(Cipher):
    """Caesar substitution cipher: every alphabetic character has its
    alphabet index shifted by a fixed key; all other characters pass
    through unchanged (index mapping comes from the Cipher base class)."""

    def __init__(self, key=13):
        ''' key is an integer 0-25 used to encipher characters '''
        self.key = key % 26

    def encipher(self, string, keep_punct=False):
        if not keep_punct:
            string = self.remove_punctuation(string)
        return ''.join(
            self.i2a(self.a2i(ch) + self.key) if ch.isalpha() else ch
            for ch in string)

    def decipher(self, string, keep_punct=False):
        if not keep_punct:
            string = self.remove_punctuation(string)
        return ''.join(
            self.i2a(self.a2i(ch) - self.key) if ch.isalpha() else ch
            for ch in string)
if __name__ == '__main__':
    # Use the function-call form: a single parenthesised argument prints
    # identically under Python 2 (expression statement) and Python 3,
    # whereas the bare py2 `print` statement is a SyntaxError on py3.
    print('use "import pycipher" to access functions')
|
import sys
from learning_to_learn import train
from learning_to_learn.cdqn import create_agent_cdqn
ENV_NAME = 'Adam-Polynomial-Continuous-v0'
def main(argv):
    # Delegate to the shared training harness: train a CDQN agent on the
    # ENV_NAME environment and store the learned weights at the given
    # .h5 output path.  `argv` is forwarded untouched (CLI flags for train).
    train.main(argv,
               "output/polynomial-adam-cdqn/cdqn.h5",
               ENV_NAME,
               create_agent_cdqn)


if __name__ == '__main__':
    # Strip the program name before handing the arguments to main().
    main(sys.argv[1:])
|
# Read a count n, then n numbers, and print the square of each.
n = int(input("Podaj ilość liczb: "))

liczby = []
for i in range(1, n + 1):
    print("[", i, "]:", end='')
    # Parse each number once at input time; the original kept raw strings
    # and re-converted them with int() on every later use.
    liczby.append(int(input()))

# Iterate the values directly instead of indexing with range(len(...)).
for liczba in liczby:
    print(str(liczba), "^2 =", str(liczba ** 2))
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from copy import deepcopy
import mock
import pytest
from .common import FIXTURES_PATH
from .utils import mocked_perform_request
def test_flatten_json(check):
    # `check` is a pytest fixture: a factory returning a configured nginx check.
    check = check({})
    with open(os.path.join(FIXTURES_PATH, 'nginx_plus_in.json')) as f:
        parsed = check.parse_json(f.read())
    # Sort so the comparison is independent of flattening order.
    parsed.sort()

    # NOTE(review): eval() is acceptable only because the fixture ships with
    # the repo; never eval data from an untrusted source.
    with open(os.path.join(FIXTURES_PATH, 'nginx_plus_out.python')) as f:
        expected = eval(f.read())

    # Check that the parsed test data is the same as the expected output
    assert parsed == expected
def test_flatten_json_timestamp(check):
    """An ISO-8601 timestamp value is flattened to an epoch-seconds gauge."""
    check = check({})
    payload = """
    {"timestamp": "2018-10-23T12:12:23.123212Z"}
    """
    result = check.parse_json(payload)
    assert result == [('nginx.timestamp', 1540296743, [], 'gauge')]
def test_plus_api(check, instance, aggregator):
    """Exercise the plus-API code path against mocked endpoint payloads and
    verify the total number of submitted metric points."""
    cfg = deepcopy(instance)
    cfg['use_plus_api'] = True
    c = check(cfg)
    # Replace the HTTP layer so every endpoint serves a canned fixture.
    c._perform_request = mock.MagicMock(side_effect=mocked_perform_request)
    c.check(cfg)

    total = sum(len(aggregator.metrics(name)) for name in aggregator.metric_names)
    assert total == 1180
def test_nest_payload(check):
    """_nest_payload wraps a payload under the given chain of keys."""
    c = check({})
    inner = {"key1": "val1", "key2": "val2"}
    nested = c._nest_payload(["foo", "bar"], inner)
    assert nested == {"foo": {"bar": inner}}
@pytest.mark.parametrize(
    'test_case, extra_config, expected_http_kwargs',
    [
        (
            "legacy auth config",
            {'user': 'legacy_foo', 'password': 'legacy_bar'},
            {'auth': ('legacy_foo', 'legacy_bar')},
        ),
        ("new auth config", {'username': 'new_foo', 'password': 'new_bar'}, {'auth': ('new_foo', 'new_bar')}),
        ("legacy ssl config True", {'ssl_validation': True}, {'verify': True}),
        ("legacy ssl config False", {'ssl_validation': False}, {'verify': False}),
    ],
)
def test_config(check, instance, test_case, extra_config, expected_http_kwargs):
    """Legacy and new auth/ssl options must map onto the expected requests kwargs."""
    cfg = deepcopy(instance)
    cfg.update(extra_config)
    c = check(cfg)

    with mock.patch('datadog_checks.base.utils.http.requests') as r:
        r.get.return_value = mock.MagicMock(status_code=200, content='{}')
        c.check(cfg)

        # Every kwarg we don't care about matches anything; the parametrized
        # expectation pins down only the kwargs under test.
        expected_kwargs = dict(
            auth=mock.ANY, cert=mock.ANY, headers=mock.ANY,
            proxies=mock.ANY, timeout=mock.ANY, verify=mock.ANY,
        )
        expected_kwargs.update(expected_http_kwargs)
        r.get.assert_called_with('http://localhost:8080/nginx_status', **expected_kwargs)
|
"""
demo15_vc.py 视频捕获
"""
import cv2 as cv
# 获取视频捕获设备
video_capture = cv.VideoCapture(0)
while True:
frame = video_capture.read()[1]
cv.imshow('frame', frame)
# 每隔33毫秒自动更新图像
if cv.waitKey(33) == 27:
break
video_capture.release()
cv.destroyAllWindows()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.