blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1fc94bb3c75f361277aaab9e3bdbef6a2fb527d9 | b34c4cb5d20e9a54b86fc90360e3ec44ea2ee4e7 | /Parser/DataTypes/ClassBaseNetwork.py | b8e177f147ae9515441e8832da423c8c4e5c983e | [
"MIT"
] | permissive | Errare-humanum-est/HeteroGen | 3bceb756d9edfd3fa771848ace2c16993142ec71 | 600a7bde441cc1365a465746e15564bd8de8fc37 | refs/heads/main | 2023-04-13T17:13:38.489786 | 2022-01-07T00:45:34 | 2022-01-07T00:45:34 | 426,768,831 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 12,549 | py | # Copyright (c) 2021. Nicolai Oswald
# Copyright (c) 2021. University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
#
#
from typing import Dict, List, Union
from antlr3.tree import CommonTree
from collections import OrderedDict
from Parser.ProtoCCLexer import tokenNamesMap
from Debug.Monitor.ClassDebug import Debug
from Algorithms.General.AuxStateHandler import AuxStateHandler
from DataObjects.ClassMultiDict import MultiDict
from Parser.NetworkxParser.ClassProtoParserBase import ProtoParserBase
class NetworkType:
    """String constants shared by the network classes in this module."""
    # Virtual-channel ordering kinds, compared against parsed channel definitions.
    k_ordered = "Ordered"
    k_unordered = "Unordered"
    # Routing variables
    k_src = "src"
    k_dst = "dst"
## MsgType
#
# Generic message type format, it holds the information defined in the message object definition of pcc files
# Dependency: None
class MsgType:
    """Message type parsed from a pcc message-object definition.

    Holds the type name, the ordered payload variables (name -> AST node)
    and whether the message carries the data/cache-line field.
    """
    def __init__(self, msg_object: CommonTree):
        children = msg_object.getChildren()
        # The first child is the message type name; the rest are payload objects.
        self.msg_type: str = str(children[0])
        self.msg_vars: OrderedDict[str, CommonTree] = OrderedDict()
        self.data_flag: bool = False
        self.register_var(children)
    def __str__(self):
        return self.msg_type
    def register_var(self, payload_objects: List[CommonTree]):
        """Record the payload variables declared after the type name."""
        for ind in range(1, len(payload_objects)):
            if payload_objects[ind].getText() == ProtoParserBase.k_data:
                self.data_flag = True
            # NOTE(review): the unconditional `break` means only the FIRST
            # child of each payload object is inspected -- presumably the
            # variable-name node; confirm against the grammar.
            for var_name in payload_objects[ind].getChildren():
                if str(var_name) not in tokenNamesMap.values():
                    self.msg_vars[str(var_name)] = payload_objects[ind]
                break
    def update_variable_names(self, new_sub_id: str) -> List[str]:
        """Append `new_sub_id` to every payload variable name (except the
        data/cache-line entry) and rename it inside the AST as well.

        The data entry is popped and re-inserted unchanged so the
        OrderedDict keeps the original relative ordering of entries.
        Returns the list of original names that were renamed.
        """
        variable_keys = list(self.msg_vars.keys())
        ret_variable_keys = []
        for ind in range(0, len(variable_keys)):
            variable = variable_keys[ind]
            # Do not rename the cache line
            if str(self.msg_vars[variable]) == ProtoParserBase.k_data:
                # Make a temporary pointer on data object
                tmp_var = self.msg_vars[variable]
                # Clear old dict entry
                self.msg_vars.pop(variable)
                # Preserve position of data object in ordered dict
                self.msg_vars[variable] = tmp_var
                continue
            ret_variable_keys.append(variable)
            # Update dict entry
            self.msg_vars[variable + new_sub_id] = AuxStateHandler.cond_rename_operation(self.msg_vars[variable],
                                                                                         variable,
                                                                                         variable + new_sub_id,
                                                                                         [])
            # Clear old dict entry
            self.msg_vars.pop(variable)
        return ret_variable_keys
## Channel
#
# Virtual channel in the network
# Dependency: MsgType
class Channel(NetworkType):
    """A virtual channel of the network: a name plus an ordering kind."""

    def __init__(self, vc_name: str, vc_type: str):
        self.vc_name: str = vc_name    # virtual channel name
        self.vc_type: str = vc_type    # ordered / unordered
        # Message types that are routed over this virtual channel.
        self.msg_types: List[MsgType] = list()

    def __str__(self) -> str:
        # A channel prints as its name.
        return self.vc_name
## BaseMessage
#
# Generic message format
# Dependency: MsgType, Channel
class BaseMessage:
    """Generic message: an identifier bound to a message type and a VC.

    NOTE(review): __hash__ is defined over mutable fields (id can be
    renamed later) and no __eq__ is defined -- identity equality applies.
    """

    def __init__(self,
                 name: str,
                 msg_type: MsgType = None,
                 vc: Channel = None):
        self.id: str = name
        self.msg_type: MsgType = msg_type
        self.vc: Channel = vc
        # Message objects derived from this base message.
        self.msg_obj_list: List = []
        self._register_type_on_vc()

    def _register_type_on_vc(self):
        # Make the channel aware of this message's type (no duplicates).
        if self.vc and self.msg_type not in self.vc.msg_types:
            self.vc.msg_types.append(self.msg_type)

    def __str__(self):
        return self.id

    def __hash__(self):
        return hash((self.id, str(self.msg_type), str(self.vc)))

    def has_data(self) -> bool:
        # Delegate to the message type's data flag when a type is attached.
        return self.msg_type.data_flag if self.msg_type else False

    def set_vc(self, vc: Channel):
        """Attach the message to a virtual channel and register its type."""
        self.vc = vc
        self._register_type_on_vc()

    def register_message_object(self, msg_object: 'Message'):
        self.msg_obj_list.append(msg_object)

    def remove_message_object(self, msg_object: 'Message') -> bool:
        """Remove a derived message object; True when it was present."""
        if msg_object not in self.msg_obj_list:
            return False
        self.msg_obj_list.remove(msg_object)
        return True

    def p_message(self):
        # Human-readable "id, type, vc" triple used in warnings.
        return str(self.id) + ', ' + str(self.msg_type) + ', ' + str(self.vc)
## BaseNetwork
#
# Generic message format
# Dependency: MsgType, Channel
class BaseNetwork(NetworkType):
    """Network description assembled from the parsed pcc file.

    Tracks the ordered/unordered virtual channels, the message types and
    the base messages of the protocol.
    """
    def __init__(self):
        # AST node of the network section (set by gen_virtual_channels).
        self.network_node: CommonTree = None
        # Virtual channels keyed by name, split by ordering guarantee.
        self.ordered_networks: Dict[str, Channel] = {}
        self.unordered_networks: Dict[str, Channel] = {}
        # All message types by name; data_msg_types is the subset whose
        # payload carries the data/cache-line field.
        self.msg_types: Dict[str, MsgType] = {}
        self.data_msg_types: Dict[str, MsgType] = {} # Data msg type names, should be included in the message
        # and is subset of msgNode
        # There must not be two messages with the same message identifiers in different virtual channels or different
        # msg_types
        #self.base_message_dict: Dict[str, BaseMessage] = {}
        # NOTE(review): MultiDict appears to map a name to a LIST of base
        # messages (see add_new_base_message / merge_networks) -- confirm.
        self.base_message_dict: MultiDict = MultiDict()
    def gen_virtual_channels(self, network: CommonTree):
        """Populate the channel maps from the network AST.

        Each channel definition is (ordering-kind, channel-name);
        Debug.perror is passed a condition flagging a name already
        declared with the opposite ordering.
        """
        self.network_node = network
        for channel_obj in network.getChildren():
            channel_def = channel_obj.getChildren()
            if str(channel_def[0]) == self.k_unordered and str(channel_def[1]) not in self.unordered_networks:
                Debug.perror("Network has been declared to be ordered before",
                             str(channel_def[1]) not in self.ordered_networks)
                self.unordered_networks[str(channel_def[1])] = Channel(str(channel_def[1]), self.k_unordered)
            if str(channel_def[0]) == self.k_ordered and str(channel_def[1]) not in self.ordered_networks:
                Debug.perror("Network has been declared to be unordered before",
                             str(channel_def[1]) not in self.unordered_networks)
                self.ordered_networks[str(channel_def[1])] = Channel(str(channel_def[1]), self.k_ordered)
    def gen_msg_type(self, msg_object: CommonTree):
        """Register the MsgType parsed from `msg_object` (and its data subset)."""
        msg_type = MsgType(msg_object)
        self.msg_types[str(msg_type)] = msg_type
        if msg_type.data_flag:
            self.data_msg_types[str(msg_type)] = msg_type
    def get_virtual_channel(self, channel_name: str) -> Union[Channel, None]:
        """Look the channel up in both maps; None when it does not exist."""
        if channel_name in self.ordered_networks:
            return self.ordered_networks[channel_name]
        elif channel_name in self.unordered_networks:
            return self.unordered_networks[channel_name]
        return None
    def add_new_base_message(self, new_base_message: BaseMessage) -> BaseMessage:
        """Add a base message, deduplicating exact (msg_type, vc) matches.

        Returns the already-registered message when an identical one
        exists; otherwise warns about identifier collisions and stores
        the new message (the MultiDict keeps all same-named entries).
        """
        if str(new_base_message) in self.base_message_dict:
            exist_msgs = self.base_message_dict[str(new_base_message)]
            # Check if exactly the same message already exists
            for exist_msg in exist_msgs:
                if new_base_message.msg_type == exist_msg.msg_type and new_base_message.vc == exist_msg.vc:
                    return exist_msg
            # Otherwise throw warnings
            # The base messages should have the same message type and virtual channel, throw warnings
            Debug.pwarning("Messages have same identifiers, but are of different message type: " +
                           new_base_message.p_message() + ' | ' +
                           exist_msgs[0].p_message(),
                           new_base_message.msg_type == exist_msgs[0].msg_type)
            Debug.pwarning("Messages have same identifiers, but are assigned to different virtual channels: " +
                           new_base_message.p_message() + ' | ' +
                           exist_msgs[0].p_message(),
                           new_base_message.vc == exist_msgs[0].vc)
        self.base_message_dict[str(new_base_message)] = new_base_message
        return new_base_message
    ## By modifying the existing dict all pointers to the dict remain valid and don't need to be updated
    # Dependency: FlatArchitecture
    def update_base_message_names(self, new_sub_id: str):
        """Append `new_sub_id` to every base-message id and to its dict key."""
        for base_msg_name in list(self.base_message_dict.keys()):
            for base_msg in self.base_message_dict[base_msg_name]:
                # Update msg_id_s
                base_msg.id = base_msg.id + new_sub_id
            # Update dict entry
            self.base_message_dict[base_msg_name+new_sub_id] = self.base_message_dict[base_msg_name]
            # Clear old dict entry
            self.base_message_dict.pop(base_msg_name)
    def update_msg_type_names(self, new_sub_id: str):
        """Append `new_sub_id` to every message-type name, key and variable."""
        msg_types_keys = list(self.msg_types.keys())
        for msg_type in msg_types_keys:
            # Update msg types
            self.msg_types[msg_type].msg_type = self.msg_types[msg_type].msg_type + new_sub_id
            # Update dict entry
            self.msg_types[msg_type + new_sub_id] = self.msg_types[msg_type]
            # Update the variable names
            self.msg_types[msg_type + new_sub_id].update_variable_names(new_sub_id)
            # Clear old dict entry
            self.msg_types.pop(msg_type)
    def modify_variable_names(self, cur_const: str, new_const: str):
        """Rename `cur_const` to `new_const` inside every payload-variable AST."""
        for msg_type in self.msg_types:
            for msg_var in self.msg_types[msg_type].msg_vars:
                msg_var_object = self.msg_types[msg_type].msg_vars[msg_var]
                self.msg_types[msg_type].msg_vars[msg_var] = \
                    AuxStateHandler.cond_rename_operation(msg_var_object, cur_const, new_const)
    def merge_networks(self, other: 'BaseNetwork'):
        """Merge channels, message types and base messages from `other`."""
        self.ordered_networks.update(other.ordered_networks)
        self.unordered_networks.update(other.unordered_networks)
        self.msg_types.update(other.msg_types)
        self.data_msg_types.update(other.data_msg_types) # Data msg type names, should be included in the message
        # and is subset of msgNode
        for base_message in other.base_message_dict:
            if base_message in self.base_message_dict:
                self.base_message_dict[base_message] += other.base_message_dict[base_message]
            else:
                self.base_message_dict[base_message] = other.base_message_dict[base_message]
    # Print all the messages
    def p_messages(self):
        # Intentionally disabled debug printer; old implementation kept below.
        pass
        #self.p_header("\nMessages")
        #for entry in self.msgTypes:
        # self.pdebug(entry)
#self.pdebug('\n') | [
"nicolai.oswald@ed.ac.uk"
] | nicolai.oswald@ed.ac.uk |
3951a840c8f398942a0c28d4e5f40a8e9f9f69a1 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2015_03_17_plastic_polycrystal/calibration.py | f385c0c6a6ac236b52df962dea3b4c3b32f84600 | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,830 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 23 14:25:50 2014
This script performs the MKS calibration given the microstructure function
and the FIP response, both in frequency space.
@author: nhpnp3
"""
import time
import numpy as np
import functions as rr
from functools import partial
import tables as tb
def calibration_procedure(el, H, ns, set_id, step, comp, wrt_file):
# open HDF5 file
base = tb.open_file("D_%s%s_s%s.h5" % (ns, set_id, step), mode="r")
# retrieve data from HDF5 file
resp = base.get_node('/', 'r%s' % comp)
r_fft = resp.r_fft[...]
M = base.root.msf.M[...]
# close the HDF5 file
base.close()
start = time.time()
specinfc = np.zeros((H, el**3), dtype='complex64')
# here we perform the calibration for the scalar FIP
specinfc[:, 0] = rr.calib(0, M, r_fft, 0, H, el, ns)
[specinfc[:, 1], p] = rr.calib(1, M, r_fft, 0, H, el, ns)
# calib_red is simply calib with some default arguments
calib_red = partial(rr.calib, M=M, r_fft=r_fft,
p=p, H=H, el=el, ns=ns)
specinfc[:, 2:(el**3)] = np.asarray(map(calib_red, range(2, el**3))).swapaxes(0, 1)
# open HDF5 file
base = tb.open_file("infl_%s%s_s%s.h5" % (ns, set_id, step), mode="a")
# create a group one level below root called infl[comp]
group = base.create_group('/',
'infl%s' % comp,
'influence function for component %s' % comp)
base.create_array(group,
'infl_coef',
specinfc,
'array of influence coefficients')
# close the HDF5 file
base.close()
end = time.time()
timeE = np.round((end - start), 3)
msg = 'Calibration, component %s: %s seconds' % (comp, timeE)
rr.WP(msg, wrt_file)
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
a01a5b1f58abd518d11593d8bf433d842f768037 | 8f6aa9ac9c8c2e409875bbf36fbc49b3eb37d88b | /enthought/pyface/i_image_cache.py | 4a5bd9b1c0f48877eacba33f5ee9f7ac19573d67 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/etsproxy | 5660cf562c810db2ceb6b592b6c12274bce96d73 | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | refs/heads/master | 2023-03-27T04:51:29.297305 | 2020-12-02T09:05:18 | 2020-12-02T09:05:18 | 1,632,969 | 3 | 1 | NOASSERTION | 2020-12-02T09:05:20 | 2011-04-18T22:29:56 | Python | UTF-8 | Python | false | false | 50 | py | # proxy module
from pyface.i_image_cache import *
| [
"ischnell@enthought.com"
] | ischnell@enthought.com |
881c1ecfecdff27a1af746af1927b4b29affa6c8 | 643e2e1b0a143b30998da4d37c1423860ec4f525 | /api/models.py | 034291c4b46426454b34640ec42af2915fe8c258 | [] | no_license | SergeRNR/players | 75c9347d070818b1915c663aefc64a0b8ac3e470 | ae9533ba2106d78c4322a0e4ec26e2f3c325fbd9 | refs/heads/master | 2021-01-13T15:55:05.154445 | 2017-01-10T08:17:52 | 2017-01-10T08:17:52 | 76,788,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | from __future__ import division
from django.db import models
class Player(models.Model):
    """A player with aggregate battle statistics and per-day derived metrics."""
    name = models.CharField(max_length=50)
    battles_total = models.IntegerField(default=0)
    wins_total = models.IntegerField(default=0)
    days_total = models.IntegerField(default=0)
    vehicles_x = models.IntegerField(default=0)
    exp_total = models.IntegerField(default=0)
    is_hidden = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)

    def rating(self):
        # Wins per recorded day, rounded to 2 decimals; 0 when no days yet.
        if self.days_total <= 0:
            return 0
        return round(self.wins_total / self.days_total, 2)

    def exp_avg(self):
        # Experience per recorded day, rounded to 2 decimals; 0 when no days yet.
        if self.days_total <= 0:
            return 0
        return round(self.exp_total / self.days_total, 2)
| [
"rnrserge@gmail.com"
] | rnrserge@gmail.com |
213b50314ea66a06fd151659d002ca34ca944bd1 | 845427e729f6f525c8903364850d6b9b9cd0e36a | /plot_creation_scripts/centrality_historgrams/SET_lap_cen_dis_200_epochs_fashion.py | c4ec73ac49e0396397fb87638a5cd04ddc856cdc | [
"MIT"
] | permissive | andrewjh9/CenBench | 96f25cfc4314a021d19eff11a15a933d9e0436ee | afd960b77ade05be2d2368bed3b47d54f7e229b6 | refs/heads/main | 2023-06-05T13:55:36.385142 | 2021-06-27T14:56:53 | 2021-06-27T14:56:53 | 364,514,761 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,188 | py | import matplotlib.pyplot as plt
import numpy as np
import tikzplotlib
# Plot per-epoch histograms of Laplacian centrality for a SET run on
# FashionMNIST, sampled every 25 epochs of a 200-epoch run.
axis_label_size = 16
font = {'family': 'normal',
        'weight': 'bold',
        'size': axis_label_size}
plt.rc('font', **font)

# Epochs sampled and the CSV naming pattern they were exported under.
_CSV_PATTERN = ('results/base_line_set/fashion/'
                'SET__fashion_mnist_for_200_epochs_20210603-164315'
                '_num_sd_None_cen_dis_lap_epoch_{}__sd_dis_.csv')
epochs = [0, 25, 50, 75, 100, 125, 150, 175]
datasets = [np.genfromtxt(_CSV_PATTERN.format(epoch), delimiter='')
            for epoch in epochs]

# Shared bin range across all epochs. Renamed from `min`/`max`, which
# shadowed the Python builtins.
cen_min = min(data.min() for data in datasets)
cen_max = max(data.max() for data in datasets)
bins = np.arange(cen_min, cen_max, 0.5)

# 2 x 4 grid, epochs increasing left-to-right, top-to-bottom
# (axes.flat iterates row-major, matching the original layout).
fig, axes = plt.subplots(nrows=2, ncols=4, sharex=True)
for ax, epoch, data in zip(axes.flat, epochs, datasets):
    ax.hist(data, bins=bins, label=str(epoch), color="k")

# NOTE(review): as in the original, these label only the last subplot;
# use fig.supxlabel/supylabel for figure-wide labels.
plt.xlabel("Laplacian centrality", fontsize=axis_label_size - 2)
plt.ylabel("Frequency", fontsize=axis_label_size - 2)
plt.tight_layout()
plt.savefig("plots/svg/histogram_lap/SET_historgram_fashionMNIST.svg")
| [
"a.j.heath@student.utwente.nl"
] | a.j.heath@student.utwente.nl |
dfd46b9c416c597dd87eaab4c87afb40fbfeb82b | d4ec4e00c8417b69ed10cd3af2d0414d6c145170 | /main.py | 00ebc41989920890a9254637f0a1d3f579a29921 | [] | no_license | liudong13518214786/blog_ | 0a3571e193b021e8783e5e3ffa34e69f7892fd2b | 69613f3b252df138cce96cdcb235b2f6b8b8e669 | refs/heads/master | 2022-12-11T11:31:16.070086 | 2019-06-11T10:01:58 | 2019-06-11T10:01:58 | 172,306,954 | 2 | 0 | null | 2022-12-08T01:38:14 | 2019-02-24T07:04:01 | Python | UTF-8 | Python | false | false | 3,571 | py | import os
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.log
import logging
from tornado.options import define, options
import time
import signal
from route import handler
from components.database import async_mysql_client
define("port", default=8000, help="run on the given port", type=int)
class APP:
    """Tornado application wrapper handling startup and graceful shutdown."""

    def __init__(self):
        # BUG FIX: this method was misspelled `__int__`, so it never ran as
        # the constructor; the attributes were only ever set in main_loop().
        self.http_server = None
        self.main_app = None
        self.io_loop = tornado.ioloop.IOLoop.instance()
        self.deadline = None

    def sig_handler(self, sig, frame):  # pylint:disable=W0613
        """Catch a termination signal and schedule shutdown on the IO loop."""
        logging.info('Caught signal: %s', sig)
        tornado.ioloop.IOLoop.instance().add_callback(self.shutdown)

    def shutdown(self):
        """Stop accepting new HTTP requests, then stop the IO loop."""
        logging.info('Stopping http server')
        self.http_server.stop()  # stop accepting new HTTP requests
        logging.info('Will shutdown in %s seconds ...', 1)
        self.deadline = time.time() + 1
        self.io_loop = tornado.ioloop.IOLoop.current()
        self.stop_loop()

    def stop_loop(self):
        """Stop the main IO loop."""
        print('Server Shutdown!')
        self.io_loop.stop()

    def init(self):
        """Register signal handlers for graceful shutdown; returns True."""
        signal.signal(signal.SIGTERM, self.sig_handler)  # termination signal
        signal.signal(signal.SIGQUIT, self.sig_handler)  # terminal quit signal
        signal.signal(signal.SIGINT, self.sig_handler)   # interrupt (Ctrl-C)
        signal.signal(signal.SIGTSTP, self.sig_handler)  # terminal stop (Ctrl-Z)
        return True

    def init_log(self):
        logging.info('init log done')

    def main_loop(self):
        """Parse options, build the application and run the HTTP server."""
        tornado.options.parse_command_line()
        self.init_log()
        # Install the custom formatter on every handler tornado created
        # (was a list comprehension used purely for its side effects).
        for log_handler in logging.getLogger().handlers:
            log_handler.setFormatter(LogFormatter())
        logging.info('init server...')
        self.main_app = Application()
        self.http_server = tornado.httpserver.HTTPServer(self.main_app)
        self.http_server.listen(options.port)
        logging.info("server running in port %s..." % options.port)
        tornado.ioloop.IOLoop.instance().start()
class LogFormatter(tornado.log.LogFormatter):
    """Log formatter with a fixed colourised layout and date format."""

    _FMT = '%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s'
    _DATEFMT = '%Y-%m-%d %H:%M:%S'

    def __init__(self):
        super(LogFormatter, self).__init__(fmt=self._FMT, datefmt=self._DATEFMT)
class Application(tornado.web.Application):
    """Tornado application wired up with the route table and app settings."""

    def __init__(self):
        base_dir = os.path.dirname(__file__)
        settings = {
            "template_path": os.path.join(base_dir, "templates"),
            "static_path": os.path.join(base_dir, "static"),
            "cookie_secret": "411444270345370dfbb49c29a0f1e3ce",
            "login_url": "/login",
            "debug": True,
        }
        super(Application, self).__init__(handler, **settings)
        logging.info("init Application....")
        # Shared asynchronous MySQL client, reachable as self.db in handlers.
        self.db = async_mysql_client
if __name__ == "__main__":
APP = APP()
if APP.init():
APP.main_loop() | [
"liudong@mshare.cn"
] | liudong@mshare.cn |
e497243564ae7dbf90e000fb19c7bc89e9a81b11 | beb5a81f3656b234ed9915128a4247def4efb518 | /MultiplicacionDeMatrices/MultiplicacionRecursiva/multiplicacionRecursiva.py | d819388dc9e62aaa0146c88d0dd52b435df66a3a | [] | no_license | ricrui3/AnalisisDeAlgoritmos | 35a3e94f141f79dd4cdd45eb55f49ebdc98e5765 | c5a35d8316a856199e19cfedd0eb29f94fa83305 | refs/heads/master | 2021-01-21T13:26:22.840935 | 2016-05-16T19:37:46 | 2016-05-16T19:37:46 | 44,082,003 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,810 | py | # coding=utf-8
__author__ = 'MrRubik'
def multiplicacionRecursiva(A, B):
    """Recursive (divide-and-conquer) product of two square matrices.

    Assumes n is a power of two. Base case n == 1 uses the element-wise
    product, which equals the matrix product for 1x1 matrices.
    Fix: uses integer division (`//`) so the indices stay ints and the
    function also works under Python 3, where `n / 2` yields a float.
    """
    n = len(A)
    if n == 1:
        return multiplicacionDeMatrices(A, B)
    C = crearMatriz(n, 0)
    a, b, c, d = partir(A)
    e, f, g, h = partir(B)
    # C = [[a*e + b*g, a*f + b*h], [c*e + d*g, c*f + d*h]]
    c1 = sumaDeMatrices(multiplicacionRecursiva(a, e), multiplicacionRecursiva(b, g))
    c2 = sumaDeMatrices(multiplicacionRecursiva(a, f), multiplicacionRecursiva(b, h))
    c3 = sumaDeMatrices(multiplicacionRecursiva(c, e), multiplicacionRecursiva(d, g))
    c4 = sumaDeMatrices(multiplicacionRecursiva(c, f), multiplicacionRecursiva(d, h))
    half = n // 2
    copiarMatricesPequenoAgrande(c1, C, 0, half, 0, half)
    copiarMatricesPequenoAgrande(c2, C, 0, half, half, n)
    copiarMatricesPequenoAgrande(c3, C, half, n, 0, half)
    copiarMatricesPequenoAgrande(c4, C, half, n, half, n)
    return C
def multiplicacionDeMatrices(A, B):
    """Element-wise (Hadamard) product of two n x n matrices.

    Used as the 1x1 base case of multiplicacionRecursiva, where the
    element-wise product equals the matrix product. Rewritten as a
    comprehension instead of pre-allocating and overwriting each cell.
    """
    n = len(A)
    return [[A[i][j] * B[i][j] for j in range(n)] for i in range(n)]
def sumaDeMatrices(A, B):
    """Element-wise sum of two n x n matrices, returned as a new matrix.

    Rewritten as a comprehension instead of pre-allocating via crearMatriz
    and overwriting every cell.
    """
    n = len(A)
    return [[A[i][j] + B[i][j] for j in range(n)] for i in range(n)]
def algortimoIngenuo(A, B):
    """Naive O(n^3) matrix product of two square matrices A and B.

    BUG FIX: the accumulation previously used A[j][k]; the standard
    definition is C[i][j] = sum over k of A[i][k] * B[k][j].
    """
    n = len(A)
    C = [[0] * n for _ in range(n)]
    for i in range(n):
        for j in range(n):
            for k in range(n):
                C[i][j] += A[i][k] * B[k][j]
    return C
def copiarMatrices(A, C, tamIniReng, tamFinReng, tamIniCol, tamFinCol):
    """Copy the sub-block A[tamIniReng:tamFinReng, tamIniCol:tamFinCol]
    into C starting at C[0][0] (C must already be large enough)."""
    for iaux, i in enumerate(range(tamIniReng, tamFinReng)):
        for jaux, j in enumerate(range(tamIniCol, tamFinCol)):
            C[iaux][jaux] = A[i][j]
def copiarMatricesPequenoAgrande(A, C, tamIniReng, tamFinReng, tamIniCol, tamFinCol):
    """Copy the small matrix A into the region
    C[tamIniReng:tamFinReng, tamIniCol:tamFinCol] of the larger matrix C."""
    for iaux, i in enumerate(range(tamIniReng, tamFinReng)):
        for jaux, j in enumerate(range(tamIniCol, tamFinCol)):
            C[i][j] = A[iaux][jaux]
def partir(A):
    """Split square matrix A into its four quadrants (a, b, c, d).

    Fix: uses integer division so the function also works under Python 3,
    where `n / 2` yields a float and breaks range()/indexing. Implemented
    with slicing instead of allocate-then-copy.
    """
    n = len(A)
    h = n // 2
    a = [row[:h] for row in A[:h]]  # top-left
    b = [row[h:] for row in A[:h]]  # top-right
    c = [row[:h] for row in A[h:]]  # bottom-left
    d = [row[h:] for row in A[h:]]  # bottom-right
    return a, b, c, d
def crearMatriz(n, num):
    """Build an n x n matrix whose cells all hold `num` (fresh row lists)."""
    return [[num] * n for _ in range(n)]
def inicio():
    # Interactive entry point (Python 2): read the matrix size n, build two
    # n x n all-ones matrices and print each row of their recursive product.
    n = input()  # Python 2 input() evaluates the typed expression
    A = crearMatriz(n, 1)
    B = crearMatriz(n, 1)
    C = multiplicacionRecursiva(A, B)
    for i in range(n):
        print C[i]
# A = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
# A = crearMatriz(5,2)
# B = crearMatriz(5,2)
# print(sumaDeMatrices(A,B))
# Runs the interactive demo whenever the module is executed or imported.
inicio()
| [
"ricrui3@gmail.com"
] | ricrui3@gmail.com |
967e62c87ca72becaaa5ed00772a6423521bbde0 | ad129c1c58fd9a00c2aefa0c25f2684e906521b4 | /cipher_par.py | 4d7845ac800418737770c9bb5ac65e7504d412d1 | [] | no_license | Prezens/cryptography | b938382491296f705df219fb8303a55e31289c7a | 39f30d7ab3d34ae4072c554fc647354ba2922514 | refs/heads/master | 2020-05-18T23:01:49.616410 | 2019-05-03T05:27:20 | 2019-05-03T05:27:20 | 184,702,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | text = 'HELLO'
# Expected ciphertext of `text` under the pair substitution defined below.
encrypt = 'GFKKP'
# Symmetric letter pairs: each key swaps with its value in both directions.
keys = {
    'A': 'B', 'C': 'D', 'E': 'F',
    'G': 'H', 'I': 'J', 'K': 'L',
    'M': 'N', 'O': 'P', 'Q': 'R',
    'S': 'T', 'U': 'V', 'W': 'X',
    'Y': 'Z'
}
def encrypt_decrypt(message, couple):
    """Apply a symmetric pair-substitution cipher to `message`.

    Each (k, v) pair in `couple` swaps both ways (k -> v and v -> k), so
    the same function both encrypts and decrypts. Characters that appear
    in no pair are dropped, matching the original behaviour.

    Fix: the original scanned every pair per character (O(n*m)) with a
    dead `else: pass` branch and built the result via quadratic string
    concatenation; this builds a two-way lookup once and joins.
    """
    table = {}
    for k, v in couple.items():
        table[k] = v
        table[v] = k
    return ''.join(table[ch] for ch in message if ch in table)
# Round-trip demo: encrypt the plaintext, then decrypt the ciphertext.
print(encrypt_decrypt(text, keys))
print(encrypt_decrypt(encrypt, keys))
| [
"bunin.zhenya.99@gmail.com"
] | bunin.zhenya.99@gmail.com |
24dbd0bed38272f37b4a7c76ed76b8c07bcbfedf | bbff8c8cb8f64603ae9658e59a6bbc33bcb5adcf | /Sense-of-Census/code.py | 94b8fb80216dd3f9ca8720269c928ecb971c8656 | [
"MIT"
] | permissive | Abasshiekh/ga-learner-dsb-repo | a150d1d766a38009a0cd76b5bb929d9e85bd88fb | 7998122f00eea37cc059e84b8c58e2ac1520942f | refs/heads/master | 2022-09-08T17:47:44.064263 | 2020-05-27T10:40:54 | 2020-05-27T10:40:54 | 258,741,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | # --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
# (defined elsewhere before this snippet runs; the bare expression below
# would raise NameError if it were missing)
path
#New record
# One extra row with the same eight columns as the census file.
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
# Load the CSV (skipping the header row) and append the new record.
data = np.genfromtxt(path,delimiter = ",",skip_header = 1)
census = np.concatenate((data,new_record),axis =0)
# --------------
#Code starts here
# Column 0 is age: basic descriptive statistics.
age = census[:,0]
#maxium age
max_age = age.max()
#minimum age
min_age = age.min()
#mean age
age_mean = age.mean()
#standard deviation of age
age_std = age.std()
# --------------
#Code starts here
# Column 2 encodes race as 0..4; split rows by race and find the
# least-represented category.
race_0 = census[census[:,2]==0]
race_1 = census[census[:,2]==1]
race_2 = census[census[:,2]==2]
race_3 = census[census[:,2]==3]
race_4 = census[census[:,2]==4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
race_list = [len_0,len_1,len_2,len_3,len_4]
print(race_list)
# Index of the smallest count == race code of the minority group.
minority_race =race_list.index(min(race_list))
print(minority_race)
# --------------
#Code starts here
# Rows with age > 60; column 6 is taken as weekly working hours
# (variable names suggest so -- confirm against the data dictionary).
senior_citizens = census[census[:,0]>60]
working_hours_sum = senior_citizens.sum(axis = 0)[6]
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
# Split on column 1 (education level, presumably in years -- confirm) and
# compare the mean of column 7 (pay) between the two groups.
high = census[census[:,1]>10]
low = census[census[:,1]<=10]
avg_pay_high = high[:,7].mean()
print(avg_pay_high)
avg_pay_low = low[:,7].mean()
print(avg_pay_low)
| [
"er.abasshiekh2014@gmail.com"
] | er.abasshiekh2014@gmail.com |
8fdb1dfa3ac8ad9e953ae234c1d21c99d8b72f67 | 1d7147717ed51c34d09e2f68dbb9c746245b147d | /EXP/EXP-2.py | 287365617ae422c5830ad050ebd705913f27a2d6 | [] | no_license | aaryanredkar/prog4everybody | 987e60ebabcf223629ce5a80281c984d1a7a7ec2 | 67501b9e9856c771aea5b64c034728644b25dabe | refs/heads/master | 2020-05-29T18:12:33.644086 | 2017-02-13T02:46:44 | 2017-02-13T02:46:44 | 46,829,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | zork = 0
# Accumulate a running total over the list, printing the cumulative sum
# after each element (Python 2 print statement). `zork` is initialised to
# 0 on the preceding line.
for thing in [9, 41, 12, 3, 74, 15] :
    zork = zork + thing
    print 'After', zork
| [
"aaryantoki@live.com"
] | aaryantoki@live.com |
508e1da526b51f7b5607fb81bc48dd7cfcfd72f8 | 8a9d408cccbe2e56098247e9efdddba9b69984f8 | /links/schema.py | 8a59173b5200d1bca735c17d6a4f552dfd04b409 | [] | no_license | IsaacOmondi/howtographql | 2ca677f45bb93f9776c1b7e52f4c89259492a91c | 67aab270e8ca55a9c9e5bc25c2584052791d2c45 | refs/heads/master | 2020-04-22T04:30:22.653185 | 2019-02-18T09:26:21 | 2019-02-18T09:26:21 | 170,125,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,389 | py | import graphene
from graphene_django import DjangoObjectType
from graphql import GraphQLError
from django.db.models import Q
from .models import Link, Vote
from users.schema import UserType
class LinkType(DjangoObjectType):
    """GraphQL object type auto-generated from the Link django model."""
    class Meta:
        model = Link
class CreateLink(graphene.Mutation):
    """Mutation that stores a new link and echoes the saved fields back."""

    # Output fields returned to the client.
    id = graphene.Int()
    url = graphene.String()
    description = graphene.String()
    posted_by = graphene.Field(UserType)

    class Arguments:
        url = graphene.String()
        description = graphene.String()

    def mutate(self, info, url, description):
        # A falsy request user (no auth) is stored as None.
        user = info.context.user or None
        new_link = Link(url=url, description=description, posted_by=user)
        new_link.save()
        return CreateLink(id=new_link.id,
                          url=new_link.url,
                          description=new_link.description,
                          posted_by=new_link.posted_by)
class VoteType(DjangoObjectType):
    """GraphQL object type auto-generated from the Vote django model."""
    class Meta:
        model = Vote
class CreateVote(graphene.Mutation):
    """Mutation that records a vote by the authenticated user on a link."""

    user = graphene.Field(UserType)
    link = graphene.Field(LinkType)

    class Arguments:
        link_id = graphene.Int()

    def mutate(self, info, link_id):
        voter = info.context.user
        if voter.is_anonymous:
            raise GraphQLError('You must be logged in to vote')
        target = Link.objects.filter(id=link_id).first()
        if not target:
            raise Exception('Invalid Link!')
        Vote.objects.create(user=voter, link=target)
        return CreateVote(user=voter, link=target)
class Mutation(graphene.ObjectType):
    """Root mutation exposing link creation and voting."""
    create_link = CreateLink.Field()
    create_vote = CreateVote.Field()
class Query(graphene.ObjectType):
links = graphene.List(
LinkType,
search=graphene.String(),
first = graphene.Int(),
skip = graphene.Int(),
)
votes = graphene.List(VoteType)
def resolve_links(self, info, search=None, first=None, skip=None, **kwargs):
qs = Link.objects.all()
if search:
filter = (
Q(url__icontains=search) |
Q(description__icontains=search)
)
qs = qs.filter(filter)
if skip:
qs = qs[skip:]
if first:
qs = qs[:first]
return qs
def resolve_votes(self, info, **kwargs):
return Vote.objects.all() | [
"iomondi90@gmail.com"
] | iomondi90@gmail.com |
987512744b0b19826ce6672f59094a057ad54345 | b963a14a1b1111597fdd62da31edf6f5e9ec794d | /spider/xlsx_txt.py | e8576d3e6d01a7aaaa07745dfc09a7b02a218911 | [
"Zlib"
] | permissive | fishprotect/crawl_somewebsite_db | 8842096da93117f1f43feffbc1679a9fd841aa1d | df3761356b0a97d3a451091ee8597289bea31a82 | refs/heads/master | 2020-04-02T10:03:47.331952 | 2018-10-24T06:31:15 | 2018-10-24T06:31:15 | 154,322,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,904 | py | import openpyxl
import requests
from bs4 import BeautifulSoup
import re
import random
import time
from tools.random_sleep import random_sleep_time
'''
######生成随机睡眠数
'''
'''
######从网页中提取网页的body内容
######主要使用requests库
'''
def get_url(url,header):
try:
random_sleep_time()
con = requests.get(url,headers=header,timeout=30)
con.raise_for_status()
# con.encoding = con.apparent_encoding
return con.text
except:
return ""
'''
######解析网页body的内容,并且从里面提取需要的信息
######主要使用BeautifulSoup库
'''
def parse_con(con,moive):
con = BeautifulSoup(con,"lxml")
#电影名
try:
title = con.find("span",attrs={"property":"v:itemreviewed"}).text
except:
title = "不详"
try:
director = con.find("a",attrs={"rel":"v:directedBy"}).text
except:
director ="不详"
#电影详情
try:
content = con.find("span",attrs={"property":"v:summary"}).text.strip()
except:
content = "不详"
#演员
try:
start = con.find_all("a",attrs={"rel":"v:starring"})
star = ""
for i in start:
star = star+i.text+"; "
except:
star = "不详"
#电影上映时间
try:
init = con.find("span",attrs={"property":"v:initialReleaseDate"}).text
except:
init ="不详"
#电影时长
try:
runtime = con.find("span",attrs={"property":"v:runtime"}).text
except:
runtime = "不详"
moive={"title":title,'director':director,"star":star,"init":init,"runtime":runtime,"content":content}
return moive
'''
######主函数,
'''
def xlsx_to_txt():
#添加头部信息,简单的模拟浏览器
agent = "Mozilla/5.0 (Windows NT 6.1; rv:59.0) Gecko/20100101 Firefox/59.0"
header = {'User-Agent':agent}
nums = 1 ###表示电影排名
wb = openpyxl.load_workbook("doubanTOP250.xlsx")
ws = wb.active
moive = {}
for i in range(2,252):
url = str(ws['C'+str(i)].value)
con = get_url(url,header)
if con==" ":
print("error")
else:
moive = parse_con(con,moive)
#文件名如果没有创建成功,则所有的事情都白做。下面的错误处理就是为了保证文件名不会出错
try:
with open(str(nums)+moive["title"]+".txt",'a',errors='ignore') as f:
f.write("电影名:\n "+moive["title"]+"\n \n")
f.write("导 演:\n "+moive["director"]+"\n \n")
f.write("演 员:\n "+moive["star"]+"\n \n")
f.write("上映时间:\n "+moive["init"]+"\n \n")
f.write("时 长:\n "+moive["runtime"]+"\n \n")
f.write("详 情:\n "+moive["content"]+"\n \n")
except:
moive["title"] = re.match("[\w\u2E80-\u9FFF]+",moive["title"]).group()
with open(str(nums)+moive["title"]+".txt",'a',errors='ignore') as f:
f.write("电影名:\n "+moive["title"]+"\n \n")
f.write("导 演:\n "+moive["director"]+"\n \n")
f.write("演 员:\n "+moive["star"]+"\n \n")
f.write("上映时间:\n "+moive["init"]+"\n \n")
f.write("时 长:\n "+moive["runtime"]+"\n \n")
f.write("详 情:\n "+moive["content"]+"\n \n")
print("完成:######"+moive['title']+"的TXT格式文件保存")
nums=nums+1
return ""
'''
######运行代码
'''
| [
"dongyuhong163@163.com"
] | dongyuhong163@163.com |
8471e091175757482f6109e9dc1ecd8f00032daf | 0aa1e62eb5ad64473d97ebd9ff3292e43bb9ffc4 | /PythonWorkSpace/create_window.py | 439c4447597dc99d8e76318906fa31a9611db955 | [] | no_license | yinjinzhong/HaseSummary | 716cd1e0c93c6ae7a0ec88cf30b0ce838e8f6096 | 12073126df84120b4feac90af962f454aee035bf | refs/heads/master | 2023-03-19T18:20:33.953817 | 2021-02-23T06:25:32 | 2021-02-23T06:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | #创建自己的窗口
#Python自带的可编辑的GUI界面,是一个图像窗口。
#Tkinter是使用 python 进行窗口视窗设计的模块。
#简单的构造,多平台,多系统的兼容性, 能让它成为让你快速入门定制窗口文件的好助手。
#它在 python 窗口视窗模块中是一款简单型的。所以用来入门,熟悉窗口视窗的使用,非常有必要。
import re
import tkinter as tk #tkinter 窗口
window=tk.Tk()
window.title('my window')#窗口名字
window.geometry('300x100')#窗口大小
window.mainloop()#让窗口活起来 | [
"912709177@qq.com"
] | 912709177@qq.com |
bbd8e76811a4ccc4b13df98ccbd08e32dfe7cda7 | 971ea59ab59d881880a9fa2abf6a806384d09ffa | /blend_deformer_node.py | a352ab7692eb369ed147a62a5223761442696b44 | [] | no_license | ben-vandelaar/maya_api | 0b47fd6cdb59d9ffe743e22025c27ababf89320e | 5ddfd513cae8ef0e07374e471930ec76bd982654 | refs/heads/master | 2022-11-27T08:38:07.718606 | 2020-08-14T03:04:36 | 2020-08-14T03:04:36 | 287,163,194 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,919 | py | import maya.OpenMaya as om
import maya.OpenMayaMPx as ommpx
import maya.cmds as cmds
class BlendDeformerNode(ommpx.MPxDeformerNode):
TYPE_NAME = "blenddeformernode"
TYPE_ID = om.MTypeId(0x0007F7FD)
def __init__(self):
super(BlendDeformerNode, self).__init__()
def deform(self, data_block, geo_iter, matrix, multi_index):
envelope = data_block.inputValue(self.envelope).asFloat()
if envelope == 0:
return
blend_weight = data_block.inputValue(BlendDeformerNode.blend_weight).asFloat()
if blend_weight == 0:
return
target_mesh = data_block.inputValue(BlendDeformerNode.blend_mesh).asMesh()
if target_mesh.isNull():
return
target_points = om.MPointArray()
target_mesh_fn = om.MFnMesh(target_mesh)
target_mesh_fn.getPoints(target_points)
global_weight = blend_weight * envelope
geo_iter.reset()
while not geo_iter.isDone():
source_pt = geo_iter.position()
target_pt = target_points[geo_iter.index()]
source_weight = self.weightValue(data_block, multi_index, geo_iter.index())
final_pt = source_pt + ((target_pt - source_pt) * global_weight * source_weight)
geo_iter.setPosition(final_pt)
geo_iter.next()
@classmethod
def creator(cls):
return BlendDeformerNode()
@classmethod
def initialize(cls):
typed_attr = om.MFnTypedAttribute()
cls.blend_mesh = typed_attr.create("blendMesh", "bMesh", om.MFnData.kMesh)
numeric_attr = om.MFnNumericAttribute()
cls.blend_weight = numeric_attr.create("blendWeight", "bWeight", om.MFnNumericData.kFloat, 0.0)
numeric_attr.setKeyable(True)
numeric_attr.setMin(0.0)
numeric_attr.setMax(1.0)
cls.addAttribute(cls.blend_mesh)
cls.addAttribute(cls.blend_weight)
output_geom = ommpx.cvar.MPxGeometryFilter_outputGeom
cls.attributeAffects(cls.blend_mesh, output_geom)
cls.attributeAffects(cls.blend_weight, output_geom)
def initializePlugin(plugin):
vendor = "Ben"
version = "1.0.0"
plugin_fn = ommpx.MFnPlugin(plugin, vendor, version)
try:
plugin_fn.registerNode(BlendDeformerNode.TYPE_NAME,
BlendDeformerNode.TYPE_ID,
BlendDeformerNode.creator,
BlendDeformerNode.initialize,
ommpx.MPxNode.kDeformerNode)
except:
om.MGlobal.displayError("Failed to register node: {0}".format(BlendDeformerNode.TYPE_NAME))
cmds.makePaintable(BlendDeformerNode.TYPE_NAME, "weights", attrType="multiFloat", shapeMode="deformer")
def uninitializePlugin(plugin):
cmds.makePaintable(BlendDeformerNode.TYPE_NAME, "weights", attrType="multiFloat", remove=True)
plugin_fn = ommpx.MFnPlugin(plugin)
try:
plugin_fn.deregisterNode(BlendDeformerNode.TYPE_ID)
except:
om.MGlobal.displayError("Failed to deregister node: {0}".format(BlendDeformerNode.TYPE_NAME))
if __name__ == "__main__":
cmds.file(new=True, force=True)
plugin_name = "blend_deformer_node.py"
cmds.evalDeferred('if cmds.pluginInfo("{0}", q=True, loaded=True): cmds.unloadPlugin("{0}")'.format(plugin_name))
cmds.evalDeferred('if not cmds.pluginInfo("{0}", q=True, loaded=True): cmds.loadPlugin("{0}")'.format(plugin_name))
cmds.evalDeferred('cmds.file("C:/Users/benva/Desktop/study/Maya/mayaFiles/blend_deformer.ma",open=True,force=True)')
cmds.evalDeferred('cmds.select("sourceSphere"); cmds.deformer(type="blenddeformernode")')
cmds.evalDeferred('cmds.connectAttr("deformerTargetShape.outMesh", "blenddeformernode1.blendMesh", force=True)')
| [
"benvandelaar@gmail.com"
] | benvandelaar@gmail.com |
c3844394a1d734f67a9d8879ca813c80bfbe37eb | 80f56878dbceb714266abca85519ebbfa131404e | /app/main.py | 9266f39af6e62cd635ea47fef07f21720c4cb42c | [] | no_license | z-sector/async-fastapi-sqlalchemy | 1b944173972bc8487a2f9c638810ba0ffffbbbf5 | 9d622677c56d6d8495f3c87522216f289d52e2f7 | refs/heads/main | 2023-08-15T03:05:10.260060 | 2021-09-12T00:55:02 | 2021-09-12T00:55:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | from fastapi import FastAPI
from fastapi.responses import JSONResponse
from app.api.main import router as api_router
from app.settings import Settings
settings = Settings()
app = FastAPI(title="async-fastapi-sqlalchemy")
app.include_router(api_router, prefix="/api")
@app.get("/", include_in_schema=False)
async def health() -> JSONResponse:
return JSONResponse({"message": "It worked!!"})
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"rhoboro@gmail.com"
] | rhoboro@gmail.com |
cf297f53e6ac80a9e6f143b01404bb8118b28817 | 8c5e45061286c10cba373c1576432cc43bc387fc | /venv/bin/easy_install | aeb63742c78362328a12c31888d6cfb3f0b23c7b | [] | no_license | Ruldane/Mapping-using-Pandas-Folium | 13828b18ad0c7eeb45afad46d50f94f22ef9c908 | c0f8d6cda2898f8075d52e31ba9712f47e54b145 | refs/heads/master | 2020-06-10T17:07:05.522872 | 2019-06-25T10:22:13 | 2019-06-25T10:22:13 | 193,685,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | #!/home/ruldane/PycharmProjects/Mapping/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"l.nizzoli@gmail.com"
] | l.nizzoli@gmail.com | |
861bdf10deb30166bc2da66c95cdde5caca7aec6 | d5ca2bb1a75858382eb38f1aae58a1afd6aa8f9f | /Crosssectional_analysis_R/Attempt4/make_table_MHI.py | 4e6ae6521cd6c10376226229a98fd03238f1a55f | [
"MIT"
] | permissive | SukanyaSharma/AirBnB_SpatialEconometrics | b7ec33ca67cdc54c8706595c96658d659b9a37bd | ad7cfd657d8bbb165305c7f3fa3cb41d171c2a9a | refs/heads/master | 2020-04-23T08:47:47.871481 | 2019-02-16T21:07:06 | 2019-02-16T21:07:06 | 171,048,770 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,966 | py | writefile = open('Table_with_MHI.tex', 'w')
total_columns = 2 + 8 # includes names
column1 = ['Airbnb', '', 'Location', '', '', 'Demographic', '', '', '', 'Neighborhood level', 'Job accessibility', '', 'Constant',
'\\addlinespace[-1.3em]\midrule\\addlinespace[0.5em] Tests and statistics', '', '', '', '']
column2 = ['Airbnb all rentals',
'Airbnb composite score',
'Log BART distance',
'Log CBD distance',
'Coastal tracts (dummy)',
'Unemployment rate',
'Percentage non-white',
'Percentage foreign-born',
'Log median household income',
'School district quality',
'Accessibility by car',
'Accessibility by transit',
'Intercept',
'Log likelihood',
'$\sigma^2$',
'Number of observations',
'deg.\ of freedom',
'AIC']
preamble = ['\documentclass[10pt, letterpaper, landscape]{amsart}',
'\\usepackage[landscape, left=0.2cm, right=0.2cm, top=1cm]{geometry}',
'\\usepackage{booktabs}',
'\\usepackage[justification=centering]{caption}']
commands = ['\n', '\\newcommand{\entry}[3]{\\begin{tabular}[t]{@{}l@{}} $#1^{#2}$ \\\ $#3$ \end{tabular}}']
begin = ['\n', '\\begin{document}',
'\pagenumbering{gobble}',
'',
'\t\\begin{table}[!ht]',
'\t\centering',
'\t\caption{Coefficients and Standard errors for Spatial Lag models (with MHI)}',
'\t\label{my-label}',
'\t\\renewcommand{\\arraystretch}{0.7}',
'\t\\begin{tabular}{@{}llllllllll@{}}',
'\t\t\\toprule\\\[-2.4ex]\\toprule',
'\t\t\multicolumn{2}{c}{Independent variables $(X)$} & \multicolumn{8}{c}{Dependent variable $(Y)$} \\\ \midrule',
'\t\t\\addlinespace[0.5em]',
'\t\tCategory & Variable name & \multicolumn{2}{c}{Rent burdened} & \multicolumn{2}{c}{Rent over-burdened}',
'\t\t& \multicolumn{2}{c}{Log median rent} & \multicolumn{2}{c}{Log median house price} \\\ ',
'\t\t\\addlinespace[0.5em]',
'\t\t & &\multicolumn{1}{c}{Model 1} & \multicolumn{1}{c}{Model 2} & \multicolumn{1}{c}{Model 3}&\multicolumn{1}{c}{Model 4} &',
'\t\t\multicolumn{1}{c}{Model 5} & \multicolumn{1}{c}{Model 6} & \multicolumn{1}{c}{Model 7} & \multicolumn{1}{c}{Model 8}\\\ ',
'\t\t\midrule[.5pt]',
'\t\t\\addlinespace[0.5em]']
end = ['',
'\\\ \midrule\\\[-2.8ex]\\bottomrule',
'\t\t\\\ ',
'\t\t\emph{Notes:} & \multicolumn{9}{l}{${}^{*}p<0.1; \quad {}^{**}p<0.05; \quad {}^{***}p<0.01 $ } \\\ ',
'\t\t\end{tabular}',
'\t\end{table}',
'\end{document}']
readfile = open('With_MHI_Cleaned.txt', 'r')
lines = readfile.readlines()
lines = [i.strip() for i in lines]
# Sanity checks:
for i in lines:
if i[0] == '=':
pass
elif len(i.split(',')) != 3:
print('Error: some lines are missing entries', i)
quit()
counter = 0
columns = [[] for i in range(total_columns-2)]
for i in lines:
if i[0] == '=':
counter += 1
else:
columns[counter].append(i.split(','))
table = [[] for i in range(len(columns[0]))]
for i in range(len(columns[0])):
table[i].append('\n\t\t\\addlinespace[0.5em]')
if column1[i]:
table[i][0] += '\n\t\t\\addlinespace[1.3em]' + column1[i]
table[i].append(column2[i])
switch = 0
for row in range(len(columns[0])):
for c in range(total_columns-2):
if row >= 13:
table[row].append('{}'.format(columns[c][row][0]))
else:
table[row].append('\entry{{{0}}}{{{1}}}{{{2}}}'.format(*columns[c][row]))
table[row] = '\n\t\t\t & '.join(table[row])
table = ['\t' + '\\\\\n\t'.join(table)]
print(table)
writefile.write('\n'.join(preamble + commands + begin + table + end))
writefile.close()
readfile.close()
| [
"vkarve2@illinois.edu"
] | vkarve2@illinois.edu |
5e5a3471ae1f9a415c331cd21e1caa5f5128d1a4 | 603d1a0b21761f3a7a98b486f42fed040894689f | /cs.fail-project-master/main.py | 299c893e2d3f1651e93c3e05e41c985c6ddb3b6d | [] | no_license | VlPukhkalo/my_python | e9cac49bd398b0189cf6a8daf22706a0ff4c6640 | 203e89aade9ba86db9a402dd8c2fb462abcd40c2 | refs/heads/master | 2021-08-19T15:37:07.505267 | 2021-01-08T16:19:37 | 2021-01-08T16:19:37 | 241,367,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,663 | py | # -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
from PyQt5.QtCore import QThread
from PyQt5.QtCore import pyqtSignal
from lib.design import Ui_Dialog
from lib.logic import Main_logic
class CsFail(QThread):
sig_for_start = pyqtSignal(list)
# sig_for_break = pyqtSignal(int)
def __init__(self, parent=None):
QtCore.QThread.__init__(self, parent)
self.csfailmain_logic = Main_logic()
def status_of_what_do(self, status):
self.status = status
def status_for_break(self, isbreak):
self.isbreak = isbreak
def run(self):
if self.status == "Start":
for i in self.csfailmain_logic.load_last_game():
if self.isbreak == 1:
break
self.sig_for_start.emit(i)
class QthreadApp(QtWidgets.QWidget):
sig_for_status = pyqtSignal(str)
sig_for_sig_for_break = pyqtSignal(int)
def __init__(self, parent=None):
self.app = QtWidgets.QApplication(sys.argv)
self.Dialog = QtWidgets.QDialog()
self.ui = Ui_Dialog()
self.ui.setupUi(self.Dialog)
QtWidgets.QWidget.__init__(self, parent)
self.Dialog.show()
self.total_site_bank = []
self.win_pr = []
self.winn = []
self.flag = 0
self.last_game = 0
self.pred_last = 0
self.pred_last_last = 0
self.csfailthread = CsFail()
self.ui.pushButton.clicked.connect(self.start_load)
self.ui.pushButton_2.clicked.connect(self.stop_load)
sys.exit(self.app.exec_())
def start_load(self):
self.start_load_gif()
self.sig_for_status.connect(self.csfailthread.status_of_what_do)
self.sig_for_status.emit("Start")
self.sig_for_sig_for_break.connect(self.csfailthread.status_for_break)
self.sig_for_sig_for_break.emit(0)
self.csfailthread.start()
self.csfailthread.sig_for_start.connect(self.watch_it)
def start_load_gif(self):
self.ui.movie.start()
def stop_load_gif(self):
self.ui.movie.stop()
def watch_it(self, text_from):
_translate = QtCore.QCoreApplication.translate
if len(text_from) == 1:
if self.flag == 0:
self.stop_load_gif()
self.flag = 1
if text_from[0] >= 1.85 or self.pred_last - text_from[0] < -0.2:
self.ui.textEdit_9.setHtml(_translate("Dialog",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Times New Roman\',\'Georgia\',\'Serif\'; font-size:8.25pt; font-weight:448; font-style:normal;\">\n"
f"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:30pt;\">{str(round(text_from[0], 2))}</span></p></body></html>"))
else:
self.ui.textEdit_9.setHtml(_translate("Dialog",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Times New Roman\',\'Georgia\',\'Serif\'; font-size:8.25pt; font-weight:448; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:30pt;\">No Bet</span></p></body></html>"))
self.ui.textEdit_11.setHtml(_translate("Dialog",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Times New Roman\',\'Georgia\',\'Serif\'; font-size:8.25pt; font-weight:448; font-style:normal;\">\n"
f"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:30pt;\">{str(self.last_game_bank)}</span></p></body></html>"))
self.ui.textEdit_14.setHtml(_translate("Dialog",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Times New Roman\',\'Georgia\',\'Serif\'; font-size:8.25pt; font-weight:448; font-style:normal;\">\n"
f"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:30pt;\">{str(self.last_game_crash)}</span></p></body></html>"))
self.pred_last_last = self.pred_last
self.pred_last = round(text_from[0], 2)
elif len(text_from) == 3:
self.total_site_bank.append(text_from[2])
self.last_game_crash = round(text_from[1], 2)
self.last_game_bank = int(text_from[2])
self.ui.textEdit_5.setHtml(_translate("Dialog",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Times New Roman\',\'Georgia\',\'Serif\'; font-size:8.25pt; font-weight:448; font-style:normal;\">\n"
f"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:30pt;\">{str(int(sum(self.total_site_bank)))}</span></p></body></html>"))
if self.flag == 1:
if self.pred_last >= 1.85 or self.pred_last_last - self.pred_last < -0.2:
if self.last_game_crash >= 1.80:
self.winn.append(0.8)
self.win_pr.append(1)
else:
self.winn.append(-1)
self.win_pr.append(0)
self.ui.textEdit_6.setHtml(_translate("Dialog",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Times New Roman\',\'Georgia\',\'Serif\'; font-size:8.25pt; font-weight:448; font-style:normal;\">\n"
f"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:30pt;\">{str(sum(self.winn))}</span></p></body></html>"))
self.ui.textEdit_7.setHtml(_translate("Dialog",
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Times New Roman\',\'Georgia\',\'Serif\'; font-size:8.25pt; font-weight:448; font-style:normal;\">\n"
f"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:30pt;\">{str(100 * round(sum(self.win_pr) / len(self.win_pr), 1))}</span></p></body></html>"))
def stop_load(self):
self.sig_for_sig_for_break.connect(self.csfailthread.status_for_break)
self.sig_for_sig_for_break.emit(1)
obj = QthreadApp()
| [
"suzdareva666@yandex.ru"
] | suzdareva666@yandex.ru |
4e207cdf09c050b69dbefdd8566e8a61352b9829 | 213bbcdcc3d4685165248a465f890f8ae82b0c19 | /src/main/python/model/polar.py | 29ee35e94f8ce4e87e9830a812a5b958359511e6 | [
"MIT"
] | permissive | 3ll3d00d/pypolarmap | c6bd3c671f76d35e6d2edbd54ef4662e54202d8f | d96e16820b4da29ec33271abd34dc33c587e7657 | refs/heads/master | 2021-06-02T13:03:00.170646 | 2020-02-01T15:47:58 | 2020-02-01T15:47:58 | 136,064,479 | 1 | 1 | MIT | 2021-04-20T19:33:37 | 2018-06-04T18:08:52 | Python | UTF-8 | Python | false | false | 7,366 | py | import logging
import math
import numpy as np
from matplotlib import animation
from matplotlib.ticker import MultipleLocator, FuncFormatter
from model import calculate_dBFS_Scales, SINGLE_SUBPLOT_SPEC, set_y_limits
from model.measurement import REAL_WORLD_DATA, LOAD_MEASUREMENTS, CLEAR_MEASUREMENTS
logger = logging.getLogger('polar')
class PolarModel:
'''
Allows a set of measurements to be displayed on a polar chart with the displayed curve interactively changing.
'''
def __init__(self, chart, measurement_model, display_model, marker_data, type=REAL_WORLD_DATA,
subplotSpec=SINGLE_SUBPLOT_SPEC, redrawOnDisplay=True):
self._chart = chart
self._axes = self._chart.canvas.figure.add_subplot(subplotSpec, projection='polar')
self.__init_axes()
self._data = {}
self._curve = None
self._refreshData = False
self.name = f"polar"
self._measurementModel = measurement_model
self._measurementModel.register_listener(self)
self.xPosition = 1000
self.yPosition = 0
self._vline = None
self._vmarker = None
self._ani = None
self._redrawOnDisplay = redrawOnDisplay
self.__display_model = display_model
self._y_range_update_required = False
self.update_decibel_range(draw=False)
self.__marker_data = marker_data
def __repr__(self):
return self.name
def should_refresh(self):
return self._refreshData
def update_decibel_range(self, draw=True):
'''
Updates the decibel range on the chart.
'''
self._y_range_update_required = True
set_y_limits(self._axes, self.__display_model.db_range)
if self._ani:
# have to clear the blit cache to get the r grid to redraw as per
# https://stackoverflow.com/questions/25021311/matplotlib-animation-updating-radial-view-limit-for-polar-plot
self._ani._blit_cache.clear()
if draw:
self._chart.canvas.draw_idle()
self._y_range_update_required = False
def display(self):
'''
Updates the contents of the polar chart.
:return: true if it redrew.
'''
redrew = False
if self.should_refresh():
# convert x-y by theta data to theta-r by freq
xydata = self._measurementModel.get_magnitude_data()
self._data = {}
for idx, freq in enumerate(xydata[0].x):
theta, r = zip(*[(math.radians(x.h), x.y[idx]) for x in xydata])
self._data[freq] = (theta, r)
self._axes.set_thetagrids(np.arange(0, 360, 15))
rmax, rmin, rsteps, _ = calculate_dBFS_Scales(np.concatenate([x[1] for x in self._data.values()]),
max_range=self.__display_model.db_range)
self._axes.set_rgrids(rsteps)
# show degrees as +/- 180
self._axes.xaxis.set_major_formatter(FuncFormatter(self.formatAngle))
# show label every 12dB
self._axes.yaxis.set_major_locator(MultipleLocator(12))
# v line and marker
self._vline = self._axes.axvline(0, linewidth=2, color='gray', linestyle=':', visible=False)
self._vmarker = self._axes.plot(0, 0, 'bo', color='gray', markersize=6)[0]
# plot some invisible data to initialise
self._curve = self._axes.plot([math.radians(-180), math.radians(180)], [-200, -200], linewidth=2,
antialiased=True, linestyle='solid', visible=False)[0]
self._axes.set_ylim(bottom=rmin, top=rmax)
self._y_range_update_required = False
self._refreshData = False
redrew = True
else:
if self._axes is not None and self._y_range_update_required:
self.update_decibel_range(self._redrawOnDisplay)
# make sure we are animating
if self._ani is None and self._curve is not None:
logger.info(f"Starting animation in {self.name}")
self._ani = animation.FuncAnimation(self._chart.canvas.figure, self.redraw, interval=50,
init_func=self.initAnimation, blit=True, save_count=50)
return redrew
def formatAngle(self, x, pos=None):
format_str = "{value:0.{digits:d}f}\N{DEGREE SIGN}"
deg = np.rad2deg(x)
if deg > 180:
deg = deg - 360
return format_str.format(value=deg, digits=0)
def initAnimation(self):
'''
Inits a blank screen.
:return: the curve artist.
'''
self._curve.set_ydata([-200, -200])
return self._curve,
def redraw(self, frame, *fargs):
'''
Redraws the graph based on the yPosition.
'''
curveIdx, curveData = self.findNearestData()
if curveIdx != -1:
self._curve.set_visible(True)
self._curve.set_xdata(curveData[0])
self._curve.set_ydata(curveData[1])
self._curve.set_color(self._chart.get_colour(curveIdx, len(self._data.keys())))
self._vline.set_visible(True)
idx = np.argmax(np.array(curveData[0]) >= math.radians(self.yPosition))
self._vline.set_xdata([curveData[0][idx], curveData[0][idx]])
self._vmarker.set_data(curveData[0][idx], curveData[1][idx])
self.__marker_data.angle = self.formatAngle(curveData[0][idx])
return self._curve, self._vline, self._vmarker
def findNearestData(self):
'''
Searches the available data to find the curve that is the closest freq to our current xPosition.
:return: (curveIdx, curveData) or (-1, None) if nothing is found.
'''
curveIdx = -1
curveData = None
delta = 100000000
for idx, (freq, v) in enumerate(self._data.items()):
newDelta = abs(self.xPosition - freq)
if newDelta < delta:
delta = newDelta
curveIdx = idx
curveData = v
elif newDelta > delta:
break
return curveIdx, curveData
def on_update(self, type, **kwargs):
'''
handles measurement model changes
If event type is activation toggle then changes the associated curve visibility.
If event type is analysis change then the model is marked for refresh.
:param idx: the measurement idx.
'''
if type == LOAD_MEASUREMENTS:
self._refreshData = True
elif type == CLEAR_MEASUREMENTS:
self.clear()
def clear(self, draw=False):
'''
clears the graph.
'''
self.stop_animation()
self._axes.clear()
self._data = {}
self._curve = None
self.__init_axes()
self._refreshData = True
if draw:
self._chart.canvas.draw_idle()
def stop_animation(self):
'''
Stops the animation.
'''
if self._ani is not None:
logger.info(f"Stopping animation in {self.name}")
ani = self._ani
self._ani = None
ani._stop()
def __init_axes(self):
self._axes.grid(linestyle='--', axis='y', alpha=0.7)
| [
"mattkhan@gmail.com"
] | mattkhan@gmail.com |
15dc3449d2d8031e39dbbea6465c4c8ffb2fc8ff | 7aa1750ccd79a7908f32b8a48f1740148aef372b | /BOMHelper.0.0.6.py | 033d6b860e611238374986fce9536a24c5c03827 | [] | no_license | BibinGee/BOMHelper | 1397441562be9578ff6394df3de56859faacbdac | c16b77b199d670017423db8694ee16abe7614af5 | refs/heads/master | 2020-04-05T12:02:18.197631 | 2018-12-21T08:56:28 | 2018-12-21T08:56:28 | 156,855,752 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,246 | py | import xlrd
import re
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys
import xlwt
class Application(QWidget):
def __init__(self):
super(Application, self).__init__()
self.setWindowTitle('BOM Helper Beta 1.0 Author: Daniel Gee')
self.setGeometry(400, 400, 1200, 640)
self.viewer = BOMViewer()
self.reviewBoard = ReviewBoard()
h_layout = QHBoxLayout()
v_layout = QVBoxLayout()
self.load_Btn = QPushButton(self)
self.load_Btn.setObjectName('load_btn')
self.load_Btn.setFont(QFont("Microsoft YaHei"))
self.load_Btn.setText('Load')
self.load_Btn.clicked.connect(self.load)
h_layout.addWidget(self.load_Btn)
self.find_btn = QPushButton(self)
self.find_btn.setObjectName('find_btn')
self.find_btn.setFont(QFont("Microsoft YaHei"))
self.find_btn.setText('Find Difference')
self.find_btn.clicked.connect(self.findDiff)
h_layout.addWidget(self.find_btn)
self.generateBOM_Btn = QPushButton(self)
self.generateBOM_Btn.setObjectName('gntBOM_Btn')
self.generateBOM_Btn.setFont(QFont("Microsoft YaHei"))
self.generateBOM_Btn.setText('Generate Location BOM')
self.generateBOM_Btn.clicked.connect(self.generateBOM)
h_layout.addWidget(self.generateBOM_Btn)
v_layout.addLayout(h_layout)
self.table = QTableWidget()
self.table.setRowCount(100)
self.table.setColumnCount(5)
self.table.resizeColumnToContents(2)
# Description column
self.table.setColumnWidth(1, 500)
# Location column
self.table.setColumnWidth(2, 300)
# Quantity column
self.table.setColumnWidth(3, 90)
# Check column
self.table.setColumnWidth(4, 90)
self.table.setFont(QFont("Microsoft YaHei"))
self.table.setHorizontalHeaderLabels(['Part number', 'Description', 'Location', 'Qty', 'Checked'])
v_layout.addWidget(self.table)
self.path_lb = QLabel(self)
self.path_lb.setObjectName('path_lb')
self.path_lb.setFont(QFont("Microsoft YaHei"))
self.path_lb.setText('')
v_layout.addWidget(self.path_lb)
self.setLayout(v_layout)
self.items = list()
self.bom_name = str()
self.find_btn.setEnabled(False)
self.generateBOM_Btn.setEnabled(False)
@pyqtSlot()
def load(self):
d, t = QFileDialog.getOpenFileName(self, 'Open', './', 'Excel(*.xls *.xlsx)')
print(d)
if d.find('Part Bill of Materials (Markup)') > 0:
self.path_lb.setText(d)
self.bom_name = re.findall('assy (.*) .', d)[0]
# print(self.bom_name)
self.table.clearContents()
self.items.clear()
self.filter(d)
if not self.generateBOM_Btn.isEnabled():
self.generateBOM_Btn.setEnabled(True)
if not self.find_btn.isEnabled():
self.find_btn.setEnabled(True)
else:
QMessageBox.warning(self, 'Warning', 'Please load a PDX BOM!')
@pyqtSlot()
def findDiff(self):
d, t = QFileDialog.getOpenFileName(self, 'Open', './', 'Excel(*.xls *.xlsx)')
print(d)
if d.find('Part Bill of Materials (Markup)') > 0:
if not self.reviewBoard.isVisible():
self.reviewBoard.show()
self.reviewBoard.findPDXDiff(d, self.items)
else:
self.reviewBoard.close()
self.reviewBoard.show()
self.reviewBoard.createBOM(d, self.items)
else:
QMessageBox.warning(self, 'Warning', 'Please load a PDX BOM!!')
@pyqtSlot()
def generateBOM(self):
# self.hide()
if not self.viewer.isVisible():
self.viewer.show()
self.viewer.createBOM(self.bom_name, self.items)
else:
self.viewer.close()
# print('close viewer')
self.viewer.show()
self.viewer.createBOM(self.bom_name, self.items)
def filter(self, d):
book = xlrd.open_workbook(d)
sheets = book.sheet_by_index(0)
rows = sheets.nrows
self.table.setRowCount(rows - 6)
for i in range(7, rows):
try:
item = {'PN': sheets.row_values(i)[5], 'Desc': sheets.row_values(i)[7],
'Qty': str(sheets.row_values(i)[9])}
# print(item)
self.items.append(item)
except Exception as e:
print(e)
book.release_resources()
del book
self.fillTable(self.items)
    def fillTable(self, items):
        """Render parsed BOM items into the check table and flag problems.

        Columns: 0=part number, 1=description, 2=RefDes locations,
        3=quantity, 4=check mark.  Green check = qty matches the RefDes
        count, yellow '!' = zero or missing qty, red 'x' = qty/location
        count mismatch.  Finishes with a message box summarising *err*.
        """
        i = 0    # current table row
        err = 0  # number of qty-vs-location mismatches found
        for item in items:
            # print(item)
            # newitem = QTableWidgetItem(item['PN'])
            self.table.setItem(i, 0, QTableWidgetItem(item['PN']))
            # print(item['Desc'])
            # Description text precedes a "\nRefDes:..." suffix when present.
            des = re.findall('(.*)\nRef', item['Desc'])
            # print(des)
            if des:
                # newitem = QTableWidgetItem(des[0])
                self.table.setItem(i, 1, QTableWidgetItem(des[0]))
            else:
                # newitem = QTableWidgetItem(item['Desc'])
                self.table.setItem(i, 1, QTableWidgetItem(item['Desc']))
            # num = re.findall('(\d).\d', item['Qty'])
            if item['Qty']:
                # NOTE(review): float() raises if Qty is non-numeric text.
                num = float(item['Qty'])
                # print(num)
                if num > 0:
                    # print('number')
                    # Positive quantity: show value and a green check mark.
                    newitem = QTableWidgetItem(str(num))
                    newitem.setTextAlignment(Qt.AlignCenter)
                    self.table.setItem(i, 3, newitem)
                    newitem = QTableWidgetItem('√')
                    newitem.setTextAlignment(Qt.AlignCenter)
                    newitem.setBackground(QBrush(QColor(0, 255, 0)))
                    self.table.setItem(i, 4, newitem)
                elif num == 0:
                    # Zero quantity: show value and a yellow warning mark.
                    newitem = QTableWidgetItem(str(num))
                    newitem.setTextAlignment(Qt.AlignCenter)
                    self.table.setItem(i, 3, newitem)
                    newitem = QTableWidgetItem('!')
                    newitem.setTextAlignment(Qt.AlignCenter)
                    newitem.setBackground(QBrush(QColor(255, 255, 0)))
                    self.table.setItem(i, 4, newitem)
            else:
                # Missing quantity: show raw text and a yellow warning mark.
                newitem = QTableWidgetItem(item['Qty'])
                newitem.setTextAlignment(Qt.AlignCenter)
                self.table.setItem(i, 3, newitem)
                newitem = QTableWidgetItem('!')
                newitem.setTextAlignment(Qt.AlignCenter)
                newitem.setBackground(QBrush(QColor(255, 255, 0)))
                self.table.setItem(i, 4, newitem)
            # Retrieve Locations and compare with quantity
            string = re.findall('RefDes:(.*)', item['Desc'])
            # print(string)
            if string:
                locations = string[0]
                # Comma-separated RefDes list; its length should equal Qty.
                lo_num = len(locations.split(','))
                # print(lo_num)
                # newitem = QTableWidgetItem(locations)
                self.table.setItem(i, 2, QTableWidgetItem(locations))
                # NOTE(review): this float() is NOT guarded by the Qty check
                # above -- an empty Qty with a RefDes suffix would raise here.
                num = float(item['Qty'])
                if lo_num == int(num):
                    # print('Y')
                    # Location count matches quantity: green check overrides.
                    newitem = QTableWidgetItem('√')
                    newitem.setBackground(QBrush(QColor(0, 255, 0)))
                    newitem.setTextAlignment(Qt.AlignCenter)
                    self.table.setItem(i, 4, newitem)
                else:
                    # Mismatch: red cross, counted in the summary.
                    newitem = QTableWidgetItem('×')
                    newitem.setTextAlignment(Qt.AlignCenter)
                    newitem.setBackground(QBrush(QColor(255, 0, 0)))
                    self.table.setItem(i, 4, newitem)
                    err = err + 1
            # self.table.item(1, 4).setForeground(QBrush(QColor(0,255,0)))
            i = i + 1
        QMessageBox.information(self, "Checked Result", "Find " + str(err) + ' quantity error')
class BOMViewer(QWidget):
    """Window that renders the loaded PDX items as a location BOM table.

    Splits parts into insertion vs. SMT lists, exports the table to an
    Excel workbook and can diff the result against an external location
    BOM file via a ReviewBoard window.
    """
    def __init__(self):
        super(BOMViewer, self).__init__()
        self.setWindowTitle('BOM Viewer Author: Daniel Gee')
        self.setGeometry(410, 410, 1200, 640)
        v_layout = QVBoxLayout(self)
        h_layout = QHBoxLayout(self)
        # Header label showing the current BOM name.
        self.bom_lb = QLabel(self)
        self.bom_lb.setObjectName('bom_lb')
        self.bom_lb.setFont(QFont("Microsoft YaHei"))
        self.bom_lb.setAlignment(Qt.AlignCenter)
        self.bom_lb.setText('')
        v_layout.addWidget(self.bom_lb)
        # Export-to-Excel action button.
        self.save_btn = QPushButton(self)
        self.save_btn.setObjectName('save_btn')
        self.save_btn.setFont(QFont("Microsoft YaHei"))
        self.save_btn.setText('Save to Excel')
        self.save_btn.clicked.connect(self.createExcel)
        h_layout.addWidget(self.save_btn)
        # Diff-against-location-BOM action button.
        self.findDiff_btn = QPushButton(self)
        self.findDiff_btn.setObjectName('findDiff_btn')
        self.findDiff_btn.setFont(QFont("Microsoft YaHei"))
        self.findDiff_btn.setText('Find Difference')
        self.findDiff_btn.clicked.connect(self.findBOMDiff)
        h_layout.addWidget(self.findDiff_btn)
        v_layout.addLayout(h_layout)
        # Result table: PN / description / location / quantity.
        self.table = QTableWidget(self)
        self.table.setColumnCount(4)
        self.table.setRowCount(100)
        self.table.setHorizontalHeaderLabels(['Part number', 'Description', 'Location', 'Qty'])
        # Description column
        self.table.setColumnWidth(1, 500)
        # Location column
        self.table.setColumnWidth(2, 300)
        # Qty column
        self.table.setColumnWidth(3, 90)
        self.table.setFont(QFont("Microsoft YaHei"))
        v_layout.addWidget(self.table)
        #
        self.setLayout(v_layout)
        # Child window used for diff results, plus working state.
        self.reviewBoard = ReviewBoard()
        self.BOM = list()         # all parsed rows (parts + misc entries)
        self.insertions = list()  # through-hole / misc parts
        self.SMTs = list()        # surface-mount parts
        self.b_name = str()       # current BOM name, e.g. "<pn> <rev>"
    @pyqtSlot()
    def findBOMDiff(self):
        """Pick an external location BOM file and diff it against self.BOM.

        The chosen file name must contain "location" (case-insensitive);
        the actual comparison is done by ReviewBoard.findBOMDiff().
        """
        d, t = QFileDialog.getOpenFileName(self, 'Open', './', 'Excel(*.xls *.xlsx)')
        d = d.upper()
        if d.find('LOCATION') > 0:
            print(d.find('LOCATION'))
            if not self.reviewBoard.isVisible():
                self.reviewBoard.show()
                self.reviewBoard.findBOMDiff(d, self.BOM)
            else:
                # Re-open so the board starts from a clean state.
                self.reviewBoard.close()
                self.reviewBoard.show()
                self.reviewBoard.findBOMDiff(d, self.BOM)
        else:
            QMessageBox.warning(self, 'Warning', 'File name must include <Location>!')
    def createBOM(self, name, items):
        """Build the location BOM table (and self.BOM) from parsed items.

        Items whose description carries a "\\nRefDes:..." suffix become
        located parts; descriptions starting with "PCB," or "FW" become
        misc entries appended after the parts.  Items matching neither
        pattern are dropped.  Finally parts are classified into
        self.SMTs / self.insertions.
        """
        i = 0
        misc = list()
        if name is not None:
            self.b_name = name
            self.bom_lb.setText('Location BOM: ' + name)
        self.table.clearContents()
        # initial displaying rows, roughly
        self.table.setRowCount(len(items))
        for item in items:
            if re.findall('(.*)\nRef', item['Desc']):
                self.table.setItem(i, 0, QTableWidgetItem(item['PN']))
                # Retrieve Description
                self.table.setItem(i, 1, QTableWidgetItem(re.findall('(.*)\nRef', item['Desc'])[0]))
                # Retrieve Lccation
                self.table.setItem(i, 2, QTableWidgetItem(re.findall('RefDes:(.*)', item['Desc'])[0]))
                # Retrieve quantity
                # Quantity is derived from the RefDes count, not the Qty cell.
                qty = len(re.findall('RefDes:(.*)', item['Desc'])[0].split(','))
                t = QTableWidgetItem(str(qty))
                t.setTextAlignment(Qt.AlignCenter)
                self.table.setItem(i, 3, t)
                # save to a list
                item = {'PN': item['PN'], 'Desc': re.findall('(.*)\nRef', item['Desc'])[0],
                        'Location': re.findall('RefDes:(.*)', item['Desc'])[0].strip(),
                        'Qty': qty}
                self.BOM.append(item)
                i = i + 1
            if re.findall('^PCB,|^FW(.*)', item['Desc']):
                # print(item)
                # Misc entry (bare board / firmware): no location column.
                # NOTE(review): '(.*).000' uses an unescaped dot -- it strips
                # a trailing ".000" but the '.' matches any character.
                self.table.setItem(i, 0, QTableWidgetItem(item['PN']))
                self.table.setItem(i, 1, QTableWidgetItem(item['Desc']))
                t = QTableWidgetItem(re.findall('(.*).000', item['Qty'])[0])
                t.setTextAlignment(Qt.AlignCenter)
                self.table.setItem(i, 3, t)
                item = {'PN': item['PN'], 'Desc': item['Desc'], 'Qty': re.findall('(.*).000', item['Qty'])[0]}
                i = i + 1
                misc.append(item)
        # Change to actual displaying rows.
        self.table.setRowCount(i)
        self.BOM = self.BOM + misc
        # Retrieve insertion parts and SMT parts
        for b in self.BOM:
            # NOTE(review): the unanchored alternation 'S|s(.*)' classifies
            # any PN containing 'S' or 's' as SMT -- confirm the PN scheme.
            p = re.findall('S|s(.*)', b['PN'])
            if p:
                d = {'PN': b['PN'], 'Desc': b['Desc'], 'Location': b['Location'], 'Qty': b['Qty']}
                self.SMTs.append(d)
            else:
                # print(b)
                if not re.findall('^PCB,|^FW(.*)', b['Desc']):
                    d = {'PN': b['PN'], 'Desc': b['Desc'], 'Location': b['Location'], 'Qty': b['Qty']}
                else:
                    # print(b)
                    # Misc entries carry no 'Location' key.
                    d = {'PN': b['PN'], 'Desc': b['Desc'], 'Qty': b['Qty']}
                self.insertions.append(d)
    @pyqtSlot()
    def createExcel(self):
        """Export the current location BOM to an .xls workbook via xlwt.

        Writes a title block, then the insertion parts followed by the
        SMT parts, each with an index / PN / description / location / qty
        layout.  Does nothing when no BOM has been built yet.
        """
        if self.BOM:
            # print(self.BOM)
            f, t = QFileDialog.getSaveFileName(self, 'Save', '/', 'Excel(*.xls)')
            if f:
                wb = xlwt.Workbook(encoding='utf-8')
                sheet = wb.add_sheet('Location BOM')
                # Bold, centered, bordered style for the header area.
                style = xlwt.XFStyle()
                align1 = xlwt.Alignment()
                # Horizontal center
                align1.horz = xlwt.Alignment.HORZ_CENTER
                align1.wrap = xlwt.Alignment.WRAP_AT_RIGHT
                # Vertical center
                align1.vert = xlwt.Alignment.VERT_CENTER
                style.alignment = align1
                border = xlwt.Borders()
                border.left = xlwt.Borders.THIN
                border.right = xlwt.Borders.THIN
                border.top = xlwt.Borders.THIN
                border.bottom = xlwt.Borders.THIN
                style.borders = border
                font = xlwt.Font()
                font.name = 'Microsoft YaHei'
                font.bold = True
                style.font = font
                # Location BOM title
                sheet.write_merge(0, 1, 0, 4, 'Location BOM', style)
                sheet.write_merge(2, 2, 0, 1, 'PCBA part number:', style)
                sheet.write(2, 2, self.b_name.split(' ')[0], style)
                sheet.write_merge(3, 3, 0, 1, 'PCBA Description:', style)
                sheet.write(3, 2, 'PCBA ASSY. of ' + self.b_name.split(' ')[0], style)
                sheet.write_merge(2, 3, 3, 4, 'Rev' + self.b_name.split(' ')[-1], style)
                sheet.write_merge(4, 4, 0, 4, '', style)
                # Location BOM header
                sheet.write(5, 0, 'Index', style)
                sheet.col(1).width = 256 * 15
                sheet.write(5, 1, 'Part number', style)
                sheet.col(2).width = 256 * 60
                sheet.write(5, 2, 'Description', style)
                sheet.col(3).width = 256 * 30
                sheet.write(5, 3, 'Location', style)
                sheet.col(4).width = 256 * 6
                sheet.write(5, 4, 'Qty', style)
                # setup cell style
                # Same style object is mutated for data cells: left-aligned,
                # regular weight.
                align2 = xlwt.Alignment()
                align2.horz = xlwt.Alignment.HORZ_LEFT
                align2.wrap = xlwt.Alignment.WRAP_AT_RIGHT
                # Vertical center
                align2.vert = xlwt.Alignment.VERT_CENTER
                style.alignment = align2
                c_font = xlwt.Font()
                c_font.name = 'Microsoft YaHei'
                c_font.bold = False
                style.font = c_font
                # fill with insertion parts
                i = 6
                sheet.write_merge(i, i, 0, 4, 'Insertion Parts', style)
                # i = i + 1
                k = 1
                for p in self.insertions:
                    try:
                        sheet.write(i + k, 0, k, style)
                        sheet.write(i + k, 1, p['PN'], style)
                        sheet.write(i + k, 2, p['Desc'], style)
                        sheet.write(i + k, 4, p['Qty'], style)
                        # Misc (PCB/FW) entries have no location to write.
                        if not re.findall('PCB,(.*)', p['Desc']):
                            sheet.write(i + k, 3, p['Location'], style)
                    except Exception as e:
                        print('Exception in writing to Excel: ', e)
                    k = k + 1
                # fill with SMT parts
                sheet.write_merge(i + k, i + k, 0, 4, 'SMT Parts', style)
                i = i + k
                k = 1
                for p in self.SMTs:
                    try:
                        sheet.write(i + k, 0, k, style)
                        sheet.write(i + k, 1, p['PN'], style)
                        sheet.write(i + k, 2, p['Desc'], style)
                        sheet.write(i + k, 3, p['Location'], style)
                        sheet.write(i + k, 4, p['Qty'], style)
                    except Exception as e:
                        print('Exception in writing to Excel: ', e)
                    k = k + 1
                wb.save(f)
class ReviewBoard(QTableWidget):
    """Table window that displays differences between two BOMs.

    Rows are color-coded: pale yellow = part-number differences, cyan =
    description/location differences, yellow-green = quantity differences.
    """
    def __init__(self):
        super(ReviewBoard, self).__init__()
        self.setWindowTitle('Review Board Author: Daniel Gee')
        self.setGeometry(400, 400, 1200, 480)
        # self.setRowCount(10)
        self.setColumnCount(4)
        self.setFont(QFont("Microsoft YaHei"))
        self.setHorizontalHeaderLabels(['Part number', 'Current content', 'Referred content', 'Comments'])
        # Current content column
        self.setColumnWidth(1, 600)
        # Referred content column
        self.setColumnWidth(2, 600)
        # Comment column
        self.setColumnWidth(3, 200)
        # Disable edit
        # self.setEditTriggers(QAbstractItemView.NoEditTriggers)
        # Rows parsed from the reference workbook on each diff run.
        self.BOM = list()
    # find difference in PDX BOM
    def findPDXDiff(self, path, cur_pdxbom):
        """Diff the PDX workbook at *path* against *cur_pdxbom*.

        Parses the reference workbook (data rows from index 7), then for
        each reference part looks up the same PN in cur_pdxbom and
        collects part-number / description / quantity differences, which
        are rendered as color-coded table rows.
        """
        self.BOM = list()
        self.clearContents()
        if len(cur_pdxbom) and path:
            rb = xlrd.open_workbook(path)
            sheets = rb.sheet_by_index(0)
            rows = sheets.nrows
            # set a initial row count
            self.setRowCount(200)
            for i in range(7, rows):
                try:
                    item = {'PN': sheets.row_values(i)[5], 'Desc': sheets.row_values(i)[7],
                            'Qty': sheets.row_values(i)[9]}
                    # print(item)
                    self.BOM.append(item)
                except Exception as e:
                    print(e)
            rb.release_resources()
            del rb
            i = 0       # counts matching rows (informational only)
            f = False   # True when the current PN was found in cur_pdxbom
            p_err = list()  # part-number differences
            d_err = list()  # description differences
            q_err = list()  # quantity differences
            err = 0
            for p in self.BOM:
                # Linear PN lookup; after the loop `d` holds the match (or
                # the last element when no match -- guarded by `f`).
                for d in cur_pdxbom:
                    if p['PN'] == d['PN']:
                        f = True
                        break
                if f:
                    f = False
                    if p['Desc'] == d['Desc']:
                        if float(p['Qty']) == float(d['Qty']):
                            i = i + 1
                        else:
                            print(p['Qty'], '\n', d['Qty'])
                            item = {'PN': p['PN'], 'cur': p['Desc'] + ' --> ' + str(float(p['Qty'])),
                                    'ref': d['Desc'] + ' --> ' + str(float(d['Qty']))}
                            q_err.append(item)
                            err = err + 1
                    else:
                        print(p['Desc'], '\n', d['Desc'])
                        item = {'PN': p['PN'], 'cur': p['Desc'], 'ref': d['Desc']}
                        d_err.append(item)
                        err = err + 1
                        # NOTE(review): a row differing in both description
                        # and quantity is counted twice in `err`.
                        if float(p['Qty']) != float(d['Qty']):
                            print(p['Qty'], '\n', d['Qty'])
                            item = {'PN': p['PN'], 'cur': p['Desc'] + ' --> ' + str(float(p['Qty'])),
                                    'ref': d['Desc'] + ' --> ' + str(float(d['Qty']))}
                            q_err.append(item)
                            err = err + 1
                else:
                    print(p['PN'], '\n', d['PN'])
                    item = {'PN': p['PN'], 'cur': p['PN'], 'ref': 'No matched part number'}
                    p_err.append(item)
                    err = err + 1
            print('showing difference')
            i = 0  # table row cursor for the display phase
            if len(p_err):
                print('Part number Error:', len(p_err))
                # Pale-yellow rows: part number not found.
                for p in p_err:
                    m = QTableWidgetItem(p['PN'])
                    m.setBackground(QBrush(QColor(255, 250, 205)))
                    self.setItem(i, 0, m)
                    m = QTableWidgetItem(p['cur'])
                    m.setBackground(QBrush(QColor(255, 250, 205)))
                    self.setItem(i, 1, m)
                    m = QTableWidgetItem(p['ref'])
                    m.setBackground(QBrush(QColor(255, 250, 205)))
                    self.setItem(i, 2, m)
                    m = QTableWidgetItem('Part number difference')
                    m.setBackground(QBrush(QColor(255, 250, 205)))
                    self.setItem(i, 3, m)
                    i = i + 1
            if len(d_err):
                print('Description Error:', len(d_err))
                # Cyan rows: description mismatches.
                for p in d_err:
                    m = QTableWidgetItem(p['PN'])
                    m.setBackground(QBrush(QColor(0, 255, 255)))
                    self.setItem(i, 0, m)
                    m = QTableWidgetItem(p['cur'])
                    m.setBackground(QBrush(QColor(0, 255, 255)))
                    self.setItem(i, 1, m)
                    m = QTableWidgetItem(p['ref'])
                    m.setBackground(QBrush(QColor(0, 255, 255)))
                    self.setItem(i, 2, m)
                    m = QTableWidgetItem('Description difference')
                    m.setBackground(QBrush(QColor(0, 255, 255)))
                    self.setItem(i, 3, m)
                    i = i + 1
            if len(q_err):
                print('Quantity Error:', len(q_err))
                # Yellow-green rows: quantity mismatches.
                for p in q_err:
                    m = QTableWidgetItem(p['PN'])
                    m.setBackground(QBrush(QColor(192, 255, 62)))
                    self.setItem(i, 0, m)
                    m = QTableWidgetItem(p['cur'])
                    m.setBackground(QBrush(QColor(192, 255, 62)))
                    self.setItem(i, 1, m)
                    m = QTableWidgetItem(p['ref'])
                    m.setBackground(QBrush(QColor(192, 255, 62)))
                    self.setItem(i, 2, m)
                    m = QTableWidgetItem('Quantity difference')
                    m.setBackground(QBrush(QColor(192, 255, 62)))
                    self.setItem(i, 3, m)
                    i = i + 1
            self.setRowCount(err)
            # NOTE(review): user-facing typo "Differnce" (also below).
            QMessageBox.warning(self, "Result", "Find " + str(err) + ' Differnce')
        else:
            QMessageBox.warning(self, "Warning", 'Please load a PDX BOM firstly')
    # find difference in Location BOM
    def findBOMDiff(self, path, cur_locationbom):
        """Diff the location BOM workbook at *path* against *cur_locationbom*.

        Scans every cell for a ####-#### part number; the three cells to
        its right are taken as description, quantity and location.  Then
        compares locations and quantities per PN and renders the
        differences as color-coded table rows.
        """
        self.BOM = list()
        if len(cur_locationbom):
            self.clearContents()
            rb = xlrd.open_workbook(path)
            sheet = rb.sheet_by_index(0)
            rows = sheet.nrows
            self.setRowCount(200)
            for i in range(rows):
                string = sheet.row_values(i)
                k = 0  # column index of the cell being inspected
                for s in string:
                    if isinstance(s, str):
                        pn = re.findall('\d\d\d\d-\d\d\d\d', s)
                        if len(pn):
                            # Only accept rows with a non-empty description.
                            if len(string[k + 1]):
                                item = {'PN': string[k], 'Desc': string[k + 1], 'Location': string[k + 3],
                                        'Qty': str(string[k + 2])}
                                self.BOM.append(item)
                    k = k + 1
            p_err = list()  # part-number differences
            d_err = list()  # location differences
            q_err = list()  # quantity differences
            err = 0
            f = False
            for p in cur_locationbom:
                for d in self.BOM:
                    if p['PN'] == d['PN']:
                        f = True
                        break
                if f:
                    f = False
                    # Compare locations ignoring embedded spaces.
                    if p['Location'].replace(' ', '') == d['Location'].replace(' ', ''):
                        if float(p['Qty']) == float(d['Qty']):
                            i = i + 1
                        else:
                            print(p['Qty'], '\n', d['Qty'])
                            item = {'PN': p['PN'], 'cur': p['Location'] + ' --> ' + str(float(p['Qty'])),
                                    'ref': d['Location'] + ' --> ' + str(float(d['Qty']))}
                            q_err.append(item)
                            err = err + 1
                    else:
                        print(p['Location'], '\n', d['Location'])
                        item = {'PN': p['PN'], 'cur': p['Location'], 'ref': d['Location']}
                        d_err.append(item)
                        err = err + 1
                else:
                    print(p['PN'], '\n', d['PN'])
                    item = {'PN': p['PN'], 'cur': p['PN'], 'ref': 'No matched part number'}
                    p_err.append(item)
                    err = err + 1
            i = 0  # table row cursor for the display phase
            if len(p_err):
                print('Part number Error:', len(p_err))
                # Pale-yellow rows: part number not found.
                for p in p_err:
                    m = QTableWidgetItem(p['PN'])
                    m.setBackground(QBrush(QColor(255, 250, 205)))
                    self.setItem(i, 0, m)
                    m = QTableWidgetItem(p['cur'])
                    m.setBackground(QBrush(QColor(255, 250, 205)))
                    self.setItem(i, 1, m)
                    m = QTableWidgetItem(p['ref'])
                    m.setBackground(QBrush(QColor(255, 250, 205)))
                    self.setItem(i, 2, m)
                    m = QTableWidgetItem('Part number difference')
                    m.setBackground(QBrush(QColor(255, 250, 205)))
                    self.setItem(i, 3, m)
                    i = i + 1
            if len(d_err):
                print('Description Error:', len(d_err))
                # Cyan rows: location mismatches.
                for p in d_err:
                    m = QTableWidgetItem(p['PN'])
                    m.setBackground(QBrush(QColor(0, 255, 255)))
                    self.setItem(i, 0, m)
                    m = QTableWidgetItem(p['cur'])
                    m.setBackground(QBrush(QColor(0, 255, 255)))
                    self.setItem(i, 1, m)
                    m = QTableWidgetItem(p['ref'])
                    m.setBackground(QBrush(QColor(0, 255, 255)))
                    self.setItem(i, 2, m)
                    m = QTableWidgetItem('Location difference')
                    m.setBackground(QBrush(QColor(0, 255, 255)))
                    self.setItem(i, 3, m)
                    i = i + 1
            if len(q_err):
                print('Quantity Error:', len(q_err))
                # Yellow-green rows: quantity mismatches.
                for p in q_err:
                    m = QTableWidgetItem(p['PN'])
                    m.setBackground(QBrush(QColor(192, 255, 62)))
                    self.setItem(i, 0, m)
                    m = QTableWidgetItem(p['cur'])
                    m.setBackground(QBrush(QColor(192, 255, 62)))
                    self.setItem(i, 1, m)
                    m = QTableWidgetItem(p['ref'])
                    m.setBackground(QBrush(QColor(192, 255, 62)))
                    self.setItem(i, 2, m)
                    m = QTableWidgetItem('Quantity difference')
                    m.setBackground(QBrush(QColor(192, 255, 62)))
                    self.setItem(i, 3, m)
                    i = i + 1
            self.setRowCount(err)
            self.resizeColumnsToContents()
            QMessageBox.warning(self, "Result", "Find " + str(err) + ' Differnce')
        else:
            QMessageBox.warning(self, "Warning", 'Please load a Location BOM firstly')
if __name__ == '__main__':
    # Script entry point: build the Qt application and run its event loop.
    app = QApplication(sys.argv)
    ex = Application()
    ex.show()
    # Fix: the original called app.exit(app.exec()), which passes the
    # finished event loop's return code to exit() -- a no-op once exec()
    # has returned -- so the process exit status was never propagated.
    # Use sys.exit() as the commented-out line intended.
    sys.exit(app.exec())
| [
"noreply@github.com"
] | BibinGee.noreply@github.com |
ed913d30c9e7ee293859af9c93aaa29e246aa60f | d65fe3e1472c17a614c6fbe5ec5d67d7939590ee | /votes/urls.py | 9cfece9330db525c452f89159d054084260d22ec | [] | no_license | jlfrancisco/django-projet-2021 | 75c3ade39f5895729bd2b4aef05de2f53cf66dfd | de58eeaf3de4595ea309aa88bbb201f1e336fb4a | refs/heads/main | 2023-03-13T22:07:17.032354 | 2021-03-05T11:29:18 | 2021-03-05T11:29:18 | 344,789,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
# /sondages/1/
path('<int:question_id>/', views.detail, name='detail'),
# /sondages/1/results
path('<int:question_id>/results/', views.results, name='results'),
# /sondages/1/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
] | [
"francisco.luc93@gmail.com"
] | francisco.luc93@gmail.com |
fd8b6fff1287a1e36704b8487c4510ebd0a623ae | c99665bc8514182330087aca6108b38406567d61 | /Src/set.py | b4aaf115a3e4048c3bc524dcee8b6d4850a42242 | [] | no_license | samayer12/tdd_python_set | 22b5c4924fca1fe454b4e3366c9097700ed66800 | 35c6f05216d7f90c9dd41fbce8c32414ec7ec8a7 | refs/heads/master | 2021-02-14T08:38:37.778310 | 2020-03-04T02:29:48 | 2020-03-04T02:29:48 | 244,789,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | class Set:
def __init__(self, size=None):
if size is None:
self.data = []
else:
self.data = [None] * size
def __findEmpty__(self):
if self.data[-1] is None:
return self.data.index(None)
    def getSize(self) -> int:
        """Return the total number of slots (filled and empty) in the set."""
        return len(self.data)
    def getCapacity(self) -> int:
        """Return the number of *free* (None) slots remaining in the set."""
        return self.data.count(None)
def add(self, element):
if self.data.__eq__([]):
self.data = [element]
elif None in self.data:
self.data[self.__findEmpty__()] = element
elif element not in self.data:
self.data.append(element)
def remove(self, element):
try:
self.data.remove(element)
except ValueError:
# Element was not in list
pass
| [
"samayer12@gmail.com"
] | samayer12@gmail.com |
5cffd566fecbb6df3e3ebc61d26be8fa2c81aa4f | 8541fea1477cbd8b919d05dac1fb304ff3a07234 | /dockerinspect/dockerinspect.py | 435df0208c1aabd223c4203fedf40e6062d87309 | [] | no_license | agentreno/dockerinspect | fbac06d04f87f373900c180cc642070fbe92e127 | 85a0d7555b36de3f841ebee75e5742648c697637 | refs/heads/master | 2020-03-31T00:21:59.219999 | 2018-10-05T16:11:56 | 2018-10-05T16:11:56 | 151,736,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | import re
import requests
class RegistryClient:
    """Minimal Docker Registry HTTP API v2 client.

    Fetches image manifests, transparently handling the registry's
    Bearer-token authentication challenge on 401 responses.
    """
    def __init__(self, base_url):
        # Registry root URL; endpoints are appended as 'v2/...', so this
        # is expected to end with '/'.
        self.base_url = base_url
    def get_image(self, name, tag='latest'):
        """Fetch the manifest for *name*:*tag* and wrap it in a DockerImage.

        Layer digests are extracted when the manifest carries a 'layers'
        list; otherwise the image is returned with an empty layer list.
        """
        manifest = self.get_image_v2_manifest(name, tag)
        layers = []
        if 'layers' in manifest:
            layers = [layer['digest'] for layer in manifest['layers']]
        return DockerImage(name, tag, manifest, layers)
    def get_image_v2_manifest(self, name, tag):
        """Return the schema-v2 manifest dict for *name*:*tag*.

        Tries an unauthenticated request first; on 401 it parses the
        Www-Authenticate challenge, obtains a Bearer token and retries
        once.  Any other HTTP error propagates to the caller.
        """
        headers = {
            'Accept': 'application/vnd.docker.distribution.manifest.v2+json'
        }
        manifest_url = self.base_url + 'v2/{}/manifests/{}'
        manifest = None
        try:
            response = requests.get(
                manifest_url.format(name, tag),
                headers=headers
            )
            response.raise_for_status()
            manifest = response.json()
        except requests.exceptions.HTTPError as err:
            if err.response.status_code != 401:
                raise
            # Handle 401 Unauthorized by using Www-Authenticate header to get a
            # token from the auth service and retry.
            # NOTE(review): assumes realm/service/scope appear in exactly this
            # order with no spaces in the challenge -- verify per registry.
            match = re.search(
                'realm="(.+)",service="(.+)",scope="(.+)"',
                err.response.headers['Www-Authenticate']
            )
            if not match:
                raise
            realm, service, scope = match.groups()
            token = self.get_registry_token(realm, service, scope)
            headers['Authorization'] = 'Bearer ' + token
            response = requests.get(
                manifest_url.format(name, tag),
                headers=headers
            )
            response.raise_for_status()
            manifest = response.json()
        return manifest
    def get_registry_token(self, auth_url_base, service, scope):
        """Request a Bearer token for *scope* from the auth service realm."""
        response = requests.get(
            auth_url_base + '?scope={}&service={}'.format(scope, service)
        )
        response.raise_for_status()
        return response.json()['token']
class DockerImage:
    """Record of an image fetched from a registry (name, tag, manifest, layers)."""

    def __init__(self, name, tag, manifest, layers):
        self.name = name          # repository name passed to the client
        self.tag = tag            # image tag, e.g. 'latest'
        self.manifest = manifest  # raw manifest dict as returned by the registry
        self.layers = layers      # ordered list of layer digest strings

    def is_base_of(self, image):
        """Return True if this image's layer stack is a prefix of *image*'s.

        Fix: the original indexed image.layers[i] for every layer of self
        and raised IndexError whenever self had more layers than image; a
        longer stack can never be a base, so that case now returns False.
        """
        if len(self.layers) > len(image.layers):
            return False
        return all(own == other for own, other in zip(self.layers, image.layers))
| [
"karl.hopkinson-turrell@nearform.com"
] | karl.hopkinson-turrell@nearform.com |
760db46ba4eebc9fa0b8925f44ef611e0d8c3e64 | 4c939d09fa2c9ca407a70e2eea88ea5aa81ffa30 | /venv/bin/easy_install-3.7 | 23e1191d2951fccb4b0dd8c4d78cf08ac4b417bd | [] | no_license | gchun88/carbuddy | bff5b020c222a4a31f769a69070653cc6d26e442 | 15610701f186c8bed52dc783739af4e0e0bc0a45 | refs/heads/master | 2023-08-19T23:46:18.857572 | 2020-09-27T21:04:33 | 2020-09-27T21:04:33 | 277,402,633 | 0 | 2 | null | 2021-09-22T19:23:07 | 2020-07-05T23:50:26 | JavaScript | UTF-8 | Python | false | false | 276 | 7 | #!/home/tyler/PycharmProjects/carbuddy071920/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Console-script entry point generated by setuptools: strip any
# "-script.py(w)" / ".exe" wrapper suffix from argv[0] so easy_install
# reports a clean program name, then delegate to its main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"gchun88@gmail.com"
] | gchun88@gmail.com |
3b14bebd791e49b44c807127e1d8c83ab8667dd2 | bc441bb06b8948288f110af63feda4e798f30225 | /resource_package_tools_sdk/model/easy_flow/deploy_strategy_pb2.pyi | 96c1f94d9b9d719ffac232700204d7507eacb6ca | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,205 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from resource_package_tools_sdk.model.cmdb.cluster_info_pb2 import (
ClusterInfo as resource_package_tools_sdk___model___cmdb___cluster_info_pb2___ClusterInfo,
)
from resource_package_tools_sdk.model.easy_flow.deploy_target_pb2 import (
DeployTarget as resource_package_tools_sdk___model___easy_flow___deploy_target_pb2___DeployTarget,
)
from resource_package_tools_sdk.model.easy_flow.target_info_pb2 import (
TargetInfo as resource_package_tools_sdk___model___easy_flow___target_info_pb2___TargetInfo,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class DeployStrategy(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class App(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
name = ... # type: typing___Text
appId = ... # type: typing___Text
def __init__(self,
*,
name : typing___Optional[typing___Text] = None,
appId : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> DeployStrategy.App: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> DeployStrategy.App: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"appId",b"appId",u"name",b"name"]) -> None: ...
class BatchStrategy(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class AutoBatch(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
batchNum = ... # type: builtin___int
batchInterval = ... # type: builtin___int
failedStop = ... # type: builtin___bool
def __init__(self,
*,
batchNum : typing___Optional[builtin___int] = None,
batchInterval : typing___Optional[builtin___int] = None,
failedStop : typing___Optional[builtin___bool] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> DeployStrategy.BatchStrategy.AutoBatch: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> DeployStrategy.BatchStrategy.AutoBatch: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"batchInterval",b"batchInterval",u"batchNum",b"batchNum",u"failedStop",b"failedStop"]) -> None: ...
class ManualBatch(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Batches(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
@property
def targets(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___easy_flow___deploy_target_pb2___DeployTarget]: ...
def __init__(self,
*,
targets : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___easy_flow___deploy_target_pb2___DeployTarget]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> DeployStrategy.BatchStrategy.ManualBatch.Batches: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> DeployStrategy.BatchStrategy.ManualBatch.Batches: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"targets",b"targets"]) -> None: ...
batchNum = ... # type: builtin___int
batchInterval = ... # type: builtin___int
failedStop = ... # type: builtin___bool
@property
def batches(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[DeployStrategy.BatchStrategy.ManualBatch.Batches]: ...
def __init__(self,
*,
batches : typing___Optional[typing___Iterable[DeployStrategy.BatchStrategy.ManualBatch.Batches]] = None,
batchNum : typing___Optional[builtin___int] = None,
batchInterval : typing___Optional[builtin___int] = None,
failedStop : typing___Optional[builtin___bool] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> DeployStrategy.BatchStrategy.ManualBatch: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> DeployStrategy.BatchStrategy.ManualBatch: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"batchInterval",b"batchInterval",u"batchNum",b"batchNum",u"batches",b"batches",u"failedStop",b"failedStop"]) -> None: ...
type = ... # type: typing___Text
@property
def autoBatch(self) -> DeployStrategy.BatchStrategy.AutoBatch: ...
@property
def manualBatch(self) -> DeployStrategy.BatchStrategy.ManualBatch: ...
def __init__(self,
*,
autoBatch : typing___Optional[DeployStrategy.BatchStrategy.AutoBatch] = None,
manualBatch : typing___Optional[DeployStrategy.BatchStrategy.ManualBatch] = None,
type : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> DeployStrategy.BatchStrategy: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> DeployStrategy.BatchStrategy: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"autoBatch",b"autoBatch",u"manualBatch",b"manualBatch"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"autoBatch",b"autoBatch",u"manualBatch",b"manualBatch",u"type",b"type"]) -> None: ...
class PackageList(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
packageName = ... # type: typing___Text
targetVersion = ... # type: typing___Text
preStop = ... # type: builtin___bool
postRestart = ... # type: builtin___bool
autoStart = ... # type: builtin___bool
userCheck = ... # type: builtin___bool
fullUpdate = ... # type: builtin___bool
packageId = ... # type: typing___Text
installPath = ... # type: typing___Text
type = ... # type: builtin___int
platform = ... # type: typing___Text
@property
def cluster(self) -> resource_package_tools_sdk___model___cmdb___cluster_info_pb2___ClusterInfo: ...
def __init__(self,
*,
packageName : typing___Optional[typing___Text] = None,
cluster : typing___Optional[resource_package_tools_sdk___model___cmdb___cluster_info_pb2___ClusterInfo] = None,
targetVersion : typing___Optional[typing___Text] = None,
preStop : typing___Optional[builtin___bool] = None,
postRestart : typing___Optional[builtin___bool] = None,
autoStart : typing___Optional[builtin___bool] = None,
userCheck : typing___Optional[builtin___bool] = None,
fullUpdate : typing___Optional[builtin___bool] = None,
packageId : typing___Optional[typing___Text] = None,
installPath : typing___Optional[typing___Text] = None,
type : typing___Optional[builtin___int] = None,
platform : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> DeployStrategy.PackageList: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> DeployStrategy.PackageList: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"cluster",b"cluster"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"autoStart",b"autoStart",u"cluster",b"cluster",u"fullUpdate",b"fullUpdate",u"installPath",b"installPath",u"packageId",b"packageId",u"packageName",b"packageName",u"platform",b"platform",u"postRestart",b"postRestart",u"preStop",b"preStop",u"targetVersion",b"targetVersion",u"type",b"type",u"userCheck",b"userCheck"]) -> None: ...
class Status(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
outOfDate = ... # type: builtin___bool
def __init__(self,
*,
outOfDate : typing___Optional[builtin___bool] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> DeployStrategy.Status: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> DeployStrategy.Status: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"outOfDate",b"outOfDate"]) -> None: ...
id = ... # type: typing___Text
name = ... # type: typing___Text
apiVersion = ... # type: typing___Text
org = ... # type: builtin___int
type = ... # type: typing___Text
scope = ... # type: typing___Text
clusterEnvironment = ... # type: typing___Text
clusterType = ... # type: typing___Text
@property
def app(self) -> DeployStrategy.App: ...
@property
def batchStrategy(self) -> DeployStrategy.BatchStrategy: ...
@property
def clusters(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___cmdb___cluster_info_pb2___ClusterInfo]: ...
@property
def targetList(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_package_tools_sdk___model___easy_flow___target_info_pb2___TargetInfo]: ...
@property
def packageList(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[DeployStrategy.PackageList]: ...
@property
def status(self) -> DeployStrategy.Status: ...
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
apiVersion : typing___Optional[typing___Text] = None,
org : typing___Optional[builtin___int] = None,
app : typing___Optional[DeployStrategy.App] = None,
type : typing___Optional[typing___Text] = None,
batchStrategy : typing___Optional[DeployStrategy.BatchStrategy] = None,
scope : typing___Optional[typing___Text] = None,
clusters : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___cmdb___cluster_info_pb2___ClusterInfo]] = None,
targetList : typing___Optional[typing___Iterable[resource_package_tools_sdk___model___easy_flow___target_info_pb2___TargetInfo]] = None,
clusterEnvironment : typing___Optional[typing___Text] = None,
clusterType : typing___Optional[typing___Text] = None,
packageList : typing___Optional[typing___Iterable[DeployStrategy.PackageList]] = None,
status : typing___Optional[DeployStrategy.Status] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> DeployStrategy: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> DeployStrategy: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"app",b"app",u"batchStrategy",b"batchStrategy",u"status",b"status"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"apiVersion",b"apiVersion",u"app",b"app",u"batchStrategy",b"batchStrategy",u"clusterEnvironment",b"clusterEnvironment",u"clusterType",b"clusterType",u"clusters",b"clusters",u"id",b"id",u"name",b"name",u"org",b"org",u"packageList",b"packageList",u"scope",b"scope",u"status",b"status",u"targetList",b"targetList",u"type",b"type"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
e928848f377e80e21711ceeba5084b285c91b67f | c5274f6a352d3310fd697824a716ef01f51ff8b1 | /picture_compare.py | 297a9f869ede8e346aa8db886ec85cdfac5a7f78 | [
"MIT"
] | permissive | westwind1027/LED_OCR | 601c028be30c39751b3fb49ab18ec60bec7b0e34 | 9c18d22ee4134af5ac771bd4426763e63d74f98f | refs/heads/master | 2023-03-18T21:27:54.206873 | 2018-11-20T08:47:21 | 2018-11-20T08:47:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | #-*- coding:utf-8 -*-
'''
进行图片两张图片相似度的比较
'''
import math
import operator
from functools import reduce
def image_contract(image_object1, image_object2):
    """Return the RMS difference between two images' pixel histograms.

    Both arguments must expose a PIL-style ``histogram()`` method returning a
    flat list of bin counts.  A result of 0.0 means the histograms are
    identical; larger values mean the pixel distributions differ more.
    """
    h1 = image_object1.histogram()
    h2 = image_object2.histogram()
    # Root-mean-square of per-bin differences; sum/zip over a generator
    # replaces the original reduce/operator/map chain with the idiomatic
    # (and allocation-free) equivalent.
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(h1, h2)) / len(h1))
| [
"noreply@github.com"
] | westwind1027.noreply@github.com |
53c1fe8ffffae46a2885398f58eefd18ef485e02 | e4330d9738ac86ff420755ac2c90cd4352910379 | /test_one/management/commands/cache_test.py | 1eb0f0b91bf0c3ccb0215e09fd7e9e9b49bc2b27 | [] | no_license | chaochaoge123/django_test | 4664e47d97dda72afccee50af034123e3b0e9d8e | d3e118719272f19a589744fa852218d0bfa8d869 | refs/heads/master | 2022-12-09T16:38:41.977143 | 2021-03-23T02:51:00 | 2021-03-23T02:51:00 | 224,540,789 | 0 | 0 | null | 2022-01-06T22:40:37 | 2019-11-28T00:38:39 | Python | UTF-8 | Python | false | false | 358 | py | #!/usr/bin/env python
# encoding: utf-8
import redis
from django.core.management.base import BaseCommand
from django.shortcuts import render,HttpResponse
from django.core.cache import cache
class Command(BaseCommand):
    """Management command that prints whatever the cache holds under "add"."""

    def handle(self, *args, **options):
        # Entry point called by `manage.py cache_test`.
        Command.add_cache()

    @staticmethod
    def add_cache():
        # Fetch the "add" key from the configured cache backend and show it.
        value = cache.get("add")
        print(value)
| [
"qqcqqc_123@163.com"
] | qqcqqc_123@163.com |
f31e694a406cc2de9d91346686692db2601eb8cf | 3ecbefe7bc2fcb92af308134fc218fe30742c360 | /controlled_data/apps.py | 59c8b0b83feb44cb451c4ad6a80d35dbe0b55900 | [] | no_license | ScheglovIvan/linkcoin_new | 7f37ee958ed295ced27aaa95600e0cadffc130e3 | 4f34de5bf21878fd924d10fdbf5f036f7ac13d96 | refs/heads/master | 2022-12-02T03:11:29.359807 | 2020-08-14T20:45:57 | 2020-08-14T20:45:57 | 287,620,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from django.apps import AppConfig
class ControlledDataConfig(AppConfig):
name = 'controlled_data'
| [
"vektor9966@gmail.com"
] | vektor9966@gmail.com |
2169e049f5fcb83977bb385d889204a73b5abaed | 5bb61144ae7aadddeb70224ef795e0384a9d3ec2 | /cvquery/api.py | f71af2875c7bb830263dfdf07b566450feaccad7 | [] | no_license | katarinamatic/flask-rest-api | c0591bf4d1909f261a5f1ba5175c0d3d7045e755 | ec81861f67fb9bcfc3d0095ef03f881b98c2e01a | refs/heads/master | 2021-06-16T08:03:59.549841 | 2016-12-06T09:10:28 | 2016-12-06T09:10:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,019 | py | from flask import jsonify, request, g
from flask_restful_swagger_2 import Resource, swagger
from flask_httpauth import HTTPBasicAuth
import json
from cvquery.models import User
from cvquery.models import Post
from cvquery.main import logger
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(username, password):
    """HTTPBasicAuth callback: authenticate `username`/`password`.

    On success the matching User is stashed on flask's `g` for use by the
    protected view.  NOTE(review): passwords are compared in plain text
    here -- consider storing/checking hashes instead.
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        return False
    if user.password != password:
        return False
    g.user = user
    return True
class UserView(Resource):
    """REST resource for the user collection: list, create, update, delete.

    delete/put require HTTP basic auth and only act on the authenticated
    user's own record.
    """
    @swagger.doc({
        'tags': ['user'],
        'description': 'Returns list of all users',
        'responses': {
            '200': {
                'description': 'Users',
            }
        }
    })
    def get(self):
        """Returns list of all users"""
        users = User.query.all()
        dicts = []
        for user in users:
            dicts.append({'username' : user.username})
        log_req('/users/', request)
        return jsonify(dicts)
    @swagger.doc({
        'tags': ['user'],
        'description': 'Creates a new user',
        'produces':[
            'application/json'
        ],
        'responses': {
            '200': {
                'description': 'User',
            }
        },
    })
    def post(self):
        """Creates a new user"""
        username = request.json.get('username')
        password = request.json.get('password')
        # NOTE(review): validation failures below are returned with HTTP 200
        # and an "error" key; callers must inspect the body, not the status.
        if username is None or password is None:
            return jsonify({"error": "username or password blank"})
        if User.query.filter_by(username = username).first() is not None:
            return jsonify({"error": "username already exists"})
        user = User(username, password)
        user.save()
        log_req('/users/', request)
        return jsonify({"username": user.username, "success": True})
    @swagger.doc({
        'tags': ['user'],
        'description': 'Deletes an user',
        'produces':[
            'application/json'
        ],
        'responses': {
            '200': {
                'description': 'User',
            }
        },
    })
    @auth.login_required
    def delete(self):
        """Deletes an user"""
        usr_id = request.json.get('user_id')
        success = False
        # Only the authenticated user may delete their own account; any other
        # id silently yields {"success": False}.
        if(g.user.user_id == usr_id):
            User.query.filter_by(user_id = usr_id).first().delete()
            success = True
        log_req('/users/', request)
        return jsonify({"user_id": usr_id, "success": success})
    @swagger.doc({
        'tags': ['user'],
        'description': 'Updates an user',
        'produces':[
            'application/json'
        ],
        'responses': {
            '200': {
                'description': 'user',
            }
        },
    })
    @auth.login_required
    def put(self):
        """Updates an user"""
        user_id = request.json.get('user_id')
        new_username = request.json.get('new_username')
        new_password = request.json.get('new_password')
        success = False
        # NOTE(review): if user_id matches no row, `user` is None and the
        # comparison below raises AttributeError (HTTP 500) -- confirm whether
        # a {"success": False} response was intended instead.
        user = User.query.filter_by(user_id = user_id).first()
        if (g.user.user_id == user.user_id):
            if new_username is not None:
                user.username = new_username
            if new_password is not None:
                user.password = new_password
            user.save()
            success = True
        log_req('/users/', request)
        return jsonify({"user_id": user_id, "success": success})
class PostView(Resource):
    """REST resource for the post collection: list, create, update, delete.

    post/delete/put require HTTP basic auth; delete/put only act on posts
    owned by the authenticated user.
    """
    @swagger.doc({
        'tags': ['post'],
        'description': 'Returns list of all posts',
        'produces':[
            'application/json'
        ],
        'responses': {
            '200': {
                'description': 'Posts',
            }
        },
    })
    def get(self):
        """Returns list of all posts"""
        posts = Post.query.all()
        dicts = []
        for post in posts:
            dicts.append({'post' : post.post})
        log_req('/posts/', request)
        return jsonify(dicts)
    @swagger.doc({
        'tags': ['post'],
        'description': 'Creates a new post',
        'produces':[
            'application/json'
        ],
        'responses': {
            '200': {
                'description': 'post',
            }
        },
    })
    @auth.login_required
    def post(self):
        """Creates a new post"""
        # `post` is first the request's text field, then rebound to the model.
        post = request.json.get('post')
        user_id = g.user.user_id
        post = Post(user_id, post)
        post.save()
        log_req('/posts/', request)
        return jsonify({"user_id": user_id, "success": True})
    @swagger.doc({
        'tags': ['post'],
        'description': 'Delets a post',
        'produces':[
            'application/json'
        ],
        'responses': {
            '200': {
                'description': 'post',
            }
        },
    })
    @auth.login_required
    def delete(self):
        """Deletes a post"""
        post_id = request.json.get('post_id')
        success = False
        # NOTE(review): if post_id matches no row, `post` is None and the
        # ownership check below raises AttributeError (HTTP 500).
        post = Post.query.filter_by(post_id = post_id).first()
        if (g.user.user_id == post.user_id):
            post.delete()
            success = True
        log_req('/posts/', request)
        return jsonify({"post_id": post_id, "success": success})
    @swagger.doc({
        'tags': ['post'],
        'description': 'Updates a post',
        'produces':[
            'application/json'
        ],
        'responses': {
            '200': {
                'description': 'post',
            }
        },
    })
    @auth.login_required
    def put(self):
        """Updates a post"""
        post_id = request.json.get('post_id')
        new_text = request.json.get('post')
        success = False
        # NOTE(review): same None risk as delete() when post_id is unknown.
        post = Post.query.filter_by(post_id = post_id).first()
        if (g.user.user_id == post.user_id):
            post.post = new_text
            post.save()
            success = True
        log_req('/posts/', request)
        return jsonify({"post_id": post_id, "success": success})
class UserPostsView(Resource):
    """REST resource returning all posts belonging to a given username."""
    @swagger.doc({
        'tags': ['user posts'],
        'description': 'Gets all posts of a user with username',
        'produces':[
            'application/json'
        ],
        'parameters':[{
            'name': 'username',
            'in': 'path',
            'type': 'string',
        }],
        'responses': {
            '200': {
                'description': 'User posts',
            }
        },
    })
    def get(self, username):
        """Gets all posts of a user with username.

        Returns {"posts": [...], "success": True} for a known user.  An
        unknown username previously crashed with an AttributeError (HTTP
        500); it now yields {"posts": [], "success": False} instead.
        """
        user = User.query.filter_by(username = username).first()
        if user is None:
            # Unknown user: report failure rather than dereferencing None.
            return jsonify({"posts": [], "success": False})
        posts = Post.query.filter_by(user_id = user.user_id)
        posts = [post.post for post in posts]
        log_req('/user/' + user.username + '/posts', request)
        return jsonify({"posts": posts, "success": True})
def log_req(path, req):
    """Log the requested path together with the client's browser name.

    Fix: the original ignored the `path` argument and always logged the
    literal '/users/' regardless of which view called it.
    """
    browser = req.user_agent.browser
    logger.info(path + ' ' + str(browser))
| [
"katarina.matic@fer.hr"
] | katarina.matic@fer.hr |
443ad2b069ebe801ccdc2108f6045a11a4f817f6 | c2e93b806bf439136d7ff651c14601af405eddc5 | /play_input.py | 362bf3b9fe6bed561365d670f1af67ed564a0782 | [] | no_license | mehulchopradev/divya-python-core | 11bdd09072b81a7f4c46ee84170119655f9d7273 | 0d10fd5697686c3fb46ab1f9b42c0b7d2fb771b8 | refs/heads/master | 2020-08-23T20:35:12.946154 | 2019-11-05T03:00:07 | 2019-11-05T03:00:07 | 216,702,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | print('Program starts')
n = input('Enter n : ')
# Parse the user's input and report its parity; invalid input gets a
# friendly message instead of a traceback.
try:
    parsed = int(n)
except ValueError:
    print('Please enter integer value')
else:
    # Runs only when int() above succeeded.
    if parsed % 2:
        print('Odd')
    else:
        print('Even')
print('Program ends') | [
"Mehul.Chopra@avalara.com"
] | Mehul.Chopra@avalara.com |
02a563d3078255693593d470cdec2e68bd2c13e8 | 19b7d7130310546bf1afa0f71eb3d64850b096e3 | /cart_chen/print.py | f01397311b518f08485ab97f858da1f5c1d9e0cf | [] | no_license | ycj1905/Dawisdom_POS_system | b9e4c75df60f50b87e47618e4fde48ec00cde777 | 51f6cd39f0106aa0db59e0e33d52d8552df3fc89 | refs/heads/master | 2021-01-22T23:33:54.118031 | 2017-03-21T03:42:07 | 2017-03-21T03:42:07 | 85,653,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,430 | py | #!/usr/bin/env python
#coding: utf8 #this '#' is not comment
from serial import Serial
from struct import unpack
from time import sleep
#import io
#===========================================================#
# RASPBERRY PI (tested with Raspbian Jan 2012):
# - Ensure that ttyAMA0 is not used for serial console access:
# edit /boot/cmdline.txt (remove all name-value pairs containing
# ttyAMA0) and comment out last line in /etc/inittab.
# - Fix user permissions with "sudo usermod -a -G dialout pi"
# - Reboot
# - Ensure that the SERIALPORT setting is correct below
#
# BEAGLE BONE:
# Mux settings (Ängström 2012.05, also work on ubuntu 12.04):
# echo 1 > /sys/kernel/debug/omap_mux/spi0_sclk
# echo 1 > /sys/kernel/debug/omap_mux/spi0_d0
#===========================================================#
class ThermalPrinter(object):
"""
Thermal printing library that controls the "micro panel thermal printer" sold in
shops like Adafruit and Sparkfun (e.g. http://www.adafruit.com/products/597).
Mostly ported from Ladyada's Arduino library
(https://github.com/adafruit/Adafruit-Thermal-Printer-Library) to run on
BeagleBone and Raspberry Pi.
Currently handles printing image data and text, but the rest of the
built-in functionality like underlining and barcodes are trivial
to port to Python when needed.
If on BeagleBone or similar device, remember to set the mux settings
or change the UART you are using. See the beginning of this file for
default setup.
Thanks to Matt Richardson for the initial pointers on controlling the
device via Python.
@author: Lauri Kainulainen
"""
# default serial port for the Beagle Bone
#SERIALPORT = '/dev/ttyO2'
# this might work better on a Raspberry Pi
#SERIALPORT = '/dev/ttyAMA0'
SERIALPORT = '/dev/ttyUSB0'
BAUDRATE = 19200
TIMEOUT = 3
# pixels with more color value (average for multiple channels) are counted as white
# tweak this if your images appear too black or too white
black_threshold = 48
# pixels with less alpha than this are counted as white
alpha_threshold = 127
printer = None
_ESC = chr(27)
# These values (including printDensity and printBreaktime) are taken from
# lazyatom's Adafruit-Thermal-Library branch and seem to work nicely with bitmap
# images. Changes here can cause symptoms like images printing out as random text.
# Play freely, but remember the working values.
# https://github.com/adafruit/Adafruit-Thermal-Printer-Library/blob/0cc508a9566240e5e5bac0fa28714722875cae69/Thermal.cpp
# Set "max heating dots", "heating time", "heating interval"
# n1 = 0-255 Max printing dots, Unit (8dots), Default: 7 (64 dots)
# n2 = 3-255 Heating time, Unit (10us), Default: 80 (800us)
# n3 = 0-255 Heating interval, Unit (10us), Default: 2 (20us)
# The more max heating dots, the more peak current will cost
# when printing, the faster printing speed. The max heating
# dots is 8*(n1+1). The more heating time, the more density,
# but the slower printing speed. If heating time is too short,
# blank page may occur. The more heating interval, the more
# clear, but the slower printing speed.
def __init__(self, heatTime=80, heatInterval=2, heatingDots=7, serialport=SERIALPORT):
self.printer = Serial(serialport, self.BAUDRATE, timeout=self.TIMEOUT)
self.printer.write(self._ESC) # ESC - command
self.printer.write(chr(64)) # @ - initialize
self.printer.write(self._ESC) # ESC - command
self.printer.write(chr(55)) # 7 - print settings
self.printer.write(chr(heatingDots)) # Heating dots (20=balance of darkness vs no jams) default = 20
self.printer.write(chr(heatTime)) # heatTime Library default = 255 (max)
self.printer.write(chr(heatInterval)) # Heat interval (500 uS = slower, but darker) default = 250
# Description of print density from page 23 of the manual:
# DC2 # n Set printing density
# Decimal: 18 35 n
# D4..D0 of n is used to set the printing density. Density is 50% + 5% * n(D4-D0) printing density.
# D7..D5 of n is used to set the printing break time. Break time is n(D7-D5)*250us.
printDensity = 15 # 120% (? can go higher, text is darker but fuzzy)
printBreakTime = 15 # 500 uS
self.printer.write(chr(18))
self.printer.write(chr(35))
self.printer.write(chr((printDensity << 4) | printBreakTime))
self.printer.write(self._ESC) #add line1 ESC 9 n #CSN-A2 User manual p.29
self.printer.write(chr(57)) #add line2 ASCII '9' = chr(57)
self.printer.write(chr(1)) #add line3 ASCII 'n' =chr(1)='UTF-8'
#add line3 ASCII 'n' =chr(0)='GBK'
#add line3 ASCII 'n' =chr(3)='BIG5'
def offline(self):
# Take the printer offline. Print commands sent after this will be
# ignored until 'online' is called.
self.printer.write(self._ESC)
self.printer.write(chr(61))
self.printer.write(chr(0))
def online(self):
# Take the printer back online. Subsequent print commands will be obeyed.
self.printer.write(self._ESC)
self.printer.write(chr(61))
self.printer.write(chr(1))
def sleep(self):
# Put the printer into a low-energy state immediately.
self.sleep_after(1) # Can't be 0, that means 'don't sleep'
def sleep_after(self, seconds):
# Put the printer into a low-energy state after the given number
# of seconds.
if seconds:
sleep(seconds)
self.printer.write(self._ESC)
self.printer.write(chr(56))
self.printer.write(chr(seconds))
self.printer.write(chr(seconds >> 8))
def wake(self):
# Wake the printer from a low-energy state.
self.printer.write(chr(255))
sleep(0.05)
self.printer.write(self._ESC)
self.printer.write(chr(56))
self.printer.write(chr(0))
self.printer.write(chr(0))
def has_paper(self):
# Check the status of the paper using the printer's self reporting
# ability. SerialTX _must_ be connected!
status = -1
self.printer.write(self._ESC)
self.printer.write(chr(118))
self.printer.write(chr(0))
for i in range(0, 9):
if self.printer.inWaiting():
status = unpack('b', self.printer.read())[0]
break
sleep(0.01)
return not bool(status & 0b00000100)
def reset(self):
self.printer.write(self._ESC)
self.printer.write(chr(64))
def linefeed(self, number=1):
for _ in range(number):
self.printer.write(chr(10))
def justify(self, align="L"):
pos = 0
if align == "L":
pos = 0
elif align == "C":
pos = 1
elif align == "R":
pos = 2
self.printer.write(self._ESC)
self.printer.write(chr(97))
self.printer.write(chr(pos))
def bold(self, on=True):
self.printer.write(self._ESC)
self.printer.write(chr(69))
self.printer.write(chr(on))
def font_b(self, on=True):
self.printer.write(self._ESC)
self.printer.write(chr(33))
self.printer.write(chr(on))
def underline(self, on=True):
self.printer.write(self._ESC)
self.printer.write(chr(45))
self.printer.write(chr(on))
def inverse(self, on=True):
self.printer.write(chr(29))
self.printer.write(chr(66))
self.printer.write(chr(on))
def upsidedown(self, on=True):
self.printer.write(self._ESC)
self.printer.write(chr(123))
self.printer.write(chr(on))
def barcode_chr(self, msg):
self.printer.write(chr(29)) # Leave
self.printer.write(chr(72)) # Leave
self.printer.write(msg) # Print barcode # 1:Abovebarcode 2:Below 3:Both 0:Not printed
def barcode_height(self, msg):
self.printer.write(chr(29)) # Leave
self.printer.write(chr(104)) # Leave
self.printer.write(msg) # Value 1-255 Default 50
def barcode_height(self):
self.printer.write(chr(29)) # Leave
self.printer.write(chr(119)) # Leave
self.printer.write(chr(2)) # Value 2,3 Default 2
def barcode(self, msg):
""" Please read http://www.adafruit.com/datasheets/A2-user%20manual.pdf
for information on how to use barcodes. """
# CODE SYSTEM, NUMBER OF CHARACTERS
# 65=UPC-A 11,12 #71=CODEBAR >1
# 66=UPC-E 11,12 #72=CODE93 >1
# 67=EAN13 12,13 #73=CODE128 >1
# 68=EAN8 7,8 #74=CODE11 >1
# 69=CODE39 >1 #75=MSI >1
# 70=I25 >1 EVEN NUMBER
self.printer.write(chr(29)) # LEAVE
self.printer.write(chr(107)) # LEAVE
self.printer.write(chr(65)) # USE ABOVE CHART
self.printer.write(chr(12)) # USE CHART NUMBER OF CHAR
self.printer.write(msg)
    def print_text(self, msg, chars_per_line=None):
        """ Print some text defined by msg. If chars_per_line is defined,
        inserts newlines after the given amount. Use normal '\n' line breaks for
        empty lines. """
        if not chars_per_line:
            # No wrapping requested: send the text straight to the printer.
            self.printer.write(msg)
            sleep(0.2)
        else:
            l = list(msg)
            le = len(msg)
            # Insert a newline every chars_per_line characters; the step of
            # chars_per_line + 1 accounts for each newline just inserted.
            # NOTE(review): starting at chars_per_line + 1 makes the first
            # line one character longer than requested -- looks like an
            # off-by-one; confirm against printer output before changing.
            for i in xrange(chars_per_line + 1, le, chars_per_line + 1):
                l.insert(i, '\n')
            self.printer.write("".join(l))
            sleep(0.2)
def print_markup(self, markup):
""" Print text with markup for styling.
Keyword arguments:
markup -- text with a left column of markup as follows:
first character denotes style (n=normal, b=bold, u=underline, i=inverse, f=font B)
second character denotes justification (l=left, c=centre, r=right)
third character must be a space, followed by the text of the line.
"""
lines = markup.splitlines(True)
for l in lines:
style = l[0]
justification = l[1].upper()
text = l[3:]
if style == 'b':
self.bold()
elif style == 'u':
self.underline()
elif style == 'i':
self.inverse()
elif style == 'f':
self.font_b()
self.justify(justification)
self.print_text(text)
if justification != 'L':
self.justify()
if style == 'b':
self.bold(False)
elif style == 'u':
self.underline(False)
elif style == 'i':
self.inverse(False)
elif style == 'f':
self.font_b(False)
def convert_pixel_array_to_binary(self, pixels, w, h):
""" Convert the pixel array into a black and white plain list of 1's and 0's
width is enforced to 384 and padded with white if needed. """
black_and_white_pixels = [1] * 384 * h
if w > 384:
print "Bitmap width too large: %s. Needs to be under 384" % w
return False
elif w < 384:
print "Bitmap under 384 (%s), padding the rest with white" % w
print "Bitmap size", w
if type(pixels[0]) == int: # single channel
print " => single channel"
for i, p in enumerate(pixels):
if p < self.black_threshold:
black_and_white_pixels[i % w + i / w * 384] = 0
else:
black_and_white_pixels[i % w + i / w * 384] = 1
elif type(pixels[0]) in (list, tuple) and len(pixels[0]) == 3: # RGB
print " => RGB channel"
for i, p in enumerate(pixels):
if sum(p[0:2]) / 3.0 < self.black_threshold:
black_and_white_pixels[i % w + i / w * 384] = 0
else:
black_and_white_pixels[i % w + i / w * 384] = 1
elif type(pixels[0]) in (list, tuple) and len(pixels[0]) == 4: # RGBA
print " => RGBA channel"
for i, p in enumerate(pixels):
if sum(p[0:2]) / 3.0 < self.black_threshold and p[3] > self.alpha_threshold:
black_and_white_pixels[i % w + i / w * 384] = 0
else:
black_and_white_pixels[i % w + i / w * 384] = 1
else:
print "Unsupported pixels array type. Please send plain list (single channel, RGB or RGBA)"
print "Type pixels[0]", type(pixels[0]), "haz", pixels[0]
return False
return black_and_white_pixels
def print_bitmap(self, pixels, w, h, output_png=False):
""" Best to use images that have a pixel width of 384 as this corresponds
to the printer row width.
pixels = a pixel array. RGBA, RGB, or one channel plain list of values (ranging from 0-255).
w = width of image
h = height of image
if "output_png" is set, prints an "print_bitmap_output.png" in the same folder using the same
thresholds as the actual printing commands. Useful for seeing if there are problems with the
original image (this requires PIL).
Example code with PIL:
import Image, ImageDraw
i = Image.open("lammas_grayscale-bw.png")
data = list(i.getdata())
w, h = i.size
p.print_bitmap(data, w, h)
"""
counter = 0
if output_png:
from PIL import Image, ImageDraw
test_img = Image.new('RGB', (384, h))
draw = ImageDraw.Draw(test_img)
self.linefeed()
black_and_white_pixels = self.convert_pixel_array_to_binary(pixels, w, h)
print_bytes = []
# read the bytes into an array
for rowStart in xrange(0, h, 256):
chunkHeight = 255 if (h - rowStart) > 255 else h - rowStart
print_bytes += (18, 42, chunkHeight, 48)
for i in xrange(0, 48 * chunkHeight):
# read one byte in
byt = 0
for xx in xrange(8):
pixel_value = black_and_white_pixels[counter]
counter += 1
# check if this is black
if pixel_value == 0:
byt += 1 << (7 - xx)
if output_png: draw.point((counter % 384, round(counter / 384)), fill=(0, 0, 0))
# it's white
else:
if output_png: draw.point((counter % 384, round(counter / 384)), fill=(255, 255, 255))
print_bytes.append(byt)
# output the array all at once to the printer
# might be better to send while printing when dealing with
# very large arrays...
for b in print_bytes:
self.printer.write(chr(b))
if output_png:
test_print = open('print-output.png', 'wb')
test_img.save(test_print, 'PNG')
print "output saved to %s" % test_print.name
test_print.close()
if __name__ == '__main__':
# main(sys.argv[1:])
import sys, os
serialport = ThermalPrinter.SERIALPORT
if not os.path.exists(serialport):
sys.exit("ERROR: Serial port not found at: %s" % serialport)
print "Testing printer on port %s" % serialport
p = ThermalPrinter(serialport=serialport)
#!!!!!!!!!!!!!!!!!!! start of printing text
#file = open('2.txt', 'r', encoding='UTF-8')
#file = io.open(name, 'r', encoding ='Big5')
#file = open('1.txt', 'r')
#file = open('2_JPN.txt', 'r')
#file = open('2_GBK.txt', 'r')
#name = raw_input('請輸入檔名:')
#print(name)
#file = open(name, 'r')
inputfile = str(sys.argv[1])
print 'Input file is "', inputfile
file = open(inputfile, 'r')
content = file.read()
print(content)
p.print_text(content)
file.close()
#!!!!!!!!!!!!!!!!!!! end of printing text
| [
"ycj1665@gmail.com"
] | ycj1665@gmail.com |
c62b0598e0d1006e434b0e847282b0f5ffeb7668 | d5c0aebecd596056be4073c2a6bb7ca7973dbe66 | /student-work/cassandradelieto/Extracting_CSVS/day_1/working_with_csvs/comprehensions.py | 1baa7ff57abe6e193ce12a258d28b225b43a8fad | [] | no_license | hackoregon/civicu-pythonii-summer-2017 | 00b1bf94ae78080fffbb18d5bd9d70f6e37c8027 | ac91a2ce0422d4e7a1664a71436c399fa103d0e6 | refs/heads/master | 2020-12-02T07:45:15.552888 | 2017-09-12T22:28:47 | 2017-09-12T22:28:47 | 96,719,417 | 1 | 21 | null | 2017-09-12T22:28:48 | 2017-07-10T00:18:53 | PLpgSQL | UTF-8 | Python | false | false | 71 | py | #please add any of the toy examples you build with comprehensions here
| [
"cassandradelieto@gmail.com"
] | cassandradelieto@gmail.com |
53dc441fe12b89377f02f7e79ecae2df2cb8dcbc | f627b0b3a534fc724e474d233a4914309ad88d27 | /tests/test_fractional_discount.py | 8efa6a39f6a47ba74e1fefdd1b64bd9aa9c00fdd | [
"BSD-2-Clause"
] | permissive | hunungare/prices | 0212356c275b7187d4b0bb29e78ff96c74b6e59e | e56c162aa254f17ab3fa50b718d74e491a90b913 | refs/heads/master | 2021-07-12T20:37:43.306170 | 2017-09-28T13:22:40 | 2017-09-28T13:22:40 | 107,232,165 | 0 | 0 | null | 2017-10-17T07:20:41 | 2017-10-17T07:20:40 | null | UTF-8 | Python | false | false | 742 | py | from prices import Amount, FractionalDiscount, Price, PriceRange
def test_discount():
    # A 25% fractional discount should shave a quarter off net and gross.
    original = Price(Amount(100, 'BTC'), Amount(100, 'BTC'))
    quarter_off = FractionalDiscount(factor='0.25')
    discounted = quarter_off.apply(original)
    expected_amount = Amount(75, 'BTC')
    assert discounted.net == expected_amount
    assert discounted.gross == expected_amount
    # Applying the same discount to a range hits both endpoints.
    discounted_range = quarter_off.apply(PriceRange(original, original))
    expected_price = Price(Amount(75, 'BTC'), Amount(75, 'BTC'))
    assert discounted_range.min_price == expected_price
    assert discounted_range.max_price == expected_price
def test_repr():
    # repr() should echo both the factor and the human-readable name.
    named_discount = FractionalDiscount(factor='0.25', name='Test discount')
    expected = "FractionalDiscount(Decimal('0.25'), name='Test discount')"
    assert repr(named_discount) == expected
| [
"maarcin.gebala@gmail.com"
] | maarcin.gebala@gmail.com |
6b152beccb5eaa5fe80526c70aa33082e6c766ef | 3a28b1a12d0710c06f6360381ad8be6cf3707907 | /modular_model/triHPC/triHPCThermo/HPCAllTrays4CstmVapO2_px_N2.py | 2fd80c1ca4fc8994a818eb65c208cb1c144cf3b0 | [] | no_license | WheatZhang/DynamicModelling | 6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02 | ea099245135fe73e8c9590502b9c8b87768cb165 | refs/heads/master | 2020-06-15T14:12:50.373047 | 2019-07-05T01:37:06 | 2019-07-05T01:37:06 | 195,319,788 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | def VapO2_px_N2(P,T,x_N2):
x = (P-5.62017561e+02)/2.47804900e-01
y = (T--1.74950614e+02)/6.71933000e-02
z = (x_N2-7.23608844e-01)/7.27108322e-03
output = \
1*1.91797051e+00
y_O2 = output*1.00000000e+00+0.00000000e+00
return y_O2 | [
"1052632241@qq.com"
] | 1052632241@qq.com |
a03162d0eb6c680d37653ef1d8d1e9db152505ef | 1f68b15b8e2f230a557cba5188038d31bdac7ebf | /newproject/newproject/urls.py | 8de97da8689be56b9daaaf7eeb46f50b65875ba0 | [] | no_license | lissiyas/django1 | fd3e37962d3a09113585aff0c4fa2d387ea343ca | 04bea917c20d5d6c9db74589e7ec08f7efecdada | refs/heads/master | 2023-05-31T09:44:30.160377 | 2021-06-12T10:42:17 | 2021-06-12T10:42:17 | 286,578,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | """newproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from users import views as user_views
urlpatterns = [
path('admin/', admin.site.urls),
path('register/', user_views.register, name ='register'),
path('profile/', user_views.profile, name ='profile'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name ='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'),name ='logout'),
path('', include('new.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"62198945+lissiyas@users.noreply.github.com"
] | 62198945+lissiyas@users.noreply.github.com |
0d23517bb0aff59d5f304377c079282338c4f2be | 73cc2ee526724211a3995e7e554913a46b4617cd | /qnet_learn.py | cb308121ccde755ea3dc566239dc2744c0a20ae1 | [] | no_license | elmeriniemela/ai_asteroids | 417b3f6a63f1485ab0e2582b303e99ecb2b487ed | 3b5c15b0b251b854fad0ea7436585e97645a45c6 | refs/heads/master | 2023-02-05T02:19:55.970784 | 2020-12-31T11:11:04 | 2020-12-31T11:11:04 | 325,563,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,782 | py | #!/usr/bin/env python3
import pygame
import asteroids
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
env = gym.make('CartPole-v0').unwrapped
# set up matplotlib
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
from IPython import display
plt.ion()
# if gpu is to be used
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class ReplayMemory:
    """Fixed-capacity ring buffer of Transition tuples for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.memory = []          # backing store; grows until capacity
        self.position = 0         # index the next push writes to

    def push(self, *args):
        """Store one transition, overwriting the oldest once the buffer is full."""
        transition = Transition(*args)
        if len(self.memory) < self.capacity:
            # Still growing: while filling up, the write cursor always equals
            # len(memory), so appending is equivalent to writing in place.
            self.memory.append(transition)
        else:
            # Full: overwrite the slot under the write cursor.
            self.memory[self.position] = transition
        # Advance the cursor, wrapping around at capacity.
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class DQN(nn.Module):
    """Three conv+batchnorm stages followed by a linear head that maps the
    flattened feature map of an (N, 3, h, w) screen batch to one Q-value
    per action."""

    def __init__(self, h, w, outputs):
        super(DQN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(32)

        # Each conv shrinks a spatial dimension as (size - 4 - 1) // 2 + 1;
        # apply that three times to size the linear head's input.
        def shrink(size, kernel_size=5, stride=2):
            return (size - (kernel_size - 1) - 1) // stride + 1

        feat_w, feat_h = w, h
        for _ in range(3):
            feat_w = shrink(feat_w)
            feat_h = shrink(feat_h)
        self.head = nn.Linear(feat_w * feat_h * 32, outputs)

    def forward(self, x):
        """Return a (N, outputs) tensor of Q-values for the batch `x`."""
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        return self.head(x.view(x.size(0), -1))
# Preprocessing pipeline: tensor -> PIL -> shorter side resized to 40 px -> tensor.
# NOTE(review): Image.CUBIC is deprecated in newer Pillow (use Image.BICUBIC).
resize = T.Compose([T.ToPILImage(),
                    T.Resize(40, interpolation=Image.CUBIC),
                    T.ToTensor()])
def get_cart_location(screen_width):
    """Return the cart's horizontal pixel position on the rendered screen.

    Assumes env.state[0] is the cart x-position in world units and
    env.x_threshold is the half-width of the track (gym CartPole internals).
    """
    world_width = env.x_threshold * 2
    scale = screen_width / world_width
    return int(env.state[0] * scale + screen_width / 2.0) # MIDDLE OF CART
def get_screen():
    """Grab the gym render, crop a patch centered on the cart, and return it
    as a (1, C, H, W) float tensor in [0, 1] on `device`."""
    # Gym returns HWC uint8 (400x600x3 or larger); transpose to torch's CHW.
    screen = env.render(mode='rgb_array').transpose((2, 0, 1))
    _, screen_height, screen_width = screen.shape
    # The cart lives in the lower half: keep the 40%-80% vertical band.
    screen = screen[:, int(screen_height * 0.4):int(screen_height * 0.8)]
    view_width = int(screen_width * 0.6)
    cart_location = get_cart_location(screen_width)
    # Clamp the horizontal window so it never runs off the screen edges.
    half = view_width // 2
    if cart_location < half:
        slice_range = slice(view_width)
    elif cart_location > screen_width - half:
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - half, cart_location + half)
    screen = screen[:, :, slice_range]
    # uint8 -> float32 in [0, 1]; contiguous so from_numpy needs no copy.
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize and add the batch dimension (BCHW).
    return resize(screen).unsqueeze(0).to(device)
env.reset()
# Sanity-check the preprocessing by showing one extracted frame.
plt.figure()
plt.imshow(get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(),
           interpolation='none')
plt.title('Example extracted screen')
plt.show()
# Training hyperparameters.
BATCH_SIZE = 128      # transitions per optimization step
GAMMA = 0.999         # discount factor
EPS_START = 0.9       # initial exploration rate
EPS_END = 0.05        # final exploration rate
EPS_DECAY = 200       # exponential decay constant, in steps
TARGET_UPDATE = 10    # episodes between target-network syncs
# Get screen size so that we can initialize layers correctly based on shape
# returned from AI gym. Typical dimensions at this point are close to 3x40x90
# which is the result of a clamped and down-scaled render buffer in get_screen()
init_screen = get_screen()
_, _, screen_height, screen_width = init_screen.shape
# Get number of actions from gym action space
n_actions = env.action_space.n
policy_net = DQN(screen_height, screen_width, n_actions).to(device)
target_net = DQN(screen_height, screen_width, n_actions).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()  # target net is only evaluated, never trained directly
optimizer = optim.RMSprop(policy_net.parameters())
memory = ReplayMemory(10000)
steps_done = 0  # global step counter driving the epsilon decay
def select_action(state):
    """Epsilon-greedy action selection with exponentially decaying epsilon."""
    global steps_done
    threshold = EPS_END + (EPS_START - EPS_END) * \
        math.exp(-1. * steps_done / EPS_DECAY)
    steps_done += 1
    if random.random() <= threshold:
        # Explore: uniform random action.
        return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)
    # Exploit: index of the largest predicted Q-value, shaped (1, 1).
    with torch.no_grad():
        return policy_net(state).max(1)[1].view(1, 1)
# Episode lengths in steps; for CartPole this equals the episode's total reward.
episode_durations = []
def plot_durations():
    """Plot episode durations and, once 100 are available, a moving average."""
    plt.figure(2)
    plt.clf()
    durations_t = torch.tensor(episode_durations, dtype=torch.float)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    # Take 100 episode averages and plot them too
    if len(durations_t) >= 100:
        means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
        # Pad with zeros so the average curve aligns with the episode axis.
        means = torch.cat((torch.zeros(99), means))
        plt.plot(means.numpy())
    plt.pause(0.001) # pause a bit so that plots are updated
    if is_ipython:
        display.clear_output(wait=True)
        display.display(plt.gcf())
def optimize_model():
    """Run one DQN optimization step on a replay batch.

    Samples BATCH_SIZE transitions, computes the Huber loss between
    Q(s, a) from policy_net and the bootstrapped target
    r + GAMMA * max_a' Q_target(s', a'), then applies one clipped
    gradient step. No-op until the replay memory holds a full batch.
    """
    if len(memory) < BATCH_SIZE:
        return
    transitions = memory.sample(BATCH_SIZE)
    # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
    # detailed explanation). This converts batch-array of Transitions
    # to Transition of batch-arrays.
    batch = Transition(*zip(*transitions))
    # Compute a mask of non-final states and concatenate the batch elements
    # (a final state would've been the one after which simulation ended)
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                          batch.next_state)), device=device, dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch.next_state
                                                if s is not None])
    state_batch = torch.cat(batch.state)
    action_batch = torch.cat(batch.action)
    reward_batch = torch.cat(batch.reward)
    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
    # columns of actions taken. These are the actions which would've been taken
    # for each batch state according to policy_net
    state_action_values = policy_net(state_batch).gather(1, action_batch)
    # Compute V(s_{t+1}) for all next states.
    # Expected values of actions for non_final_next_states are computed based
    # on the "older" target_net; selecting their best reward with max(1)[0].
    # This is merged based on the mask, such that we'll have either the expected
    # state value or 0 in case the state was final.
    next_state_values = torch.zeros(BATCH_SIZE, device=device)
    next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
    # Compute the expected Q values
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    # Compute Huber loss
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    # Clip each gradient element to [-1, 1] for stability.
    for param in policy_net.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
# Main training loop: the state is the difference of two consecutive screens,
# which gives the network velocity information from a single input.
num_episodes = 1000
for i_episode in range(num_episodes):
    # Initialize the environment and state
    env.reset()
    last_screen = get_screen()
    current_screen = get_screen()
    state = current_screen - last_screen
    for t in count():
        # Select and perform an action
        action = select_action(state)
        _, reward, done, _ = env.step(action.item())
        reward = torch.tensor([reward], device=device)
        # Observe new state
        last_screen = current_screen
        current_screen = get_screen()
        if not done:
            next_state = current_screen - last_screen
        else:
            next_state = None
        # Store the transition in memory
        memory.push(state, action, next_state, reward)
        # Move to the next state
        state = next_state
        # Perform one step of the optimization (on the target network)
        optimize_model()
        if done:
            episode_durations.append(t + 1)
            plot_durations()
            break
    # Update the target network, copying all weights and biases in DQN
    if i_episode % TARGET_UPDATE == 0:
        target_net.load_state_dict(policy_net.state_dict())
print('Complete')
env.render()
env.close()
plt.ioff()
plt.show()
"niemela.elmeri@gmail.com"
] | niemela.elmeri@gmail.com |
d91546939d0860222380a3748c0436104a8132f6 | ccf3e114ac5979fc0a4623f9c30d6280955a78a8 | /crawler/webapp/webapp/dockerstudy/models.py | 457b695f968f8ec22553e59603b2904c8349ccb6 | [
"Apache-2.0"
] | permissive | anonymousdummyauthors/ICSE2020 | 0b2f66a36be5efaf4a7a96ead0a9b7e196846f0a | 717267e3856fb9afd9487582be9c6105716f1ef8 | refs/heads/master | 2022-07-14T15:01:43.188984 | 2019-08-24T08:35:44 | 2019-08-24T08:35:44 | 203,857,992 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,117 | py | from django.db import models
from django.utils import timezone
from postgres_copy import CopyManager
# Create your models here.
class DockerImageName(models.Model):
    """A crawled Docker image name together with its original record id."""
    original_id = models.IntegerField(blank=False, null=False, unique=False)
    image_name = models.CharField(max_length=2048, blank=False, null=False)
    objects = CopyManager()  # postgres_copy manager for bulk COPY loads

    def __unicode__(self):
        return '{} - {}'.format(self.id, self.original_id)

    def __str__(self):
        return self.__unicode__()

    def to_dict(self):
        """Serialize this row to a plain dict."""
        return dict(pk=self.id,
                    original_id=self.original_id,
                    image_name=self.image_name)
class Page(models.Model):
    """One page of the crawl queue, stamped with when it was last dispatched."""
    page = models.IntegerField(blank=False, null=False, unique=True)
    last_sent = models.DateTimeField(default=None, blank=True, null=True)
    tag = models.TextField(blank=True, null=True)
    objects = CopyManager()  # postgres_copy manager for bulk COPY loads

    def update_last_sent(self):
        """Refresh the row from the database, then stamp it with the current time."""
        self.refresh_from_db()
        self.last_sent = timezone.now()
        self.save()

    def __unicode__(self):
        return '{} - {}'.format(self.id, self.page)

    def __str__(self):
        return self.__unicode__()

    def to_dict(self):
        """Serialize this row to a plain dict."""
        return dict(page=self.page,
                    last_sent=self.last_sent,
                    tag=self.tag)
class DockerImage(models.Model):
    """A Docker Hub image joined with metadata from its source GitHub repo.

    Fields mix Hub-side data (tags, pulls, stars), the crawled Dockerfile,
    and GitHub repo statistics. Two worker flags (`reponame_task`,
    `imageinfo_task`) record which crawl stages have completed.
    """
    # --- Docker Hub image identity and crawl bookkeeping ---
    image_name = models.CharField(max_length=2048, blank=False, null=False)
    source_repo_name = models.CharField(max_length=2048, blank=True, null=True)
    last_sent = models.DateTimeField(default=None, blank=True, null=True)
    reponame_task=models.BooleanField(default=False)
    imageinfo_task=models.BooleanField(default=False)
    # --- Dockerfile content and provenance ---
    latest_dockerfile = models.TextField(blank=True, null=True)
    dockerfile_source = models.TextField(blank=True, null=True)
    # --- Docker Hub statistics ---
    tags_count = models.IntegerField(blank=True, null=True)
    tags_name = models.TextField(blank=True, null=True)
    image_size = models.TextField(blank=True, null=True)
    image_updated_at = models.TextField(default=None, blank=True, null=True)
    image_pull_count = models.IntegerField(blank=True, null=True)
    image_star_count = models.IntegerField(blank=True, null=True)
    # --- Dockerfile commit history on GitHub ---
    repo_commits_count = models.IntegerField(blank=True, null=True)
    dockerfile_commit_sha = models.TextField(blank=True, null=True)
    dockerfile_commit_date = models.TextField(blank=True, null=True)
    dockerfile_commit_message = models.TextField(blank=True, null=True)
    # --- GitHub repository statistics ---
    language = models.TextField(blank=True, null=True)
    forks_count = models.IntegerField(blank=True, null=True)
    stargazers_count = models.IntegerField(blank=True, null=True)
    watchers_count = models.IntegerField(blank=True, null=True)
    repo_size = models.IntegerField(blank=True, null=True)
    default_branch = models.TextField(blank=True, null=True)
    open_issues_count = models.IntegerField(blank=True, null=True)
    topics = models.TextField(blank=True, null=True)
    has_issues = models.TextField(blank=True, null=True)
    has_projects = models.TextField(blank=True, null=True)
    has_wiki = models.TextField(blank=True, null=True)
    has_pages = models.TextField(blank=True, null=True)
    has_downloads = models.TextField(blank=True, null=True)
    archived = models.TextField(blank=True, null=True)
    pushed_at = models.DateTimeField(default=None, blank=True, null=True)
    created_at = models.DateTimeField(default=None, blank=True, null=True)
    updated_at = models.DateTimeField(default=None, blank=True, null=True)
    subscribers_count = models.IntegerField(blank=True, null=True)
    network_count = models.IntegerField(blank=True, null=True)
    license = models.TextField(blank=True, null=True)
    image_description = models.TextField(blank=True, null=True)
    def __unicode__(self):
        return '{}'.format(self.id)
    def update_last_sent(self):
        """Refresh the row from the database, then stamp it with the current time."""
        self.refresh_from_db()
        self.last_sent = timezone.now()
        self.save()
    def __str__(self):
        return self.__unicode__()
    def to_task_dict(self):
        """Minimal payload handed to a crawl worker."""
        return {
            'pk': self.id,
            'image_name':self.image_name,
            'source_repo_name':self.source_repo_name
        }
    def to_dict(self):
        """Full serialization of the row to a plain dict."""
        return {
            'pk': self.id,
            'image_name':self.image_name,
            'source_repo_name':self.source_repo_name,
            'last_sent':self.last_sent,
            'reponame_task':self.reponame_task,
            'imageinfo_task':self.imageinfo_task,
            'latest_dockerfile':self.latest_dockerfile,
            'dockerfile_source':self.dockerfile_source,
            'tags_count':self.tags_count,
            'tags_name':self.tags_name,
            'image_size':self.image_size,
            'image_updated_at':self.image_updated_at,
            'image_pull_count':self.image_pull_count,
            'image_star_count':self.image_star_count,
            'repo_commits_count':self.repo_commits_count,
            'dockerfile_commit_sha':self.dockerfile_commit_sha,
            'dockerfile_commit_date':self.dockerfile_commit_date,
            'dockerfile_commit_message':self.dockerfile_commit_message,
            'language':self.language,
            'forks_count':self.forks_count,
            'stargazers_count':self.stargazers_count,
            'watchers_count':self.watchers_count,
            'repo_size':self.repo_size,
            'default_branch':self.default_branch,
            'open_issues_count':self.open_issues_count,
            'topics':self.topics,
            'has_issues':self.has_issues,
            'has_projects':self.has_projects,
            'has_wiki':self.has_wiki,
            'has_pages':self.has_pages,
            'has_downloads':self.has_downloads,
            'archived':self.archived,
            'pushed_at':self.pushed_at,
            'created_at':self.created_at,
            'updated_at':self.updated_at,
            'subscribers_count':self.subscribers_count,
            'network_count':self.network_count,
            'license':self.license,
            'image_description':self.image_description
        }
"anonymousmailadd@gmail.com"
] | anonymousmailadd@gmail.com |
c88d7a52a584e7019408a043bd60b25a2db3c30d | 393c27c631889c3d9b30dfcb17e16de5dc79c759 | /op25/gr-op25_repeater/apps/tx/multi_tx.py | eaae4e29120a0235daf4d05f79cb704f7652a3c3 | [] | no_license | MattMills/op25 | 170d2ca71f4c0e5227795b77fbecd0358fd18d45 | f15126ff97f91f64fb38423f52a856950686ff54 | refs/heads/master | 2023-01-06T23:51:47.919736 | 2020-11-03T07:52:53 | 2020-11-03T07:52:53 | 66,678,446 | 2 | 0 | null | 2017-12-07T02:14:14 | 2016-08-26T20:49:23 | Python | UTF-8 | Python | false | false | 10,530 | py | #!/usr/bin/env python
#################################################################################
#
# Multiprotocol Digital Voice TX (C) Copyright 2017, 2018, 2019, 2020 Max H. Parke KA1RBI
#
# This file is part of OP25
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#################################################################################
"""
Transmit four simultaneous RF channels (dmr, p25, dstar, and ysf)
"""
import sys
import os
import math
from gnuradio import gr, gru, audio, eng_notation
from gnuradio import filter, blocks, analog, digital
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import osmosdr
import op25
import op25_repeater
from math import pi
from op25_c4fm_mod import p25_mod_bf
class pipeline(gr.hier_block2):
    """One digital-voice modulation chain.

    short PCM audio in -> protocol encoder -> symbol modulator -> FM ->
    mixed up to `if_freq` -> complex baseband out.
    Supported protocols: dmr, dstar, p25, ysf, nxdn48/nxdn96.
    """
    def __init__(self, protocol=None, config_file=None, mod_adjust=None, gain_adjust=None, output_gain=None, if_freq=0, if_rate=0, verbose=0, fullrate_mode=False, sample_rate=0, bt=0, alt_input=None):
        gr.hier_block2.__init__(self, "dv_modulator",
            gr.io_signature(1, 1, gr.sizeof_short),       # Input signature
            gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
        from dv_tx import RC_FILTER
        if protocol == 'dmr':
            assert config_file
            # DMR is a two-slot TDMA protocol: two encoders feed one BS transmitter.
            ENCODER  = op25_repeater.ambe_encoder_sb(verbose)
            ENCODER2 = op25_repeater.ambe_encoder_sb(verbose)
            ENCODER2.set_gain_adjust(gain_adjust)
            DMR = op25_repeater.dmr_bs_tx_bb(verbose, config_file)
            self.connect(self, ENCODER, (DMR, 0))
            if not alt_input:
                alt_input = self    # second slot reuses the main input if none given
            self.connect(alt_input, ENCODER2, (DMR, 1))
        elif protocol == 'dstar':
            assert config_file
            ENCODER = op25_repeater.dstar_tx_sb(verbose, config_file)
        elif protocol == 'p25':
            ENCODER = op25_repeater.vocoder(True,		# 0=Decode,True=Encode
                                  False,	# Verbose flag
                                  0,	# flex amount
                                  "",	# udp ip address
                                  0,	# udp port
                                  False)	# dump raw u vectors
        elif protocol == 'ysf':
            assert config_file
            ENCODER = op25_repeater.ysf_tx_sb(verbose, config_file, fullrate_mode)
        elif protocol.startswith('nxdn'):
            assert config_file
            ENCODER = op25_repeater.nxdn_tx_sb(verbose, config_file, protocol == 'nxdn96')
        ENCODER.set_gain_adjust(gain_adjust)
        MOD = p25_mod_bf(output_sample_rate = sample_rate, dstar = (protocol == 'dstar'), bt = bt, rc = RC_FILTER[protocol])
        AMP = blocks.multiply_const_ff(output_gain)
        max_dev = 12.5e3    # peak FM deviation (Hz)
        k = 2 * math.pi * max_dev / if_rate
        FM_MOD = analog.frequency_modulator_fc (k * mod_adjust)
        if protocol == 'dmr':
            self.connect(DMR, MOD)
        else:
            self.connect(self, ENCODER, MOD)
        # Interpolate to the SDR rate, then mix the channel up to its IF offset.
        INTERP = filter.rational_resampler_fff(if_rate // sample_rate, 1)
        MIXER = blocks.multiply_cc()
        LO = analog.sig_source_c(if_rate, analog.GR_SIN_WAVE, if_freq, 1.0, 0)
        self.connect(MOD, AMP, INTERP, FM_MOD, (MIXER, 0))
        self.connect(LO, (MIXER, 1))
        self.connect(MIXER, self)
class my_top_block(gr.top_block):
    """Flowgraph transmitting five digital-voice channels simultaneously.

    Builds one `pipeline` per protocol (nxdn48, dmr, dstar, ysf, p25), spaced
    `--if-offset` Hz apart, sums them, and sends the result to an osmosdr sink.
    """
    def __init__(self):
        gr.top_block.__init__(self)
        parser = OptionParser(option_class=eng_option)
        parser.add_option("-a", "--args", type="string", default="", help="device args")
        parser.add_option("-A", "--do-audio", action="store_true", default=False, help="live input audio")
        parser.add_option("-b", "--bt", type="float", default=0.5, help="specify bt value")
        parser.add_option("-f", "--file", type="string", default=None, help="specify the input file (mono 8000 sps S16_LE)")
        parser.add_option("-g", "--gain", type="float", default=1.0, help="input gain")
        parser.add_option("-i", "--if-rate", type="int", default=480000, help="output rate to sdr")
        parser.add_option("-I", "--audio-input", type="string", default="", help="pcm input device name.  E.g., hw:0,0 or /dev/dsp")
        parser.add_option("-N", "--gains", type="string", default=None, help="gain settings")
        parser.add_option("-o", "--if-offset", type="float", default=100000, help="channel spacing (Hz)")
        parser.add_option("-q", "--frequency-correction", type="float", default=0.0, help="ppm")
        parser.add_option("-Q", "--frequency", type="float", default=0.0, help="Hz")
        parser.add_option("-r", "--repeat", action="store_true", default=False, help="input file repeat")
        parser.add_option("-R", "--fullrate-mode", action="store_true", default=False, help="ysf fullrate")
        parser.add_option("-s", "--modulator-rate", type="int", default=48000, help="must be submultiple of IF rate")
        parser.add_option("-S", "--alsa-rate", type="int", default=48000, help="sound source/sink sample rate")
        parser.add_option("-v", "--verbose", type="int", default=0, help="additional output")
        (options, args) = parser.parse_args()
        assert options.file # input file name (-f filename) required
        # The SDR rate must be an integer multiple of the modulator rate.
        f1 = float(options.if_rate) / options.modulator_rate
        i1 = int(options.if_rate / options.modulator_rate)
        if f1 - i1 > 1e-3:
            print ('*** Error, sdr rate %d not an integer multiple of modulator rate %d - ratio=%f' % (options.if_rate, options.modulator_rate, f1))
            sys.exit(1)
        protocols = 'nxdn48 dmr dstar ysf p25'.split()
        # Channels are spread evenly around the SDR center frequency.
        start_freq = options.frequency
        end_freq = options.frequency + options.if_offset * (len(protocols)-1)
        tune_freq = (start_freq + end_freq) // 2
        print ('start %d end %d center tune %d' % (start_freq, end_freq, tune_freq))
        bw = options.if_offset * len(protocols) + 50000
        if bw > options.if_rate:
            print ('*** Error, a %d Hz band is required for %d channels and guardband.' % (bw, len(protocols)))
            print ('*** Either reduce channel spacing using -o (current value is %d Hz),' % (options.if_offset) )
            print ('*** or increase SDR output sample rate using -i (current rate is %d Hz)' % (options.if_rate) )
            sys.exit(1)
        max_inputs = 1
        from dv_tx import output_gains, gain_adjust, gain_adjust_fullrate, mod_adjust
        if options.do_audio:
            # Live audio path: capture, low-pass, decimate to 8 ksps, scale to S16.
            AUDIO = audio.source(options.alsa_rate, options.audio_input)
            lpf_taps = filter.firdes.low_pass(1.0, options.alsa_rate, 3400.0, 3400 * 0.1, filter.firdes.WIN_HANN)
            audio_rate = 8000
            AUDIO_DECIM = filter.fir_filter_fff (int(options.alsa_rate / audio_rate), lpf_taps)
            AUDIO_SCALE = blocks.multiply_const_ff(32767.0 * options.gain)
            AUDIO_F2S = blocks.float_to_short()
            self.connect(AUDIO, AUDIO_DECIM, AUDIO_SCALE, AUDIO_F2S)
            alt_input = AUDIO_F2S
        else:
            alt_input = None
        SUM = blocks.add_cc()
        input_repeat = True
        for i in range(len(protocols)):
            SOURCE = blocks.file_source(gr.sizeof_short, options.file, input_repeat)
            protocol = protocols[i]
            # Full-rate codecs (p25, ysf fullrate) use their own gain table.
            if (options.fullrate_mode and protocol == 'ysf') or protocol == 'p25':
                gain_adj = gain_adjust_fullrate[protocols[i]]
            else:
                gain_adj = gain_adjust[protocols[i]]
            if protocols[i] == 'dmr':
                cfg = 'dmr-cfg.dat'
            elif protocols[i] == 'ysf':
                cfg = 'ysf-cfg.dat'
            elif protocols[i] == 'dstar':
                cfg = 'dstar-cfg.dat'
            elif protocols[i].startswith('nxdn'):
                cfg = 'nxdn-cfg.dat'
            else:
                cfg = None
            this_freq = start_freq + i * options.if_offset
            if_freq = this_freq - tune_freq
            print ('%s\t%d\t%d\t%d' % (protocols[i], this_freq, tune_freq, if_freq))
            CHANNEL = pipeline(
                protocol = protocols[i],
                output_gain = output_gains[protocols[i]],
                gain_adjust = gain_adj,
                mod_adjust = mod_adjust[protocols[i]],
                if_freq = if_freq,
                if_rate = options.if_rate,
                sample_rate = options.modulator_rate,
                bt = options.bt,
                fullrate_mode = options.fullrate_mode,
                alt_input = alt_input,
                config_file = cfg)
            self.connect(SOURCE, CHANNEL, (SUM, i))
        self.u = osmosdr.sink (options.args)
        # Scale the sum so the composite stays within unit amplitude.
        AMP = blocks.multiply_const_cc(1.0 / float(len(protocols)))
        self.setup_sdr_output(options, tune_freq)
        self.connect(SUM, AMP, self.u)
    def setup_sdr_output(self, options, tune_freq):
        """Apply gain, sample-rate, tuning, and ppm settings to the SDR sink."""
        gain_names = self.u.get_gain_names()
        for name in gain_names:
            # NOTE(review): `range` and `tuple` below shadow Python builtins.
            range = self.u.get_gain_range(name)
            print ("gain: name: %s range: start %d stop %d step %d" % (name, range[0].start(), range[0].stop(), range[0].step()))
        if options.gains:
            for tuple in options.gains.split(","):
                name, gain = tuple.split(":")
                gain = int(gain)
                print ("setting gain %s to %d" % (name, gain))
                self.u.set_gain(gain, name)
        print ('setting sample rate %d' % options.if_rate)
        self.u.set_sample_rate(options.if_rate)
        print ('setting SDR tuning frequency %d' % tune_freq)
        self.u.set_center_freq(tune_freq)
        self.u.set_freq_corr(options.frequency_correction)
if __name__ == "__main__":
    print ('Multiprotocol Digital Voice TX (C) Copyright 2017-2020 Max H. Parke KA1RBI')
    # Bug fix: the flowgraph must be bound to a name so the KeyboardInterrupt
    # handler can stop it. The original called my_top_block().run() and then
    # referenced an undefined `tb`, so Ctrl-C raised NameError instead of
    # shutting the flowgraph down cleanly.
    tb = my_top_block()
    try:
        tb.run()
    except KeyboardInterrupt:
        tb.stop()
| [
"ikj1234i@yahoo.com"
] | ikj1234i@yahoo.com |
f69a22886b9e73676be73b0bbedc6f5b740e85fa | 597c4f48332251552a602122bb3d325bc43a9d7f | /etc/04_calculator/01_string.py | e88884554aa8d5499cf5f68d2c4a5093c4f0af83 | [] | no_license | Kyeongrok/python_algorithm | 46de1909befc7b17766a57090a7036886361fd06 | f0cdc221d7908f26572ae67b5c95b12ade007ccd | refs/heads/master | 2023-07-11T03:23:05.782478 | 2023-06-22T06:32:31 | 2023-06-22T06:32:31 | 147,303,654 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | string = "1 + 2 * (3 - 4)" | [
"oceanfog1@gmail.com"
] | oceanfog1@gmail.com |
b53897487d2fcd3e8504685fa14e63772a1bd376 | 0fe5a7ede2c3e8c4d47a85eebe959e2ab749568b | /models/unet.py | d9cdb9980e09ee8251af30fe2601f4916427d7ad | [] | no_license | FlyGlider/Kits19 | 2ad9e1befb44f7e23f56c8b42b1b81d350361620 | 20efc3327c7a74431bb53fc3d58e453b791df49e | refs/heads/master | 2020-06-25T23:03:51.926141 | 2019-07-29T12:35:42 | 2019-07-29T12:35:42 | 199,448,545 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,606 | py | from networks import *
# 在nnunet中当池化为1时,卷积也为1,但俺不是
class UNet(nn.Module):
MAX_NUM_FILTERS_3D = 320
MAX_NUM_FILTERS_2D = 480
NUM_CONVS = 2
def __init__(self, in_ch=1, base_num_features=30, num_classes=3,
norm_type='batch', nonlin_type='relu', pool_type='max',
pool_layer_kernel_sizes=[None] * 5, deep_supervision=True, mode='2D'):
super(UNet, self).__init__()
if mode not in ['2D', '3D', '3D_LOW']:
raise NotImplementedError('mode [%s] is not found' % mode)
if mode == '2D':
upsample_mode = 'bilinear'
else:
upsample_mode = 'trilinear'
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.conv_layers = []
self.upsample_layers = []
self.num_pool = len(pool_layer_kernel_sizes)
self.deep_supervision = deep_supervision
self.mode = mode
pool_layer = get_pool_layer(pool_type=pool_type, mode=mode)
out_ch = base_num_features
in_ch = in_ch
# down
for d in range(self.num_pool):
self.conv_blocks_context.append(StackedConvLayers(in_ch=in_ch, out_ch=out_ch, num_convs=self.NUM_CONVS,
norm_type=norm_type, nonlin_type=nonlin_type, mode=mode))
self.td.append(pool_layer(pool_layer_kernel_sizes[d]))
in_ch = out_ch
out_ch = out_ch * 2
if mode == '2D':
out_ch = min(out_ch, self.MAX_NUM_FILTERS_2D)
else:
out_ch = min(out_ch, self.MAX_NUM_FILTERS_3D)
# bottleneck
# nnunet的做法是上采样后直接cat,而我原来是上采样后conv+bn再cat
final_ch = self.conv_blocks_context[-1].out_ch
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(in_ch=in_ch, out_ch=out_ch, num_convs=self.NUM_CONVS - 1,
norm_type=norm_type, nonlin_type=nonlin_type, mode=mode),
StackedConvLayers(in_ch=out_ch, out_ch=final_ch, num_convs=1,
norm_type=norm_type, nonlin_type=nonlin_type, mode=mode)
))
# up
pool_layer_kernel_sizes.reverse()
for u in range(self.num_pool):
# ch_from_down = final_ch
ch_from_skip = self.conv_blocks_context[-(2 + u)].out_ch
ch_after_tu_and_cat = ch_from_skip * 2
if u != self.num_pool - 1:
final_ch = self.conv_blocks_context[-(3 + u)].out_ch
else:
final_ch = ch_from_skip
self.tu.append(Upsample(scale_factor=pool_layer_kernel_sizes[u], mode=upsample_mode))
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(in_ch=ch_after_tu_and_cat, out_ch=ch_from_skip, num_convs=self.NUM_CONVS - 1,
norm_type=norm_type, nonlin_type=nonlin_type, mode=mode),
StackedConvLayers(in_ch=ch_from_skip, out_ch=final_ch, num_convs=1,
norm_type=norm_type, nonlin_type=nonlin_type, mode=mode)
))
# 做深度监督需要用到的conv,将输出通道数映射到类别数
for ds in range(self.num_pool):
self.conv_layers.append(get_conv_layer(in_ch=self.conv_blocks_localization[ds][-1].out_ch,
out_ch=num_classes, kernel_size=1, use_bias=False, mode=mode))
# 需要将结果倒序
pool_layer_kernel_sizes.reverse()
cum_upsample = np.cumprod(np.vstack(pool_layer_kernel_sizes), axis=0)[::-1].tolist()
# bottleneck不使用,最后一个localization不需要上采样
for usl in range(self.num_pool - 1):
self.upsample_layers.append(Upsample(scale_factor=tuple(cum_upsample[usl + 1]), mode=upsample_mode))
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.conv_layers = nn.ModuleList(self.conv_layers)
self.upsample_layers = nn.ModuleList(self.upsample_layers)
def forward(self, x):
skips = []
seg_outputs = []
for d in range(self.num_pool):
x = self.conv_blocks_context[d](x)
skips.append(x)
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
for u in range(self.num_pool):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
seg_outputs.append(self.conv_layers[u](x))
if self.deep_supervision:
# 顺序是从高分辨率到低分辨率
return tuple([seg_outputs[-1]] + [i(j) for i, j in zip(self.upsample_layers[::-1], seg_outputs[:-1][::-1])])
else:
return (seg_outputs[-1])
if __name__ == '__main__':
tmp = torch.randn(2, 1, 64, 64, 16).cuda()
model = UNet(in_ch=1, base_num_features=30, num_classes=3, norm_type='batch', nonlin_type='relu', pool_type='max',
pool_layer_kernel_sizes=[(2, 2, 2), (2, 2, 2), (2, 2, 2)], deep_supervision=True, mode='3D').cuda()
output = model(tmp)
print('')
| [
"noreply@github.com"
] | FlyGlider.noreply@github.com |
c955cc15d37d086dc2cdcfb27dd726cde852c2b9 | 0fdfac03eeedbd12e1fe6f45ffa46c33c7a2c6ef | /if.py | 46d4597daf2f40a93d42c2a8d8e7e155194ffeb5 | [] | no_license | grockcharger/LXFpython | 3c151bf9169f866034d21c70b3a71727ae9c32f4 | 5266ebee1ce14f5492d56fc5ef19ed3657f36d32 | refs/heads/master | 2020-03-26T05:56:26.813517 | 2014-07-24T14:21:46 | 2014-07-24T14:21:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | #!/usr/bin/env python in Linux/OS X
# -*- coding: utf-8 -*-
#没啥特别需要记录的。。
| [
"dickerzhy1111@163.com"
] | dickerzhy1111@163.com |
aa9645e56df07dec0f994311efd80a58baed1428 | 159bd4c0274271aae7cf2d42bc6819957ee626c9 | /script/UG_Complex_Network_4UD.py | b19380956564614c4ec4e80c552ce2dfa34e0a4b | [] | no_license | Schuck9/UG-in-Weighted-Network | aaa9810e8806d6130ec87c275a169009da460abc | 8e2a6ebde2ed4b9e2f6d2a2ca9d84140c2c5e792 | refs/heads/master | 2021-03-01T04:03:05.983146 | 2020-04-24T02:51:34 | 2020-04-24T02:51:34 | 245,752,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,179 | py | """
A simple implementation of Ultimatum Game in complex network
@date: 2020.3.8
@author: Tingyu Mo
"""
import os
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
class UG_Complex_Network():
    def __init__(self,node_num = 10000,network_type = "SF",update_rule ="NS",player_type = "B",
            avg_degree = 4,intensity_selection = 0.01,mutate_rate = 0.001,check_point = None):
        """Configure the simulation and create ./result/<timestamp>/ for output.

        check_point: name of an existing result directory to resume from;
        when None a new timestamped directory is created.
        """
        self.node_num = node_num
        self.avg_degree = avg_degree
        self.network_type = network_type # "SF" or "ER"
        self.player_type = player_type # "A" or "B" "C"
        self.update_rule = update_rule # "SP" or "NS"
        self.max_weight = 0.4            # edge weight marking a "close tie"
        self.intensity_selection = intensity_selection
        self.mutate_rate = mutate_rate
        self.avg_strategy = (0,0)        # running (p, q) population average
        self.avg_pq_list=[]
        if not os.path.exists("./result"):
            os.mkdir('./result')
        if check_point == None:
            self.dir_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
            os.mkdir("./result/{}".format(self.dir_str))
        else:
            self.dir_str = check_point
    def build_network(self,network_type = None):
        '''
        Build and return the interaction network.

        "SF": Barabasi-Albert scale-free graph, "ER": Erdos-Renyi random
        graph (isolated nodes get one random edge), "RG": random regular
        graph. SF and RG graphs also get edge weights assigned.
        '''
        print("Building network!")
        if network_type == None:
            network_type = self.network_type
        if network_type == "SF":
            G = nx.random_graphs.barabasi_albert_graph(self.node_num, int(self.avg_degree/2))
            G = self.network_weights_asign(G)
        elif network_type == "ER":
            # NOTE(review): the ER branch never calls network_weights_asign,
            # unlike SF and RG — confirm this is intentional.
            G = nx.random_graphs.erdos_renyi_graph(self.node_num, self.avg_degree/self.node_num)
            for n in G.nodes():
                if G.degree(n) == 0:
                    # Attach isolated nodes to one random other node.
                    while True:
                        nbr = np.random.choice(G.nodes(),size = 1)[0]
                        if nbr != n:
                            break
                    G.add_edge(n, nbr)
        elif network_type == "RG":
            G = nx.random_graphs.random_regular_graph(self.avg_degree, self.node_num)
            G = self.network_weights_asign(G)
        print("平均连接度为: ",self.avg_degree_caculate(G))
        return G
def nbr_weighted_check(self,G,n):
is_MaxWeight_exis = None
for nbr in G.adj[n]:
weight = G.edges[n, nbr]['weight']
if weight == self.max_weight:
is_MaxWeight_exis = nbr
break
return is_MaxWeight_exis
    def network_weights_asign(self,G):
        """Assign edge weights so each node has at most one 'close tie'.

        A close tie carries weight max_weight; every other edge of a node
        that has a close tie gets the remaining weight split evenly.
        """
        # Initialize all edge weights to zero.
        for n in list(G.nodes()):
            nbrs = list(G.adj[n])
            for nbr in nbrs:
                G.edges[n, nbr]['weight'] = 0
        # Check whether either endpoint already has a closely-tied partner.
        for n in list(G.nodes()):
            nbrs = list(G.adj[n])
            for nbr in nbrs:
                isMaxWeightExisIn_N = self.nbr_weighted_check(G,n)
                isMaxWeightExisIn_Nbr = self.nbr_weighted_check(G,nbr)
                if (isMaxWeightExisIn_N == None) and (isMaxWeightExisIn_Nbr == None):
                    # Neither side has a close tie yet: make this edge one.
                    G.edges[n, nbr]['weight'] = self.max_weight
                elif (isMaxWeightExisIn_N==nbr) and (isMaxWeightExisIn_Nbr == n):
                    # Already each other's close tie: keep it.
                    G.edges[n, nbr]['weight'] = self.max_weight
                elif (isMaxWeightExisIn_N != None) or (isMaxWeightExisIn_Nbr != None) :
                    # One side is already closely tied elsewhere: ordinary edge.
                    G.edges[n, nbr]['weight'] = (1-self.max_weight)/(self.avg_degree-1)
        # Debug printout of every edge weight.
        # for n, nbrs in G.adjacency():
        #     for nbr, eattr in nbrs.items():
        #         data = eattr['weight']
        #         print('(%d, %d, %0.3f)' % (n,nbr,data))
        # Report the fraction of nodes left without any close tie.
        cnt = 0
        for n in list(G.nodes()):
            result = self.nbr_weighted_check(G,n)
            if result == None:
                cnt += 1
        print("无亲密关系者率:",cnt/self.node_num)
        return G
def initialize_strategy(self,G):
'''
initialize every node's strategy
'''
self.strategy_asigned(G,list(G.nodes()),Type = self.player_type)
def initialize_payoff(self,G):
'''
clear all player's payoff
'''
for n in list(G.nodes()):
G.nodes[n]['payoff'] = 0
def fitness_calculate(self,G,n):
'''
f = 1-w+w*Π
'''
return 1 - self.intensity_selection + self.intensity_selection*G.nodes[n]['payoff']
def strategy_asigned(self,G,node_list,Type = 'B'):
'''
A B C ,three types inBdividual
'''
if Type == 'B':
for n in node_list:
#Type-A player
strategy = np.random.rand()
G.nodes[n]['p'] = strategy
G.nodes[n]['q'] = 1-strategy
G.nodes[n]['payoff'] = 0
elif Type == 'A':
for n in node_list:
#Type-A player
strategy = np.random.rand()
G.nodes[n]['p'] = strategy
G.nodes[n]['q'] = strategy
G.nodes[n]['payoff'] = 0
elif Type == 'C':
for n in node_list:
#Type-A player
G.nodes[n]['p'] = np.random.rand()
G.nodes[n]['q'] = np.random.rand()
G.nodes[n]['payoff'] = 0
    def synchronous_play(self,G):
        '''
        One synchronous round of the Ultimatum Game over every edge.

        G.adjacency() visits each edge twice (once from each endpoint), so
        each visit credits only node n for BOTH of its roles on that edge:
        as proposer n earns 1-offer when accepted, as responder n earns the
        neighbour's offer. The commented-out lines would double-count.
        Each node's payoff is finally averaged over its degree.
        '''
        for n, nbrs in G.adjacency():
            for nbr, _ in nbrs.items():
                # proposer = n ,responder = nbr
                offer = G.nodes[n]['p']
                demand = G.nodes[nbr]['q']
                if offer > demand:
                    G.nodes[n]['payoff'] += 1-offer
                    # G.nodes[nbr]['payoff'] += offer
                # proposer = nbr ,responder = n
                offer = G.nodes[nbr]['p']
                demand = G.nodes[n]['q']
                if offer > demand:
                    # G.node[nbr]['payoff'] += 1-offer
                    G.nodes[n]['payoff'] += offer
            num_nbrs = G.degree(n)
            if num_nbrs != 0:
                G.nodes[n]['payoff'] /= G.degree(n)
def natural_selection(self,G):
'''
each player i in the network selects at random one neighbor j
and compares its payoff Πi with that of j
'''
cnt = 0
for n in list(G.nodes()):
nbrs = list(G.adj[n])
nbr = np.random.choice(nbrs,size = 1)[0]
n_payoff = G.nodes[n]['payoff']
nbr_payoff = G.nodes[nbr]['payoff']
if nbr_payoff > n_payoff:
probs_adopt = (nbr_payoff - n_payoff)/(2*max(G.degree(n),G.degree(nbr)))
if np.random.rand() < probs_adopt:
# n adopts nbr's strategy
cnt += 1
G.nodes[n]['p'] = G.nodes[nbr]['p']
G.nodes[n]['q'] = G.nodes[nbr]['q']
# print("occur:",cnt)
def social_penalty(self,G):
'''
remove the player with lowest payoff and replace it with random one
'''
lowest_n = 0
for n in G.nodes():
if G.nodes[n]['payoff'] < G.nodes[lowest_n]['payoff']:
lowest_n = n
lowest_cluster = list(G.adj[lowest_n])
lowest_cluster.append(lowest_n)
self.strategy_asigned(G,lowest_cluster,Type = self.player_type)
# for n in lowest_cluster:
# #Type-A player
# strategy = np.random.rand()
# G.nodes[n]['p'] = strategy
# G.nodes[n]['q'] = strategy
# G.nodes[n]['payoff'] = 0
    def death_birth_updating(self,G):
        '''
        death-birth updating process,
        choose an individual randomly over the whole population,
        reproduce the strategy with proportional to nbr's fitness
        '''
        individual_list = list(G.nodes())
        chosen_individual = np.random.choice(individual_list,size=1)[0]
        nbrs = list(G.adj[chosen_individual])
        reproduce_probability = list()
        for nbr in nbrs:
            rp = self.fitness_calculate(G,nbr)
            reproduce_probability.append(rp)
        # normalise neighbour fitnesses into a probability distribution
        reproduce_probability = np.array(reproduce_probability)
        reproduce_probability /= sum(reproduce_probability)
        reproduce_individual = np.random.choice(nbrs,size=1,p = reproduce_probability)[0]
        G.nodes[chosen_individual]['p'] = G.nodes[reproduce_individual]['p']
        G.nodes[chosen_individual]['q'] = G.nodes[reproduce_individual]['q']
        return chosen_individual,reproduce_individual

    def birth_death_updating(self,G):
        '''
        birth death updating process,
        choose an individual with proportional to fitnees
        replace one of its nbr randomly
        '''
        individual_list = list(G.nodes())
        fitness_list = list()
        for n in individual_list:
            fitness = self.fitness_calculate(G,n)
            fitness_list.append(fitness)
        # normalise population fitnesses into selection probabilities
        fitness_list = np.array(fitness_list)
        fitness_list /= sum(fitness_list)
        reproduce_individual = np.random.choice(individual_list,size = 1,p = fitness_list)[0]
        nbrs = list(G.adj[reproduce_individual])
        chosen_individual = np.random.choice(nbrs,size = 1)[0]
        G.nodes[chosen_individual]['p'] = G.nodes[reproduce_individual]['p']
        G.nodes[chosen_individual]['q'] = G.nodes[reproduce_individual]['q']
        return chosen_individual,reproduce_individual

    def pairwise_comparison(self,G):
        '''
        pairwise comparison process,
        choose an individual and its nbr randomlyj
        individual imitate its nbr's strategy with probability of 1/1+e^(-w*(Πi-Πj))
        '''
        individual_list = list(G.nodes())
        chosen_individual = np.random.choice(individual_list,size=1)[0]
        nbrs = list(G.adj[chosen_individual])
        reproduce_individual = np.random.choice(nbrs,size = 1)[0]
        # Fermi rule: imitation probability from the payoff difference
        imitate_probability = 1/(1+np.exp(-1*self.intensity_selection*(G.nodes[chosen_individual]['payoff']-G.nodes[reproduce_individual]['payoff'])))
        if np.random.rand() < imitate_probability:
            G.nodes[chosen_individual]['p'] = G.nodes[reproduce_individual]['p']
            G.nodes[chosen_individual]['q'] = G.nodes[reproduce_individual]['q']
        return chosen_individual,reproduce_individual

    def imitaion_updaing(self,G):
        '''
        imitaion updating process,
        choose an individual randomly,
        update its strategy with proportional to its & nbrs fitness
        (the individual itself is included in the candidate pool, so it
        may "imitate" its own strategy and stay unchanged)
        '''
        individual_list = list(G.nodes())
        chosen_individual = np.random.choice(individual_list,size=1)[0]
        fitness_list = list()
        nbrs = list(G.adj[chosen_individual])
        for n in nbrs:
            fitness = self.fitness_calculate(G,n)
            fitness_list.append(fitness)
        nbrs.append(chosen_individual)
        near_domain = nbrs
        fitness_ci = self.fitness_calculate(G,chosen_individual)
        fitness_list.append(fitness_ci)
        fitness_list = np.array(fitness_list)
        fitness_list /= sum(fitness_list)
        reproduce_individual = np.random.choice(near_domain,size =1,p = fitness_list)[0]
        G.nodes[chosen_individual]['p'] = G.nodes[reproduce_individual]['p']
        G.nodes[chosen_individual]['q'] = G.nodes[reproduce_individual]['q']
        return chosen_individual,reproduce_individual
    def mutation(self,G,chosen_individual,reproduce_individual):
        '''
        With small probability replace the chosen individual's strategy
        with a fresh random (p, q) pair.

        NOTE(review): the effective rate is mutate_rate*10 — presumably
        a deliberate speed-up of the nominal rate; confirm against the
        model this simulation reproduces. reproduce_individual is unused
        (see the commented-out branch).
        '''
        if np.random.rand(1) <= self.mutate_rate*10:
            G.nodes[chosen_individual]['p'],G.nodes[chosen_individual]['q'] = np.random.rand(2)
            print("MC")
        # else:
        #     G.nodes[chosen_individual]['p'] = G.nodes[reproduce_individual]['p']
        #     G.nodes[chosen_individual]['q'] = G.nodes[reproduce_individual]['q']
def update(self,G):
'''
natural seletion an social penalty
'''
if self.update_rule == "NS":
chosen_individual,reproduce_individual = self.natural_selection(G)
elif self.update_rule == "SP":
chosen_individual,reproduce_individual = self.social_penalty(G)
elif self.update_rule == "DB":
chosen_individual,reproduce_individual = self.death_birth_updating(G)
elif self.update_rule == "BD":
chosen_individual,reproduce_individual = self.birth_death_updating(G)
elif self.update_rule == "PC":
chosen_individual,reproduce_individual = self.pairwise_comparison(G)
elif self.update_rule == "IU":
chosen_individual,reproduce_individual = self.imitaion_updaing(G)
self.mutation(G,chosen_individual,reproduce_individual)
    def avg_strategy_calculate(self,G,Epoch):
        '''
        Running mean of the population strategy over all epochs so far:
        the previous mean (over Epoch-1 epochs) is re-weighted and the
        current population sums folded in.
        '''
        p_vector = self.get_all_values(G,'p')
        q_vector = self.get_all_values(G,'q')
        p,q = self.avg_strategy #the Epoch-1's average strategy
        p = ((p*self.node_num*(Epoch-1)) + np.sum(p_vector))*1.0/(Epoch*self.node_num)
        q = ((q*self.node_num*(Epoch-1)) + np.sum(q_vector))*1.0/(Epoch*self.node_num)
        # self.avg_pq_list.append((p,q))
        return (p,q)

    def save(self,G,Epoch):
        '''
        Snapshot the run: graph (YAML), per-node strategies (CSV) and
        the average-strategy history (CSV) under
        ./result/<dir_str>/<run-info>_<Epoch>/.

        NOTE(review): os.mkdir requires the parent directories to exist,
        and nx.write_yaml was removed in networkx >= 2.6 — pin the
        networkx version or switch formats.
        '''
        #Save Graph
        result_dir = './result/'
        info = "{}_{}_{}_{}_{}_{}".format(self.network_type,self.player_type,self.update_rule,self.intensity_selection,self.mutate_rate,Epoch)
        Epoch_dir = os.path.join(result_dir,self.dir_str,info)
        if not os.path.exists(Epoch_dir):
            os.mkdir(Epoch_dir)
        graph_path = os.path.join(Epoch_dir,info+"_Graph.yaml")
        nx.write_yaml(G,graph_path)
        #Save strategy
        p_vector = self.get_all_values(G,'p')
        q_vector = self.get_all_values(G,'q')
        pq_array = np.vstack((p_vector,q_vector))
        pq_path = os.path.join(Epoch_dir,info+"_strategy.csv")
        pq = pd.DataFrame(data = pq_array)
        pq.to_csv(pq_path)
        #Save average offer/respond
        avg_pq_path = os.path.join(Epoch_dir,info+"_average_strategy.csv")
        avg_pq = pd.DataFrame(data = self.avg_pq_list)
        avg_pq.to_csv(avg_pq_path)

    def retrain(self,filepath):
        '''
        continue evolution from specific check point:
        pick the most recently modified snapshot directory, parse the
        run parameters back out of its file name, and reload the graph
        plus the last recorded average strategy.

        After the alphabetical sort, result_list[0] is the *_Graph.yaml
        file ('G' < 'a') and result_list[1] the *_average_strategy.csv.
        '''
        print(filepath)
        filepath = os.path.join('./result/',filepath)
        lists = os.listdir(filepath)
        # newest snapshot last -> lists[-1]
        lists.sort(key=lambda fn: os.path.getmtime(filepath + "/" + fn))
        result_dir = os.path.join(filepath, lists[-1])
        result_list = os.listdir(result_dir)
        result_list.sort()
        # file name layout: network_player_rule_w_mu_epoch
        parse_str = result_list[0][:-5].split("_")
        self.network_type = parse_str[0]
        self.player_type = parse_str[1]
        self.update_rule = parse_str[2]
        self.intensity_selection = float(parse_str[3])
        self.mutate_rate = float(parse_str[4])
        Epoch = int(parse_str[5])
        graph_path = os.path.join(result_dir,result_list[0])
        avg_pq_path = os.path.join(result_dir,result_list[1])
        avg_pq = pd.read_csv(avg_pq_path)
        self.avg_strategy = avg_pq.values[-1][1:]
        G = nx.read_yaml(graph_path)
        return G,Epoch+1
def get_all_values(self,G,attr_name):
'''
get specific attribute values of all nodes
'''
value_dict = nx.get_node_attributes(G,attr_name)
value_list = list(value_dict.values())
return value_list
def pq_distribution(self,G,attr_name):
x_axis = np.arange(0,1.05,1/20) # 21 descrete points,range 0~1,step size 0.05
y_axis = np.zeros(x_axis.size)
value_list = self.get_all_values(G,attr_name)
for v in value_list:
for i in range(x_axis.size):
if abs(v-x_axis[i]) < 0.05:
y_axis[i] += 1
return (x_axis,y_axis)
def avg_degree_caculate(self,G):
'''
caculate average degree of graph
'''
degree_total = 0
for x in range(len(G.degree())):
degree_total = degree_total + G.degree(x)
return degree_total/self.node_num
if __name__ == '__main__':
    # Simulation hyper-parameters.
    node_num = 100
    network_type = "RG" # [SF, ER, RG]
    update_rule ='PC' # [NS, SP, DB, BD, PC, IU]
    player_type = "C" # [A=(p=q,q), B=(p,1-p), C=(p,q)]
    avg_degree = 4
    intensity_selection = 0.01
    mutate_rate = 0.001
    avg_strategy_list = [] # NOTE(review): appears unused; results accumulate in UG.avg_pq_list
    Epochs = pow(10,7)
    check_point = None
    # check_point = '2020-03-08-11-52-42'
    if check_point != None:
        # Resume from the newest snapshot under ./result/<check_point>.
        UG = UG_Complex_Network(node_num,network_type,update_rule,player_type,avg_degree,intensity_selection,mutate_rate,check_point)
        G,Start = UG.retrain(check_point)
    else:
        Start = 1
        # NOTE(review): this branch omits mutate_rate — presumably the
        # constructor defaults it; confirm the signature.
        UG = UG_Complex_Network(node_num,network_type,update_rule,player_type,avg_degree,intensity_selection)
        #builds network structure
        G = UG.build_network()
        #initialize the strategy of player in network
        UG.initialize_strategy(G)
    #play game
    for Epoch in range(Start,Epochs+1):
        UG.initialize_payoff(G)
        UG.synchronous_play(G)
        UG.update(G)
        UG.avg_strategy = UG.avg_strategy_calculate(G,Epoch)
        # Log and snapshot every 10000 epochs.
        if Epoch % 10000 == 0:
            print("Epoch[{}]".format(Epoch))
            print("Average strategy: (p ,q)={}\n".format(UG.avg_strategy))
            UG.avg_pq_list.append(UG.avg_strategy)
            UG.save(G,Epoch)
    # UG.viz(G)
| [
"noreply@github.com"
] | Schuck9.noreply@github.com |
6fe7af175efdf0816b4ee3b6a3cc1517c3c59e48 | 920e357f73c2c9944fa9a45c0c751f0d28f4c4bf | /healthcare/db/db_postgre.py | d470f11c41c36f889329a935fbf3373c81149906 | [] | no_license | baekinjun/health_care_data | eb3d62a34b272e49f440a3824852ce08b29c9fe4 | 38e39eaa4de7770b7894739a26172ea1826571cc | refs/heads/master | 2023-06-06T11:34:02.171715 | 2021-06-24T06:11:37 | 2021-06-24T06:11:37 | 370,053,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | from dbutils.pooled_db import PooledDB
from config import Database_config
import urllib3
import psycopg2
# Silence urllib3's per-request warning about unverified HTTPS requests.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Module-level PostgreSQL connection pool shared by the application.
DB = PooledDB(
    ping=7,  # liveness-check mode (7 = check at every opportunity; see DBUtils docs)
    creator=psycopg2,  # DB-API module used to create raw connections
    mincached=5,  # idle connections opened at pool start-up
    blocking=True,  # wait for a free connection instead of raising
    host=Database_config['host'],
    port=Database_config['port'],
    user=Database_config['user'],
    password=Database_config['Password'],
    database=Database_config['Database'],
)
"binjun0711@gmail.com"
] | binjun0711@gmail.com |
3109afea6f409c780c3383f96644a080e84a1224 | 709103ab0c3167fc98ce50db35b8b976abad8836 | /matrix/main.py | 3fd7864a35043b07db922297fa293ca9236bb187 | [] | no_license | ankitanwar/DataStructure | 1502a5ac833cd52e31ec725a23b570d35c7f8b65 | 0e67dd844821492f9652508d5f6ac19ba2724540 | refs/heads/master | 2023-02-25T08:52:24.139426 | 2021-02-03T16:18:01 | 2021-02-03T16:18:01 | 307,113,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | import math
from math import gcd
import os
import sys
from io import BytesIO, IOBase
# Input Functions
def arr_input():
    """Read one line of whitespace-separated integers as a list."""
    return [int(token) for token in input().split()]

def mul_input():
    """Read one line of integers as a map object (for tuple unpacking)."""
    return map(int, input().split())

def int_inp():
    """Read a single integer from one line."""
    return int(input())

def str_input():
    """Read one line as a string."""
    return str(input())
# New Imports
def solution():
    # Placeholder: per-testcase logic is filled in when solving a problem.
    pass
def main():
    """Read the testcase count, then run solution() once per case."""
    for _ in range(int(input())):
        solution()
# region fastio
BUFSIZE = 8192

class FastIO(IOBase):
    """Raw-fd buffered reader/writer used to speed up stdin/stdout.

    Reads pull large chunks from the file descriptor into an in-memory
    BytesIO; writes are buffered in the same BytesIO until flush().
    """
    newlines = 0
    def __init__(self, file):
        self._fd = file.fileno()
        self.buffer = BytesIO()
        # Writable unless the file was opened read-only.
        self.writable = "x" in file.mode or "r" not in file.mode
        self.write = self.buffer.write if self.writable else None
    def read(self):
        # Drain the fd completely, then return everything buffered.
        while True:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            if not b:
                break
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines = 0
        return self.buffer.read()
    def readline(self):
        # Keep reading chunks until at least one full line is buffered.
        while self.newlines == 0:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            self.newlines = b.count(b"\n") + (not b)
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines -= 1
        return self.buffer.readline()
    def flush(self):
        if self.writable:
            os.write(self._fd, self.buffer.getvalue())
            self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
    """Adapt a FastIO byte stream to the str interface callers expect."""
    def __init__(self, file):
        self.buffer = FastIO(file)
        self.flush = self.buffer.flush
        self.writable = self.buffer.writable
        self.write = lambda s: self.buffer.write(s.encode("ascii"))
        self.read = lambda: self.buffer.read().decode("ascii")
        self.readline = lambda: self.buffer.readline().decode("ascii")
# Route all stdio through the fast buffered wrappers.
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
def input(): return sys.stdin.readline().rstrip("\r\n")  # fast line-based input
# endregion
if __name__ == "__main__":
    main()
"tanwariam@gmail.com"
] | tanwariam@gmail.com |
46f35ef2020f867a594b80dcdad1297c376a2690 | d3a0be28d5acd551a31369d078be8d7e48e301e3 | /myapp/endpoint/__init__.py | 95e68705130fa73772da1434681c799837606260 | [] | no_license | wesclemens/flask-example | ff32a23832071b379624c2f6d4632c3e3c487173 | 9261125f072c74d41cc2a477004f1822eac21803 | refs/heads/master | 2020-05-18T06:15:05.213187 | 2014-11-07T00:46:44 | 2014-11-07T00:46:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | import os
import glob
import importlib
# Auto-import every sibling module of this package so that any
# registrations they perform at import time (e.g. route handlers) run
# as soon as the package is imported.
endpoint_dir = os.path.dirname(__file__)
for path in glob.iglob(os.path.join(endpoint_dir, "*.py")):
    module = os.path.basename(path)[:-3]  # strip the '.py' suffix
    if module != '__init__':
        importlib.import_module('.' + module, __name__)
| [
"wesclemens@gmail.com"
] | wesclemens@gmail.com |
1cd2caafcdfc3d03b63d2f9a535fc0900a18007d | f221094494405cb85d950b5a3e0c7fce273c934c | /tictactoe/ai.py | b1b752e6c04341919c2fac4149838b298bc376a5 | [] | no_license | mauropalumbo75/ai | f39e11df44025e118449fe9507f66b35b3d90933 | 3bdd86ca30edc01c68534d73a5b2fd0d2321a955 | refs/heads/master | 2020-12-06T08:42:21.631013 | 2020-01-07T22:18:10 | 2020-01-07T22:18:10 | 232,413,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,787 | py | # https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python
# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python
# Simple reinforcement learning algorithm for learning tic-tac-toe
# Use the update rule: V(s) = V(s) + alpha*(V(s') - V(s))
# Use the epsilon-greedy policy:
# action|s = argmax[over all actions possible from state s]{ V(s) } if rand > epsilon
# action|s = select random action from possible actions from state s if rand < epsilon
#
#
# INTERESTING THINGS TO TRY:
#
# Currently, both agents use the same learning strategy while they play against each other.
# What if they have different learning rates?
# What if they have different epsilons? (probability of exploring)
# Who will converge faster?
# What if one agent doesn't learn at all?
# Poses an interesting philosophical question: If there's no one around to challenge you,
# can you reach your maximum potential?
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import matplotlib.pyplot as plt
LENGTH = 3
class Agent:
    """Tabular value-function tic-tac-toe player.

    Acts epsilon-greedily over the state-value table V and learns with
    the backup V(s) = V(s) + alpha*(V(s') - V(s)) at episode end.
    """
    def __init__(self, eps=0.1, alpha=0.5):
        self.eps = eps # probability of choosing random action instead of greedy
        self.alpha = alpha # learning rate
        self.verbose = False
        self.state_history = []

    def setV(self, V):
        # V: array mapping state hash -> estimated value.
        self.V = V

    def set_symbol(self, sym):
        self.sym = sym

    def set_verbose(self, v):
        # if true, will print values for each position on the board
        self.verbose = v

    def reset_history(self):
        self.state_history = []

    def take_action(self, env):
        # choose an action based on epsilon-greedy strategy
        r = np.random.rand()
        best_state = None
        if r < self.eps:
            # take a random action
            if self.verbose:
                print("Taking a random action")
            possible_moves = []
            for i in range(LENGTH):
                for j in range(LENGTH):
                    if env.is_empty(i, j):
                        possible_moves.append((i, j))
            idx = np.random.choice(len(possible_moves))
            next_move = possible_moves[idx]
        else:
            # choose the best action based on current values of states
            # loop through all possible moves, get their values
            # keep track of the best value
            pos2value = {} # for debugging
            next_move = None
            best_value = -1
            for i in range(LENGTH):
                for j in range(LENGTH):
                    if env.is_empty(i, j):
                        # what is the state if we made this move?
                        env.board[i,j] = self.sym
                        state = env.get_state()
                        env.board[i,j] = 0 # don't forget to change it back!
                        pos2value[(i,j)] = self.V[state]
                        if self.V[state] > best_value:
                            best_value = self.V[state]
                            best_state = state
                            next_move = (i, j)
            # if verbose, draw the board w/ the values
            if self.verbose:
                print("Taking a greedy action")
                for i in range(LENGTH):
                    print("------------------")
                    for j in range(LENGTH):
                        if env.is_empty(i, j):
                            # print the value
                            print(" %.2f|" % pos2value[(i,j)], end="")
                        else:
                            print("  ", end="")
                            if env.board[i,j] == env.x:
                                print("x  |", end="")
                            elif env.board[i,j] == env.o:
                                print("o  |", end="")
                            else:
                                print("   |", end="")
                    print("")
                print("------------------")
        # make the move
        env.board[next_move[0], next_move[1]] = self.sym

    def update_state_history(self, s):
        # cannot put this in take_action, because take_action only happens
        # once every other iteration for each player
        # state history needs to be updated every iteration
        # s = env.get_state() # don't want to do this twice so pass it in
        self.state_history.append(s)

    def update(self, env):
        # we want to BACKTRACK over the states, so that:
        # V(prev_state) = V(prev_state) + alpha*(V(next_state) - V(prev_state))
        # where V(next_state) = reward if it's the most current state
        #
        # NOTE: we ONLY do this at the end of an episode
        # not so for all the algorithms we will study
        reward = env.reward(self.sym)
        target = reward
        for prev in reversed(self.state_history):
            value = self.V[prev] + self.alpha*(target - self.V[prev])
            self.V[prev] = value
            target = value
        self.reset_history()
# this class represents a tic-tac-toe game
# is a CS101-type of project
class Environment:
    """Tic-tac-toe board: tracks cells, winner and terminal status, and
    hashes board configurations to integers in [0, 3^(LENGTH*LENGTH))."""
    def __init__(self):
        self.board = np.zeros((LENGTH, LENGTH))
        self.x = -1 # represents an x on the board, player 1
        self.o = 1 # represents an o on the board, player 2
        self.winner = None
        self.ended = False
        self.num_states = 3**(LENGTH*LENGTH)

    def is_empty(self, i, j):
        return self.board[i,j] == 0

    def reward(self, sym):
        # no reward until game is over
        if not self.game_over():
            return 0
        # if we get here, game is over
        # sym will be self.x or self.o
        return 1 if self.winner == sym else 0

    def get_state(self):
        # returns the current state, represented as an int
        # from 0...|S|-1, where S = set of all possible states
        # |S| = 3^(BOARD SIZE), since each cell can have 3 possible values - empty, x, o
        # some states are not possible, e.g. all cells are x, but we ignore that detail
        # this is like finding the integer represented by a base-3 number
        k = 0
        h = 0
        for i in range(LENGTH):
            for j in range(LENGTH):
                if self.board[i,j] == 0:
                    v = 0
                elif self.board[i,j] == self.x:
                    v = 1
                elif self.board[i,j] == self.o:
                    v = 2
                h += (3**k) * v
                k += 1
        return h

    def game_over(self, force_recalculate=False):
        # returns true if game over (a player has won or it's a draw)
        # otherwise returns false
        # also sets 'winner' instance variable and 'ended' instance variable
        if not force_recalculate and self.ended:
            return self.ended
        # check rows
        for i in range(LENGTH):
            for player in (self.x, self.o):
                if self.board[i].sum() == player*LENGTH:
                    self.winner = player
                    self.ended = True
                    return True
        # check columns
        for j in range(LENGTH):
            for player in (self.x, self.o):
                if self.board[:,j].sum() == player*LENGTH:
                    self.winner = player
                    self.ended = True
                    return True
        # check diagonals
        for player in (self.x, self.o):
            # top-left -> bottom-right diagonal
            if self.board.trace() == player*LENGTH:
                self.winner = player
                self.ended = True
                return True
            # top-right -> bottom-left diagonal
            if np.fliplr(self.board).trace() == player*LENGTH:
                self.winner = player
                self.ended = True
                return True
        # check if draw
        if np.all((self.board == 0) == False):
            # winner stays None
            self.winner = None
            self.ended = True
            return True
        # game is not over
        self.winner = None
        return False

    def is_draw(self):
        return self.ended and self.winner is None

    # Example board
    # -------------
    # | x |   |   |
    # -------------
    # |   |   |   |
    # -------------
    # |   |   | o |
    # -------------
    def draw_board(self):
        for i in range(LENGTH):
            print("-------------")
            for j in range(LENGTH):
                print("  ", end="")
                if self.board[i,j] == self.x:
                    print("x ", end="")
                elif self.board[i,j] == self.o:
                    print("o ", end="")
                else:
                    print("  ", end="")
            print("")
        print("-------------")
class Human:
    """Console player: prompts for coordinates until a legal move is made.

    Implements the same interface as Agent; update/update_state_history
    are no-ops because humans don't learn a value table.
    """
    def __init__(self):
        pass

    def set_symbol(self, sym):
        self.sym = sym

    def take_action(self, env, move=None):
        while True:
            # break if we make a legal move
            move = input("Enter coordinates i,j for your next move (i,j=0..2): ")
            i, j = move.split(',')
            i = int(i)
            j = int(j)
            if env.is_empty(i, j):
                env.board[i,j] = self.sym
                break

    def update(self, env):
        pass

    def update_state_history(self, s):
        pass
# recursive function that will return all
# possible states (as ints) and who the corresponding winner is for those states (if any)
# (i, j) refers to the next cell on the board to permute (we need to try -1, 0, 1)
# impossible games are ignored, i.e. 3x's and 3o's in a row simultaneously
# since that will never happen in a real game
def get_state_hash_and_winner(env, i=0, j=0):
    """Enumerate every board fill (3 values per cell, cell by cell) and
    return (state_hash, winner, ended) triples for the completed boards.

    Mutates env.board while recursing; call on a fresh Environment.
    """
    results = []
    for v in (0, env.x, env.o):
        env.board[i,j] = v # if empty board it should already be 0
        if j == 2:
            # j goes back to 0, increase i, unless i = 2, then we are done
            if i == 2:
                # the board is full, collect results and return
                state = env.get_state()
                ended = env.game_over(force_recalculate=True)
                winner = env.winner
                results.append((state, winner, ended))
            else:
                results += get_state_hash_and_winner(env, i + 1, 0)
        else:
            # increment j, i stays the same
            results += get_state_hash_and_winner(env, i, j + 1)
    return results
# play all possible games
# need to also store if game is over or not
# because we are going to initialize those values to 0.5
# NOTE: THIS IS SLOW because MANY possible games lead to the same outcome / state
# def get_state_hash_and_winner(env, turn='x'):
# results = []
# state = env.get_state()
# # board_before = env.board.copy()
# ended = env.game_over(force_recalculate=True)
# winner = env.winner
# results.append((state, winner, ended))
# # DEBUG
# # if ended:
# # if winner is not None and env.win_type.startswith('col'):
# # env.draw_board()
# # print "Winner:", 'x' if winner == -1 else 'o', env.win_type
# # print "\n\n"
# # assert(np.all(board_before == env.board))
# if not ended:
# if turn == 'x':
# sym = env.x
# next_sym = 'o'
# else:
# sym = env.o
# next_sym = 'x'
# for i in xrange(LENGTH):
# for j in xrange(LENGTH):
# if env.is_empty(i, j):
# env.board[i,j] = sym
# results += get_state_hash_and_winner(env, next_sym)
# env.board[i,j] = 0 # reset it
# return results
def initialV_x(env, state_winner_triples):
    """Initial value table for player x.

    1 where x has won, 0 for x's losses and for draws, 0.5 for every
    unfinished state.
    """
    V = np.zeros(env.num_states)
    for state, winner, ended in state_winner_triples:
        if not ended:
            V[state] = 0.5
        elif winner == env.x:
            V[state] = 1
        # lost/drawn terminal states keep the 0 from np.zeros
    return V
def initialV_o(env, state_winner_triples):
    """Mirror of initialV_x for player o.

    1 only where o has won; 0 for losses and draws; 0.5 for every
    unfinished state.
    """
    V = np.zeros(env.num_states)
    for state, winner, ended in state_winner_triples:
        if not ended:
            V[state] = 0.5
        elif winner == env.o:
            V[state] = 1
        # lost/drawn terminal states keep the 0 from np.zeros
    return V
def play_game(p1, p2, env, draw=False):
    """Run one full episode; p1 always moves first.

    draw=1 shows the board before p1's moves, draw=2 before p2's; the
    final board is shown whenever draw is truthy. Both players run
    their value-function update when the episode ends.
    """
    # loops until the game is over
    current_player = None
    while not env.game_over():
        # alternate between players
        # p1 always starts first
        if current_player == p1:
            current_player = p2
        else:
            current_player = p1
        # draw the board before the user who wants to see it makes a move
        if draw:
            if draw == 1 and current_player == p1:
                env.draw_board()
            if draw == 2 and current_player == p2:
                env.draw_board()
        # current player makes a move
        current_player.take_action(env)
        # update state histories
        state = env.get_state()
        p1.update_state_history(state)
        p2.update_state_history(state)
    if draw:
        env.draw_board()
    # do the value function update
    p1.update(env)
    p2.update(env)
class Game:
    """Pairs a learning Agent (p1) with a Human (p2) on a fresh board.

    NOTE(review): neither player's symbol nor the Agent's value table V
    is initialised here — the caller must invoke set_symbol/setV before
    playing; confirm against the driver code.
    """
    def __init__(self):
        self.p1 = Agent()
        self.p2 = Human()
        self.env = Environment()

    def reset(self):
        # Fresh players and board for a new game.
        self.p1 = Agent()
        self.p2 = Human()
        self.env = Environment()

    def play_first_move(self):
        # The agent opens the game.
        self.p1.take_action(self.env)
        # update state histories
        state = self.env.get_state()
        self.p1.update_state_history(state)
        self.p2.update_state_history(state)

    def play_move(self):
        # One full round: the human moves, then the agent replies.
        # play player move
        self.p2.take_action(self.env)
        # update state histories
        state = self.env.get_state()
        self.p1.update_state_history(state)
        self.p2.update_state_history(state)
        # play ai move
        self.p1.take_action(self.env)
        # update state histories
        state = self.env.get_state()
        self.p1.update_state_history(state)
        self.p2.update_state_history(state)
"mauropalumbo75@gmail.com"
] | mauropalumbo75@gmail.com |
510d24877cdf12ce2f2484dcbd9b3e0331a1aefb | 55772377e3035c578fd49bc00e861bd388cc0e13 | /LeetCode/merge_two_sorted_lists.py | 20617d9ef7931c21cb1469af42ce624691451f87 | [
"MIT"
] | permissive | qicst23/Daily | 5c4588b9cc1d38242ee2771c2fb04063c7f3fec0 | 12f9a5ae24dd251568904729b10122ddaf5b9cad | refs/heads/master | 2021-01-15T16:29:17.684859 | 2015-10-19T15:11:02 | 2015-10-19T15:11:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | """
Merge two sorted linked lists and return it as a new list. The new list should
be made by splicing together the nodes of the first two lists.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param {ListNode} l1
    # @param {ListNode} l2
    # @return {ListNode}
    def mergeTwoLists(self, l1, l2):
        """Splice two sorted linked lists into one sorted list.

        Nodes are re-linked in place; no new value nodes are created
        (only a throwaway sentinel head).
        """
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        sentinel = ListNode(0)
        cursor = sentinel
        while l1 is not None and l2 is not None:
            if l1.val <= l2.val:
                cursor.next, l1 = l1, l1.next
            else:
                cursor.next, l2 = l2, l2.next
            cursor = cursor.next
        # Attach whatever remains of the non-exhausted list.
        cursor.next = l1 if l1 is not None else l2
        return sentinel.next
| [
"reterclose@gmail.com"
] | reterclose@gmail.com |
59b39957186f3c76e740d3bac8084fb63519bf5e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/3066.py | fce606fdb7b3bf7e4ebcb4d8aa5331d6907dbeba | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | fin = open('in', 'r')
fout = open('out', 'w')
# First line of the input file is the number of test cases.
numberOfCases = int(fin.readline())
def findChosenRow():
answer = int(fin.readline())
for rowNum in range (1,5):
row = fin.readline()
if rowNum == answer:
cosenRow = row.split()
cosenRow = [int(string) for string in cosenRow]
return cosenRow
def findCommonCard(firstRow, secondRow):
    """Return the unique card present in both rows.

    Returns 0 when more than one pair of equal cards exists (ambiguous)
    and -1 when no card is shared (contradictory answers).
    """
    matches = [card for card in firstRow for other in secondRow if card == other]
    if len(matches) == 1:
        return matches[0]
    if len(matches) > 1:
        return 0
    return -1
# Per case: two volunteer answers; the common card of the two chosen
# rows decides the verdict (0 -> ambiguous, -1 -> contradictory).
for case in range(1,numberOfCases + 1):
    firstRow = findChosenRow()
    secondRow = findChosenRow()
    answer = findCommonCard(firstRow, secondRow)
    if answer > 0:
        # NOTE(review): Code Jam graders expect 'Case #'; lowercase 'c'
        # is written here — confirm against the judge's format.
        fout.write('case #' + str(case) + ': ' + str(answer) + '\n')
    elif answer == 0:
        fout.write('case #' + str(case) + ': Bad magician!\n')
    elif answer == -1:
        fout.write('case #' + str(case) + ': Volunteer cheated!\n')
def method():
    # Unused placeholder.
    pass

fin.close()
fout.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
cd820bed2bb0fe512b0fe3b46b33a6ec078a7035 | d99465bf2ee5b40c6f3d473d1dfeef3f360c9088 | /RESTful/RESTful.py | 32014d0ca25ec6bc91369c02431a9ca6bf0a7e19 | [] | no_license | GeekPhx/flask-me | e07bc576d61daddbff60bbd3ecc1edd82d6aa921 | 76ae7ab8b75468683931c973131debb8475617c4 | refs/heads/master | 2016-08-12T22:27:01.878609 | 2015-11-13T02:37:27 | 2015-11-13T02:37:27 | 46,093,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,013 | py | from flask import Flask
from flask import abort, jsonify, make_response, \
request, url_for
from flask.ext.httpauth import HTTPBasicAuth
auth = HTTPBasicAuth()
@auth.get_password
def get_password(username):
if username == 'phoenix':
return 'hainuer'
return None
@auth.error_handler
def unauthorized():
# return make_response(jsonify({'error': 'Unauthorized access'}), 401)
return make_response(jsonify({'error': 'Unauthorized access'}), 403)
app = Flask(__name__)
# Datas
tasks = [
{
'id': 1,
'title': u'Buy iPad',
'description': u'iPad Air 2',
'done': False
},
{
'id': 2,
'title': u'Learn Openstack',
'description': u'Need to find a good OpenStack tutorial on the web',
'done': False
}
]
def make_public_task(task):
    """Return a copy of a task with its numeric id replaced by a URI field."""
    public = {}
    for key, value in task.items():
        if key == 'id':
            public['uri'] = url_for('get_task', task_id=value, _external=True)
        else:
            public[key] = value
    return public
@app.route('/')
def index():
    """Plain-HTML landing page pointing at the task collection."""
    link = '<a href="/todo/api/v1.0/tasks">Todo API</a>'
    return '<p align="center">' + link + '</p>'
@app.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
    """Return every task, with ids rewritten to URIs."""
    public = [make_public_task(t) for t in tasks]
    return jsonify({'tasks': public})
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
@auth.login_required
def get_task(task_id):
    """Return a single task by id; 404 if it does not exist."""
    matches = [t for t in tasks if t['id'] == task_id]
    if not matches:
        abort(404)
    return jsonify({'task': matches[0]})
@app.route('/todo/api/v1.0/tasks', methods=['POST'])
@auth.login_required
def create_task():
    """Create a task from the JSON body; 'title' is required (400 otherwise)."""
    body = request.json
    if not body or 'title' not in body:
        abort(400)
    # NOTE(review): id generation assumes the task list is never empty.
    new_task = {
        'id': tasks[-1]['id'] + 1,
        'title': body['title'],
        'description': body.get('description', ""),
        'done': False
    }
    tasks.append(new_task)
    return jsonify({'task': new_task}), 201
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['PUT'])
@auth.login_required
def update_task(task_id):
    """Partially update an existing task.

    404 for an unknown id; 400 on a missing JSON body or on a type
    mismatch for 'title', 'description' or 'done'.
    """
    task = list(filter(lambda t: t['id'] == task_id, tasks))
    if len(task) == 0:
        abort(404)
    if not request.json:
        abort(400)
    # Bug fix: the original compared against `unicode`, which raises
    # NameError on Python 3 (the file otherwise targets 3 — note the
    # list(map(...)) / list(filter(...)) wrappers elsewhere).
    if 'title' in request.json and type(request.json['title']) is not str:
        abort(400)
    if 'description' in request.json and type(request.json['description']) is not str:
        abort(400)
    if 'done' in request.json and type(request.json['done']) is not bool:
        abort(400)
    task[0]['title'] = request.json.get('title', task[0]['title'])
    task[0]['description'] = request.json.get('description', task[0]['description'])
    task[0]['done'] = request.json.get('done', task[0]['done'])
    return jsonify({'task': task[0]})
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['DELETE'])
@auth.login_required
def delete_task(task_id):
    """Delete a task by id; 404 if it does not exist."""
    # Bug fix: filter() is a lazy iterator on Python 3, so len() raised
    # TypeError here; materialise it as a list (as get_task already does).
    task = list(filter(lambda t: t['id'] == task_id, tasks))
    if len(task) == 0:
        abort(404)
    tasks.remove(task[0])
    return jsonify({'result': True})
@app.errorhandler(404)
def not_found(error):
    """Serve 404s as JSON instead of Flask's default HTML page."""
    body = jsonify({'error': 'Not found.'})
    return make_response(body, 404)
if __name__ == '__main__':
    # Development server only; debug=True must not run in production.
    app.run(debug=True)
| [
"Phoenix@PhoenixdeiMac.local"
] | Phoenix@PhoenixdeiMac.local |
4633bf2dabdce0c57ad2d98e8f50c7744e7dae68 | 1cdc297dc7573b64d934af8508b787b42257d4fe | /InstructionCounter/instructions/class_field_instruction.py | 55ff8e0fef1ba70c0429a54803b9eda892ec01bf | [] | no_license | Caspersn2/EnergyIDE | dc74c53002b1cd48789fcca713b93d73825f8658 | a28ccd16033cb21351d7730503c08cd5fcd76a59 | refs/heads/main | 2023-06-11T13:41:00.400464 | 2021-06-27T08:18:39 | 2021-06-27T08:18:39 | 337,654,715 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | from instruction import instruction
from action_enum import Actions
class store_class_field(instruction):
    """IL 'stfld': pop a value and an object reference, then store the
    value into the object's named field."""

    def __init__(self, name, field_name):
        self.field_name = field_name
        super().__init__(name)

    @classmethod
    def create(cls, name, elements):
        # The field name is the token after the final '::' qualifier.
        _, _, field = elements[-1].rpartition('::')
        return store_class_field(name, field)

    @classmethod
    def keys(cls):
        return ['stfld']

    def execute(self, storage):
        value = storage.pop_stack()
        target = storage.pop_stack()
        target.set_state(self.field_name, value)
        return Actions.NOP, None

    def __repr__(self) -> str:
        return f'{self.name}: {self.field_name}'
class load_class_field(instruction):
    """IL 'ldfld': pop an object reference and push the value of its
    named field onto the stack."""

    def __init__(self, name, field_name):
        self.field_name = field_name
        super().__init__(name)

    @classmethod
    def create(cls, name, elements):
        # The field name is the token after the final '::' qualifier.
        _, _, field = elements[-1].rpartition('::')
        return load_class_field(name, field)

    @classmethod
    def keys(cls):
        return ['ldfld']

    def execute(self, storage):
        target = storage.pop_stack()
        value = target.get_state(self.field_name).get_value()
        storage.push_stack(value)
        return Actions.NOP, None

    def __repr__(self) -> str:
        return f'{self.name}: {self.field_name}'
| [
"jnarha16@student.aau.dk"
] | jnarha16@student.aau.dk |
79b941729158619e1c7109c695cd2d227a105149 | 037ea32af8ffc1b66f5ddbbb51ff37ee94d94d30 | /thrift_benchmark/client_multiprocess.py | 0f73e013b75c141f32dd74b0f85091bb77cbfb51 | [] | no_license | popart/ubuntu_thrift_benchmark | c67fc9b607d69ba8b82ccb12fc429bb8d331ee2e | f7848ace3237493683804447c7fd60a96a34543e | refs/heads/master | 2021-01-11T13:49:02.096187 | 2017-04-06T19:33:24 | 2017-04-06T19:33:24 | 86,623,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | import sys
sys.path.append('gen-py')
import time
from multiprocessing import Pool
from echo import Echo
from echo.ttypes import Packet
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
def create_client():
# Make socket
transport = TSocket.TSocket('localhost', 9090)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TBufferedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = Echo.Client(protocol)
return (client, transport)
def worker(rps):
client, transport = create_client()
# Connect!
transport.open()
for s in xrange(5):
start = time.time()
for _ in xrange(rps):
client.echo('hello world')
elapsed = time.time() - start
#if elapsed > 1:
#return False
if elapsed < 1:
time.sleep(1 - elapsed)
transport.close()
return True
if __name__ == '__main__':
for n in xrange(2,3):
for rps in xrange(1000, 10000, 1000):
# reset the server counts
client, transport = create_client()
transport.open()
client.reset()
#run some load tests
pool = Pool(processes=n)
ress = []
for _ in xrange(n):
ress.append(pool.apply_async(worker, (rps,)))
pool.close()
pool.join()
# get the server counts
cs = client.count()
transport.close()
if all(res.get() for res in ress):
print "** PASS: clients: %d, rps: %d **" % (n, rps)
print cs
else:
print '-- FAIL: clients: %d, rps: %d --' % (n, rps)
print cs
break
| [
"andrew.wong@pelotoncycle.com"
] | andrew.wong@pelotoncycle.com |
6fe66bf89f4b24d7c434891bd47d1178cf654a3e | bb09fcb6fed888b30fabef0d91f0f59c12a7c942 | /thehindu/thehindu/pipelines.py | 0bfed483076c96624ffc224bde82b49f7f5e6f0f | [] | no_license | AshwiniTokekar/Multi-document-summarization-of-news-articles-using-deep-learning | a50d64c635f9e4a4ed0e66ed6d5022b4554f72e2 | 80c6acdd79707e8feb7fa45ef10b1c1abd6d6493 | refs/heads/master | 2021-01-01T05:43:01.185698 | 2016-05-17T13:27:18 | 2016-05-17T13:27:18 | 58,912,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ThehinduPipeline(object):
def process_item(self, item, spider):
return item
| [
"tokekar.ashwini@gmail.com"
] | tokekar.ashwini@gmail.com |
2c0a169960a4ac870dcd1e184a8799168675e1b5 | f861dd55108ab727797d7f1b0004242e847b5349 | /app/config.py | 964a372c1389f1c1a6c2bce4b8f611d7520cd550 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Jorgee97/CovidTwilio | 7f41e8ba52be2923ff7e0de427e6a714e368a5c5 | bab3641da6037690b0d960c1ba7e5c5248daf3d9 | refs/heads/master | 2022-12-20T03:15:13.545933 | 2020-05-11T04:24:18 | 2020-05-11T04:24:18 | 253,131,340 | 3 | 3 | MIT | 2022-12-08T09:50:21 | 2020-04-05T01:18:32 | Python | UTF-8 | Python | false | false | 448 | py | import os
class Config():
DEBUG = False
TESTING = False
MONGODB_SETTINGS = {
'db': os.environ.get('MONGO_DB') or 'covid_19',
'host': os.environ.get('MONGO_HOST') or 'mongodb://localhost',
}
DATOS_GOV_KEY = os.environ.get('DATOS_GOV_KEY')
TWILIO_ML_API = os.environ.get('TWILIO_ML_API') or 'abcdefggjgdd'
class DevConfig(Config):
DEBUG = True
class ProdConfig(Config):
DEBUG = False | [
"ingjorgegomez@outlook.com"
] | ingjorgegomez@outlook.com |
26f14e295a03e3cd20d40a232caddf97471a11f2 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/VBF/Full2016DNN/cuts.py | cfe23e2e39ec86c60d657dd0498314605d02f098 | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 13,831 | py | # cuts
#cuts = {}
supercut = 'mll>12 \
&& std_vector_lepton_pt[0]>25 && std_vector_lepton_pt[1]>10 \
&& std_vector_lepton_pt[2]<10 \
&& metPfType1 > 20 \
&& ptll > 30 \
&& (std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
'
cuts['hww2l2v_13TeV_of2j_vbf_incl'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
&& (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
&& (mth>=60 && mth<125) \
&& (njet==2) \
&& (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
&& (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
&& (detajj>3.5 && mjj>=400) \
&& (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
&& '+bVeto+' \
'
cuts['hww2l2v_13TeV_of2j_vbf_incl_NOdetajj'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
&& (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
&& (mth>=60 && mth<125) \
&& (njet==2) \
&& (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
&& (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
&& (mjj>=400) \
&& (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
&& '+bVeto+' \
'
#cuts['hww2l2v_13TeV_of2j_vbf_DNNgt09'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar >= 0.9) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#cuts['hww2l2v_13TeV_of2j_vbf_DNNlt09'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar < 0.9) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#
#cuts['hww2l2v_13TeV_of2j_vbf_DNNgt08'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar >= 0.8) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#cuts['hww2l2v_13TeV_of2j_vbf_DNNlt08'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar < 0.8) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#cuts['hww2l2v_13TeV_of2j_vbf_DNNgt07'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar >= 0.7) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#cuts['hww2l2v_13TeV_of2j_vbf_DNNlt07'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar < 0.7) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#cuts['hww2l2v_13TeV_of2j_vbf_DNNgt06'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar >= 0.6) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#cuts['hww2l2v_13TeV_of2j_vbf_DNNlt06'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar < 0.6) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#cuts['hww2l2v_13TeV_of2j_vbf_DNNgt03'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar >= 0.3) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#cuts['hww2l2v_13TeV_of2j_vbf_DNNlt03'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400) \
# && (DNNvar < 0.3) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#cuts['hww2l2v_13TeV_of2j_vbf_lowmjj'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# && (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=400 && mjj<700) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#cuts['hww2l2v_13TeV_of2j_vbf_highmjj'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
# && (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
# && (mth>=60 && mth<125) \
# && (njet==2) \
# & (abs((std_vector_lepton_eta[0] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (abs((std_vector_lepton_eta[1] - (jeteta1+jeteta2)/2)/detajj) < 0.5) \
# && (detajj>3.5 && mjj>=700) \
# && (std_vector_jet_pt[0]>30 && std_vector_jet_pt[1]>30) \
# && '+bVeto+' \
# '
#
#
# control regions
#
cuts['hww2l2v_13TeV_top_of2j_vbf'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
&& (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
&& mll>50 \
&& ( std_vector_jet_pt[0] >= 30 ) \
&& ( std_vector_jet_pt[1] >= 30 ) \
&& (njet==2) \
&& (detajj>3.5 && mjj>400) \
&& ( std_vector_jet_cmvav2[0]>-0.5884 || std_vector_jet_cmvav2[1]>-0.5884 ) \
'
cuts['hww2l2v_13TeV_dytt_of2j_vbf'] = '(std_vector_lepton_flavour[0] * std_vector_lepton_flavour[1] == -11*13) \
&& (abs(std_vector_lepton_flavour[1]) == 13 || std_vector_lepton_pt[1]>13) \
&& ( mth<60) \
&& mll>40 && mll<80 \
&& ( std_vector_jet_pt[0] >= 30 ) \
&& ( std_vector_jet_pt[1] >= 30 ) \
&& (njet==2) \
&& (detajj>3.5 && mjj>400) \
&& '+bVeto+' \
'
# 11 = e
# 13 = mu
# 15 = tau
| [
"lorenzo.viliani@cern.ch"
] | lorenzo.viliani@cern.ch |
1ee1d5c75d3aaf91e8da50f86f6ac5bb232151ba | 150ba1bd884b475ca251f54ecef6c7496b958815 | /boundingbox.py | 017949ac3651eca0006db7dd64a85cd85281df6d | [] | no_license | nvwlspls/SmallBar | d83aaa5bdebf018510b701b662cb82fa338fefab | b9b163fa733584e9291323e085923d7ca815ff85 | refs/heads/master | 2021-01-22T22:57:03.982976 | 2013-11-28T04:34:12 | 2013-11-28T04:34:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py |
import flickrapi
import xml.etree.ElementTree as ET
import psycopg2
#create a connection the localhost database
#conn = psycopg2.connect("dbname=postgres port=5423 user=wayne password=smallbar")
#create a cursor for the database connection
#cur = conn.cursor()
bboxlist = [[-122.523763, 37.696404, -122.331622, 37.831665]]
count = 0
flickr = flickrapi.FlickrAPI('c83487f5d94759be0bcbe9a480be02c8', format='etree')
smallboxes = []
for e in bboxlist:
count = count + 1
print count , "count"
currentbox = str(e).strip("[]")
#print str(currentbox + "current box")
#half the height of the box
h = (abs(e[3] - e[1])*.5)
#half the width of the box
w = (abs(e[0] - e[2])*.5)
search = flickr.photos_search(min_upload_date=2013-01-01,
bbox=currentbox,
accuracy=16,
extras='geo,date_taken,tags')
#print search
search.attrib['stat'] = 'ok'
total = search.find('photos').attrib['total']
#print total
nophotos = int(float(total))
print nophotos , "nophotos"
#this will check the number of photos returned is greater then 10000 and if it is it will
#draw a smaller box until the number of photos returned is less then 10000
if nophotos >10000:
print "Too many photos in box"
new_lat = e[1] + h
new_lon = e[0] + w
nb1 = [e[0] , new_lat , new_lon, e[3]]
bboxlist.append(nb1)
nb2 = [new_lon, new_lat, e[2], e[3]]
bboxlist.append(nb2)
nb3 = [e[0], e[1], new_lon, new_lat]
bboxlist.append(nb3)
nb4 = [new_lon, e[1], e[2], new_lat]
bboxlist.append(nb4)
bboxlist.remove(e)
print len(bboxlist) , "after bixbox"
else:
print "This box is small enough"
#print bboxlist
print len(bboxlist) , "before"
smallboxes.append(e)
bboxlist.remove(e)
print len(bboxlist) , "after"
print len(bboxlist) , "final"
#print bboxlist , "this here"
##print ('smallboxes' , smallboxes)
#print len(smallboxes)
#print smallboxes
#print get_small_boxes(bboxlist)
| [
"waynejessen@waynes-macbook.local"
] | waynejessen@waynes-macbook.local |
4cb9216fe42a1d68811c6513183c40488acaff47 | bb150497a05203a718fb3630941231be9e3b6a32 | /models_restruct/deepxde/tools/start.py | 7e9ecf8ec9ecf1528bb5f166d1ce332103f5b5aa | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 6,862 | py | """
start before model running
"""
import os
import sys
import json
import shutil
import urllib
import logging
import wget
logger = logging.getLogger("ce")
class DeepXDE_Start(object):
"""
自定义环境准备
"""
def __init__(self):
"""
init
"""
self.qa_yaml_name = os.environ["qa_yaml_name"]
self.rd_yaml_path = os.environ["rd_yaml_path"]
logger.info("###self.qa_yaml_name: {}".format(self.qa_yaml_name))
self.reponame = os.environ["reponame"]
self.system = os.environ["system"]
self.step = os.environ["step"]
logger.info("###self.step: {}".format(self.step))
self.paddle_whl = os.environ["paddle_whl"]
self.mode = os.environ["mode"] # function or precision
self.REPO_PATH = os.path.join(os.getcwd(), self.reponame)
self.env_dict = {}
self.model = self.qa_yaml_name.split("^")[-1]
logger.info("###self.model_name: {}".format(self.model))
self.env_dict["model"] = self.model
os.environ["model"] = self.model
def prepare_gpu_env(self):
"""
根据操作系统获取用gpu还是cpu
"""
if "cpu" in self.system or "mac" in self.system:
self.env_dict["set_cuda_flag"] = "cpu" # 根据操作系统判断
else:
self.env_dict["set_cuda_flag"] = "gpu" # 根据操作系统判断
return 0
def add_paddle_to_pythonpath(self):
"""
paddlescience 打包路径添加到python的路径中
"""
cwd = os.getcwd()
paddle_path = os.path.join(cwd, "deepxde")
old_pythonpath = os.environ.get("PYTHONPATH", "")
new_pythonpath = f"{paddle_path}:{old_pythonpath}"
os.environ["PYTHONPATH"] = new_pythonpath
os.environ["DDE_BACKEND"] = "paddle"
return 0
def alter(self, file, old_str, new_str, flag=True, except_str="model.train(0"):
"""
replaced the backend
"""
file_data = ""
with open(file, "r", encoding="utf-8") as f:
for line in f:
if flag:
if old_str in line and new_str not in line and except_str not in line:
line = line.replace(old_str, new_str)
else:
if old_str in line:
line = line.replace(old_str, new_str)
file_data += line
with open(file, "w", encoding="utf-8") as f:
f.write(file_data)
return 0
def add_seed(self, file, old_str, new_str):
"""
add the seed
"""
file_data = ""
with open(file, "r", encoding="utf-8") as f:
for line in f:
if old_str in line:
if old_str == "L-BFGS":
if " " not in line:
global flag_LBFGS
flag_LBFGS = True
line += new_str
else:
line += new_str
# line += "paddle.seed(1)\n"
# line += "np.random.seed(1)\n"
file_data += line
with open(file, "w", encoding="utf-8") as f:
f.write(file_data)
return 0
def change_backend(self, file, backend, flag):
"""
change models.py backend
"""
file_data = ""
if flag is True:
index = False
with open(file, "r", encoding="utf-8") as f:
for line in f:
if index is True:
if "# " in line and "Backend jax" not in line:
line = line.replace("# ", "")
else:
index = False
if backend in line:
index = True
file_data += line
with open(file, "w", encoding="utf-8") as f:
f.write(file_data)
else:
index = False
with open(file, "r", encoding="utf-8") as f:
for line in f:
if index is True:
if "Backend paddle" not in line:
line = "# " + line
else:
index = False
if backend in line:
index = True
file_data += line
with open(file, "w", encoding="utf-8") as f:
f.write(file_data)
return 0
def get_example_dir(self):
"""
get_example_dir
"""
example_dir = self.qa_yaml_name.replace("^", "/")
if "lulu" in example_dir:
example_dir = "deepxde" + example_dir[4:] + ".py"
elif "rd" in example_dir:
example_dir = "deepxde" + example_dir[2:] + ".py"
return example_dir
def get_deepxde_data(self):
"""
get_deepxde_data
"""
os.system("cp -r deepxde/examples/dataset/ ./")
return 0
def build_prepare(self):
"""
build prepare
"""
ret = 0
ret = self.prepare_gpu_env()
if ret:
logger.info("build prepare_gpu_env failed")
return ret
os.environ[self.reponame] = json.dumps(self.env_dict)
return ret
def download_datasets(self):
"""
download dataset
"""
url = "https://paddle-qa.bj.bcebos.com/deepxde/datasets.tar.gz"
file_name = "datasets.tar.gz"
urllib.request.urlretrieve(url, file_name)
os.system("tar -zxvf " + file_name + " -C deepxde/")
return 0
def run():
"""
执行入口
"""
model = DeepXDE_Start()
model.build_prepare()
model.add_paddle_to_pythonpath()
model.get_deepxde_data()
filedir = model.get_example_dir()
model.alter(filedir, "tf", "paddle")
model.change_backend(filedir, "Backend paddle", True)
model.change_backend(filedir, "Backend tensorflow.compat.v1", False)
model.alter(filedir, "model.train(", "model.train(display_every=1,", True, "model.train(0")
model.alter(filedir, "model.train(", "losshistory, train_state = model.train(")
model.alter(filedir, "display_every=1000,", " ", False)
model.alter(filedir, "display_every=1000", " ", False)
model.alter(filedir, "display_every=500", " ", False)
model.add_seed(filedir, "import deepxde", "import paddle\n")
# add_seed(filedir, "import paddle", "paddle.seed(1)\n")
model.add_seed(filedir, "import deepxde", "import numpy as np\n")
model.add_seed(filedir, "import deepxde", "dde.config.set_random_seed(1)\n")
if "antiderivative" in model.qa_yaml_name:
model.download_datasets()
return 0
if __name__ == "__main__":
run()
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
2a3afbad100efcb1edda22e3475a09ff6d227fab | 7949f96ee7feeaa163608dbd256b0b76d1b89258 | /toontown/ai/DistributedPhaseEventMgr.py | 2a79a55ca8df3ac10529506eb7476344ed65df63 | [] | no_license | xxdecryptionxx/ToontownOnline | 414619744b4c40588f9a86c8e01cb951ffe53e2d | e6c20e6ce56f2320217f2ddde8f632a63848bd6b | refs/heads/master | 2021-01-11T03:08:59.934044 | 2018-07-27T01:26:21 | 2018-07-27T01:26:21 | 71,086,644 | 8 | 10 | null | 2018-06-01T00:13:34 | 2016-10-17T00:39:41 | Python | UTF-8 | Python | false | false | 1,096 | py | # File: t (Python 2.4)
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
import datetime
class DistributedPhaseEventMgr(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhaseEventMgr')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.holidayDates = []
def setIsRunning(self, isRunning):
self.isRunning = isRunning
def setNumPhases(self, numPhases):
self.numPhases = numPhases
def setCurPhase(self, curPhase):
self.curPhase = curPhase
def getIsRunning(self):
return self.isRunning
def getNumPhases(self):
return self.numPhases
def getCurPhase(self):
return self.curPhase
def setDates(self, holidayDates):
for holidayDate in holidayDates:
self.holidayDates.append(datetime.datetime(holidayDate[0], holidayDate[1], holidayDate[2], holidayDate[3], holidayDate[4], holidayDate[5]))
| [
"fr1tzanatore@aol.com"
] | fr1tzanatore@aol.com |
0f20585a844977b4362a9860a036f47b28823b97 | ecf1ce6f8b592f76c7b7c253608c1264ae0676a3 | /days/day017/list_comprehensions_and_generators.py | 78d9123b75c5cf83388f77dff5985392cf955d59 | [] | permissive | alex-vegan/100daysofcode-with-python-course | 94e99880a50ac412e398ad209ed53796f253641f | b6c12316abe18274b7963371b8f0ed2fd549ef07 | refs/heads/master | 2021-07-20T23:05:59.721661 | 2019-01-21T16:18:25 | 2019-01-21T16:18:25 | 150,115,516 | 0 | 0 | MIT | 2018-09-24T14:28:16 | 2018-09-24T14:28:15 | null | UTF-8 | Python | false | false | 1,006 | py | from random import sample
from itertools import islice
from pprint import pprint as pp
NAMES = ['arnold schwarzenegger', 'alec baldwin', 'bob belderbos',
'julian sequeira', 'sandra bullock', 'keanu reeves',
'julbob pybites', 'bob belderbos', 'julian sequeira',
'al pacino', 'brad pitt', 'matt damon', 'brad pitt']
def convert_title_case_names(names=NAMES):
return [name.title() for name in names]
def reverse_first_last_names(names=NAMES):
return [" ".join(name.split()[::-1]) for name in names]
def gen_pairs(names=NAMES):
while True:
l, r = sample(names, 2)
yield f"{(l.split()[0]).title()} teams up with {(r.split()[0]).title()}"
'''
if __name__ == "__main__":
print(convert_title_case_names())
print('-'*101)
print(reverse_first_last_names())
print('-'*101)
pairs = gen_pairs()
for _ in range(10):
print(next(pairs))
print('-'*101)
pp(list(islice(pairs, 10)))
'''
| [
"alex-vegan@outlook.com"
] | alex-vegan@outlook.com |
f2cd62eafee08bf2d50c9b8038a29ff9f9fbc995 | 7127daafb38fb7bf9d79181af840ab414989c09d | /launcher.py | 3192ce218e689fc465ee1de98ca089a98217630e | [] | no_license | Nishi311/NEU_Capstone_2018_2019 | d5673db5ff7e529d02dba4c37572111054c779fc | efe5ae624182723e6d81ca0f5f2ec4354fdfb3d7 | refs/heads/master | 2020-04-11T19:31:48.497219 | 2019-04-08T03:31:53 | 2019-04-08T03:31:53 | 162,025,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from UI_code.ui import basic
if __name__=="__main__":
launcher = basic()
basic.run_module()
| [
"michaelnishida@gmail.com"
] | michaelnishida@gmail.com |
aa5b99b0d22fbdc44905c7fcd1f24fb1171ff13f | bb42e21d032315860ae79b6c90e741946f5be544 | /学员作业/李勇/atm_shopping/shopping/__init__.py | 8177868b0242dcebc49aef9ab04eba28f847f616 | [] | no_license | wuyongqiang2017/AllCode | 32a7a60ea5c24a681c1bdf9809c3091a1ff3f5fc | eee7d31a3ba2b29f6dec3a6d6c688d40cba7e513 | refs/heads/master | 2023-04-09T16:38:26.847963 | 2018-03-06T02:50:06 | 2018-03-06T02:50:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | # _author_ : yong
# date : 2018/1/31
# _*_coding:utf-8_*_ | [
"454381958@qq.com"
] | 454381958@qq.com |
83ef4a6d7e5cdbfb45c05ea36208a409740e1e33 | 2280e309df300fe1d4cd684799b9aeeb3495c6cc | /core/inbound.py | c6ecbcc422d08468584f3ea64b30969da4f41629 | [] | no_license | cming091/psshutlle-toots | 471fe1a9505116b6d9571259e9de04b3d7404f98 | 1445c2efd024fe33743c09bac799ed9f4a3f15cb | refs/heads/master | 2023-05-30T12:34:39.364337 | 2021-06-21T07:23:23 | 2021-06-21T07:23:23 | 378,834,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,856 | py | import time
import os
from .base import Base
from config import CONF
from utils import *
from psgrpc import wholeInboundBcr
logger = LogHandler(__name__)
class Inbound(Base):
def __init__(self, data):
super(Inbound,self).__init__(data)
def singleProcess(self):
self.common.tearDownStockAndAssignTables(self.db,self.data, defaultdbs=['wes'])
if self.data['isSimulate']:
status = self.registerFrame()
if not status:
self.statusCode = 402
raise Exception('[{} registerFrame error]'.format(self.data['sName']))
else:
logger.info('[{} registerFrame succ]'.format(self.data['sName']))
time.sleep(CONF['delay'])
if self.addWorkOrder():
logger.info('[{} addWorkOrder succ]'.format(self.data['sName']))
time.sleep(CONF['delay'])
if self.creatAggs():
logger.info('[{} creatAggs succ]'.format(self.data['sName']))
time.sleep(CONF['delay'])
self.triggerBcr()
self.checkAggOpStatus()
self.checkBound()
else:
self.statusCode = 404
else:
self.statusCode = 403
return self.data['containerCode']
def registerFrame(self):
self.sqlRmStartNodePods()
data = {
"warehouseID": self.data['warehouseID'],
"frameID": self.data['frameID'],
"nodeID": self.data['nodeID'],
"dir": 1
}
url = '{}/tes/api/frame/registerFrame'.format(CONF['baseUrl'])
res = RequestApi.sendReuest('registerFrame', 'POST', url, data).json()
logger.info('[{} registerFrame: res:{}]'.format(self.data['sName'],res))
if res.get(self.data['returnCode'], None) == 0:
return True
return False
def init(self):
logger.info('[{} init ]'.format(self.data['sName']))
self.sqlRmAllPods()
def triggerBcr(self):
info = wholeInboundBcr(self.data['ip'],self.data['warehouseCode'],self.data['containerCode'],self.data['warehouseID'])
logger.info('[{} bcr res:{}]'.format(self.data['sName'],info))
def addWorkOrder(self):
url = '{}/invtransaction/api/workorder/inbound/add'.format(CONF['baseUrl'])
data ={
"woNo": self.data['no'],
"warehouseCode": self.data['warehouseCode'],
"regionCode": self.data['regionCode'],
"waveNo": self.data['no'],
"inBoundNo": self.data['no'],
"originStation": "PS-IN-001",
"priority": 0,
"transportUnit": self.data['containerCode'],
"containerCode": self.data['containerCode'],
"skuCode": self.data['skuCode'],
"skuName": self.data['skuName'],
"lot": "",
"grade": 0,
"quantity": self.data['quantity'],
"boxQuantity": 1,
"bizType": 1,
"transType": self.data['transType'],
"bizDate": 1594292882000,
"destination": "309843433806102535",
"rely_wo_no": "",
"extension": "",
"user": "user",
'palletModel':0,
}
res = RequestApi.sendReuest('addWorkOrder', 'POST', url, data, headers=self.headers).json()
logger.info('[{} addWorkOrder: res:{}]'.format(self.data['sName'],res))
if res.get(self.data['returnCode'],None) == 0:
return True
return False
def sqlRmStartNodePods(self):
sql = 'delete from tes.frame where status=1 and node=\'{}\';'.format(self.data['startNodeId'])
self.db.get_connection('tes')
res = self.db.execute('tes', sql)
logger.info('[{} sqlRmStartNodePods tes res:{}]'.format(self.data['sName'],res))
| [
"349152234@qq.com"
] | 349152234@qq.com |
dfcc94ca95ac67ecfd8bbd2cabc33a32cd9be481 | 29f6cc55010d2c7bd4b98a8531f683463307ff0e | /demo/settings.py | 123f6056a164bf82f0a306232b7f31187874ebc6 | [] | no_license | TDTX30002/test02 | 0f69199dcf41abf3321758eef60fd55c7fba06c1 | 45e9977c22b795989ed995191234b9f9b7934459 | refs/heads/master | 2020-06-13T03:17:28.131135 | 2019-06-30T12:43:38 | 2019-06-30T12:43:38 | 194,515,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,091 | py | """
Django settings for demo project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a*s%mod1aa2xxqrp!n&#vny%87r$-k07-i^jf&wj*z1sf6j=kr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"1239572177@qq.com"
] | 1239572177@qq.com |
bbd473478b8637d787d250a8a92e65e8fd0aea0b | 41d42b041ae3f73b495b7f60f6aae3a334b5dbc2 | /convert_cidr.py | c90802d5e6898da641219739eee040329d8d3b49 | [] | no_license | ramheinzelmann/convert_cidr | 2403dd2d99a9b75f05145817a72069087c9bce1f | c68204002d4b76858a59dce4bee79aa1bd78fe3f | refs/heads/master | 2022-08-22T09:47:55.074819 | 2020-05-26T01:24:38 | 2020-05-26T01:24:38 | 266,913,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | #!/usr/bin/python
# coding: utf8
"""
Autor: Renato Machado
Objetivo: Function to convert mask and cidr, returns dictionary with address, mask and cidr
Alterações:
"""
import socket
import struct
def convert_cidr(address):
network, net_bits = address.split('/')
host_bits = 32 - int(net_bits)
netmask = socket.inet_ntoa(struct.pack('!I', (1 << 32) - (1 << host_bits)))
mask = address.split('/')[0] + ' ' + netmask
cidr_mask = {
'address': address.split('/')[0],
'mask': str(mask).split(' ')[1],
'cidr': '/'+str(address.split('/')[1])
}
print(cidr_mask)
return cidr_mask
if __name__ == "__main__":
convert_cidr('172.15.15.0/24')
| [
"renato.machado@bb.com.br"
] | renato.machado@bb.com.br |
c839049b338a706a4952f3229522722fee738d6d | 167515d25c08cb81f675524ef79724b74cc5df09 | /nn_utils.py | 8324c2b84b2c6898967506948d55344a278ec768 | [] | no_license | LinHungShi/SpatialTransformerImplementation | d962b32b4a8e32640ae3809b47dcf599732766af | baff87bbbd228c36902db10f0b635ef3de2a6a90 | refs/heads/master | 2021-01-11T11:35:26.850908 | 2016-12-20T01:09:23 | 2016-12-20T01:09:23 | 76,910,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,905 | py | """
This code contains implementation of some basic components in neural network.
Based on examples from http://deeplearning.net/tutorial/
"""
import numpy as np
import theano
import timeit
import inspect
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.nnet import conv2d
from theano.tensor.signal import pool
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(
value=np.zeros(
(n_in, n_out),
dtype=theano.config.floatX
),
name='W',
borrow=True
)
# initialize the biases b as a vector of n_out 0s
self.b = theano.shared(
value=np.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b',
borrow=True
)
# symbolic expression for computing the matrix of class-membership
# probabilities
# Where:
# W is a matrix where column-k represent the separation hyperplane for
# class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of
# hyperplane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose
# probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = np.asarray(
rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
class MultiLayerPerceptron(object):
"""Multi-Layefr Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out, n_hiddenLayers, act_function):
"""Initialize the parameters for the multilayer perceptron
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int or list of ints
:param n_hidden: number of hidden units. If a list, it specifies the
number of units in each hidden layers, and its length should equal to
n_hiddenLayers.
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
:type n_hiddenLayers: int
:param n_hiddenLayers: number of hidden layers
"""
# If n_hidden is a list (or tuple), check its length is equal to the
# number of hidden layers. If n_hidden is a scalar, we set up every
# hidden layers with same number of units.
if hasattr(n_hidden, '__iter__'):
assert(len(n_hidden) == n_hiddenLayers)
else:
n_hidden = (n_hidden,)*n_hiddenLayers
# Since we are dealing with a one hidden layer MLP, this will translate
# into a HiddenLayer with a tanh activation function connected to the
# LogisticRegression layer; the activation function can be replaced by
# sigmoid or any other nonlinear function.
self.hiddenLayers = []
for i in xrange(n_hiddenLayers):
h_input = input if i == 0 else self.hiddenLayers[i-1].output
h_in = n_in if i == 0 else n_hidden[i-1]
self.hiddenLayers.append(
HiddenLayer(
rng=rng,
input=h_input,
n_in=h_in,
n_out=n_hidden[i],
activation=act_function
))
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayers[-1].output,
n_in=n_hidden[-1],
n_out=n_out
)
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = (
sum([abs(x.W).sum() for x in self.hiddenLayers])
+ abs(self.logRegressionLayer.W).sum()
)
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (
sum([(x.W ** 2).sum() for x in self.hiddenLayers])
+ (self.logRegressionLayer.W ** 2).sum()
)
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = (
self.logRegressionLayer.negative_log_likelihood
)
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
# the parameters of the model are the parameters of the two layer it is
# made out of
self.params = sum([x.params for x in self.hiddenLayers], []) + self.logRegressionLayer.params
# keep track of model input
self.input = input
class DropoutHiddenLayer(object):
def __init__(self, rng, is_train, input, n_in, n_out, W=None, b=None,
activation=T.tanh, p=0.5):
"""
Hidden unit activation is given by: activation(dot(input,W) + b)
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type is_train: theano.iscalar
:param is_train: indicator pseudo-boolean (int) for switching between training and prediction
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
:type p: float or double
:param p: probability of NOT dropping out a unit
"""
self.input = input
if W is None:
W_values = np.asarray(
rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
output = activation(lin_output)
def drop(input, p):
"""
:type input: np.array
:param input: layer or weight matrix on which dropout is applied
:type p: float or double between 0. and 1.
:param p: p probability of NOT dropping out a unit, therefore (1.-p) is the drop rate.
"""
rng = np.random.RandomState(1234)
srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
mask = srng.binomial(n=1, p=p, size=input.shape, dtype=theano.config.floatX)
return input * mask
# multiply output and drop -> in an approximation the scaling effects cancel out
train_output = drop(output,p)
#is_train is a pseudo boolean theano variable for switching between training and prediction
self.output = T.switch(T.neq(is_train, 0), train_output, p*output)
# parameters of the model
self.params = [self.W, self.b]
class ConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) //
np.prod(poolsize))
# initialize weights with random weights
W_bound = np.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
np.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
input_shape=image_shape,
border_mode='full'
)
# pool each feature map individually, using maxpooling
pooled_out = pool.pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def train_nn(train_model, validate_model, test_model,
n_train_batches, n_valid_batches, n_test_batches, n_epochs,
verbose = True):
"""
Wrapper function for training and test THEANO model
:type train_model: Theano.function
:param train_model:
:type validate_model: Theano.function
:param validate_model:
:type test_model: Theano.function
:param test_model:
:type n_train_batches: int
:param n_train_batches: number of training batches
:type n_valid_batches: int
:param n_valid_batches: number of validation batches
:type n_test_batches: int
:param n_test_batches: number of testing batches
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type verbose: boolean
:param verbose: to print out epoch summary or not to
"""
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 10 # wait this much longer when a new best is
# found
improvement_threshold = 0.9995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience // 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = np.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter % 100 == 0) and verbose:
print('training @ iter = ', iter)
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in range(n_valid_batches)]
this_validation_loss = np.mean(validation_losses)
if verbose:
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in range(n_test_batches)
]
test_score = np.mean(test_losses)
if verbose:
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1,
n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
# Retrieve the name of function who invokes train_nn() (caller's name)
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
# Print out summary
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print(('The training process for function ' +
calframe[1][3] +
' ran for %.2fm' % ((end_time - start_time) / 60.)))
| [
"noreply@github.com"
] | LinHungShi.noreply@github.com |
198bea5cdc4acb5ef96b2919621420a679239a16 | 2bb2dab311f602a70ff4084c718c73e766781ebb | /app/QueueA.py | bb9a75b7419053bf175c202c298c98ee3eabb861 | [] | no_license | dweinflash/CyVerseSEChallenge | aa580870aa38a2f11112786e940920627aa54a95 | 9c9311ba18bdbc6b00f2e7a8ebddada23eba3bca | refs/heads/main | 2022-12-29T14:32:20.025065 | 2020-10-19T05:07:35 | 2020-10-19T05:07:35 | 304,516,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,792 | py | import os
import sys
import json
import asyncio
import sqlite3
import requests
from flask import Flask
from nats.aio.client import Client as NATS
from nats.aio.errors import ErrConnectionClosed, ErrTimeout, ErrNoServers
app = Flask(__name__)
db_filename = 'quotes.db'
schema_filename = 'quotes_schema.sql'
website = 'https://friends-quotes-api.herokuapp.com/quotes/random'
async def run(loop):
nc = NATS()
await nc.connect("demo.nats.io:4222", loop=loop)
async def store(msg):
data = msg.data.decode()
if (data == "end"): return
data_json = json.loads(data)
character = data_json["character"]
quote = data_json["quote"]
with sqlite3.connect(db_filename) as conn:
conn.execute("INSERT INTO quote VALUES (?, ?)", (character, quote))
sid = await nc.subscribe("QueueA", cb=store)
quote = requests.get(website)
await nc.publish("QueueA", quote.content)
try:
await nc.request("QueueA", b'end')
except ErrTimeout:
await nc.unsubscribe(sid)
await nc.close()
def setup_db():
if (not os.path.exists(db_filename)):
with sqlite3.connect(db_filename) as conn:
with open(schema_filename, 'rt') as f:
schema = f.read()
conn.executescript(schema)
@app.route("/")
def start():
setup_db()
loop = asyncio.new_event_loop()
loop.run_until_complete(run(loop))
loop.close()
res = ""
with sqlite3.connect(db_filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT * FROM quote")
for row in cursor.fetchall():
character, quote = row
res += character + ": " + quote + "\n"
return res
if __name__ == '__main__':
app.run(host='0.0.0.0')
| [
"dweinflash@vm142-28.cyverse.org"
] | dweinflash@vm142-28.cyverse.org |
87a388c789a66347a637a92ea746b8e5cf978678 | fe3082ebc284c10de3cc59656110277da2a5f5d8 | /DynamicKey/ARDynamicKey/python/test/RtcTokenBuilderTest.py | 9f0c3ca14e0a1d0e52dc73ebf7a1bfc063778eee | [] | no_license | efarsoft/Tools | 2dfd771ca0b86d1d3ca96cd77dd4998ea5bdcbbc | 9d097454ebff78c16f2ea73a3bc35449f1db4a87 | refs/heads/master | 2023-06-24T01:52:37.035396 | 2021-07-28T09:01:24 | 2021-07-28T09:01:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | import sys
import unittest
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../src'))
from RtcTokenBuilder import *
from AccessToken import *
appID = "970CA35de60c44645bbae8a215061b33"
appCertificate = "5CFd2fd1755d40ecb72977518be15d3b"
channelName = "7d72365eb983485397e3e3f9d460bdda"
uid = 2882341273
userAccount = "2082341273";
expireTimestamp = 1446455471
salt = 1
ts = 1111111
class RtcTokenBuilderTest(unittest.TestCase):
def test_(self):
token = RtcTokenBuilder.buildTokenWithUid(appID, appCertificate, channelName, uid, Role_Subscriber, expireTimestamp)
parser = AccessToken()
parser.fromString(token)
self.assertEqual(parser.messages[kJoinChannel], expireTimestamp)
self.assertNotIn(kPublishVideoStream, parser.messages)
self.assertNotIn(kPublishAudioStream, parser.messages)
self.assertNotIn(kPublishDataStream, parser.messages)
if __name__ == "__main__":
unittest.main()
| [
"skyline@skylinedeMBP.lan"
] | skyline@skylinedeMBP.lan |
d220be12fc40bef7288109544ab3927eb1d6b23d | b8cbd18f5d9aa7fccd1330be485dbb8704a06514 | /cifar10.py | a14ae4f647bd17714c69accbec54750ccf1da9d0 | [] | no_license | mercileesb/denception | 29a11197ca392cca0c55ec1255ee76bd19828b55 | ed713a29d0b61313c243eb28b2396525254c36c4 | refs/heads/master | 2021-04-28T09:08:18.030217 | 2018-02-27T01:02:35 | 2018-02-27T01:02:35 | 122,032,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,838 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR-10 data set.
See http://www.cs.toronto.edu/~kriz/cifar.html.
"""
import os
import tensorflow as tf
HEIGHT = 32
WIDTH = 32
DEPTH = 3
class Cifar10DataSet(object):
"""Cifar10 data set.
Described by http://www.cs.toronto.edu/~kriz/cifar.html.
"""
def __init__(self, data_dir, subset='train', use_distortion=True):
self.data_dir = data_dir
self.subset = subset
self.use_distortion = use_distortion
def get_filenames(self):
if self.subset in ['train', 'validation', 'eval']:
return [os.path.join(self.data_dir, self.subset + '.tfrecords')]
else:
raise ValueError('Invalid data subset "%s"' % self.subset)
def parser(self, serialized_example):
"""Parses a single tf.Example into image and label tensors."""
# Dimensions of the images in the CIFAR-10 dataset.
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
features = tf.parse_single_example(
serialized_example,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
})
image = tf.decode_raw(features['image'], tf.uint8)
image.set_shape([DEPTH * HEIGHT * WIDTH])
# Reshape from [depth * height * width] to [depth, height, width].
image = tf.cast(
tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),
tf.float32)
label = tf.cast(features['label'], tf.int32)
# Custom preprocessing.
image = self.preprocess(image)
return image, label
def make_batch(self, batch_size):
"""Read the images and labels from 'filenames'."""
filenames = self.get_filenames()
# Repeat infinitely.
dataset = tf.contrib.data.TFRecordDataset(filenames).repeat()
# Parse records.
dataset = dataset.map(
self.parser, num_threads=batch_size, output_buffer_size=2 * batch_size)
# Potentially shuffle records.
if self.subset == 'train':
min_queue_examples = int(
Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)
# Ensure that the capacity is sufficiently large to provide good random
# shuffling.
dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)
# Batch it up.
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
image_batch, label_batch = iterator.get_next()
return image_batch, label_batch
def preprocess(self, image):
"""Preprocess a single image in [height, width, depth] layout."""
if self.subset == 'train' and self.use_distortion:
# Pad 4 pixels on each dimension of feature map, done in mini-batch
image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)
image = tf.random_crop(image, [HEIGHT, WIDTH, DEPTH])
image = tf.image.random_flip_left_right(image)
return image
@staticmethod
def num_examples_per_epoch(subset='train'):
if subset == 'train':
return 45000
elif subset == 'validation':
return 5000
elif subset == 'eval':
return 10000
else:
raise ValueError('Invalid data subset "%s"' % subset) | [
"mercileesb@sogang.ac.kr"
] | mercileesb@sogang.ac.kr |
c0b6d69fb6e5380d90a59d03f0d2f3e57c3a202d | 6aed6349435840b0b5cae7d7699cfd9b65481922 | /blog/migrations/0008_auto_20200906_1651.py | 0c101b811af84ceac8d087010e04c70db185172b | [] | no_license | avraam1616/my-first-blog | 5ff468787d15e9d07c289b0e6918bbed0fa68278 | b1aca3e61d829e1bec26fc32b5804544b5589e90 | refs/heads/master | 2022-12-26T08:01:55.955463 | 2020-10-03T01:06:06 | 2020-10-03T01:06:06 | 295,801,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # Generated by Django 3.1.1 on 2020-09-06 13:51
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_post_created_date'),
]
operations = [
migrations.AlterField(
model_name='post',
name='created_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| [
"avraam1616@gmail.com"
] | avraam1616@gmail.com |
a507b4353433e1560721c1553b6ea8ce23dca5c9 | 305cc8ff665d07cf328509f1055769ed50c5bc22 | /src/blog/views.py | 387087cb6023419dc0cd13abfd2c6d7620a1cdcf | [] | no_license | AliSulieman/try_django | 4cc287763717cbb62a60fdc35f3b973ba0c25455 | e6d14662e4495586c2478ecbe03f1c3208d040d5 | refs/heads/master | 2022-12-29T12:24:03.862104 | 2020-10-17T03:05:36 | 2020-10-17T03:05:36 | 299,778,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | from django.shortcuts import render, get_object_or_404
from .models import BlogPost
from django.http import Http404
# Create your views here.
def blog_post_list_view(request):
qs = BlogPost.objects.all()
template_name = 'blog/list.html'
context = {"object_list": qs}
return render(request, template_name, context)
def blog_post_create_view(request):
template_name = 'blog/create.html'
context = {"form": ''}
return render(request, template_name, context)
def blog_post_detail_view(request, slug):
obj = get_object_or_404(BlogPost, slug=slug)
template_name = 'blog/detail.html'
context = {"object": obj}
return render(request, template_name, context)
def blog_post_update_view(request):
obj = get_object_or_404(BlogPost, slug=slug)
template_name = 'blog/update.html'
context = {"object": obj, 'form': None}
return render(request, template_name, context)
def blog_post_delete_view(request):
obj = get_object_or_404(BlogPost, slug=slug)
template_name = 'blog/delete.html'
context = {"object": obj}
return render(request, template_name, context)
| [
"ahsuliem@uno.edu"
] | ahsuliem@uno.edu |
dea88af914d4f23e13cccc093c8efbc623c611f6 | 5fdefe5dbe311ebd27ef222c24556f0ae8ae998c | /trainer.py | 451659962c9e56374770e9c3f232ff9e138eb8ed | [] | no_license | shubham-dayma/face-recognition | 85728b71535c26b241d8ee9faef041ab14c197ac | 87163b7f3d4c9a515348aa5e3bfa6be908165686 | refs/heads/main | 2023-02-21T12:44:40.069856 | 2021-01-28T09:44:32 | 2021-01-28T09:44:32 | 333,711,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | import cv2
import numpy as np
from PIL import Image
import os
# Path for face image database
path = 'dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml");
# function to get the images and label data
def getImagesAndLabels(path):
imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
faceSamples=[]
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert('L') # grayscale
img_numpy = np.array(PIL_img,'uint8')
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy)
for (x,y,w,h) in faces:
faceSamples.append(img_numpy[y:y+h,x:x+w])
ids.append(id)
return faceSamples,ids
print ("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces,ids = getImagesAndLabels(path)
recognizer.train(faces, np.array(ids))
# Save the model into trainer/trainer.yml
recognizer.write('trainer/trainer.yml')
# Print the numer of faces trained and end program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids)))) | [
"35252025+shubham-dayma@users.noreply.github.com"
] | 35252025+shubham-dayma@users.noreply.github.com |
dcc7adaa49fada352d2eb346c9e51df6ed8c9dd4 | 0a5c468cee07b79ddb5368aa7b0fe118f4b11e72 | /lazy_slides/download.py | 72beca49e25d6ab0de60cb57f0674c4ab1b133c7 | [] | no_license | abingham/lazy_slides | c36e451571c14e53cbc2817d4f72475fa5c400ba | ca8eb4618415df6eaa9fb3c3f721cb168708f52b | refs/heads/master | 2020-05-19T16:34:20.286129 | 2013-06-18T17:58:05 | 2013-06-18T17:58:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | import contextlib
import logging
import os
import urllib2
import urlparse
import uuid
log = logging.getLogger(__name__)
def download(url, directory):
'''Download a file specified by a URL to a local file.
This generates a unique name for the downloaded file and saves
into that.
:param url: The URL to download.
:param directory: The directory into which to save the file.
'''
parsed = urlparse.urlparse(url)
# Calculate the save-file name
filename = os.path.split(parsed.path)[1]
filename_comps = os.path.splitext(filename)
filename = '{}_{}{}'.format(
filename_comps[0],
uuid.uuid4(),
filename_comps[1])
filename = os.path.join(directory, filename)
log.info('Downloading {} to {}'.format(
url, filename))
# Save the URL data to the new filename.
with contextlib.closing(urllib2.urlopen(url)) as infile:
with open(filename, 'wb') as outfile:
outfile.write(infile.read())
return filename
| [
"austin.bingham@gmail.com"
] | austin.bingham@gmail.com |
5b13c731015934ca38a850cd358dbf8fee89149f | a545e2e3b4cbd2b274d7c51f41f8eae8b23844aa | /venv/Lib/site-packages/pikepdf/objects.py | 84631b5cdf62dd40271694feb5a6b1d5552c0dee | [] | no_license | Sandeep-2511/Imgtopdf | 795268419bb27fe820c42f60451536009eb89371 | 1293286aade760f4d3a74ccdc9a63bfebb94ae97 | refs/heads/master | 2023-02-11T03:29:21.923148 | 2021-01-19T11:40:13 | 2021-01-19T11:40:13 | 330,951,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,180 | py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2017, James R. Barlow (https://github.com/jbarlow83/)
"""Provide classes to stand in for PDF objects
The purpose of these is to provide nice-looking classes to allow explicit
construction of PDF objects and more pythonic idioms and facilitate discovery
by documentation generators and linters.
It's also a place to narrow the scope of input types to those more easily
converted to C++.
There is some deliberate "smoke and mirrors" here: all of the objects are truly
instances of ``pikepdf.Object``, which is a variant container object. The
``__new__`` constructs a ``pikepdf.Object`` in each case, and the rest of the
class definition is present as an aide for code introspection.
"""
# pylint: disable=unused-import, abstract-method
from secrets import token_urlsafe
from typing import TYPE_CHECKING, Iterable, Optional, Union
from warnings import warn
from . import _qpdf
from ._qpdf import Object, ObjectType
if TYPE_CHECKING:
from pikepdf import Pdf
# By default pikepdf.Object will identify itself as pikepdf._qpdf.Object
# Here we change the module to discourage people from using that internal name
# Instead it will become pikepdf.objects.Object
Object.__module__ = __name__
ObjectType.__module__ = __name__
# type(Object) is the metaclass that pybind11 defines; we wish to extend that
# pylint cannot see the C++ metaclass definition is thoroughly confused.
# pylint: disable=invalid-metaclass
class _ObjectMeta(type(Object)):  # type: ignore
    """Metaclass for the stand-in classes; answers isinstance() by type code.

    Every stand-in class (Name, String, Array, ...) really constructs a plain
    ``pikepdf.Object`` variant container, so instance checks compare the
    variant's internal type code rather than the Python class.
    """

    def __instancecheck__(self, instance):
        # Note: since this class is a metaclass, self is a class object
        if type(instance) != Object:
            return False
        return self.object_type == instance._type_code
class _NameObjectMeta(_ObjectMeta):
    """Supports usage pikepdf.Name.Whatever -> Name('/Whatever')"""

    def __getattr__(self, attr):
        # Underscore attributes are delegated to the base metaclass so that
        # dunder/internal lookups do not get turned into PDF Names.
        if attr.startswith('_'):
            # NOTE(review): ``_ObjectMeta.__getattr__`` is invoked without a
            # ``self`` argument here — confirm upstream intent; in practice
            # dunder lookups rarely reach this branch.
            return _ObjectMeta.__getattr__(attr)
        return Name('/' + attr)

    def __setattr__(self, attr, value):
        if attr.startswith('_'):
            # NOTE(review): same missing-``self`` pattern as __getattr__.
            return _ObjectMeta.__setattr__(attr, value)
        raise TypeError("Attributes may not be set on pikepdf.Name")

    def __getitem__(self, item):
        # Subscripting the class is a common user mistake; raise a helpful
        # error showing the two supported spellings.
        if item.startswith('/'):
            item = item[1:]
        raise TypeError(
            "pikepdf.Name is not subscriptable. You probably meant:\n"
            f"    pikepdf.Name.{item}\n"
            "or\n"
            f"    pikepdf.Name('/{item}')\n"
        )
class Name(Object, metaclass=_NameObjectMeta):
    """Constructs a PDF Name object

    Names can be constructed with two notations:

        1. ``Name.Resources``

        2. ``Name('/Resources')``

    The two are semantically equivalent. The former is preferred for names
    that are normally expected to be in a PDF. The latter is preferred for
    dynamic names and attributes.
    """

    object_type = ObjectType.name

    def __new__(cls, name: str):
        # QPDF_Name::unparse ensures that names are always saved in a UTF-8
        # compatible way, so we only need to guard the input.
        if isinstance(name, bytes):
            raise TypeError("Name should be str")
        return _qpdf._new_name(name)

    @classmethod
    def random(cls, len_: int = 16, prefix: str = ''):
        """Generate a cryptographically strong random, valid PDF Name.

        When the length parameter is 16 (16 random bytes or 128 bits), the
        result is probably globally unique and can be treated as never
        colliding with other names.
        """
        # It so happens that urlsafe names are also safe names for PDF Name objects
        return _qpdf._new_name(f"/{prefix}{token_urlsafe(len_)}")
class Operator(Object, metaclass=_ObjectMeta):
    """Constructs an operator for use in a content stream.

    An Operator is one of a limited set of commands that can appear in PDF content
    streams (roughly the mini-language that draws objects, lines and text on a
    virtual PDF canvas). The commands :func:`parse_content_stream` and
    :func:`unparse_content_stream` create and expect Operators respectively, along
    with their operands.

    pikepdf uses the special Operator "INLINE IMAGE" to denote an inline image
    in a content stream.
    """

    object_type = ObjectType.operator

    def __new__(cls, name: str):
        # Delegates directly to the C++ layer; no validation is done here.
        return _qpdf._new_operator(name)
class String(Object, metaclass=_ObjectMeta):
    """Constructs a PDF String object"""

    object_type = ObjectType.string

    def __new__(cls, s: Union[str, bytes]):
        """
        Args:
            s: The string to use. A ``str`` is encoded for PDF storage;
                ``bytes`` are wrapped verbatim without any encoding step.

        Returns:
            pikepdf.Object
        """
        # bytes pass straight through; everything else takes the UTF-8 path.
        return _qpdf._new_string(s) if isinstance(s, bytes) else _qpdf._new_string_utf8(s)
class Array(Object, metaclass=_ObjectMeta):
    """Constructs a PDF Array object"""

    object_type = ObjectType.array

    def __new__(cls, a: Optional[Iterable] = None):
        """
        Args:
            a: An iterable of objects, each either a ``pikepdf.Object`` or
                convertible to one. ``None`` yields an empty array.

        Returns:
            pikepdf.Object
        """
        # Reject str/bytes explicitly: although iterable, turning them into
        # arrays of single characters is almost certainly a caller mistake.
        if isinstance(a, (str, bytes)):
            raise TypeError('Strings cannot be converted to arrays of chars')
        return _qpdf._new_array([] if a is None else a)
class Dictionary(Object, metaclass=_ObjectMeta):
    """Constructs a PDF Dictionary object"""

    object_type = ObjectType.dictionary

    def __new__(cls, d=None, **kwargs):
        """
        Build a PDF Dictionary from a Python mapping or keyword arguments.

        These two spellings are equivalent:

        .. code-block:: python

            pikepdf.Dictionary({'/NameOne': 1, '/NameTwo': 'Two'})

            pikepdf.Dictionary(NameOne=1, NameTwo='Two')

        Mapping keys must be strings beginning with '/'; keyword names have
        the leading slash added automatically. All values must be
        convertible to `pikepdf.Object`.

        Returns:
            pikepdf.Object
        """
        if kwargs and d is not None:
            raise ValueError('Unsupported parameters')
        if kwargs:
            # Keyword form: prefix each name with the mandatory slash.
            # Allows Dictionary(MediaBox=(0,0,1,1), Type=Name('/Page')...
            prefixed = {('/' + key): value for key, value in kwargs.items()}
            return _qpdf._new_dictionary(prefixed)
        mapping = d or {}
        if mapping and any(not key.startswith('/') for key in mapping.keys()):
            raise ValueError("Dictionary created from strings must begin with '/'")
        return _qpdf._new_dictionary(mapping)
class Stream(Object, metaclass=_ObjectMeta):
    """Constructs a PDF Stream object"""

    object_type = ObjectType.stream

    def __new__(cls, owner: 'Pdf', data: bytes = None, d=None, **kwargs):
        """
        Args:
            owner: The Pdf to which this stream shall be attached.
            data: The data bytes for the stream.
            d: A mapping object that will be used to construct a ``Dictionary``.
            kwargs: Keyword arguments that will define the dictionary. Do not set
                /Filter or /Length here as pikepdf will manage these.

        Returns:
            pikepdf.Object
        """
        # Support __new__(...obj=bytes) which should have been data=bytes,
        # drop in pikepdf 3
        if 'obj' in kwargs:
            warn("Deprecated parameter 'obj', use 'data' instead", DeprecationWarning)
            if data is None:
                data = kwargs['obj']
            del kwargs['obj']
        if data is None:
            raise TypeError("Must make Stream from binary data")
        stream = _qpdf._new_stream(owner, data)
        if d or kwargs:
            # Attach the stream dictionary built from the mapping / kwargs.
            stream_dict = Dictionary(d, **kwargs)
            stream.stream_dict = stream_dict
        return stream
| [
"potdukhesandeep7gmail.com"
] | potdukhesandeep7gmail.com |
8a7a8ab8b8ab21844bdccbab9416c8b795323f58 | 904690ecc39d0249ebb6f480f7e11e71b4916d91 | /whu_gs/com/crawler/test/TestConfig.py | a7636be418f47758be4da4a9728d1f03784277d8 | [] | no_license | Zhangrui19951107/whu_gs | d1b5d8441b9f3caedf81081f2ed954178b4b00df | 36eb34de4eaa16aef5cff0dc5b17922587a6c381 | refs/heads/master | 2021-06-26T17:14:55.386309 | 2017-09-16T09:24:00 | 2017-09-16T09:24:00 | 103,739,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | #coding=UTF-8
'''
Created on 2015-07-20

@author: Administrator
'''
import ConfigParser

# Smoke test: read the MySQL host ("ip") entry from the shared DB config.
# NOTE: Python 2 only (``ConfigParser`` module name, print statement).
conf = ConfigParser.ConfigParser()
conf.read("../../../config/db.cfg")
print conf.get("mysql", "ip")
| [
"1358877565@qq.com"
] | 1358877565@qq.com |
80cd8baa4841a770e7eb7696c77d6f7a99d12ad2 | 23130cd12e38dbce8db8102810edaad70b240ae2 | /lintcode/235.py | e2c5a50114f99694f5bfed245e493ea6148b0de9 | [
"MIT"
] | permissive | kangli-bionic/algorithm | ee6687c82101088db20f10fb958b4e45e97d3d31 | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | refs/heads/master | 2023-01-05T09:29:33.204253 | 2020-10-25T17:29:38 | 2020-10-25T17:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | """
235. Prime Factorization
https://www.lintcode.com/problem/prime-factorization/
"""
import math
class Solution:
    """
    @param num: An integer
    @return: an integer array
    """
    def primeFactorization(self, num):
        # Trial division: strip out each divisor up to sqrt(num); whatever
        # remains greater than 1 at the end is itself prime.
        factors = []
        limit = math.sqrt(num)
        divisor = 2
        while divisor <= limit and num > 1:
            quotient, remainder = divmod(num, divisor)
            while remainder == 0:
                factors.append(divisor)
                num = quotient
                quotient, remainder = divmod(num, divisor)
            divisor += 1
        if num > 1:
            factors.append(num)
        return factors
| [
"hipaulshi@gmail.com"
] | hipaulshi@gmail.com |
8879d084898863cce23dedb47389a370ebb7adcf | 11a1e1140fe869e83e337518ca99162cca8780dd | /BHScripts_8TeV_postICHEP_Final_WithRun2012C_NewFitRange/histograms/DataAnalysis_FitRanges/Styles.py | ec5775c7f79b34278912b0d67309b9cfba720b4c | [] | no_license | jhakala/BHMacros | 6bdd1ac855df8a803f39f06e7e218b24b2eb76b1 | bc3cf2e3c1d3570a9e042c865214035e60d20021 | refs/heads/master | 2021-01-19T04:52:27.624800 | 2015-04-09T12:14:21 | 2015-04-09T12:14:21 | 33,666,386 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,718 | py | pattle = [862, 814, 797, 899, 614, 921]
# Marker styles paired with the ``pattle`` colour palette above.
marker = [20, 21, 22, 25, 24, 26]
from ROOT import gStyle
from ROOT import gROOT
from ROOT import TStyle
# Global canvas margins, applied once at import time.
gStyle.SetPadTopMargin(0.05)
gStyle.SetPadRightMargin(0.05)
def formatST(h):
    """Apply the standard S_T histogram cosmetics: dark marker/line, axis titles."""
    ink = 923
    h.SetMarkerStyle(20)
    h.SetMarkerColor(ink)
    h.SetLineColor(ink)
    h.SetXTitle("S_{T} (GeV)")
    h.SetYTitle("Events / %d GeV" % h.GetBinWidth(1))
    h.GetYaxis().SetTitleOffset(1.2)
def formatTemplate(f, N, iformula):
    """Style a template fit function: palette colour by formula index, dash by N."""
    f.SetLineWidth(2)
    f.SetLineColor(pattle[iformula])
    # Solid line for the N=2 template, dashed for N=3; other N left unchanged.
    line_styles = {2: 1, 3: 2}
    if N in line_styles:
        f.SetLineStyle(line_styles[N])
def formatUncertainty(g):
    """Style the uncertainty-band graph (blue line, light fill, axis titles)."""
    g.SetLineWidth(2)
    g.SetFillColor(862)
    #g.SetLineColor(33)
    g.SetLineColor(862)
    g.SetFillColor(33)  # NOTE: overrides SetFillColor(862) above; last call wins
    #g.SetFillStyle()
    g.GetXaxis().SetTitle("S_{T} (GeV)")
    g.GetYaxis().SetTitle("Events / 100 GeV")
    g.GetYaxis().SetTitleOffset(1.2)
def formatCL(g, type, width=4):
    """Style a confidence-limit graph; ``type`` selects CL95 (solid blue) or
    CLA (dashed red) cosmetics.

    NOTE: the parameter name ``type`` shadows the builtin of the same name.
    """
    g.SetLineWidth(width)
    g.GetXaxis().SetTitle("S_{T}^{ min} (GeV)")
    g.GetXaxis().SetNdivisions(5,5,0)
    g.GetYaxis().SetTitle("#sigma(S_{T} > S_{T}^{ min}) #times A (pb)")
    g.GetYaxis().SetTitleOffset(1.2)
    if type == "CL95":
        g.SetLineColor(862)
        g.SetFillColor(862)
    elif type == "CLA":
        g.SetLineColor(899)
        g.SetFillColor(899)
        g.SetLineStyle(2)
def formatXsecCL(g, icolor, line_style=1):
    """Style a cross-section limit graph using palette colour ``icolor``."""
    g.SetLineWidth(2)
    g.SetLineColor(pattle[icolor])
    g.SetLineStyle(line_style)
    g.SetMarkerColor(pattle[icolor])
    g.SetMarkerSize(1)
    g.GetXaxis().SetTitle("M_{BH}^{ min} (TeV)")
    g.GetYaxis().SetTitle("#sigma (pb)")
    g.GetYaxis().SetTitleOffset(1.2)
def formatExcludedMass(g, name = ""):
    """Style the excluded-mass graph; colour/marker/line chosen by benchmark
    model substrings found in ``name``. Empty ``name`` sets titles only.
    """
    g.GetXaxis().SetTitle("M_{D} (TeV)")
    g.GetYaxis().SetTitle("Excluded M_{BH}^{ min} (TeV)")
    g.GetYaxis().SetTitleOffset(1.2)
    if not name == "":
        g.SetLineWidth(3)
        g.SetMarkerSize(1)
        # NOTE(review): if ``name`` matches none of the patterns below,
        # ``color``/``marker_style``/``line_style`` stay unbound and the
        # trailing setter calls raise UnboundLocalError — confirm callers
        # always pass a known benchmark name.
        if "BH1_BM" in name or "BH4_CH" in name:
            color = 922
            marker_style = 20
            line_style = 1
        if "BH2_BM" in name or "BH2_CH" in name:
            color = 862
            marker_style = 21
            line_style = 2
        if "BH8_CH" in name:
            color = 899
            marker_style = 22
            line_style = 3
        if "BH6_CH" in name or "BH5_BM" in name:
            color = 797
            marker_style = 20
            line_style = 1
        if "BH10_CH" in name:
            color = 2
            marker_style = 23
            line_style = 2
        if "BH9_CH" in name:
            color = 4
            marker_style = 24
            line_style = 3
        g.SetLineColor(color)
        g.SetLineStyle(line_style)
        g.SetMarkerStyle(marker_style)
        g.SetMarkerSize(1)  # repeated call (already set above); harmless
        g.SetMarkerColor(color)
def formatRatio(h, icolor):
    """Colour a ratio histogram's markers and line from the shared palette."""
    palette_colour = pattle[icolor]
    h.SetMarkerColor(palette_colour)
    h.SetLineColor(palette_colour)
| [
"john_hakala@brown.edu"
] | john_hakala@brown.edu |
8d5b4db0f71ad96b3f5cb5beb8dbdf1c2d0cad92 | 584219e41b026a112e98ebe8d35537e85f2408dd | /fsaverage_LR32k/162733.32k_fs_LR.wb.spec | 4e7e082163e97568b9fd7a9f8bbf846faf023ed1 | [] | no_license | yarikoptic/demo1 | 01b1e3b186a1f7b7a099f3ede1425f6e351bedd7 | 5df98f632429e98c47d147995fdd23e0e57125be | refs/heads/master | 2020-12-30T02:40:01.120571 | 2020-02-06T19:00:49 | 2020-02-06T19:00:49 | 238,762,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | spec | ../.git/annex/objects/ZZ/Zv/MD5E-s4228--58fc222f97ea9f00594d842b611b8fee.wb.spec/MD5E-s4228--58fc222f97ea9f00594d842b611b8fee.wb.spec | [
"t.kadelka@fz-juelich.de"
] | t.kadelka@fz-juelich.de |
402c3791e9c3d032e7fe7bbfbe6d0bddc4cc5638 | 3dd456cec49dfcd7c838c4dda8526fd1b43819ad | /assignment1/cs231n/classifiers/softmax.py | 0a0f8df4bcf81d0f7a425c1114169fe33670ce6c | [] | no_license | kongmo/cs231n_2016_winter | 8adbed6194a423f15b69ff36b950bc4ed71ae213 | 3790011a997423f67e1a0da4d2e20a327c69dd0a | refs/heads/master | 2021-01-09T23:42:23.473584 | 2016-07-01T13:04:22 | 2016-07-01T13:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,576 | py | import numpy as np
import numpy.matlib
from random import shuffle
def softmax_loss_naive(W, X, y, reg):
  """
  Softmax loss function, naive implementation (with loops)

  Inputs have dimension D, there are C classes, and we operate on minibatches
  of N examples.

  Inputs:
  - W: A numpy array of shape (D, C) containing weights.
  - X: A numpy array of shape (N, D) containing a minibatch of data.
  - y: A numpy array of shape (N,) containing training labels; y[i] = c means
    that X[i] has label c, where 0 <= c < C.
  - reg: (float) regularization strength

  Returns a tuple of:
  - loss as single float
  - gradient with respect to weights W; an array of same shape as W
  """
  # Initialize the loss and gradient to zero.
  loss = 0.0
  dW = np.zeros_like(W)

  num_classes = W.shape[1]
  num_train = X.shape[0]
  # Per-sample rows of (one_hot - softmax); the gradient is assembled from
  # these at the end.
  dWp = np.zeros((num_train, num_classes))
  # FIX: ``xrange`` (Python 2 only) replaced with ``range`` for consistency
  # with softmax_loss_vectorized and Python 3 compatibility.
  for i in range(num_train):
    scores = X[i].dot(W)
    # Shift by the max score for numeric stability before exponentiating.
    scores = scores - scores.max()
    scores = np.exp(scores)
    p = scores / scores.sum()
    groundTruth = np.zeros(p.shape)
    for j in range(num_classes):
      if j == y[i]:
        # Cross-entropy contribution of the correct class.
        loss += -np.log(p[j])
        groundTruth[j] = 1
    dWp[i, :] = groundTruth - p

  # Average over the minibatch and add L2 regularization.
  loss /= num_train
  loss += 0.5 * reg * np.sum(W * W)

  # dW = -X^T (one_hot - probs) / N + reg * W
  dW = (-1) * X.T.dot(dWp)
  dW /= num_train
  dW += reg * W

  return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
  """
  Softmax loss function, vectorized version.

  Inputs and outputs are the same as softmax_loss_naive.
  """
  num_train = X.shape[0]
  num_classes = W.shape[1]

  # Row-wise softmax with the usual max-shift for numeric stability.
  scores = X.dot(W)
  scores = scores - scores.max(axis=1, keepdims=True)
  exp_scores = np.exp(scores)
  probs = exp_scores / exp_scores.sum(axis=1, keepdims=True)  # shape (N, C)

  # Average cross-entropy of the correct classes plus L2 regularization.
  correct_probs = probs[np.arange(num_train), y]
  loss = -np.log(correct_probs).sum() / num_train + 0.5 * reg * np.sum(W ** 2)

  # Gradient: dW = -X^T (one_hot - probs) / N + reg * W.
  one_hot = np.zeros((num_train, num_classes))
  one_hot[np.arange(num_train), y] = 1
  dW = (-1) * X.T.dot(one_hot - probs) / num_train + reg * W

  return loss, dW
| [
"513485189@qq.com"
] | 513485189@qq.com |
c2a917da7465195d2886f59c0fdae4377191eb40 | 7becb20530c3da3051128e87b984fd234656fd12 | /Coordinate Converters/xyzToOthers.py | f8b9c1a531d03d00a023d20637c9a06945a7e267 | [
"MIT"
] | permissive | Mehmet-Emre-Dogan/calculationToolbox | 1e4ded2994044203021d7caa84aee47a467256af | 1af4515b0cbb4e31e8bb608ac7a84e74221c83ac | refs/heads/main | 2023-06-13T08:10:04.840711 | 2021-07-12T00:22:22 | 2021-07-12T00:22:22 | 384,993,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | from math import sqrt, atan2, degrees
import ctypes
from os import read
# Give the console window a recognisable title (Windows-only API).
ctypes.windll.kernel32.SetConsoleTitleW("xyz input")
# Interactive loop: read Cartesian (x, y, z) coordinates and print the
# equivalent cylindrical and spherical coordinates until CTRL^C is pressed.
while True:
    try:
        print("#"*70)
        # Precision is re-read each iteration so edits to precision.txt take
        # effect without restarting the script.
        with open("precision.txt", "r", encoding="utf-8") as fil:
            precision = int(fil.read())
        print("Please enter Cartesian coordinates. Press CTRL^C to exit.")
        print(f"Precision is set to {precision}. You can change it by editing 'precision.txt'\n")
        x = float(input("x (meters): "))
        y = float(input("y (meters): "))
        z = float(input("z (meters): "))
        # Cylindrical: r = sqrt(x^2+y^2), phi = atan2(y, x), z unchanged.
        print("\n" + " Cylindrical ".center(70, "="))
        rCyl = sqrt(x**2 + y**2)
        phiCyl = atan2(y, x)
        print(f"--> r: {round(rCyl, precision)} meters")
        print(f"--> ϕ: {round(degrees(phiCyl), precision)}° --> ϕ: {round(phiCyl, precision)} radians")
        print(f"--> z: {round(z, precision)} meters")
        # Spherical: R = |(x,y,z)|, theta measured from the +z axis.
        print("\n" + " Spherical ".center(70, "="))
        rSph = sqrt(x**2 + y**2 + z**2)
        thetaSph = atan2(sqrt(x**2 + y**2), z)
        phiSph = atan2(y, x)
        print(f"--> R: {round(rSph, precision)} meters")
        print(f"--> θ: {( str(round(degrees(thetaSph), precision)) + '°').ljust(5 + precision)} --> θ: {str(round(thetaSph, precision)).ljust(4 + precision)} radians")
        print(f"--> ϕ: {( str(round(degrees(phiSph), precision)) + '°').ljust(5 + precision)} --> ϕ: {str(round(phiSph, precision)).ljust(4 + precision)} radians")
        print(" ")
    except KeyboardInterrupt:
        # Clean exit requested by the user.
        break
    except Exception as ex:
        # Bad input (non-numeric) or missing precision.txt: report and retry.
        print(ex)
| [
"noreply@github.com"
] | Mehmet-Emre-Dogan.noreply@github.com |
8e3e24dbaf8f060baabdb27e369e694fda8859dc | 7dc9622dd18814420c902b24c00905426fc9fdde | /codesignal/twosigma/optimalstockbasket.py | 288bd2eb113d4344471d9a554bf38c0fb79d6f90 | [] | no_license | aaronbae/leetcode | 8ad620dd12998d7a750173752f348ca6ac7819d4 | 4cdcb4f31f2b7c02e43a0757e04a9709cc6ede5d | refs/heads/master | 2022-12-18T22:34:59.071961 | 2020-09-18T23:46:55 | 2020-09-18T23:46:55 | 276,296,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | def optimalStockBasket(stocks, riskBudget):
N = len(stocks)
dp = [[0 for _ in range(riskBudget + 1)] for _ in range(N+1)]
for i in range(N+1):
if i > 0:
curr_val, curr_risk = stocks[i-1]
for total_budget in range(riskBudget + 1):
dont = dp[i - 1][total_budget]
do = 0
if total_budget >= curr_risk:
do = curr_val + dp[i - 1][total_budget - curr_risk]
dp[i][total_budget] = max(dont, do)
print(dp[i])
return dp[N][riskBudget]
a = [[-1, 2], [10, 15], [11, 13], [9, 10]]
b = 30
print(optimalStockBasket(a,b)) | [
"aaron@aaronbae.com"
] | aaron@aaronbae.com |
3b04726b71cab5d88aeae61b12601bafd180287f | bdc08f392ff275ccd76e7869ca6e79c527a4bf27 | /Parte1/UnitTest/text_analyzer1.py | c7c3da0359e3e8dc7db05fc17f9558de515ecb87 | [] | no_license | Wolfant/EspePython | 2f622c06e51d0c85f844a1437cc6ba79e916a96a | d2f6cab5993e0f83a95664d136cc348774a79965 | refs/heads/master | 2020-04-07T00:12:43.626498 | 2018-11-16T19:43:15 | 2018-11-16T19:43:15 | 157,894,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import unittest
class TextAnalysisTests(unittest.TestCase):
"""Test para la funcion ``analyze_text()`` """
# Toda funcion que inicia con test_ se ejecuta
def test_funciont_runs(self):
"""Basic smoke test: ejecuta la funcion. """
analyze_text()
if __name__ == '__main__':
unittest.main()
| [
"antonio@insuasti.ec"
] | antonio@insuasti.ec |
eabf8c7433d5935d351c7bf6ec3ca5eec9a6e73c | d2205d4031ddf39ae42cba4adfd544d9a77f6b48 | /pesa_exchange/wallet/views.py | 3ceb199cebbdbc275035305f47983d77353bb306 | [] | no_license | jimmyaduvagah/pesa-exchange | c1f1a218352ce3a51accdbe2f8a6e0d7a55a80d7 | b5b1941988236890a7cb05e6612295afa2df39e0 | refs/heads/main | 2023-06-03T07:46:39.664323 | 2021-06-23T17:42:21 | 2021-06-23T17:42:21 | 378,160,993 | 0 | 1 | null | 2021-06-23T17:42:22 | 2021-06-18T13:35:22 | Python | UTF-8 | Python | false | false | 4,981 | py | from django import db
from rest_framework import viewsets
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from pesa_exchange.wallet.serializer import (WalletSerializer,
AccountEntrySerializer, TransactionSerializer, Wallet, AccountEntry,
Transaction)
from pesa_exchange.wallet.models import (create_cr_entry,
create_dr_entry, post_transaction)
from pesa_exchange.currency.models import (get_user_currency_rate,
get_usd_amount)
class WalletViewSet(viewsets.ModelViewSet):
    """
    A viewset for Wallet instances.

    Standard DRF CRUD endpoints over all wallets; authentication required.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = WalletSerializer
    queryset = Wallet.objects.all()

class AccountEntryViewSet(viewsets.ModelViewSet):
    """
    A viewset for AccountEntry instances.

    Standard DRF CRUD endpoints over all account entries; authentication required.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = AccountEntrySerializer
    queryset = AccountEntry.objects.all()

class TransactionViewSet(viewsets.ModelViewSet):
    """
    A viewset for Transaction instances.

    Standard DRF CRUD endpoints over all transactions; authentication required.
    """
    permission_classes = (IsAuthenticated,)
    serializer_class = TransactionSerializer
    queryset = Transaction.objects.all()
@api_view(['POST'])
def transact(request):
    """
    Handle every wallet transaction a user can make on the system:
    deposit, withdrawal and transfer.

    Expects a POST body with ``amount`` (> 0), ``transaction_type``
    ("deposit", "withdrawal" or "transfer") and, for transfers,
    ``transfer_to`` (the receiver's e-mail address).
    """
    amount = request.data.get('amount', 0)
    user = request.user
    if request.method == 'POST' and user and amount > 0:
        user_wallet = Wallet.objects.get(owner=user)
        # NOTE(review): this guard reads ``currency`` while the rate lookup
        # below uses ``currency_default`` — confirm which field the Wallet
        # model actually defines.
        if not user_wallet.currency:
            response_message = {
                "Currency": "Set your default currency in your profile page before transacting"}
            return Response(data=response_message)
        if user_wallet.balance < amount:
            response_message = {
                "Balance":"Your Wallet Balance is lower than the Transaction amount of {}.".format(amount)}
            return Response(data=response_message)
        transaction_type = request.data.get('transaction_type', None)
        rate = get_user_currency_rate(user_wallet.currency_default)
        usd_amount = get_usd_amount(rate, amount)
        # BUG FIX: the original condition was
        # ``transaction_type == "deposit" or "withdrawal"``, which is always
        # truthy (the literal "withdrawal" is a non-empty string), so the
        # "transfer" branch below was unreachable.
        if transaction_type in ("deposit", "withdrawal"):
            try:
                make_deposit_or_withdrawal(
                    user_wallet, amount, usd_amount, transaction_type)
                # NOTE(review): passing a model instance as ``data=`` is
                # unusual; WalletSerializer(user_wallet).data is the common
                # DRF pattern — confirm intent before changing.
                serializer = WalletSerializer(data=user_wallet)
                serializer.is_valid(raise_exception=True)
                return Response(serializer.data, status=status.HTTP_200_OK)
            except Exception:
                return Response(status=status.HTTP_400_BAD_REQUEST)
        elif transaction_type == "transfer":
            transfer_to = request.data.pop('transfer_to', None)
            receiver_wallet = Wallet.objects.get(owner__email=transfer_to)
            try:
                make_transfer(user_wallet, receiver_wallet, amount, usd_amount)
                serializer = WalletSerializer(data=user_wallet)
                serializer.is_valid(raise_exception=True)
                return Response(serializer.data, status=status.HTTP_200_OK)
            except Exception:
                return Response(status=status.HTTP_400_BAD_REQUEST)
        else:
            response_message = {"WrongTransactionType":"Wrong Transactin Type."}
            return Response(data=response_message)
    else:
        return Response(status=status.HTTP_400_BAD_REQUEST)
def make_deposit_or_withdrawal(user_wallet, amount, usd_amount, transaction_type):
    """Helper function for carrying out the actual deposit or withdrawal transactions.

    Books a debit/credit entry pair against the in-house facilitating wallet
    and adjusts the user's balance in the wallet's own currency.
    ``usd_amount`` is the USD-normalised value of ``amount``.
    """
    facilitating_wallet = Wallet.objects.get(owner__email="facilitatingUser@pesaexchange.com")
    if transaction_type == "deposit":
        # NOTE(review): entry-type codes 'D'/'W' presumably mean
        # deposit/withdrawal — confirm against the models module.
        dr_entry = create_dr_entry(user_wallet, usd_amount, 'D')
        cr_entry = create_cr_entry(facilitating_wallet, usd_amount, 'W')
        post_transaction(dr_entry, cr_entry)
        user_wallet.balance += amount
        user_wallet.save()
    else:
        # Withdrawal: mirror image of the deposit bookings.
        dr_entry = create_dr_entry(facilitating_wallet, usd_amount, 'D')
        cr_entry = create_cr_entry(user_wallet, usd_amount, 'W')
        post_transaction(dr_entry, cr_entry)
        user_wallet.balance -= amount
        user_wallet.save()
def make_transfer(user_wallet, receiver_wallet, amount, usd_amount):
    """Helper function for carrying out the actual Transfer transaction.

    Debits the sender by ``amount`` (their own currency) and credits the
    receiver with the USD amount converted into the receiver's default
    currency (1:1 when the receiver has no default currency set).
    """
    rate = 1
    if receiver_wallet.currency_default:
        rate = get_user_currency_rate(receiver_wallet.currency_default)
    transfer_amount = round(rate * usd_amount, 4)
    dr_entry = create_dr_entry(receiver_wallet, usd_amount, 'D')
    cr_entry = create_cr_entry(user_wallet, usd_amount, 'T')
    post_transaction(dr_entry, cr_entry)
    user_wallet.balance -= amount
    user_wallet.save()
    # BUG FIX: the receiver was being debited (-=) instead of credited;
    # a transfer must ADD the converted amount to the receiver's balance.
    receiver_wallet.balance += transfer_amount
    receiver_wallet.save()
| [
"jimmy.aduvagah@gmail.com"
] | jimmy.aduvagah@gmail.com |
f3a5e46acb64711021bf454c7e8f5af682764ebf | 48f10cc3520ba8cfa5f3478e4b021766e4d5f29b | /openpyexcel/drawing/tests/test_text.py | 82b747c27e487d5e6e9267a416b1ef8698b4401b | [
"MIT"
] | permissive | sciris/openpyexcel | bef5094d193e62806164c77777fe8c741511aaec | 1fde667a1adc2f4988279fd73a2ac2660706b5ce | refs/heads/master | 2022-01-20T14:04:52.196385 | 2019-02-06T22:48:38 | 2019-02-06T22:48:38 | 168,293,752 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,372 | py | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
import pytest
from openpyexcel.xml.functions import fromstring, tostring
from openpyexcel.tests.helper import compare_xml
# Fixture: lazily import Paragraph so collection works without the package
# being importable at module load time.
@pytest.fixture
def Paragraph():
    from ..text import Paragraph
    return Paragraph


class TestParagraph:
    # Round-trip tests for the DrawingML <a:p> paragraph element.

    def test_ctor(self, Paragraph):
        text = Paragraph()
        xml = tostring(text.to_tree())
        expected = """
        <p xmlns="http://schemas.openxmlformats.org/drawingml/2006/main">
          <r>
            <t/>
          </r>
        </p>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff


    def test_from_xml(self, Paragraph):
        src = """
        <p />
        """
        node = fromstring(src)
        text = Paragraph.from_tree(node)
        assert text == Paragraph()


    def test_multiline(self, Paragraph):
        # A paragraph with two runs should expose both text fragments.
        src = """
        <p>
          <r>
            <t>Adjusted Absorbance vs.</t>
          </r>
          <r>
            <t> Concentration</t>
          </r>
        </p>
        """
        node = fromstring(src)
        para = Paragraph.from_tree(node)
        assert len(para.text) == 2


@pytest.fixture
def ParagraphProperties():
    from ..text import ParagraphProperties
    return ParagraphProperties


class TestParagraphProperties:
    # Serialization round-trip for the empty <a:pPr> properties element.

    def test_ctor(self, ParagraphProperties):
        text = ParagraphProperties()
        xml = tostring(text.to_tree())
        expected = """
        <pPr xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" />
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff


    def test_from_xml(self, ParagraphProperties):
        src = """
        <pPr />
        """
        node = fromstring(src)
        text = ParagraphProperties.from_tree(node)
        assert text == ParagraphProperties()
from ..spreadsheet_drawing import SpreadsheetDrawing


class TestTextBox:
    # Parses a full drawing fixture file and walks down to the text body.

    def test_from_xml(self, datadir):
        datadir.chdir()
        with open("text_box_drawing.xml") as src:
            xml = src.read()
        node = fromstring(xml)
        drawing = SpreadsheetDrawing.from_tree(node)
        anchor = drawing.twoCellAnchor[0]
        box = anchor.sp
        meta = box.nvSpPr
        graphic = box.graphicalProperties
        text = box.txBody
        assert len(text.p) == 2


@pytest.fixture
def CharacterProperties():
    from ..text import CharacterProperties
    return CharacterProperties


class TestCharacterProperties:
    # Round-trip for <a:defRPr> run properties (font, size, bold, fill).

    def test_ctor(self, CharacterProperties):
        from ..text import Font
        normal_font = Font(typeface='Arial')
        text = CharacterProperties(latin=normal_font, sz=900, b=False, solidFill='FFC000')
        xml = tostring(text.to_tree())
        expected = ("""
        <a:defRPr xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"
        b="0" sz="900">
        <a:solidFill>
          <a:srgbClr val="FFC000"/>
        </a:solidFill>
        <a:latin typeface="Arial"/>
        </a:defRPr>
        """)
        diff = compare_xml(xml, expected)
        assert diff is None, diff


    def test_from_xml(self, CharacterProperties):
        src = """
        <defRPr sz="110"/>
        """
        node = fromstring(src)
        text = CharacterProperties.from_tree(node)
        assert text == CharacterProperties(sz=110)
@pytest.fixture
def Font():
    from ..text import Font
    return Font


class TestFont:
    # Round-trip for the <a:latin> typeface element.

    def test_ctor(self, Font):
        fut = Font("Arial")
        xml = tostring(fut.to_tree())
        expected = """
        <latin typeface="Arial"
             xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" />
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff


    def test_from_xml(self, Font):
        src = """
        <latin typeface="Arial" pitchFamily="40"
           xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" />
        """
        node = fromstring(src)
        fut = Font.from_tree(node)
        assert fut == Font(typeface="Arial", pitchFamily=40)


@pytest.fixture
def Hyperlink():
    from ..text import Hyperlink
    return Hyperlink


class TestHyperlink:
    # Round-trip for the <a:hlinkClick> element.

    def test_ctor(self, Hyperlink):
        link = Hyperlink()
        xml = tostring(link.to_tree())
        expected = """
        <hlinkClick xmlns="http://schemas.openxmlformats.org/drawingml/2006/main"/>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff


    def test_from_xml(self, Hyperlink):
        src = """
        <hlinkClick tooltip="Select/de-select all"/>
        """
        node = fromstring(src)
        link = Hyperlink.from_tree(node)
        assert link == Hyperlink(tooltip="Select/de-select all")


@pytest.fixture
def LineBreak():
    from ..text import LineBreak
    return LineBreak


class TestLineBreak:
    # Round-trip for the <a:br> line-break element.

    def test_ctor(self, LineBreak):
        fut = LineBreak()
        xml = tostring(fut.to_tree())
        expected = """ <br xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" /> """
        diff = compare_xml(xml, expected)
        assert diff is None, diff


    def test_from_xml(self, LineBreak):
        src = """
        <br />
        """
        node = fromstring(src)
        fut = LineBreak.from_tree(node)
        assert fut == LineBreak()
| [
"questionably@gmail.com"
] | questionably@gmail.com |
2b4eef58060859c6c9fd550d8856fc0b735737c1 | b1f9c4393cea13eac9c6deba02e98ea30e637d51 | /Telegram-File-Observer.py | b7ee4b08788ccc3581b6c7bd4251969d1f024600 | [] | no_license | NilsDeckert/Telegram-File-Observer | b492a6dc8773f027043022967cad2ce6ed416e51 | 63c8977c0d2549b357aeb82be0c73e2726fb6b21 | refs/heads/master | 2023-05-26T10:52:16.834404 | 2021-06-02T06:15:44 | 2021-06-02T06:15:44 | 242,402,897 | 1 | 0 | null | 2023-05-22T21:38:55 | 2020-02-22T19:56:29 | Python | UTF-8 | Python | false | false | 13,399 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import telegram
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from functools import wraps
import logging
import filecmp
import threading
import calendar
import datetime
import time
import cmp
import printPdf
from config import mysql_pw
from config import telegram_pw
from config import settings
import mysql.connector
from mysql.connector import errorcode
#Telegram Setup
bot = telegram_pw.bot
updater = telegram_pw.updater
dispatcher = updater.dispatcher
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logging.info("Script started")
#DB setup
# NOTE(review): ``global`` at module level is a no-op — these two statements
# have no effect and can be removed.
global mydb
global mycursor
mydb = mysql_pw.login
mycursor = mydb.cursor()
#File Observer Setup
# Path of the observed file; NOTE: ``file`` shadows the Python 2 builtin.
file = settings.file
# checks if file was updated, if so sends it every user in the database who subscribed to the newsletter
def check():
    """Poll for an updated file and mail it to matching subscribers.

    Runs once, then re-schedules itself with ``threading.Timer`` so the
    check effectively repeats every ``settings.update_interval`` seconds.
    """
    n = 0  # number of users the newsletter was sent to in this run
    mydb = mysql_pw.login
    mycursor = mydb.cursor()
    if cmp.check_v():  # True when the downloaded file differs from the cached copy
        v_prm = 'v/v.pdf'
        v_prm_size = os.stat(v_prm).st_size // 1024 # file size in kb
        logging.info('File was updated ({}kb)'.format(str(v_prm_size))) # prints file size
        updates_time_date() # triggers function that creates database entry to log the update
        logging.info('Converting pdf to text...')
        printPdf.pdftotext()
        logging.info('Finished!')
        logging.info('Sending newsletter...')
        try:
            mycursor.execute("SELECT user_id, grade FROM users WHERE sub=1") # lists all users who subscribed to newsletter
            records = mycursor.fetchall()
            for row in records:
                grade = str(row[1])
                u_id = str(row[0])
                if printPdf.grade_check(grade): # checks if users grade is in the pdfs content. empty string -> user receives every update
                    try:
                        # NOTE(review): ``send_v`` and ``ErrorNot`` are defined
                        # elsewhere in this module (outside this excerpt).
                        send_v(u_id, 'v/v.pdf')
                        logging.info('Sent newsletter to {} ({})'.format(u_id, grade))
                        n += 1
                    except:
                        logging.error("Error in grade_check")
            mycursor.close()
            logging.info("Finished sending newsletter")
            print("Sent newsletter to {} users".format(str(n)))
        except:
            logging.error("check() - Error sending newsletter")
            ErrorNot("Sending newsletter")
    else:
        if settings.no_update_message:
            logging.info("No updated file")
    # Schedule the next poll; runs even when no update was found.
    threading.Timer(settings.update_interval, check).start()
# Creates database entry to log every update of the file
def updates_time_date():
    """Record a file update in the database.

    Inserts a row into ``updates_time`` (date, time, weekday, file size in kb)
    and creates or increments the per-day counter row in ``updates_date``.
    All queries are parameterized (the original interpolated values with
    str.format, which is fragile and unsafe as a pattern).
    """
    mydb = mysql_pw.login
    mycursor = mydb.cursor()
    current_date = datetime.datetime.now().strftime('%Y-%m-%d')
    current_time = datetime.datetime.now().strftime('%H:%M:%S')
    current_day = datetime.datetime.now().strftime('%A')
    v_prm = 'v/v.pdf'
    v_prm_size = os.stat(v_prm).st_size // 1024  # file size in kb
    try:
        # creates database entry with the current date, time, day of the week and the file size of the updated file
        sql = "INSERT INTO updates_time (date, time, day, file_size_kb) VALUES (%s, %s, %s, %s)"
        val = (current_date, current_time, current_day, v_prm_size)
        mycursor.execute(sql, val)
        mydb.commit()
        mycursor.close()
        mydb = mysql_pw.login
        mycursor = mydb.cursor()
        mycursor.execute("SELECT * FROM updates_date WHERE date = %s", (current_date,))
        records = mycursor.fetchall()
        # if this is the first update today, a new entry with today's date and day of the week is created
        if records == []:
            sql = "INSERT INTO updates_date (date, day, updates) VALUES (%s, %s, %s)"
            val = (current_date, current_day, '1')
            mycursor.execute(sql, val)
            logging.info("New entry for {} in updates_date".format(current_date))
        # if the file has been updated earlier this day, the count goes up by 1 for every additional update
        else:
            for row in records:
                date_updates = row[2] + 1
                mycursor.execute("UPDATE updates_date SET updates = %s WHERE date = %s",
                                 (date_updates, current_date))
                logging.info("Updated entry for {} in updates_date ({})".format(current_date, date_updates))
        mydb.commit()
        mycursor.close()
    except:
        # broad except kept deliberately: any DB failure is reported to the admin
        logging.error('updates_time_date() - Error commiting to database')
        ErrorNot("updates_time_date() - Error commiting to database")
# Handles the start of a new conversation
def start(update, context):
    """Telegram /start handler: register first-time users and send the welcome message."""
    mydb = mysql_pw.login
    mycursor = mydb.cursor()
    # logs user info
    # Note: user id is the only string that's never empty, users can choose to only set an username or their first name
    u_id = str(update.message.from_user.id) # user id
    u_username = str(update.message.from_user.username) # username
    u_first_name = str(update.message.from_user.first_name) # first name
    u_last_name = str(update.message.from_user.last_name) # last name
    u_lang = str(update.message.from_user.language_code) # language code
    logging.info("{}: /start".format(u_id))
    try:
        # checks if user has uses the bot before / if there's an entry for this user already
        # NOTE(review): u_id is interpolated with str.format; telegram ids are numeric,
        # but a parameterized query would be the safer pattern here
        mycursor.execute("SELECT * FROM users WHERE user_id='{}'".format(u_id))
        records = mycursor.fetchall()
        # if there's no entry for this user in the database
        # all user info is saved in it.
        if records == []:
            logging.info("New user")
            print("\n" + "New user!")
            print("---")
            print("id: " + u_id)
            print("username: " + u_username)
            print("first name: " + u_first_name)
            print("last name: " + u_last_name)
            print("lang: " + u_lang)
            print("---")
            mycursor.close()
            try:
                # new users start with an empty grade filter and sub=1 (subscribed)
                mydb = mysql_pw.login
                mycursor = mydb.cursor()
                sql1 = "INSERT INTO users (user_id, username, first_name, last_name, lang, grade, sub) VALUES (%s, %s, %s, %s, %s, %s, 1)"
                val1 = (u_id, u_username, u_first_name, u_last_name, u_lang, '')
                mycursor.execute(sql1, val1)
                mydb.commit()
                mycursor.close()
            except:
                logging.error('Error commiting to database')
                ErrorNot("Commiting new user info to database")
            # Sends message to the bots admin containing the new user's information
            if settings.new_user_notification:
                bot.send_message(chat_id=settings.admin_id, text="New User:\n"
                                 + "id: {} \n".format(u_id)
                                 + "username: {} \n".format(u_username)
                                 + "first name: {} \n".format(u_first_name)
                                 + "last name: {} \n".format(u_last_name)
                                 + "lang: {}".format(u_lang))
        else:
            mycursor.close()
    except:
        logging.error('Error in start()')
        ErrorNot("Error in start()")
    context.bot.send_message(chat_id=update.effective_chat.id, text=settings.welcome_message,parse_mode=telegram.ParseMode.MARKDOWN)
# if the bot runs into problems, it sends a message to the bots admin
def ErrorNot(source):
    """Notify the bot admin that an error occurred in *source*."""
    notification = "Error: " + source
    bot.send_message(chat_id=settings.admin_id, text=notification)
    logging.info("Sent error Notification")
# Lets users subscribe to the newsletter
def subscribe(update, context):
    """Telegram /subscribe handler: flag the user as a newsletter subscriber."""
    try:
        mydb = mysql_pw.login
        mycursor = mydb.cursor()
        u_id = str(update.message.from_user.id)
        # parameterized query instead of string formatting (safer SQL pattern)
        mycursor.execute("UPDATE users SET sub = 1 WHERE user_id = %s", (u_id,))
        mydb.commit()
        mycursor.close()
        logging.info('{} subscribed to newsletter'.format(u_id))
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 text=settings.subscribe_message, parse_mode=telegram.ParseMode.MARKDOWN)
    except:
        logging.error('Failed subscribing to newsletter')
# Lets users unsubscribe from newsletter
def unsubscribe(update, context):
    """Telegram /unsubscribe handler: clear the user's newsletter flag.

    Bug fix: the reply previously read `text="settings.unsubscribe_message)` -
    an unterminated string literal (a SyntaxError that prevented the whole
    module from loading). It now sends the configured message. The UPDATE is
    also parameterized instead of built with str.format.
    """
    try:
        mydb = mysql_pw.login
        mycursor = mydb.cursor()
        u_id = str(update.message.from_user.id)
        mycursor.execute("UPDATE users SET sub = 0 WHERE user_id = %s", (u_id,))
        mydb.commit()
        mycursor.close()
        logging.info('{} unsubscribed from newsletter'.format(u_id))
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 text=settings.unsubscribe_message)
    except:
        logging.error('Failed unsubscribing from newsletter')
# let users set the string they want to be notified if its in the pdf
def grade(update, context):
    """Telegram /grade handler: show, set, or reset the keyword ("grade")
    that filters which PDF updates the user receives.

    /grade          -> show the current keyword (or how to set one)
    /grade reset    -> clear the keyword (user then receives every update)
    /grade <value>  -> set the keyword

    Bug fixes: the no-argument branch contained an unterminated string
    literal (`text="settings.grade_message_new,...` - a SyntaxError), and
    the keyword is user-controlled, so interpolating it into SQL with
    str.format was injectable; all queries are now parameterized.
    """
    u_id = str(update.message.from_user.id)
    args = context.args
    mydb = mysql_pw.login
    mycursor = mydb.cursor()
    if len(args) == 0:
        mycursor.execute("SELECT * FROM users WHERE user_id = %s", (u_id,))
        records = mycursor.fetchall()
        for row in records:
            # if no string has been set yet
            if row[6] == "":
                context.bot.send_message(chat_id=update.effective_chat.id,
                                         text=settings.grade_message_new,
                                         parse_mode=telegram.ParseMode.MARKDOWN)
            # sends users their current string
            else:
                context.bot.send_message(chat_id=update.effective_chat.id,
                                         text=settings.grade_message_current + row[6]
                                         + settings.grade_message_current_reset,
                                         parse_mode=telegram.ParseMode.MARKDOWN)
        mycursor.close()
    elif len(args) == 1:
        # resets users string
        if args[0] == "reset":
            mycursor.execute("UPDATE users SET grade = '' WHERE user_id = %s", (u_id,))
            mydb.commit()
            logging.info('{}: Reset grade'.format(u_id))
            context.bot.send_message(chat_id=update.effective_chat.id,
                                     text=settings.grade_message_reset,
                                     parse_mode=telegram.ParseMode.MARKDOWN)
        # sets users string
        else:
            u_grade = args[0]
            mycursor.execute("UPDATE users SET grade = %s WHERE user_id = %s", (u_grade, u_id))
            mydb.commit()
            logging.info('{}: Set grade to {}'.format(u_id, u_grade))
            context.bot.send_message(chat_id=update.effective_chat.id,
                                     text=settings.grade_message_new + u_grade
                                     + settings.grade_message_reset,
                                     parse_mode=telegram.ParseMode.MARKDOWN)
        mycursor.close()
    elif len(args) > 1:
        mycursor.close()
        context.bot.send_message(chat_id=update.effective_chat.id, text=settings.grade_message_too_many_arguments)
# sends the given file to the given userid
# sends the given file to the given userid
def send_v(u_id, v_file):
    """Send *v_file* as a document to chat *u_id*, showing an upload indicator.

    Bug fix: the file handle was opened inline and never closed; a context
    manager now guarantees it is released after sending.
    """
    bot.sendChatAction(chat_id=u_id, action="upload_document")
    with open(v_file, 'rb') as document:
        bot.send_document(chat_id=u_id, document=document)
# Manually request file
def PDF(update, context):
    """Telegram /PDF handler: send the current document to the requesting user."""
    u_id = str(update.message.from_user.id)
    v_prm = 'v/v.pdf'  # NOTE(review): unused here; the module-level `file` (settings.file) is sent instead - confirm which is intended
    logging.info("{}: /PDF".format(u_id))
    bot.send_document(chat_id=u_id, document=file)
# Sends the bot's link
def share(update, context):
    """Telegram /link handler: reply with the bot's t.me share link."""
    requester_id = str(update.message.from_user.id)
    logging.info("{}: /link".format(requester_id))
    bot_username = str(bot.get_me().username)
    share_link = "https://t.me/" + bot_username
    context.bot.send_message(chat_id=update.effective_chat.id,text=share_link)
# Resends user's message back to them if it's no command
def echo(update, context):
    """Fallback handler: echo any non-command text back to the sender."""
    sender_id = str(update.message.from_user.id)
    message_text = update.message.text
    context.bot.send_message(chat_id=sender_id, text=message_text)
    logging.info("{}: '{}'".format(sender_id, message_text))
# lists all commands
def help(update,context):
    """Telegram /help handler: send the configured list of available commands."""
    # NOTE: shadows the builtin `help` at module level
    logging.info("{}: /help".format(update.message.from_user.id))
    context.bot.send_message(chat_id=update.effective_chat.id,text=settings.help_message,parse_mode=telegram.ParseMode.MARKDOWN)
#handler
# one CommandHandler per bot command, plus a catch-all echo handler for plain text
start_handler = CommandHandler('start', start)
PDF_handler = CommandHandler('PDF', PDF)
sub_handler = CommandHandler('subscribe', subscribe)
unsub_handler = CommandHandler('unsubscribe', unsubscribe)
echo_handler = MessageHandler(Filters.text, echo)
help_handler = CommandHandler('help', help)
grade_handler = CommandHandler('grade', grade)
share_handler = CommandHandler('link', share)
#dispatcher
# NOTE(review): echo_handler is registered before several command handlers;
# depending on the library version Filters.text can match commands too - verify ordering
dispatcher.add_handler(start_handler)
dispatcher.add_handler(PDF_handler)
dispatcher.add_handler(echo_handler)
dispatcher.add_handler(sub_handler)
dispatcher.add_handler(unsub_handler)
dispatcher.add_handler(help_handler)
dispatcher.add_handler(grade_handler)
dispatcher.add_handler(share_handler)
print("\n" + "--- Bot up and running ---")
bot_username = str(bot.get_me().username)
bot_first_name = str(bot.get_me().first_name)
bot_id = str(bot.get_me().id)
print("username: " + bot_username)
print("first_name: " + bot_first_name)
print("id: " + bot_id)
print("-----" + "\n")
check()  # start the file-update polling loop (re-schedules itself via threading.Timer)
updater.start_polling()  # begin receiving telegram updates
| [
"nils_deckert@web.de"
] | nils_deckert@web.de |
f5721dc0ddc404eb5d47b884496febe60611c599 | 4c0385414e0ea56374ac1b723a6114a37d1b1726 | /Lucas Testing/base/ocr comp 2.py | cd39fe3ec4964af1e816d9c82b154a13be161667 | [] | no_license | MagicTravelAgent/Data-Science | ce7e1f4ec5a900b94d9aed90073214c75678c28e | 6de4101e31ec7b3e13e397489d4e2f7905280b52 | refs/heads/main | 2023-06-02T14:47:48.894875 | 2021-06-18T14:40:13 | 2021-06-18T14:40:13 | 339,427,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,390 | py | try:
from PIL import Image
except ImportError:
import Image
import pytesseract as pt
import re
import io
import glob
import csv
import improve_spelling as imp
import os
cwd = os.getcwd() # Get the current working directory (cwd)
files = os.listdir(cwd) # Get all the files in that directory
print("Files in %r: %s" % (cwd, files))  # debug aid: confirm the expected images are present
#--------------------------------------------------------------------- simple OCR for scraped and manual download
# default (English) tesseract pass over both source images;
# the redundant f.close() inside each `with` block was removed (the context manager closes the file)
text = pt.image_to_string(Image.open('scraped.jpeg'))
with open("simple_scraped.txt", "w", encoding="utf-8") as f:
    f.write(text)
text = pt.image_to_string(Image.open('manual.jpg'))
with open("simple_manual.txt", "w", encoding="utf-8") as f:
    f.write(text)
#--------------------------------------------------------------------- OCR for scraped and manual with dutch post from tesseract
# second OCR pass using tesseract's Dutch ('nld') language model;
# redundant f.close() inside `with` removed (the context manager closes the file)
text = pt.image_to_string(Image.open('scraped.jpeg'), lang='nld')
with open("dutch_post_scraped.txt", "w", encoding="utf-8") as f:
    f.write(text)
text = pt.image_to_string(Image.open('manual.jpg'), lang='nld')
with open("dutch_post_manual.txt", "w", encoding="utf-8") as f:
    f.write(text)
#--------------------------------------------------------------------- using the speller improver for simple scraped and manual
# spell-correct the plain OCR output; redundant f.close() inside `with` removed
with open("simple_scraped.txt", "r", encoding="utf-8") as f:
    text = f.read()
i = imp.improve_spelling(text)
with open("simple_scraped_checked.txt", "w", encoding="utf-8") as f:
    f.write(i)
with open("simple_manual.txt", "r", encoding="utf-8") as f:
    text = f.read()
i = imp.improve_spelling(text)
with open("simple_manual_checked.txt", "w", encoding="utf-8") as f:
    f.write(i)
# --------------------------------------------------------------------- using the speller improver for simple and manual post dutch
with open("dutch_post_scraped.txt", "r", encoding="utf-8") as f:
    text = f.read()
i = imp.improve_spelling(text)
with open("dutch_post_scraped_checked.txt", "w", encoding="utf-8") as f:
    f.write(i)
# bug fix: this previously re-read simple_manual.txt (copy-paste error), so the
# "checked" Dutch file actually contained spell-checked plain-OCR text
with open("dutch_post_manual.txt", "r", encoding="utf-8") as f:
    text = f.read()
i = imp.improve_spelling(text)
with open("dutch_post_manual_checked.txt", "w", encoding="utf-8") as f:
    f.write(i)
f.close() | [
"lucas.puddifoot@student.ru.nl"
] | lucas.puddifoot@student.ru.nl |
41e9e39d9234f668e5bdebd3c69be5fac6a52ed8 | bc074a145c83c53c24288a62806e9806f4bf992f | /lib/bp_utils/filt.py | 8ef6443344a1f4016b9beb9ad690d9e0634a3618 | [] | no_license | Genomon-Project/GenomonBreakPoint | 4b9f44751894d67d8e19a0170f162ab15ce6b237 | 0eed3922c483edcc8a181af042fcce86ad9d9203 | refs/heads/master | 2021-06-09T06:36:31.676564 | 2016-11-20T13:26:36 | 2016-11-20T13:26:36 | 73,768,508 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,154 | py | #! /usr/bin/env python
import sys, gzip, math, numpy
import pysam
from scipy import stats
def filter_by_control(tumor_bp_file, output_file, matched_control_bp_file, merged_control_file,
                      min_support_num, min_median_mapq, min_max_clip_size, max_control_num_thres):
    """Filter tumor breakpoint candidates against control samples (Python 2).

    Reads gzipped tab-separated breakpoint records from *tumor_bp_file*:
    columns 0-3 identify the breakpoint (used as tabix contig/position and
    for record matching), column 5 holds ';'-joined mapping qualities and
    column 6 ';'-joined clip sizes of the supporting reads.

    A candidate is dropped when it has fewer than *min_support_num* reads,
    a median MAPQ below *min_median_mapq*, a maximum clip size below
    *min_max_clip_size*, appears in the merged (panel-of-normals) control,
    or has more than *max_control_num_thres* supporting reads in the
    matched control. Surviving candidates are written to *output_file* as:
    chrom, pos, dir, junc_seq, tumor support count, matched-control count.
    """
    # empty path strings disable the corresponding control filter
    use_matched_control = True if matched_control_bp_file != "" else False
    if use_matched_control: matched_control_db = pysam.TabixFile(matched_control_bp_file)
    use_merged_control = True if merged_control_file != "" else False
    if use_merged_control: merged_control_db = pysam.TabixFile(merged_control_file)
    hout = open(output_file, 'w')
    with gzip.open(tumor_bp_file, 'r') as hin:
        for line in hin:
            F = line.rstrip('\n').split('\t')
            mapqs = [int(x) for x in F[5].split(';')]
            clip_sizes = [int(x) for x in F[6].split(';')]
            # per-candidate read-support thresholds
            if len(mapqs) < min_support_num: continue
            if numpy.median(mapqs) < min_median_mapq: continue
            if max(clip_sizes) < min_max_clip_size: continue
            # filtering using merged control file
            merged_control_filt_flag = False
            if use_merged_control:
                tabixErrorFlag = 0
                try:
                    records = merged_control_db.fetch(F[0], int(F[1]) - 1, int(F[1]) + 1)
                except Exception as inst:
                    # tabix fetch can fail e.g. for contigs absent from the index
                    print >> sys.stderr, "%s: %s" % (type(inst), inst.args)
                    tabixErrorMsg = str(inst.args)
                    tabixErrorFlag = 1
                if tabixErrorFlag == 0:
                    for record_line in records:
                        record = record_line.split('\t')
                        # exact match on chrom, pos, direction and junction sequence
                        if record[0] == F[0] and record[1] == F[1] and record[2] == F[2] and record[3] == F[3]:
                            merged_control_filt_flag = True
            if merged_control_filt_flag: continue
            # get readnum from matched control file
            if use_matched_control:
                num_matched_control = 0
                tabixErrorFlag = 0
                try:
                    records = matched_control_db.fetch(F[0], int(F[1]) - 1, int(F[1]) + 1)
                except Exception as inst:
                    print >> sys.stderr, "%s: %s" % (type(inst), inst.args)
                    tabixErrorMsg = str(inst.args)
                    tabixErrorFlag = 1
                if tabixErrorFlag == 0:
                    for record_line in records:
                        record = record_line.split('\t')
                        if record[0] == F[0] and record[1] == F[1] and record[2] == F[2] and record[3] == F[3]:
                            # column 5 of the control record lists its supporting reads
                            num_matched_control = len(record[5].split(';'))
            else:
                num_matched_control = "---"
            if use_matched_control and num_matched_control > max_control_num_thres: continue
            print >> hout, '\t'.join(F[:4]) + '\t' + str(len(mapqs)) + '\t' + str(num_matched_control)
    hout.close()
def filter_by_allele_freq(input_file, output_file, tumor_bam, matched_control_bam, tumor_AF_thres, control_AF_thres, max_fisher_pvalue):
    """Filter breakpoint candidates by allele frequency and Fisher's exact test (Python 2).

    For each candidate in *input_file* (cols 0-3: breakpoint id, col 4:
    tumor variant reads, col 5: control variant reads), depths are taken
    from the BAMs via ``pysam depth``. Candidates below *tumor_AF_thres*,
    above *control_AF_thres*, or failing *max_fisher_pvalue* are dropped;
    the rest are written with counts and -log10 Fisher p-value.

    Bug fix: the Fisher branch was guarded by ``control_AF != ""`` which is
    always true, so with no matched control the placeholder string "---"
    was fed into fisher_exact (TypeError). The guard now skips the test
    when no control is available, matching the "---" output convention.
    """
    hout = open(output_file, 'w')
    print >> hout, '\t'.join(["Chr", "Pos", "Dir", "Junc_Seq",
                              "Num_Tumor_Total_Read", "Num_Tumor_Var_Read", "Num_Control_Total_Read", "Num_Control_Var_Read",
                              "Minus_Log_Fisher_P_value"])
    with open(input_file, 'r') as hin:
        for line in hin:
            F = line.rstrip('\n').split('\t')
            tumor_num = int(F[4])
            control_num = int(F[5])
            region = F[0] + ':' + F[1] + '-' + F[1]
            depth_tumor_info = pysam.depth(tumor_bam, "-r", region)
            depth_tumor = int(depth_tumor_info.rstrip('\n').split('\t')[2])
            AF_tumor = float(tumor_num) / depth_tumor
            if AF_tumor < tumor_AF_thres: continue
            if matched_control_bam != "":
                depth_control_info = pysam.depth(matched_control_bam, "-r", region)
                depth_control = int(depth_control_info.rstrip('\n').split('\t')[2]) if depth_control_info != "" else 0
                # zero control depth -> treat as fully variant (conservative filter)
                control_AF = float(control_num) / depth_control if depth_control > 0 else 1.0
            else:
                depth_control = "---"
                control_AF = "---"
            if control_AF != "---" and control_AF > control_AF_thres: continue
            lpvalue = "---"
            if control_AF != "---":
                oddsratio, pvalue = stats.fisher_exact([[depth_tumor - tumor_num, tumor_num], [depth_control - control_num, control_num]], 'less')
                if pvalue < 1e-100: pvalue = 1e-100
                lpvalue = (- math.log(pvalue, 10) if pvalue < 1 else 0)
                lpvalue = round(lpvalue, 4)
                if 10**(-lpvalue) > float(max_fisher_pvalue): continue
            print >> hout, '\t'.join(F[:4]) + '\t' + str(depth_tumor) + '\t' + str(tumor_num) + '\t' + \
                str(depth_control) + '\t' + str(control_num) + '\t' + str(lpvalue)
    hout.close()
| [
"friend1ws@gmail.com"
] | friend1ws@gmail.com |
253c7a9a5bd941d34612a50c98e5e1909fb4e991 | 1524d7e021111089427a48c95f0b610ea06e7838 | /Leetcode/recursiveIntersection.py | 31434d46d9625d5ebcd8d6acde6d632dcd5c345a | [] | no_license | tailaiwang/Competitive-Programming | 06b9ec454e012a9f2827dd9bef0b46032b6aff85 | d42045c29c23784e421845533bc17762277e6b34 | refs/heads/master | 2023-09-03T17:08:05.316134 | 2023-08-29T14:41:23 | 2023-08-29T14:41:23 | 229,350,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #recursiveIntersection
#Recursively determines the size of the intersection of two lists
def search(L, item, count):
    """Return 1 if *item* occurs in L[count:], else 0.

    Iterative rewrite of the original recursion: avoids RecursionError on
    long lists and no longer indexes into an empty list.
    """
    return 1 if item in L[count:] else 0

def helper(a, b, count, total):
    """Return *total* plus how many elements of a[count+1:] are present in b."""
    return total + sum(search(b, element, 0) for element in a[count + 1:])

def inter(a, b):
    """Return how many elements of *a* (counted per occurrence) appear in *b*."""
    if len(a) == 0 or len(b) == 0:
        return 0
    return helper(a, b, 0, search(b, a[0], 0))
# -- Test Cases -- #
a = [1,2,3,4,5]
b = [3,4,5,6,7]
print(inter(a,b))  # expected: 3 (elements 3, 4 and 5 are shared)
| [
"Tailai@MacBook-Pro.lan"
] | Tailai@MacBook-Pro.lan |
503112cb84d3052674effd376ad5426f1f603e8f | ac980500929604a5224440018625c4c1603eb44d | /src/datasets/packaged_modules/json/json.py | c0a085b07317044d6c024a92cdfcfa85679f72c7 | [
"Apache-2.0"
] | permissive | stancld/datasets | 57128c2890b95e7dd9988b8bbca03efc1b6f5e8a | 3c492222557f76aad3ef3296daeb756cc8a61390 | refs/heads/master | 2023-06-18T08:50:34.538560 | 2021-07-16T12:46:06 | 2021-07-16T12:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,455 | py | # coding=utf-8
import json
from dataclasses import dataclass
from io import BytesIO
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""
    # features: optional explicit schema; field: optional top-level JSON key holding the records
    features: Optional[datasets.Features] = None
    field: Optional[str] = None
    use_threads: bool = True
    block_size: Optional[int] = None
    newlines_in_values: Optional[bool] = None
    @property
    def pa_read_options(self):
        """pyarrow JSON ReadOptions derived from this config."""
        return paj.ReadOptions(use_threads=self.use_threads, block_size=self.block_size)
    @property
    def pa_parse_options(self):
        """pyarrow JSON ParseOptions derived from this config."""
        return paj.ParseOptions(newlines_in_values=self.newlines_in_values)
    @property
    def schema(self):
        """pyarrow schema built from `features`, or None when no features were given."""
        return pa.schema(self.features.type) if self.features is not None else None
class Json(datasets.ArrowBasedBuilder):
    """Dataset builder that loads JSON / JSON-Lines files into Arrow tables."""
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        """Return dataset metadata carrying the (optional) user-provided features."""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        # a bare str/list/tuple means a single unnamed split -> TRAIN
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        # otherwise a mapping of split name -> file(s)
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _generate_tables(self, files):
        """Yield (key, pyarrow.Table) pairs, one table per input file."""
        # NOTE(review): the ClassLabel loop below reuses `i`, shadowing this
        # file index, so the yielded key is the last column index - confirm intended
        for i, file in enumerate(files):
            if self.config.field is not None:
                with open(file, encoding="utf-8") as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    # re-serialize as JSON-Lines so pyarrow can stream-parse it
                    pa_table = paj.read_json(
                        BytesIO("\n".join(json.dumps(row) for row in dataset).encode("utf-8")),
                        read_options=self.config.pa_read_options,
                        parse_options=self.config.pa_parse_options,
                    )
                else:
                    pa_table = pa.Table.from_pydict(mapping=dataset)
            else:
                try:
                    with open(file, "rb") as f:
                        pa_table = paj.read_json(
                            f, read_options=self.config.pa_read_options, parse_options=self.config.pa_parse_options
                        )
                except pa.ArrowInvalid:
                    # not JSON-Lines: reload to report the available top-level fields
                    with open(file, encoding="utf-8") as f:
                        dataset = json.load(f)
                    raise ValueError(
                        f"Not able to read records in the JSON file at {file}. "
                        f"You should probably indicate the field of the JSON file containing your records. "
                        f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                        f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                    )
            if self.config.features:
                # Encode column if ClassLabel
                for i, col in enumerate(self.config.features.keys()):
                    if isinstance(self.config.features[col], datasets.ClassLabel):
                        pa_table = pa_table.set_column(
                            i, self.config.schema.field(col), [self.config.features[col].str2int(pa_table[col])]
                        )
                # Cast allows str <-> int/float, while parse_option explicit_schema does NOT
                # Before casting, rearrange JSON field names to match passed features schema field names order
                pa_table = pa.Table.from_arrays(
                    [pa_table[name] for name in self.config.features], schema=self.config.schema
                )
            yield i, pa_table
| [
"noreply@github.com"
] | stancld.noreply@github.com |
3e991d86f1a5f451311189788acb731c909742f4 | 97fe1eae2ef67dbb5ec298918b7b28598998217b | /code/environment.py | 0afefdb1236691463cbf4ce28a3e1bc608f02960 | [] | no_license | fiberleif/Tabular-Q-Learning | 01d06010f057106d7aa44ace7484c95325e7f53b | b09c075169778e3556952012f7ebd90d2fa3b139 | refs/heads/master | 2020-04-09T03:11:21.962988 | 2018-12-02T09:26:19 | 2018-12-02T09:26:19 | 159,972,829 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,003 | py | import argparse
# base class of environment
class Environment(object):
    """Grid-maze environment parsed from a text file.

    Maze file format (one row of characters per line): '*' marks an
    obstacle, 'S' a start cell, 'G' a goal cell; any other character is a
    free cell. States are [row, col] lists; actions are integers
    0=left, 1=up, 2=right, 3=down.

    Fixes over the original: the maze file handle is now closed via a
    context manager (it was leaked), and the vacuous ``len(...) >= 0``
    assertion now actually checks for a non-empty file.
    """
    def __init__(self, env_path):
        print("[Info]: Start to create maze env component from raw environment txt.")
        self.env_path = env_path
        self._get_raw_env()
        self._distil_raw_env()
        print("[Info]: Finish creating maze env component from raw environment txt.")
    def _get_raw_env(self):
        # Read the raw environment rows from the txt file.
        with open(self.env_path, "r") as fo:
            self.raw_env = [line.strip("\n") for line in fo.readlines()]
        # bug fix: original asserted len >= 0, which can never fail
        assert len(self.raw_env) > 0, "environment file is empty"
        self.row_num = len(self.raw_env)
        self.col_num = len(self.raw_env[0])
    def _distil_raw_env(self):
        # Extract state/start/goal/obstacle spaces from the raw grid
        # (obstacles are excluded from the state space).
        state_space = []
        start_state_space = []
        goal_state_space = []
        obstacle_space = []
        for row_idx in range(self.row_num):
            for col_idx in range(self.col_num):
                cell = self.raw_env[row_idx][col_idx]
                if cell != "*":
                    state_space.append([row_idx, col_idx])
                    if cell == "G":
                        goal_state_space.append([row_idx, col_idx])
                    if cell == "S":
                        start_state_space.append([row_idx, col_idx])
                else:
                    obstacle_space.append([row_idx, col_idx])
        self.state_space = state_space
        self.start_state_space = start_state_space
        self.goal_state_space = goal_state_space
        self.obstacle_space = obstacle_space
        self.action_space = [0, 1, 2, 3]
    def get_transition(self, state, action):
        """Return the next state after taking *action* in *state*.

        Goal states are absorbing; moves into walls or obstacles leave the
        state unchanged. Raises ValueError on an illegal state or action.
        """
        if state not in self.state_space:
            print("[Error]: Input state is illegal!")
            raise ValueError
        if action not in self.action_space:
            print("[Error]: Input action is illegal!")
            raise ValueError
        # if state is from goal state space, it is absorbing
        if state in self.goal_state_space:
            return state
        # action -> (row delta, col delta): 0=left, 1=up, 2=right, 3=down
        deltas = {0: (0, -1), 1: (-1, 0), 2: (0, 1), 3: (1, 0)}
        d_row, d_col = deltas[action]
        next_state = [state[0] + d_row, state[1] + d_col]
        if (next_state[0] not in range(self.row_num)) or (next_state[1] not in range(self.col_num)):  # consider wall situation
            next_state = state
        if next_state in self.obstacle_space:  # consider obstacle situation
            next_state = state
        return next_state
    def get_reward(self, state, action):
        """Return the reward for taking *action* in *state*: 0 at a goal, else -1.

        Raises ValueError on an illegal state or action.
        """
        if state not in self.state_space:
            print("[Error]: Input state is illegal!")
            raise ValueError
        if action not in self.action_space:
            print("[Error]: Input action is illegal!")
            raise ValueError
        return 0 if state in self.goal_state_space else -1
# subclass of environment for Value Iteration
class VIEnvironment(Environment):
    """Environment variant for Value Iteration: stateless, one-shot steps."""
    def __init__(self, env_path):
        super(VIEnvironment, self).__init__(env_path)
    def step(self, state, action):
        """Return (next_state, reward) for taking *action* in *state*."""
        next_state = self.get_transition(state, action)
        reward = self.get_reward(state, action)
        return next_state, reward
# subclass of environment for Q-Learning
class QLEnvironment(Environment):
    """Environment variant for Q-Learning: tracks the agent's current position."""
    def __init__(self, env_path):
        super(QLEnvironment, self).__init__(env_path)
        self.current_state = self.start_state_space[0]
    def step(self, action):
        """Advance the agent by *action*; return (next_state, reward, is_terminal)."""
        state = self.current_state
        next_state = self.get_transition(state, action)
        reward = self.get_reward(state, action)
        self.current_state = next_state
        is_terminal = 1 if next_state in self.goal_state_space else 0
        return next_state, reward, is_terminal
    def reset(self):
        """Move the agent back to the start state and return it."""
        self.current_state = self.start_state_space[0]
        return self.current_state
def parse_arguments():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--maze_input', type=str, default='../env/maze_0.txt',
                        help=' path to the environment input.txt described previously')
    parser.add_argument('--output_file', type=str, default='./output_file.txt',
                        help='path to output the value function')
    parser.add_argument('--action_seq_file', type=str, default='./action_seq_file.txt',
                        help='path to output the q_value function')
    return parser.parse_args()
if __name__ == '__main__':
    # parse arguments
    args = parse_arguments()
    # create env
    maze_env = QLEnvironment(args.maze_input)
    # get actions sequence
    # NOTE(review): the name `file` shadows the builtin; the handle is reused for input then output
    action_sequence = []
    file = open(args.action_seq_file, "r")
    for line in file.readlines():
        truncated_line = [ int(e) for e in line.strip("\n").split(" ")]
        action_sequence += truncated_line
    file.close()
    # interact with environment and record simultaneously
    file = open(args.output_file, "w")
    for action in action_sequence:
        next_state, reward, is_terminal = maze_env.step(action)
        file.write("{0} {1} {2} {3}\n".format(next_state[0], next_state[1], reward, is_terminal))
    file.close()
| [
"guoqing@6CR622896X.fareast.corp.microsoft.com"
] | guoqing@6CR622896X.fareast.corp.microsoft.com |
c5f90e6b7d34da90ccbe0f093d27dcc13b6555d3 | e6096162bcb6890258e2bd840ba5ec16481db47d | /project_database.py | 44b297e5361fc5af61a83bdf8ce3b55781fe5904 | [] | no_license | Crkonova76/data_presentation_project | 077323f1530975f7622916a366473c09810ad581 | 3feb7cc3ffe9b93cde546c1832bea5e9a67ac3fb | refs/heads/master | 2023-02-13T02:33:21.623802 | 2020-12-28T15:16:58 | 2020-12-28T15:16:58 | 325,030,004 | 0 | 0 | null | 2020-12-28T15:16:59 | 2020-12-28T14:12:25 | HTML | UTF-8 | Python | false | false | 4,023 | py | import mysql.connector
import dbConfig as cfg
class KidsDAO:
    """Data-access object for the ``kids`` table.

    Bug fix: ``delete`` executed its statement but never committed, so with
    mysql.connector's default autocommit=False the deletion was silently
    rolled back when the connection closed. Cursors are now also closed
    consistently after each operation.
    """
    db = ""
    def __init__(self):
        # open the connection with credentials from the config module
        self.db = mysql.connector.connect(
            host=cfg.mysql['host'],
            user=cfg.mysql['user'],
            password=cfg.mysql['password'],
            database=cfg.mysql['database']
        )
    def create(self, kid):
        """Insert *kid* (dict) and return the auto-generated registration id."""
        cursor = self.db.cursor()
        sql = "insert into kids (name, surname,belt,status,phoneNumber) values (%s,%s,%s,%s,%s)"
        values = [
            kid["name"],
            kid["surname"],
            kid["belt"],
            kid["status"],
            kid["phoneNumber"]
        ]
        cursor.execute(sql, values)
        self.db.commit()
        new_id = cursor.lastrowid
        cursor.close()
        return new_id
    def getAll(self):
        """Return every kid as a list of dicts."""
        cursor = self.db.cursor()
        cursor.execute("select * from kids")
        results = cursor.fetchall()
        cursor.close()
        return [self.convertToDict(result) for result in results]
    def findByID(self, registration):
        """Return the kid with the given registration id as a dict ({} if absent)."""
        cursor = self.db.cursor()
        cursor.execute("select * from kids where registration = %s", [registration])
        result = cursor.fetchone()
        cursor.close()
        return self.convertToDict(result)
    def update(self, kid):
        """Persist the given kid dict (keyed by its registration) and return it."""
        cursor = self.db.cursor()
        sql = "update kids set name=%s,surname=%s,belt=%s,status=%s,phoneNumber=%s where registration = %s"
        values = [
            kid["name"],
            kid["surname"],
            kid["belt"],
            kid["status"],
            kid["phoneNumber"],
            kid["registration"]
        ]
        cursor.execute(sql, values)
        self.db.commit()
        cursor.close()
        return kid
    def delete(self, registration):
        """Delete the kid with the given registration id; return {}."""
        cursor = self.db.cursor()
        cursor.execute("delete from kids where registration = %s", [registration])
        self.db.commit()  # bug fix: the delete was never committed before
        cursor.close()
        return {}
    def convertToDict(self, result):
        """Map a fetched row tuple to a dict keyed by column name ({} for a falsy row)."""
        colnames = ['registration','name','surname','belt','status','phoneNumber']
        kid = {}
        if result:
            for i, colName in enumerate(colnames):
                kid[colName] = result[i]
        return kid
kidsDAO = KidsDAO()  # shared module-level DAO instance (opens the DB connection at import time)
class AdminsDAO:
    """Data-access object for the ``admins`` table.

    Bug fix: ``deleteAdmin`` and ``deleteAll`` executed their statements
    but never committed, so with mysql.connector's default autocommit=False
    the deletions were silently rolled back. Cursors are also closed after
    each operation now.
    """
    db = ""
    def __init__(self):
        # open the connection with credentials from the config module
        self.db = mysql.connector.connect(
            host=cfg.mysql['host'],
            user=cfg.mysql['user'],
            password=cfg.mysql['password'],
            database=cfg.mysql['database']
        )
    def createAdmin(self, admin):
        """Insert *admin* (dict with UserName/Password) and return its new id."""
        cursor = self.db.cursor()
        sql = "insert into admins (UserName, Password) values (%s,%s)"
        values = [
            admin["UserName"],
            admin["Password"]
        ]
        cursor.execute(sql, values)
        self.db.commit()
        new_id = cursor.lastrowid
        cursor.close()
        return new_id
    def deleteAdmin(self, id):
        """Delete the admin with the given id (a falsy id is a no-op); return {}."""
        cursor = self.db.cursor()
        if id == False:
            cursor.close()
            return {}
        else:
            cursor.execute("delete from admins where id = %s", [id])
            self.db.commit()  # bug fix: the delete was never committed before
            cursor.close()
            return {}
    def deleteAll(self):
        """Delete every admin except the primary one (id 1); return {}."""
        cursor = self.db.cursor()
        cursor.execute("delete from admins where id != 1")
        self.db.commit()  # bug fix: the delete was never committed before
        cursor.close()
        return {}
    def getAllAdmins(self):
        """Return every admin as a list of dicts."""
        cursor = self.db.cursor()
        cursor.execute("select * from admins")
        results = cursor.fetchall()
        cursor.close()
        return [self.convertToDict(result) for result in results]
    def convertToDict(self, result):
        """Map a fetched row tuple to a dict keyed by column name ({} for a falsy row)."""
        colnames = ['id','UserName','Password']
        admin = {}
        if result:
            for i, colName in enumerate(colnames):
                admin[colName] = result[i]
        return admin
adminsDAO = AdminsDAO()  # shared module-level DAO instance (opens the DB connection at import time)
"47481671+Crkonova76@users.noreply.github.com"
] | 47481671+Crkonova76@users.noreply.github.com |
d987bc08b9f2ae79a58cbb0196f9030e865a8b6c | ebc44c117cb4251367d112605806733389b84f92 | /leetcode/top_k_frequent_mentioned_keywords.py | 03b57d81130ed3f7519ed33b3cff81638d8147ce | [] | no_license | darshann25/ProblemSolving | 9c5ac9fab11c59705d62b7ba2519e79da5348eb7 | a667ed1e12b5019cc7a2dfdbc2c7c31b984f3588 | refs/heads/master | 2022-09-14T14:58:44.932273 | 2022-09-11T20:12:52 | 2022-09-11T20:12:52 | 102,069,130 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py | # Review the following link for the question prompt: https://leetcode.com/discuss/interview-question/542597/
# O(N + nlog(n)) time | O(n) space
class Solution:
    def topKFrequentKeywords(self, k, keywords, reviews):
        """Return the k keywords mentioned in the most reviews.

        A keyword counts at most once per review, matching is
        case-insensitive, and frequency ties are broken alphabetically.

        Fixes two bugs in the original: ``dict.values()`` has no ``.sort()``
        in Python 3 (AttributeError), and ``str.replace`` was given a regex
        pattern (it only replaces literal text), so keywords adjacent to
        punctuation (e.g. "anacell,") were never matched.
        """
        import re  # local import: used to strip non-alphanumeric characters
        keyword_set = set(keywords)
        counts = {}
        for review in reviews:
            # normalize each word and de-duplicate within the review
            words = {re.sub('[^a-zA-Z0-9]', '', w) for w in review.lower().split()}
            for word in words & keyword_set:
                counts[word] = counts.get(word, 0) + 1
        # most frequent first; alphabetical order breaks frequency ties
        ranked = sorted(counts, key=lambda w: (-counts[w], w))
        return ranked[:k]
# ad-hoc smoke tests: print actual vs expected output for the two prompt examples
s = Solution()
k = 2
keywords = ["anacell", "cetracular", "betacellular"]
reviews = [
    "Anacell provides the best services in the city",
    "betacellular has awesome services",
    "Best services provided by anacell, everyone should use anacell",
]
expected_output = ["anacell", "betacellular"]
print('\nExample 1 :\nOutput: {}\nExpected Output: {}'.format(s.topKFrequentKeywords(k, keywords, reviews), expected_output))
k = 2
keywords = ["anacell", "betacellular", "cetracular", "deltacellular", "eurocell"]
reviews = [
    "I love anacell Best services; Best services provided by anacell",
    "betacellular has great services",
    "deltacellular provides much better services than betacellular",
    "cetracular is worse than anacell",
    "Betacellular is better than deltacellular.",
]
expected_output = ["betacellular", "anacell"]
print('\nExample 2 :\nOutput: {}\nExpected Output: {}'.format(s.topKFrequentKeywords(k, keywords, reviews), expected_output))
"darshanpatel25894@gmail.com"
] | darshanpatel25894@gmail.com |
38e297f2ecdcdafc8a850489ec195d720ca6a99a | fff5eeff850258b5208f41d4f6c3027044f5374a | /blog/tests/test_urls.py | e384ffffc42e1bbf5c436fcd0981a200d3649038 | [] | no_license | khabdrick/django-pytest | 3f4300f875ed4c6ad9d4fa1bb3bf0902c3e420e7 | 5ce5f5cd1973885dfa2d476b1817d00644e9b10c | refs/heads/main | 2023-04-01T17:10:22.220605 | 2021-04-20T17:27:43 | 2021-04-20T17:27:43 | 345,196,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from django.urls import reverse, resolve
class TestUrls:
    """URL-configuration tests for the blog app."""

    def test_post_content_url(self):
        """A reversed 'content' URL must resolve back to the same view name."""
        url_path = reverse("content", kwargs={'pk': 1})
        match = resolve(url_path)
        assert match.view_name == "content"
"muhamzyali@gmail.com"
] | muhamzyali@gmail.com |
b496628ec50dfc121bbe1d4915de7ca9d6b07bee | 73a80bcb68ff430b84a6f074ece384dc183126fc | /OasisWebDev/Oasis/apps.py | 2911b2c30aa5bccb25106a5ada6b10eb8a78e5c1 | [] | no_license | Abdullah-Aziz/WebDevTest | afd055f077d447c6611444af2afa545c239a6e35 | 02dd7161682316df696c40f08069b4920c90190f | refs/heads/main | 2023-04-02T22:17:42.141718 | 2021-04-03T10:44:24 | 2021-04-03T10:44:24 | 353,661,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class OasisConfig(AppConfig):
    """Django application configuration for the Oasis app."""

    name = 'Oasis'  # dotted module path Django uses to locate the app
| [
"55276894+Abdullah-Aziz@users.noreply.github.com"
] | 55276894+Abdullah-Aziz@users.noreply.github.com |
f4287ce21f7e16595d0c2b853e86aafe8ee2c8dd | 4169448f2242fe81705b6ad0b054a6256f1d3ea3 | /source/gui/ui_dataview.py | 4ed514c7cd47e8cc54d47fea4c719dd8b6a19676 | [
"Apache-2.0",
"MIT"
] | permissive | qinyuLT/starquant | 2bd0bbaf22a1a219275b37ec811c53f4156b8432 | 94b10ad9b520f79c41dceb2f37771d49fb1caaf2 | refs/heads/master | 2020-06-23T09:29:45.870235 | 2019-07-23T10:34:02 | 2019-07-23T10:34:02 | 198,584,794 | 0 | 1 | Apache-2.0 | 2019-07-24T07:45:36 | 2019-07-24T07:45:35 | null | UTF-8 | Python | false | false | 19,393 | py | from .ui_basic import CandlestickItem, VolumeItem
from ..common.constant import Interval
from ..data import database_manager
from ..common.utility import extract_full_symbol
from ..common.datastruct import Event
from ..data.data_board import BarGenerator
import sys
import numpy as np
from PyQt5 import QtCore, QtWidgets, QtGui
from datetime import timedelta, datetime
import pyqtgraph as pg
sys.path.insert(0, "../..")
class MarketDataView(QtWidgets.QWidget):
    """Composite market-data widget: candlestick chart plus an order-book panel.

    Incoming ticks arrive on ``tick_signal`` and are fanned out to both the
    chart and the order book; ``symbol_signal`` switches the displayed symbol.
    """

    tick_signal = QtCore.pyqtSignal(Event)
    symbol_signal = QtCore.pyqtSignal(str)

    def __init__(self, sym: str = ""):
        """Build the widget; *sym* is the initial full symbol to display."""
        super().__init__()
        # Fix: the original assigned "" here, silently discarding *sym*.
        # The default value "" preserves the old behaviour for callers
        # that pass no argument.
        self.full_symbol = sym
        self.init_ui()
        self.register_event()

    def init_ui(self):
        """Lay out a scrollable chart (left) next to the order book (right)."""
        self.datachart = QuotesChart(self.full_symbol)
        self.orderbook = OrderBookWidget()
        self.scroll = QtWidgets.QScrollArea()
        self.scroll.setWidget(self.datachart)
        self.scroll.setWidgetResizable(True)
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(self.scroll)
        hbox.addWidget(self.orderbook)
        self.setLayout(hbox)

    def register_event(self):
        """Wire Qt signals between this view, the order book and the chart."""
        self.tick_signal.connect(self.orderbook.tick_signal.emit)
        self.tick_signal.connect(self.datachart.on_tick)
        self.symbol_signal.connect(self.orderbook.symbol_signal.emit)
        # Symbol changes confirmed by the order book reset the chart;
        # history-depth changes trigger a data reload.
        self.orderbook.symbol_signal.connect(self.datachart.reset)
        self.orderbook.day_signal.connect(self.datachart.reload)
class DateAxis(pg.AxisItem):
    """Axis whose tick labels come from a tick-value -> date lookup table."""

    def __init__(self, dates: dict, *args, **kwargs):
        """Store the tick-value -> date mapping and forward to AxisItem."""
        super().__init__(*args, **kwargs)
        self.dates = dates

    def tickStrings(self, values, scale, spacing):
        """Translate each tick value into its date label ('' when unknown)."""
        return [str(self.dates.get(value, "")) for value in values]
class DateAxis2(pg.AxisItem):
    """Axis that labels tick positions with bar timestamps from a bar list."""

    def __init__(self, datalist: list, *args, **kwargs):
        """Keep a reference to the shared bar list and forward to AxisItem."""
        super().__init__(*args, **kwargs)
        self.data = datalist

    def tickStrings(self, values, scale, spacing):
        """Map tick positions onto bar datetimes.

        Stops at the first position outside the bar list and returns the
        labels collected so far (same early-exit as the original code).
        """
        labels = []
        offset = 0  # x position corresponding to the first bar
        last_index = len(self.data) - 1
        for tick_value in values:
            index = tick_value - offset
            if index < 0 or index > last_index:
                break
            bar_time = self.data[int(index)].datetime
            labels.append(bar_time.strftime('%H:%M\n %b-%d '))
        return labels
class PriceAxis(pg.AxisItem):
    """Right-hand y-axis that formats price ticks with spacing-aware precision."""

    def __init__(self):
        """Create a right-oriented axis and bias its tick-label fill limits."""
        super().__init__(orientation='right')
        self.style.update({'textFillLimits': [(0, 0.8)]})

    def tickStrings(self, vals, scale, spacing):
        """Format tick values; decimal places grow as tick spacing shrinks."""
        decimals = max(0, np.ceil(-np.log10(spacing * scale)))
        template = '{:<8,.%df}' % decimals
        # Thousands separators are rendered as spaces rather than commas.
        return [template.format(tick).replace(',', ' ') for tick in vals]
class VolumeAxis(pg.AxisItem):
    """Right-hand y-axis used for the volume plot."""

    def __init__(self):
        """Create a right-oriented axis and bias its tick-label fill limits."""
        super().__init__(orientation='right')
        self.style.update({'textFillLimits': [(0, 0.8)]})

    def tickStrings(self, vals, scale, spacing):
        """Format tick values; decimal places grow as tick spacing shrinks."""
        decimals = max(0, np.ceil(-np.log10(spacing * scale)))
        template = '{:<8,.%df}' % decimals
        # Thousands separators are rendered as spaces rather than commas.
        return [template.format(tick).replace(',', ' ') for tick in vals]
class OpenInterestAxis(pg.AxisItem):
    """Left-hand y-axis used for the open-interest overlay."""

    def __init__(self):
        """Create a left-oriented axis and bias its tick-label fill limits."""
        super().__init__(orientation='left')
        self.style.update({'textFillLimits': [(0, 0.8)]})

    def tickStrings(self, vals, scale, spacing):
        """Format tick values; decimal places grow as tick spacing shrinks."""
        decimals = max(0, np.ceil(-np.log10(spacing * scale)))
        template = '{:<8,.%df}' % decimals
        # Thousands separators are rendered as spaces rather than commas.
        return [template.format(tick).replace(',', ' ') for tick in vals]
CHART_MARGINS = (0, 0, 20, 10)
class QuotesChart(QtGui.QWidget):
    """Candlestick + volume/open-interest chart for one full symbol.

    Historical bars are loaded from ``database_manager``; live ticks are
    aggregated into bars by a ``BarGenerator`` and appended incrementally.
    """

    signal = QtCore.pyqtSignal(Event)
    # Pens/brushes for long (green) and short (red) markers.
    long_pen = pg.mkPen('#006000')
    long_brush = pg.mkBrush('#00ff00')
    short_pen = pg.mkPen('#600000')
    short_brush = pg.mkBrush('#ff0000')
    zoomIsDisabled = QtCore.pyqtSignal(bool)

    def __init__(self, symbol: str = ""):
        """Load history for *symbol* and build the plot widgets."""
        super().__init__()
        self.full_symbol = symbol
        self.data = []  # list of bars; shared by reference with the plot items
        self.bg = BarGenerator(self.on_bar)
        self.layout = QtGui.QVBoxLayout(self)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.chart = None
        self.charv = None
        self.load_bar()
        self.plot()

    def reset(self, symbol: str):
        """Switch the chart to *symbol*: reload history and redraw in place."""
        # if not self.layout.isEmpty():
        #     self.layout.removeWidget(self.splitter)
        #     self.splitter.deleteLater()
        # self.full_symbol = symbol
        # self.bg = BarGenerator(self.on_bar)
        # self.load_bar()
        # self.plot()
        self.full_symbol = symbol
        self.bg = BarGenerator(self.on_bar)
        self.load_bar()
        # Plot items hold a reference to self.data, so regenerating their
        # pictures after load_bar() is enough — no need to rebuild widgets.
        self.klineitem.generatePicture()
        self.volumeitem.generatePicture()
        self.oicurve.setData([bar.open_interest for bar in self.data])

    def plot(self):
        """Create the axis, candlestick, volume and open-interest items."""
        self.xaxis = DateAxis2(self.data, orientation='bottom')
        self.xaxis.setStyle(
            tickTextOffset=7, textFillLimits=[(0, 0.80)], showValues=True
        )
        self.klineitem = CandlestickItem(self.data)
        self.volumeitem = VolumeItem(self.data)
        self.oicurve = pg.PlotCurveItem(
            [bar.open_interest for bar in self.data], pen='w')
        self.init_chart()
        self.init_chart_item()

    def load_bar(self, days: int = 1, interval: Interval = Interval.MINUTE):
        """Replace self.data with the last *days* of bars at *interval*."""
        symbol, exchange = extract_full_symbol(self.full_symbol)
        end = datetime.now()
        start = end - timedelta(days)
        # Guard against a negative *days* producing start > end.
        if start > end:
            tmp = end
            end = start
            start = tmp
        bars = database_manager.load_bar_data(
            symbol=symbol,
            exchange=exchange,
            interval=interval,
            start=start,
            end=end,
        )
        # Mutate in place so plot items sharing the list see the new bars.
        self.data.clear()
        self.data.extend(bars)

    def reload(self, count=1):
        """Reload *count* days of history and redraw all plot items."""
        self.load_bar(days=count)
        self.klineitem.generatePicture()
        self.volumeitem.generatePicture()
        self.oicurve.setData([bar.open_interest for bar in self.data])

    def on_bar(self, bar):
        """Append a freshly completed bar (from BarGenerator) to the chart."""
        self.data.append(bar)
        self.klineitem.on_bar(bar)
        self.volumeitem.on_bar(bar)
        self.oicurve.setData([abar.open_interest for abar in self.data])
        # self.xaxis.on_bar(bar)
        # self.tmax += 1
        # self.pmax = max(self.pmax,bar.high_price)
        # self.pmin = min(self.pmin,bar.low_price)
        # self.chart.setLimits(
        #     xMin=self.tmin,
        #     xMax=self.tmax,
        #     minXRange=60,
        #     yMin=self.pmin * 0.95,
        #     yMax=self.pmax * 1.05,
        # )

    def on_tick(self, tickevent):
        """Feed a tick for this chart's symbol into the bar generator."""
        tick = tickevent.data
        if tick.full_symbol == self.full_symbol:
            self.bg.update_tick(tick)

    def init_chart_item(self):
        """Attach the candlestick and volume items to their plot widgets."""
        self.chart.addItem(self.klineitem)
        # barf = self.data[0]
        # bare = self.data[-1]
        # self.tmin = 60*(barf.datetime.hour - 9) + barf.datetime.minute - 20
        # self.tmax = 60*(bare.datetime.hour - 9) + bare.datetime.minute + 20
        # self.pmax = 0
        # self.pmin = 999999
        # for bar in self.data:
        #     self.pmax = max(self.pmax,bar.high_price)
        #     self.pmin = min(self.pmin,bar.low_price)
        # self.chart.setLimits(
        #     xMin=self.tmin,
        #     xMax=self.tmax,
        #     minXRange=60,
        #     yMin=self.pmin * 0.95,
        #     yMax=self.pmax * 1.05,
        # )
        self.chartv.addItem(self.volumeitem)
        # self.chart.setCursor(QtCore.Qt.BlankCursor)
        # self.chart.sigXRangeChanged.connect(self._update_yrange_limits)

    # def _update_yrange_limits(self):
    #     vr = self.chart.viewRect()
    #     lbar, rbar = max(0,int(vr.left())), min(len(self.data),int(vr.right()))
    #     bars = self.data[lbar:rbar]
    #     pmax = 0
    #     pmin = 999999
    #     pmean = 0
    #     for bar in bars:
    #         pmax = max(pmax,bar.high_price)
    #         pmin = min(pmin,bar.low_price)
    #         pmean += bar.close_price
    #     pmean = pmean/(len(bars))
    #     ylow = pmin * 0.95
    #     yhigh = pmax * 1.05
    #     print(pmax-pmean)
    #     self.chart.setLimits(yMin=ylow, yMax=yhigh, minYRange= 0.5*abs(pmax-pmean))
    #     self.chart.setYRange(ylow, yhigh)

    def init_chart(self):
        """Build the two stacked plot widgets (price on top, volume below).

        The volume plot carries a second, left-axis ViewBox that overlays
        the open-interest curve; the two x-axes are linked.
        """
        self.splitter = QtGui.QSplitter(QtCore.Qt.Vertical)
        self.splitter.setHandleWidth(0)
        self.layout.addWidget(self.splitter)
        self.chart = pg.PlotWidget(
            parent=self.splitter,
            axisItems={'bottom': self.xaxis, 'right': PriceAxis()},
            enableMenu=True,
        )
        self.chart.getPlotItem().setContentsMargins(0, 0, 20, 0)
        self.chart.setFrameStyle(QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain)
        self.chart.hideAxis('left')
        self.chart.showAxis('right')
        self.chart.showGrid(x=True, y=True)
        self.chartv = pg.PlotWidget(
            parent=self.splitter,
            axisItems={'bottom': self.xaxis,
                       'right': VolumeAxis(), 'left': OpenInterestAxis()},
            enableMenu=True,
        )
        self.chartv.getPlotItem().setContentsMargins(0, 0, 15, 15)
        self.chartv.setFrameStyle(
            QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain)
        # self.chartv.hideAxis('left')
        self.chartv.showAxis('left')
        self.chartv.showAxis('right')
        self.chartv.setXLink(self.chart)
        # Overlay ViewBox for open interest, driven by the left axis.
        self.chartoi = pg.ViewBox()
        p1 = self.chartv.getPlotItem()
        p1.scene().addItem(self.chartoi)
        p1.getAxis('left').linkToView(self.chartoi)
        self.chartoi.setXLink(p1)
        # Keep the overlay geometry in sync when the main view resizes.
        p1.vb.sigResized.connect(self.updateViews)
        self.chartoi.addItem(self.oicurve)

    def updateViews(self):
        """Resize the open-interest overlay to match the volume plot."""
        p1 = self.chartv.getPlotItem()
        p2 = self.chartoi
        p2.setGeometry(p1.vb.sceneBoundingRect())
        p2.linkedViewChanged(p1.vb, p2.XAxis)
class OrderBookWidget(QtWidgets.QWidget):
    """Five-level order-book panel with history-reload controls.

    Receives ticks on ``tick_signal``; re-emits the active symbol on
    ``symbol_signal`` and the requested history depth (days) on
    ``day_signal``.
    """

    tick_signal = QtCore.pyqtSignal(Event)
    symbol_signal = QtCore.pyqtSignal(str)
    day_signal = QtCore.pyqtSignal(int)

    def __init__(self):
        """Build the widget, wire signals and blank all labels."""
        super().__init__()
        self.full_symbol = ""
        self.init_ui()
        self.register_event()
        self.clear_label_text()

    def init_ui(self):
        """Create the symbol box, bid/ask level labels and history controls."""
        self.symbol_line = QtWidgets.QLineEdit()
        # self.symbol_line.setReadOnly(True)
        self.symbol_line.returnPressed.connect(self.process_symbol)
        self.change_label = self.create_label(alignment=QtCore.Qt.AlignRight)
        self.open_label = self.create_label(alignment=QtCore.Qt.AlignRight)
        self.low_label = self.create_label(alignment=QtCore.Qt.AlignRight)
        self.high_label = self.create_label(alignment=QtCore.Qt.AlignRight)
        # Pink for bids, green for asks.
        bid_color = "rgb(255,174,201)"
        ask_color = "rgb(160,255,160)"
        self.uplimit_label = self.create_label()
        # Bid price labels, levels 1 (best) to 5.
        self.bp1_label = self.create_label(bid_color)
        self.bp2_label = self.create_label(bid_color)
        self.bp3_label = self.create_label(bid_color)
        self.bp4_label = self.create_label(bid_color)
        self.bp5_label = self.create_label(bid_color)
        # Bid volume labels, right-aligned.
        self.bv1_label = self.create_label(
            bid_color, alignment=QtCore.Qt.AlignRight)
        self.bv2_label = self.create_label(
            bid_color, alignment=QtCore.Qt.AlignRight)
        self.bv3_label = self.create_label(
            bid_color, alignment=QtCore.Qt.AlignRight)
        self.bv4_label = self.create_label(
            bid_color, alignment=QtCore.Qt.AlignRight)
        self.bv5_label = self.create_label(
            bid_color, alignment=QtCore.Qt.AlignRight)
        # Ask price labels, levels 1 (best) to 5.
        self.ap1_label = self.create_label(ask_color)
        self.ap2_label = self.create_label(ask_color)
        self.ap3_label = self.create_label(ask_color)
        self.ap4_label = self.create_label(ask_color)
        self.ap5_label = self.create_label(ask_color)
        # Ask volume labels, right-aligned.
        self.av1_label = self.create_label(
            ask_color, alignment=QtCore.Qt.AlignRight)
        self.av2_label = self.create_label(
            ask_color, alignment=QtCore.Qt.AlignRight)
        self.av3_label = self.create_label(
            ask_color, alignment=QtCore.Qt.AlignRight)
        self.av4_label = self.create_label(
            ask_color, alignment=QtCore.Qt.AlignRight)
        self.av5_label = self.create_label(
            ask_color, alignment=QtCore.Qt.AlignRight)
        self.lplimit_lable = self.create_label()  # NOTE: 'lable' typo kept — renaming would break external users
        self.lp_label = self.create_label()
        self.size_label = self.create_label(alignment=QtCore.Qt.AlignRight)
        self.last_volume = 0  # cumulative volume at the previous tick
        form2 = QtWidgets.QFormLayout()
        historylabel = self.create_label(alignment=QtCore.Qt.AlignCenter)
        historylabel.setText('History Data')
        form2.addRow(historylabel)
        # History depth in days; integers only, Enter triggers reload.
        self.histbar_day = QtWidgets.QLineEdit()
        self.histbar_day.setValidator(QtGui.QIntValidator())
        self.histbar_day.setText('1')
        self.histbar_day.returnPressed.connect(self.process_days)
        self.last_days = 1
        form2.addRow('Days', self.histbar_day)
        self.intervalcombo = QtWidgets.QComboBox()
        self.intervalcombo.addItems(["1m", "1h"])
        form2.addRow('Interval', self.intervalcombo)
        self.indicators = QtWidgets.QComboBox()
        self.indicators.addItems(['ma', 'sma'])
        form2.addRow('Indicator', self.indicators)
        form2.addItem(QtWidgets.QSpacerItem(
            40, 40, QtWidgets.QSizePolicy.Expanding))
        titlelabel = self.create_label(alignment=QtCore.Qt.AlignCenter)
        titlelabel.setText('OrderBook')
        # pricelable = self.create_label(alignment=QtCore.Qt.AlignLeft)
        # pricelable.setText('Price')
        # volumelable = self.create_label(alignment=QtCore.Qt.AlignRight)
        # volumelable.setText('Volume')
        # verticalSpacer = QtWidgets.QSpacerItem(10, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        # Ladder layout: asks 5..1 on top, last trade in the middle,
        # bids 1..5 underneath.
        form2.addRow(titlelabel)
        form2.addRow(self.symbol_line)
        form2.addRow(self.change_label, self.open_label)
        form2.addRow(self.low_label, self.high_label)
        form2.addRow(self.uplimit_label)
        form2.addRow(self.ap5_label, self.av5_label)
        form2.addRow(self.ap4_label, self.av4_label)
        form2.addRow(self.ap3_label, self.av3_label)
        form2.addRow(self.ap2_label, self.av2_label)
        form2.addRow(self.ap1_label, self.av1_label)
        form2.addRow(self.lp_label, self.size_label)
        form2.addRow(self.bp1_label, self.bv1_label)
        form2.addRow(self.bp2_label, self.bv2_label)
        form2.addRow(self.bp3_label, self.bv3_label)
        form2.addRow(self.bp4_label, self.bv4_label)
        form2.addRow(self.bp5_label, self.bv5_label)
        form2.addRow(self.lplimit_lable)
        # Overall layout
        vbox = QtWidgets.QVBoxLayout()
        vbox.addLayout(form2)
        self.setLayout(vbox)
        self.setFixedWidth(160)
        # self.setFixedSize(160,500)

    def create_label(self, color: str = "", alignment: int = QtCore.Qt.AlignLeft):
        """
        Create label with certain font color.
        """
        label = QtWidgets.QLabel()
        if color:
            label.setStyleSheet(f"color:{color}")
        label.setAlignment(alignment)
        return label

    def register_event(self):
        """Connect the incoming signals to their handlers."""
        self.tick_signal.connect(self.process_tick_event)
        self.symbol_signal.connect(self.set_full_symbol)

    def process_tick_event(self, tickevent: Event):
        """Refresh every label from a tick for the currently shown symbol."""
        tick = tickevent.data
        if not tick:
            return
        if (tick.full_symbol != self.full_symbol):
            return
        self.lp_label.setText(str(tick.last_price))
        self.open_label.setText(str(tick.open_price))
        self.low_label.setText(str(tick.low_price))
        self.high_label.setText(str(tick.high_price))
        # tick.volume is cumulative; show the delta since the last tick.
        self.size_label.setText(str(tick.volume - self.last_volume))
        self.last_volume = tick.volume
        self.bp1_label.setText(str(tick.bid_price_1))
        self.bv1_label.setText(str(tick.bid_volume_1))
        self.ap1_label.setText(str(tick.ask_price_1))
        self.av1_label.setText(str(tick.ask_volume_1))
        self.uplimit_label.setText(str(tick.limit_up))
        self.lplimit_lable.setText(str(tick.limit_down))
        if tick.pre_close != 0.0:
            # Percentage change relative to the previous close.
            r = (tick.last_price / tick.pre_close - 1) * 100
            self.change_label.setText(f"{r:.2f}%")
        # Levels 2-5 are only present on five-deep feeds.
        if tick.depth == 5:
            self.bp2_label.setText(str(tick.bid_price_2))
            self.bv2_label.setText(str(tick.bid_volume_2))
            self.ap2_label.setText(str(tick.ask_price_2))
            self.av2_label.setText(str(tick.ask_volume_2))
            self.bp3_label.setText(str(tick.bid_price_3))
            self.bv3_label.setText(str(tick.bid_volume_3))
            self.ap3_label.setText(str(tick.ask_price_3))
            self.av3_label.setText(str(tick.ask_volume_3))
            self.bp4_label.setText(str(tick.bid_price_4))
            self.bv4_label.setText(str(tick.bid_volume_4))
            self.ap4_label.setText(str(tick.ask_price_4))
            self.av4_label.setText(str(tick.ask_volume_4))
            self.bp5_label.setText(str(tick.bid_price_5))
            self.bv5_label.setText(str(tick.bid_volume_5))
            self.ap5_label.setText(str(tick.ask_price_5))
            self.av5_label.setText(str(tick.ask_volume_5))

    def process_symbol(self):
        """Emit the symbol typed into the line edit (on Enter)."""
        sym = self.symbol_line.text()
        if sym:
            self.symbol_signal.emit(sym)

    def process_days(self):
        """Emit the new history depth, but only when it actually changed."""
        days = int(self.histbar_day.text())
        if days != self.last_days:
            self.last_days = days
            self.day_signal.emit(days)

    def set_full_symbol(self, symbol: str):
        """
        Set the tick depth data to monitor by full_symbol.
        """
        # Update name line widget and clear all labels
        self.full_symbol = symbol
        self.symbol_line.setText(symbol)
        self.clear_label_text()

    def clear_label_text(self):
        """
        Clear text on all labels.
        """
        self.lp_label.setText("Last")
        self.change_label.setText("Change")
        self.lplimit_lable.setText('LowerLimit')
        self.uplimit_label.setText('UpperLimit')
        self.open_label.setText('Open')
        self.low_label.setText('Low')
        self.high_label.setText('High')
        self.size_label.setText('Volume')
        self.bv1_label.setText("")
        self.bv2_label.setText("")
        self.bv3_label.setText("")
        self.bv4_label.setText("")
        self.bv5_label.setText("")
        self.av1_label.setText("")
        self.av2_label.setText("")
        self.av3_label.setText("")
        self.av4_label.setText("")
        self.av5_label.setText("")
        self.bp1_label.setText("")
        self.bp2_label.setText("")
        self.bp3_label.setText("")
        self.bp4_label.setText("")
        self.bp5_label.setText("")
        self.ap1_label.setText("")
        self.ap2_label.setText("")
        self.ap3_label.setText("")
        self.ap4_label.setText("")
        self.ap5_label.setText("")
"dr.wb@qq.com"
] | dr.wb@qq.com |
f8d59c6ca9fd35d1ae26c1df1229ccb6f3063274 | 5d93a66d6b3e5c41e59f6e6423c21d898eb0952b | /macro_expansion.py | 4f1f7ec9b2f16d688b11ccf6f1610d5b65244b04 | [] | no_license | samikprakash/Assembler | 5b9270a9fb1849941f478d5d9feac61098a743f7 | 25dc551d73ea20f57be19b9d1e796f33592a4bc4 | refs/heads/master | 2022-02-22T23:17:15.139614 | 2019-10-14T08:34:06 | 2019-10-14T08:34:06 | 213,049,148 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | file=open("sample_input.txt","r")
expanded_macros_file=open("expanded_macros.txt","w")
macro_table={}
macro_keyword="MACRO"
macro_end_keyword="ENDM"
macro_found=False
for i in file.readlines():
if i!='\n':
# print(i)
currline=i.rstrip('\n')
currline=currline.split(" ")
currline=list(filter(lambda a: a != '', currline))
print(currline)
if macro_found:
if macro_end_keyword not in currline:
new_macro_def.append(currline)
else:
macro_table[macro_name]=new_macro_def
macro_found=False
continue
elif macro_keyword in currline:
new_macro_def=[]
macro_found=True
macro_name=currline[0]
elif currline[0] in macro_table:
found_macro_name=currline[0]
for k in macro_table[found_macro_name]:
for j in k:
expanded_macros_file.write(j+" ")
expanded_macros_file.write("\n")
else:
for j in currline:
expanded_macros_file.write(j+" ")
expanded_macros_file.write("\n")
print(macro_table)
| [
"noreply@github.com"
] | samikprakash.noreply@github.com |
a2f8578a6f605d88953c0c76cf81bd20e1fb1639 | b4adc6cee4abab25137ef781ef3b4b4e9e402ca5 | /skoods/race.py | 1dfa877e04993f9c1a10d9ba33ea3b998631e30b | [
"MIT"
] | permissive | allan-cedric/deep_learning_skoods | f59dbebb7be72c8c2476685ba56a6b03b56c75a5 | aa146322d6aa093e434d7f7546280aec69c30120 | refs/heads/main | 2023-04-15T03:54:51.816696 | 2021-04-26T16:46:41 | 2021-04-26T16:46:41 | 329,897,871 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,371 | py | ##########################################
### Skoods.org -> Self-Racing Car Team ###
##########################################
import airsim, math, time
class Race:
    """Interactive harness that runs one or more AirSim cars around a track,
    timing laps and detecting race completion.

    The constructor prompts on stdin for a run mode and (for modes 2-4) a
    simulation-pacing choice.
    """

    def __init__(self, sample_time):
        # sample_time: seconds advanced per playSimulation() step when paused.
        self.sample_time = sample_time
        # Per-car parallel lists, filled in by setCars():
        # race state (-1 on start straight, 0 mid-lap, 1 finished),
        # laps completed, per-lap time dicts, and lap-start timestamps.
        self.cars_race_states = []
        self.laps_completed = []
        self.lap_times = []
        self.past_accum_time = []
        self.client = airsim.CarClient()
        self.client.confirmConnection()
        # Set Mode and Simulation Abstraction
        self.mode_input = input("Type **1** to drive the car by your own and RECORD data (will load the first car), "+ \
            "**2** to QUALIFY (will load the first car), "+ \
            "**3** to RACE (accepts multiple cars), "+ \
            "**4** to DATASET RECORDING (will load the first car) ")
        if self.mode_input == '1':
            print("Race || MODE: WAYPONTS RECORDING")
        elif self.mode_input == '2':
            print("Race || MODE: QUALIFY")
        elif self.mode_input == '3':
            print("Race || MODE: RACE")
        # New mode
        elif self.mode_input == '4':
            print("Race || MODE: DATASET RECORDING")
        else:
            print("Race || Warning - MODE: Not defined")
        # NOTE(review): self.pause_simulation is only assigned for modes
        # 2/3/4 with a valid pacing choice — playSimulation() would raise
        # AttributeError in mode 1. Confirm mode 1 never calls it.
        if self.mode_input in ['2','3','4']:
            pause_simulation_input = input("Type **1** to PAUSE SIMULATION to process data during racing (better performance but takes longer) or "+ \
                "**2** to run the racing in REAL-TIME (worse performance but faster): ")
            if pause_simulation_input == '1': # Pause
                print("Race || SIMULATION: Pause")
                self.pause_simulation = True
                self.accum_time = 0.0
                self.client.simPause(True)
            elif pause_simulation_input == '2': # Real-time
                print("Race || SIMULATION: Real-Time")
                self.pause_simulation = False
                self.client.simPause(False)
            else:
                print("RACE || Warning - SIMULATION: Not defined")

    def setInitialTime(self):
        """Record the wall-clock start used by real-time lap timing."""
        self.initial_time = time.time()

    def setCars(self, cars):
        """Register the competing cars and initialise per-car bookkeeping."""
        self.cars = cars
        for each_car in self.cars:
            self.cars_race_states.append(-1)
            self.laps_completed.append(0)
            car_dict = {'car_name' : each_car.name}
            self.lap_times.append(car_dict)
            self.past_accum_time.append(0.0)

    def setNumberOfLaps(self, number_of_laps):
        """Set how many laps each car must complete to finish the race."""
        self.number_of_laps = number_of_laps

    def playSimulation(self):
        """Advance race time by one step.

        Paused mode: unpause for sample_time seconds, then re-pause and add
        sample_time to the accumulated clock. Real-time mode: derive the
        accumulated clock from wall time since setInitialTime().
        """
        if self.pause_simulation:
            self.client.simPause(False)
            time.sleep(self.sample_time)
            self.client.simPause(True)
            self.accum_time += self.sample_time
        else:
            self.accum_time = time.time() - self.initial_time

    def updateRaceParameters(self):
        """Update lap counts/times from car positions; return False when all
        cars have finished (also triggers endRace())."""
        keep_racing = True
        distances_from_start_point = []
        for i, each_car in enumerate(self.cars):
            x_val = each_car.state.kinematics_estimated.position.x_val
            y_val = each_car.state.kinematics_estimated.position.y_val
            # Distance from the start/finish point at the world origin.
            distances_from_start_point.append(math.sqrt(x_val**2 + y_val**2))
            # Car has left the start zone: arm lap detection.
            if distances_from_start_point[i] > 50.0 and self.cars_race_states[i] == -1:
                self.cars_race_states[i] = 0
            # Car re-entered the start zone: one lap completed.
            if distances_from_start_point[i] < 2.0 and self.cars_race_states[i] == 0:
                self.cars_race_states[i] = -1
                self.laps_completed[i] += 1
                self.lap_times[i][str(self.laps_completed[i])] = self.accum_time - self.past_accum_time[i]
                self.past_accum_time[i] = self.accum_time
                print(self.lap_times[i])
            if self.laps_completed[i] >= self.number_of_laps:
                self.cars_race_states[i] = 1
        # All states equal 1 exactly when every car has finished.
        if sum(self.cars_race_states) == len(self.cars_race_states):
            keep_racing = False
            self.endRace()
        return keep_racing

    def endRace(self):
        """Print final lap times and leave the simulator unpaused."""
        print("RACE completed!")
        for each_lap_time in self.lap_times:
            print(each_lap_time)
        print("Upload your code: www.skoods.org")
        self.client.simPause(False)
| [
"allan.py3000@gmail.com"
] | allan.py3000@gmail.com |
a9413ae193a4ef5a0b48ae6ca9c3cd95dd316e36 | ed0ba52f8260f8648f643d2c426aa0157083521a | /Python/Problem Set Volumes (100...1999)/272 - TEX Quotes.py | cd19eeadbc3bdebf4967f84980061b81942bfae7 | [] | no_license | frendydw/uva-online-judge-solutions | eba0f865174ebe1a8ccdfcbb0bbd8e8c01074ceb | e927eaee178026a510d49c9e556ed50c525a6217 | refs/heads/master | 2023-04-06T17:58:04.910703 | 2021-04-16T05:30:56 | 2021-04-16T05:30:56 | 357,031,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | flag = 1
while True:
tempSentence = []
try:
sentence = input()
except EOFError:
break
for char in sentence:
if char == "\"":
if flag == 1:
tempSentence.append("``")
flag = 2
else:
tempSentence.append("''")
flag = 1
else:
tempSentence.append(char)
print(''.join(tempSentence)) | [
"frendydw@gmail.com"
] | frendydw@gmail.com |
f81db419c1acc5dc9afc53a092cf3fe95710a2b4 | 67868ee44c0c7da114185eb1f23834e7d4378058 | /turtle car racing/player.py | e04685e37b781877baf6433fea88574b6ffed3b9 | [] | no_license | Nicolae77/Turtle-Car-Racing | 7344b9f77bdb785308eb7649230036b40b4bb305 | 1926e7cbfab31c332f6215d3235bd0e5269620ef | refs/heads/main | 2023-05-04T17:51:23.226973 | 2021-05-25T13:08:39 | 2021-05-25T13:08:39 | 370,696,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | from turtle import Turtle
STARTING_POSITION = (0, -280)  # bottom-centre of the playing field
MOVE_DISTANCE = 10             # pixels advanced per forward step
FINISH_LINE_Y = 280            # y coordinate of the finish line


class Player(Turtle):
    """The player's turtle: starts at the bottom edge and races upward."""

    def __init__(self):
        super().__init__()
        self.shape("turtle")
        self.penup()  # don't draw a trail while moving
        self.goto(STARTING_POSITION)
        self.setheading(90)  # face north so forward() moves up the screen

    def go_up(self):
        """Advance one step toward the finish line."""
        self.forward(MOVE_DISTANCE)

    def go_to_start(self):
        """Reset the turtle to the starting position."""
        self.goto(STARTING_POSITION)

    def is_at_finish(self):
        """Return True once the turtle has crossed the finish line."""
        # Idiom fix: return the comparison directly instead of an
        # if/else that returns True/False.
        return self.ycor() > FINISH_LINE_Y
| [
"noreply@github.com"
] | Nicolae77.noreply@github.com |
d5407f18cedfdc2dbbf47f984945f26300494bc7 | 6c9da59813d2677d4d9cdede90bb2446f542f66f | /faceRecognition.py | ee78aa93cecfebcc3bed370119aa8e43134c7565 | [] | no_license | NascimentoG/vision | d2801080baf2650ee4d9c55181b6a888358dec37 | 3ad8c19e9438097ee9f33204e205d9c96464f3a3 | refs/heads/master | 2020-04-03T18:09:47.470861 | 2018-11-04T13:02:44 | 2018-11-04T13:02:44 | 155,473,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | import cv2
import sys
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
video_capture = cv2.VideoCapture(0)

# Fixes over the original: the capture device is now released and the
# preview windows destroyed on exit (they never were), the read() result
# is checked instead of ignored, dead commented-out second-camera code is
# removed, and sys.exit() inside the loop is replaced by a clean break.
try:
    while True:
        # Capture frame-by-frame; stop if the camera yields no frame.
        retval, frame = video_capture.read()
        if not retval:
            break
        # Convert to grayscale for the Haar cascade.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect features specified in Haar Cascade.
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(35, 35)
        )
        # Draw a rectangle around recognized faces.
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255), 2)
        # Display the resulting frame.
        cv2.imshow('Video', frame)
        # Exit the camera view on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Always free the camera and close the preview window.
    video_capture.release()
    cv2.destroyAllWindows()
| [
"gus_nascimento98@hotmail.com"
] | gus_nascimento98@hotmail.com |
ec2f321127e4a1f870d4e4c9b178002ea220402a | d74ccf6290b7acb0011fd9b9132cd8beac0bd9d3 | /back/movies/migrations/0003_movie_like_users.py | 13f3f2abdab2d74e4da72d3a07d59fe254a85fc1 | [] | no_license | gaberani/final_netflix | a0687c9cec9157712c9fe2a8627d3624e5fe00b6 | 637016fd6a0c589f1ff96ed5e9225deffc8f18cb | refs/heads/master | 2022-11-09T10:42:22.460795 | 2020-06-21T00:30:21 | 2020-06-21T00:30:21 | 272,981,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # Generated by Django 2.1.15 on 2020-06-15 11:38
from django.conf import settings
from django.db import migrations, models
# Auto-generated migration: adds the many-to-many "like_users" field to Movie.
class Migration(migrations.Migration):

    dependencies = [
        # Must run after the (swappable) user model exists and after
        # the movies app's 0002_comment migration.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('movies', '0002_comment'),
    ]

    operations = [
        migrations.AddField(
            model_name='movie',
            name='like_users',
            # Reverse accessor on the user model is `like_movies`.
            field=models.ManyToManyField(related_name='like_movies', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"khs0783@naver.com"
] | khs0783@naver.com |
517d75eb080fc570f9f2944db0205779a06920c9 | 6ac0bba8c1851e71529269c0d9d89a7c8fa507f2 | /Medium/18.py | 5808c521f0c1350c3c957493c5fcc72c735dcfcf | [] | no_license | Hellofafar/Leetcode | e81dc85689cd6f9e6e9756beba070cb11e7b192e | 7a459e9742958e63be8886874904e5ab2489411a | refs/heads/master | 2021-05-16T07:07:19.823953 | 2020-02-17T03:00:09 | 2020-02-17T03:00:09 | 103,690,780 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | py | # ------------------------------
# 18. 4Sum
#
# Description:
# Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
#
# Note: The solution set must not contain duplicate quadruplets.
#
# Note: The solution set must not contain duplicate triplets.
# For example, given array S = [-1, 0, 1, 2, -1, -4],
# A solution set is:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
#
# Version: 1.0
# 10/17/17 by Jianfa
# ------------------------------
class Solution(object):
    """Solvers for the classic kSum problems (LeetCode 18, 4Sum)."""

    def fourSum(self, nums, target):
        """Return all unique quadruplets in nums that sum to target.

        Sorts nums in place, anchors on each candidate first element and
        delegates the remaining three to threeSum.

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        quads = []
        nums.sort()
        for first in range(len(nums) - 3):
            # Skip duplicate anchors so each quadruplet appears once.
            if first > 0 and nums[first] == nums[first - 1]:
                continue
            remainder = target - nums[first]
            for triple in self.threeSum(nums[first + 1:], remainder):
                quads.append([nums[first]] + triple)
        return quads

    def threeSum(self, nums, target):
        """Return all unique triplets in nums that sum to target.

        Classic sorted two-pointer sweep: fix an anchor, then close in
        from both ends of the remaining suffix.
        """
        triples = []
        nums.sort()
        for anchor in range(0, len(nums) - 2):
            if anchor > 0 and nums[anchor] == nums[anchor - 1]:
                continue  # duplicate anchor
            lo, hi = anchor + 1, len(nums) - 1
            while lo < hi:
                total = nums[anchor] + nums[lo] + nums[hi]
                if total > target:
                    hi -= 1
                elif total < target:
                    lo += 1
                else:
                    triples.append([nums[anchor], nums[lo], nums[hi]])
                    # Hop over equal neighbours to avoid duplicate triplets.
                    while lo < hi and nums[lo] == nums[lo + 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi - 1]:
                        hi -= 1
                    lo += 1
                    hi -= 1
        return triples
# Used for test
# Ad-hoc demo: prints the quadruplets found for the canonical example.
if __name__ == "__main__":
    test = Solution()
    nums = [1,0,-1,0,-2,2]
    target = 0
    print(test.fourSum(nums, target))
# Summary
# Leverage the idea of 3Sum. Check integer one by one and check 3Sum for the rest. | [
"buptljf@gmail.com"
] | buptljf@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.