blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5ea4ea841bdb356f1aa96a4a07df4f58831d038 | aea8205449dd0a5273f60b73c8bd4a04a6743f3b | /dyno_hsr_localization/nodes/fake_odom_broadcaster | 5746c24efb51d11dbd97ff3db9e487e26543818b | [] | no_license | samiamlabs/dyno_hsr | ad4fdb0487c006504f8c04c4f1699963563e3a4f | e34f685ff08f44364ba97b2c538e049c59241e8d | refs/heads/master | 2021-04-03T10:02:16.646441 | 2018-07-06T15:32:00 | 2018-07-06T15:32:00 | 124,736,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | #!/usr/bin/env python
import roslib
import rospy
import tf
from nav_msgs.msg import Odometry
def odometry_cb(msg):
br = tf.TransformBroadcaster()
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
z = msg.pose.pose.position.z
position = (x, y, z)
x = msg.pose.pose.orientation.x
y = msg.pose.pose.orientation.y
z = msg.pose.pose.orientation.z
w = msg.pose.pose.orientation.w
orientation = (x, y, z, w)
br.sendTransform(position,
orientation,
rospy.Time.now(),
msg.child_frame_id,
"odom")
if __name__ == '__main__':
rospy.init_node('fake_odom_broadcaster')
rospy.Subscriber('/base_pose_ground_truth', Odometry, odometry_cb)
rospy.spin()
| [
"samuellindgren91@gmail.com"
] | samuellindgren91@gmail.com | |
78e3471fd130c7d3f28c1f828db7914a791bae23 | 5568f3c6a6d24fd1489b41215f53d4522b150ca1 | /Day1 - Numbers, Arithmetic, and Printing to the Console.py | 8c5064e32a0f9727ed5f94ab72f4e42158155c77 | [] | no_license | LukeSkyRed/30-Days-of-Python | d4091e1996d32a3c7a4e91187b599ee66c413ef1 | d4dd3120a175c46e86d6a6228f610e49d368b92d | refs/heads/main | 2023-05-12T02:25:55.911765 | 2021-05-26T13:14:17 | 2021-05-26T13:14:17 | 367,687,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | # Day 1: Numbers, Arithmetic, and Printing to the Console
#calculate the numbers of days,weeks, months in 27 years
years = 27
days = 365
weeks = 52
month = 12
days_per_years = years * days
weeks_per_years = years * weeks
months_per_years = years * month
print("Il numero di giorni in " + str(years) + " anni è " +
str(days_per_years))
print("Il numero di settimane in " + str(years) + " anni è " +
str(weeks_per_years))
print("Il numero di mesi in " + str(years) + " anni è " +
str(months_per_years))
#calculate the area of a circle with radius 5
radius = 5
area = 3.14 * pow(radius, 2) | [
"noreply@github.com"
] | LukeSkyRed.noreply@github.com |
4ae82fbb54695b12dbf2f6d5842e6919c8a8330b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/1111.py | a46d75a4bf3135788b1b71ced5eaa6713ed67828 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | # Program to solve C. Recycled Numbers
def is_recycled_pair(a, b, call):
astr = str(a)
bstr = str(b)
if len(astr) != len(bstr) or len(astr) == 1:
return False
for i in range(1, len(astr)):
if astr == (bstr[len(astr) - i:] + bstr[:len(astr) - i]):
return True
if call == 1:
return is_recycled_pair(b, a, 2)
else:
return False
filename = "in.txt"
infile = open(filename, 'r')
outfile = open("output.txt", 'w')
first_line = True
case = 0
for line in infile:
if first_line:
first_line = False
continue
case += 1
start = int(line.split(" ")[0])
end = int(line.split(" ")[1])
if end <= start:
outfile.write("Case #" + str(case) + ": 0" + "\n")
continue
pair_count = 0
for n1 in range(start, end):
for n2 in range(n1 + 1, end + 1):
if is_recycled_pair(n1, n2, 1):
pair_count += 1
outfile.write("Case #" + str(case) + ": " + str(pair_count) + "\n")
infile.close()
outfile.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
48988a0333816b67fbbeeb5cb654133e4a63e35a | 85bc90e520580d2e4c186f910fc3e0396ee934f7 | /hockeystreams_api/base.py | c6211b81a45b23af288db0f8f83746fbaa1217d8 | [] | no_license | ulchm/picks_server | c6abb32220ee18bb1d72398680139b354a092042 | 1f87296538d69b51f2bef75f009e35363d04a5b8 | refs/heads/master | 2021-01-10T02:02:12.993034 | 2016-01-03T14:31:47 | 2016-01-03T14:31:47 | 48,917,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | from django.conf import settings
import urllib
import urllib2
import json
class HockeyStreamsAPIBase:
token = settings.HOCKEYSTREAMS_GAMES_TOKEN
key = None
base_url = None
def __init__(self):
self.token = settings.HOCKEYSTREAMS_GAMES_TOKEN
self.base_url = settings.HOCKEYSTREAMS_API_URL
self.key = None
self.url = ""
def login(self):
data = {
'username': settings.HOCKEYSTREAMS_USERNAME,
'password': settings.HOCKEYSTREAMS_PASSWORD,
'key': settings.HOCKEYSTREAMS_GAMES_TOKEN
}
data = urllib.urlencode(data)
self.url = "login"
success, response = self.get_response(data=data, login_required=False)
if not success:
print "There was an error. %s" % response
else:
self.key = response['token']
def get_response(self, data=None, login_required=True):
"""
Gets the response using self.url and returns parsed json if applicable
:param data: Post parameters, must be urlencoded
:param login_required: If login is required to hockey streams to execute the command
:return: A tuple True/False, Response True/False based on if the command was successful.
"""
if not self.token and login_required:
self.login()
try:
if data:
rows = json.loads(urllib2.urlopen(self.base_url + self.url, data).read())
else:
rows = json.loads(urllib2.urlopen(self.base_url + self.url).read())
return True, rows
except urllib2.HTTPError, error:
if error.code == 406:
return False, "ERROR: Invalid method, (Usually GET instead of POST)"
elif error.code == 400:
return False, "ERROR: %s." % (error.read())
elif error.code == 204:
return False, "ERROR: Empty response detected."
else:
return False, "ERROR: Unkown code: %s" % error.code
except:
return False, "Unkown Error" #Happens if the response is blank for some reason.
| [
"mike@norcode.com"
] | mike@norcode.com |
8e5e8ca0317d169947d49cf752033de72b169638 | 0f4823e4e8dcedf64b0061c9f02d2bf4b410c0e0 | /autotest/t000_testscripts.py | 4394523e616b8f2de32d6d4ce65a26d645f47bda | [
"BSD-3-Clause"
] | permissive | MJKnowling/flopy | cfa4383c8c834fbc57341511621d3f2401726224 | f480ff304e5728ccaa5e663d3fa77ec025cb0ba8 | refs/heads/master | 2021-09-20T23:57:13.032896 | 2017-12-01T18:57:09 | 2017-12-01T18:57:09 | 113,387,250 | 0 | 0 | null | 2017-12-07T01:33:03 | 2017-12-07T01:33:02 | null | UTF-8 | Python | false | false | 1,809 | py | # Remove the temp directory and then create a fresh one
from __future__ import print_function
import os
import sys
import shutil
exclude = ['flopy_swi2_ex2.py', 'flopy_swi2_ex5.py']
for arg in sys.argv:
if arg.lower() == '--all':
exclude = []
sdir = os.path.join('..', 'examples', 'scripts')
# make working directories
testdir = os.path.join('.', 'temp', 'scripts')
if os.path.isdir(testdir):
shutil.rmtree(testdir)
os.mkdir(testdir)
# add testdir to python path
sys.path.append(testdir)
def copy_scripts():
files = [f for f in os.listdir(sdir) if f.endswith('.py')]
# exclude unwanted files
for e in exclude:
if e in files:
files.remove(e)
# copy files
for fn in files:
pth = os.path.join(sdir, fn)
opth = os.path.join(testdir, fn)
# copy script
print('copying {} from {} to {}'.format(fn, sdir, testdir))
shutil.copyfile(pth, opth)
return files
def import_from(mod, name):
mod = __import__(mod)
main = getattr(mod, name)
return main
def run_scripts(fn):
# import run function from scripts
s = os.path.splitext(fn)[0]
run = import_from(s, 'run')
# change to working directory
opth = os.getcwd()
print('changing to working directory "{}"'.format(testdir))
os.chdir(testdir)
# run the script
ival = run()
# change back to starting directory
print('changing back to starting directory "{}"'.format(opth))
os.chdir(opth)
# make sure script ran successfully
assert ival == 0, 'could not run {}'.format(fn)
def test_notebooks():
files = copy_scripts()
for fn in files:
yield run_scripts, fn
if __name__ == '__main__':
files = copy_scripts()
print(files)
for fn in files:
run_scripts(fn)
| [
"jdhughes@usgs.gov"
] | jdhughes@usgs.gov |
cf038c18ced34852618731774202a5ba9c0a476a | 1c76e59804d5686f4f6407d572b638f5669ad9ab | /leetcode/simplify_path/simplify_path.py | 88cdfabea8ce5de19f9544a0f1a4aca21bec45e5 | [] | no_license | Turall/leetcode | 28b9ccd24b32309ad77c0ad5e8a72f1b11a3bafd | c510cc2cc494070542b35d744eafb381fe7d0994 | refs/heads/master | 2020-05-21T06:19:28.899393 | 2019-05-15T06:46:28 | 2019-05-15T06:46:28 | 185,941,823 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | # import re
# def simplify_path(path):
# if "/./" == path or path == "/../" or path == "/":
# return "/"
# if not bool(re.search('[a-zA-Z]', path)):
# return "/"
# path = re.sub("//", "/", path)
# path = re.sub("(\.\./)", "", path)
# path = re.sub("(/\./)$", "", path)
# path = re.sub("(/\./)$|(/\../)$", "/", path)
# if path == "/":
# return path
# if path[-1] == "/" and path[-2] != ".":
# path = re.sub("/$", "", path)
# path = re.sub("/[a-z]/\.{1,2}/[a-z]?/[\.\./]?[a-z]", "/c", path)
# path = re.sub("/[a-z]/[a-z]/[a-z]", "/c", path)
# return path
def simplify_path(path):
path = path.split("/")
temp = ['']
for i in path:
if i:
if i not in ('.', '..'):
if len(temp) == 0:
temp.append('')
temp.append(i)
elif i == '..' and len(temp) > 0:
temp.pop()
if len(temp) < 2:
return "/"
else:
return "/".join(temp)
# "/../"
# /a/../../b/../c//.//
print(simplify_path("/a/../../b/../c//.//"))
| [
"tural_m@hotmail.com"
] | tural_m@hotmail.com |
1ab7c67dc5ce9a3e74f0e26fd5376c73aebbf86a | 87ed7b916af2ccb9cc4d40745cfe62272547818a | /img2emb.py | eae9ef82c006489242ce27d3177e18e4bea12cae | [] | no_license | lonngxiang/multimodal-text-image-recall | 14121e89efa4bd3800a39ee96d2f4c126cbfbb5f | 11c7cd5c689c3e191dd6a406ca3147253cdc3d41 | refs/heads/main | 2023-04-17T06:38:30.120095 | 2021-04-22T02:18:16 | 2021-04-22T02:18:16 | 360,359,771 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | """
缩略图向量化
"""
import torch
import clip
from PIL import Image
from tqdm import tqdm
import numpy as np
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# 加载媒资内容
kkk_all = np.load(r"D:****容1.npy")
aids = list(set([i[0] for i in kkk_all.tolist()]))
# 加载模型
device = "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
# 计算缩略图向量
aidss=[]
imgs=[]
for i in tqdm(aids):
try:
aidss.append(i)
image1 = preprocess(Image.open(r"D:****图\{}.jpg".format(i))).unsqueeze(0)
with torch.no_grad():
image_features = model.encode_image(image1)
imgs.append(image_features)
print(type(image_features))
except Exception as e:
print(e)
print("####")
aidss.pop()
pass
# 保存
np.save(r"D:*****dss.npy", aidss)
np.save(r"D:\****s_embs.npy", imgs)
| [
"noreply@github.com"
] | lonngxiang.noreply@github.com |
9978938d6c89dfc4cbef5d0b474f6ea5d568ee40 | 04fb46ffbf635ca5090b860e39098c366a3a84e4 | /fpga/mqnic/fb2CG/fpga_100g/tb/fpga_core/test_fpga_core.py | f115c4c4cb3c16eeba80e3ad6828388750820cf1 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | yangcc2019/corundum | 46b7c7126973976617e065373bb7666df71cdc3c | 7c8abe261b2ec3e653da7bc881f769668a231bde | refs/heads/master | 2023-02-24T11:17:47.471315 | 2021-02-02T05:55:07 | 2021-02-02T05:55:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,033 | py | """
Copyright 2020, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
"""
import logging
import os
import sys
import scapy.utils
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice
from cocotbext.axi import AxiStreamSource, AxiStreamSink
try:
import mqnic
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
import mqnic
finally:
del sys.path[0]
class TB(object):
def __init__(self, dut):
self.dut = dut
self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE"))
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.rc.max_payload_size = 0x1 # 256 bytes
self.rc.max_read_request_size = 0x2 # 512 bytes
self.dev = UltraScalePlusPcieDevice(
# configuration options
pcie_generation=3,
pcie_link_width=16,
user_clk_frequency=250e6,
alignment="dword",
cq_cc_straddle=False,
rq_rc_straddle=False,
rc_4tlp_straddle=False,
enable_pf1=False,
enable_client_tag=True,
enable_extended_tag=True,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
enable_pf0_msi=True,
enable_pf1_msi=False,
# signals
# Clock and Reset Interface
user_clk=dut.clk_250mhz,
user_reset=dut.rst_250mhz,
# user_lnk_up
# sys_clk
# sys_clk_gt
# sys_reset
# phy_rdy_out
# Requester reQuest Interface
rq_entity=dut,
rq_name="m_axis_rq",
pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
# pcie_rq_tag0
# pcie_rq_tag1
# pcie_rq_tag_av
# pcie_rq_tag_vld0
# pcie_rq_tag_vld1
# Requester Completion Interface
rc_entity=dut,
rc_name="s_axis_rc",
# Completer reQuest Interface
cq_entity=dut,
cq_name="s_axis_cq",
# pcie_cq_np_req
# pcie_cq_np_req_count
# Completer Completion Interface
cc_entity=dut,
cc_name="m_axis_cc",
# Transmit Flow Control Interface
# pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
# pcie_tfc_npd_av=dut.pcie_tfc_npd_av,
# Configuration Management Interface
cfg_mgmt_addr=dut.cfg_mgmt_addr,
cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
cfg_mgmt_write=dut.cfg_mgmt_write,
cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
cfg_mgmt_read=dut.cfg_mgmt_read,
cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
# cfg_mgmt_debug_access
# Configuration Status Interface
# cfg_phy_link_down
# cfg_phy_link_status
# cfg_negotiated_width
# cfg_current_speed
cfg_max_payload=dut.cfg_max_payload,
cfg_max_read_req=dut.cfg_max_read_req,
# cfg_function_status
# cfg_vf_status
# cfg_function_power_state
# cfg_vf_power_state
# cfg_link_power_state
# cfg_err_cor_out
# cfg_err_nonfatal_out
# cfg_err_fatal_out
# cfg_local_error_out
# cfg_local_error_valid
# cfg_rx_pm_state
# cfg_tx_pm_state
# cfg_ltssm_state
# cfg_rcb_status
# cfg_obff_enable
# cfg_pl_status_change
# cfg_tph_requester_enable
# cfg_tph_st_mode
# cfg_vf_tph_requester_enable
# cfg_vf_tph_st_mode
# Configuration Received Message Interface
# cfg_msg_received
# cfg_msg_received_data
# cfg_msg_received_type
# Configuration Transmit Message Interface
# cfg_msg_transmit
# cfg_msg_transmit_type
# cfg_msg_transmit_data
# cfg_msg_transmit_done
# Configuration Flow Control Interface
cfg_fc_ph=dut.cfg_fc_ph,
cfg_fc_pd=dut.cfg_fc_pd,
cfg_fc_nph=dut.cfg_fc_nph,
cfg_fc_npd=dut.cfg_fc_npd,
cfg_fc_cplh=dut.cfg_fc_cplh,
cfg_fc_cpld=dut.cfg_fc_cpld,
cfg_fc_sel=dut.cfg_fc_sel,
# Configuration Control Interface
# cfg_hot_reset_in
# cfg_hot_reset_out
# cfg_config_space_enable
# cfg_dsn
# cfg_bus_number
# cfg_ds_port_number
# cfg_ds_bus_number
# cfg_ds_device_number
# cfg_ds_function_number
# cfg_power_state_change_ack
# cfg_power_state_change_interrupt
cfg_err_cor_in=dut.status_error_cor,
cfg_err_uncor_in=dut.status_error_uncor,
# cfg_flr_in_process
# cfg_flr_done
# cfg_vf_flr_in_process
# cfg_vf_flr_func_num
# cfg_vf_flr_done
# cfg_pm_aspm_l1_entry_reject
# cfg_pm_aspm_tx_l0s_entry_disable
# cfg_req_pm_transition_l23_ready
# cfg_link_training_enable
# Configuration Interrupt Controller Interface
# cfg_interrupt_int
# cfg_interrupt_sent
# cfg_interrupt_pending
cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
# cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
# cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
# cfg_interrupt_msix_enable
# cfg_interrupt_msix_mask
# cfg_interrupt_msix_vf_enable
# cfg_interrupt_msix_vf_mask
# cfg_interrupt_msix_address
# cfg_interrupt_msix_data
# cfg_interrupt_msix_int
# cfg_interrupt_msix_vec_pending
# cfg_interrupt_msix_vec_pending_status
cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
# cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
# cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,
# Configuration Extend Interface
# cfg_ext_read_received
# cfg_ext_write_received
# cfg_ext_register_number
# cfg_ext_function_number
# cfg_ext_write_data
# cfg_ext_write_byte_enable
# cfg_ext_read_data
# cfg_ext_read_data_valid
)
# self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.driver = mqnic.Driver(self.rc)
self.dev.functions[0].msi_multiple_message_capable = 5
self.dev.functions[0].configure_bar(0, 2**self.BAR0_APERTURE, ext=True, prefetch=True)
# Ethernet
cocotb.fork(Clock(dut.qsfp_0_rx_clk, 3.102, units="ns").start())
self.qsfp_0_source = AxiStreamSource(dut, "qsfp_0_rx_axis", dut.qsfp_0_rx_clk, dut.qsfp_0_rx_rst)
cocotb.fork(Clock(dut.qsfp_0_tx_clk, 3.102, units="ns").start())
self.qsfp_0_sink = AxiStreamSink(dut, "qsfp_0_tx_axis", dut.qsfp_0_tx_clk, dut.qsfp_0_tx_rst)
cocotb.fork(Clock(dut.qsfp_1_rx_clk, 3.102, units="ns").start())
self.qsfp_1_source = AxiStreamSource(dut, "qsfp_1_rx_axis", dut.qsfp_1_rx_clk, dut.qsfp_1_rx_rst)
cocotb.fork(Clock(dut.qsfp_1_tx_clk, 3.102, units="ns").start())
self.qsfp_1_sink = AxiStreamSink(dut, "qsfp_1_tx_axis", dut.qsfp_1_tx_clk, dut.qsfp_1_tx_rst)
dut.qsfp_0_i2c_scl_i.setimmediatevalue(1)
dut.qsfp_0_i2c_sda_i.setimmediatevalue(1)
dut.qsfp_0_intr_n.setimmediatevalue(1)
dut.qsfp_0_mod_prsnt_n.setimmediatevalue(0)
dut.qsfp_1_i2c_scl_i.setimmediatevalue(1)
dut.qsfp_1_i2c_sda_i.setimmediatevalue(1)
dut.qsfp_1_intr_n.setimmediatevalue(1)
dut.qsfp_1_mod_prsnt_n.setimmediatevalue(0)
dut.qspi_dq_i.setimmediatevalue(0)
dut.pps_in.setimmediatevalue(0)
self.loopback_enable = False
cocotb.fork(self._run_loopback())
async def init(self):
self.dut.qsfp_0_rx_rst.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp_0_rx_rst.setimmediatevalue(1)
self.dut.qsfp_0_tx_rst.setimmediatevalue(1)
self.dut.qsfp_1_rx_rst.setimmediatevalue(1)
self.dut.qsfp_1_tx_rst.setimmediatevalue(1)
await FallingEdge(self.dut.rst_250mhz)
await Timer(100, 'ns')
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp_0_rx_rst.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst.setimmediatevalue(0)
await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
async def _run_loopback(self):
while True:
await RisingEdge(self.dut.clk_250mhz)
if self.loopback_enable:
if not self.qsfp_0_sink.empty():
await self.qsfp_0_source.send(await self.qsfp_0_sink.recv())
if not self.qsfp_1_sink.empty():
await self.qsfp_1_source.send(await self.qsfp_1_sink.recv())
@cocotb.test()
async def run_test_nic(dut):
tb = TB(dut)
await tb.init()
tb.log.info("Init driver")
await tb.driver.init_dev(tb.dev.functions[0].pcie_id)
await tb.driver.interfaces[0].open()
# await driver.interfaces[1].open()
# enable queues
tb.log.info("Enable queues")
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_SCHED_ENABLE, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[0].hw_addr+4*k, 0x00000003)
# wait for all writes to complete
await tb.rc.mem_read(tb.driver.hw_addr, 4)
tb.log.info("Init complete")
tb.log.info("Send and receive single packet")
data = bytearray([x % 256 for x in range(1024)])
await tb.driver.interfaces[0].start_xmit(data, 0)
pkt = await tb.qsfp_0_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.qsfp_0_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
# await tb.driver.interfaces[1].start_xmit(data, 0)
# pkt = await tb.qsfp_1_0_sink.recv()
# tb.log.info("Packet: %s", pkt)
# await tb.qsfp_1_0_source.send(pkt)
# pkt = await tb.driver.interfaces[1].recv()
# tb.log.info("Packet: %s", pkt)
# assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.log.info("RX and TX checksum tests")
payload = bytes([x % 256 for x in range(256)])
eth = Ether(src='5A:51:52:53:54:55', dst='DA:D1:D2:D3:D4:D5')
ip = IP(src='192.168.1.100', dst='192.168.1.101')
udp = UDP(sport=1, dport=2)
test_pkt = eth / ip / udp / payload
test_pkt2 = test_pkt.copy()
test_pkt2[UDP].chksum = scapy.utils.checksum(bytes(test_pkt2[UDP]))
await tb.driver.interfaces[0].start_xmit(test_pkt2.build(), 0, 34, 6)
pkt = await tb.qsfp_0_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.qsfp_0_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
assert Ether(pkt.data).build() == test_pkt.build()
tb.log.info("Multiple small packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(60)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Multiple large packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Jumbo frames")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(9014)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
await RisingEdge(dut.clk_250mhz)
await RisingEdge(dut.clk_250mhz)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axi_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axi', 'rtl'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl'))
def test_fpga_core(request):
dut = "fpga_core"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "common", "mqnic_interface.v"),
os.path.join(rtl_dir, "common", "mqnic_port.v"),
os.path.join(rtl_dir, "common", "cpl_write.v"),
os.path.join(rtl_dir, "common", "cpl_op_mux.v"),
os.path.join(rtl_dir, "common", "desc_fetch.v"),
os.path.join(rtl_dir, "common", "desc_op_mux.v"),
os.path.join(rtl_dir, "common", "queue_manager.v"),
os.path.join(rtl_dir, "common", "cpl_queue_manager.v"),
os.path.join(rtl_dir, "common", "tx_engine.v"),
os.path.join(rtl_dir, "common", "rx_engine.v"),
os.path.join(rtl_dir, "common", "tx_checksum.v"),
os.path.join(rtl_dir, "common", "rx_hash.v"),
os.path.join(rtl_dir, "common", "rx_checksum.v"),
os.path.join(rtl_dir, "common", "tx_scheduler_rr.v"),
os.path.join(rtl_dir, "common", "event_mux.v"),
os.path.join(rtl_dir, "common", "tdma_scheduler.v"),
os.path.join(rtl_dir, "common", "tdma_ber.v"),
os.path.join(rtl_dir, "common", "tdma_ber_ch.v"),
os.path.join(eth_rtl_dir, "ptp_clock.v"),
os.path.join(eth_rtl_dir, "ptp_clock_cdc.v"),
os.path.join(eth_rtl_dir, "ptp_perout.v"),
os.path.join(eth_rtl_dir, "ptp_ts_extract.v"),
os.path.join(axi_rtl_dir, "axil_interconnect.v"),
os.path.join(axi_rtl_dir, "arbiter.v"),
os.path.join(axi_rtl_dir, "priority_encoder.v"),
os.path.join(axis_rtl_dir, "axis_adapter.v"),
os.path.join(axis_rtl_dir, "axis_arb_mux.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
os.path.join(axis_rtl_dir, "axis_fifo.v"),
os.path.join(axis_rtl_dir, "axis_register.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axil_master.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_wr.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_wr.v"),
os.path.join(pcie_rtl_dir, "dma_psdpram.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_sink.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_source.v"),
os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"),
os.path.join(pcie_rtl_dir, "pcie_us_msi.v"),
os.path.join(pcie_rtl_dir, "pcie_tag_manager.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
parameters['AXIS_PCIE_DATA_WIDTH'] = 512
parameters['AXIS_PCIE_KEEP_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH'] // 32
parameters['AXIS_PCIE_RQ_USER_WIDTH'] = 62 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 137
parameters['AXIS_PCIE_RC_USER_WIDTH'] = 75 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 161
parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 88 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 183
parameters['AXIS_PCIE_CC_USER_WIDTH'] = 33 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 81
parameters['RQ_SEQ_NUM_WIDTH'] = 6
parameters['BAR0_APERTURE'] = 24
parameters['AXIS_ETH_DATA_WIDTH'] = 512
parameters['AXIS_ETH_KEEP_WIDTH'] = parameters['AXIS_ETH_DATA_WIDTH'] // 8
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| [
"alex@alexforencich.com"
] | alex@alexforencich.com |
aa2264a5588a0b46c7cfb011a8461d7dcd77f40f | cc62db498416bac19cff27484789af28a16f1afa | /week6/MultiApps/apps/users/views.py | 0744b48bea923da45d243f4a86dec47ecdcdab99 | [] | no_license | py2-10-2017/KevinDunn | 5b7899aaad87497b661e938f5f8d72d1733032f6 | a8745fc9c447dd21f82fef8aec6704780ef42acf | refs/heads/master | 2021-07-17T15:45:06.930093 | 2017-10-26T01:24:41 | 2017-10-26T01:24:41 | 105,678,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | from django.shortcuts import render, redirect, HttpResponse
# Create your views here.
def register(request):
response = "Sign in here"
return HttpResponse(response)
def users(request):
response = "These are the users..."
return HttpResponse(response)
def login(request):
response = "Log in here"
return HttpResponse(response)
| [
"kevincdunn.kcd@gmail.com"
] | kevincdunn.kcd@gmail.com |
4b1d2c76432581fbd81460b762ecd5235470d50c | 010da1500dfce66804de580f5e7a60083521bdc5 | /posts/forms.py | 4b34605542b86817a76b70912a52bd60847d33a4 | [] | no_license | shreyaabh/Travel-match | bae3fb969bef8e4d08735585c5eb440715e1eb96 | e6caba62f424640aea61c1a667d820d514d1721c | refs/heads/master | 2023-05-06T20:59:38.178119 | 2021-05-18T04:36:22 | 2021-05-18T04:36:22 | 368,401,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from django import forms
from .models import Post
class PostModelForm(forms.ModelForm):
    """ModelForm for creating/editing a travel Post."""
    class Meta:
        model = Post
        # NOTE(review): 'author' is exposed as a regular form field, so it
        # comes from user-submitted data — confirm it shouldn't instead be
        # set server-side from the logged-in user.
        fields = ('no_people','no_days','tour_date', 'Gender_prefer','location','detail','author')
"shrbhandari2000@gmail.com"
] | shrbhandari2000@gmail.com |
c1594b6024924f1733c5af193d9a8a11686b8087 | f24ff43f3ac98c63e45df50e6d3c02571de6f3db | /third.py | 2f8cbe863951e44aa7130bb7d5fac62f2987d856 | [] | no_license | crazycracker/PythonPractice | 796ef9c7bc772b15108eeac280c995ea9bd81b5f | 9afa6e59509d925fe439c98bec930c03daf9f7ed | refs/heads/master | 2021-03-30T16:18:59.108002 | 2017-12-11T06:27:21 | 2017-12-11T06:27:21 | 112,300,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | for x in range(1, 50, 1):
if x % 3 == 0 and x % 5 == 0:
print("FizzBuzz")
elif x % 3 == 0:
print("Fizz")
else:
print("Buzz")
| [
"vinay.manchala@capgemini.com"
] | vinay.manchala@capgemini.com |
2418d79a943544995aa09fa6c451b0eae28c6c7d | ea25f8fed02d4f059ee141d09314a679496835e5 | /download_and_mux_YouTube_streams_from_video.py | 0ca805371a4a134e1ca3ffc4e4e52c68b2ad7522 | [] | no_license | cristi-iorga/download_and_mux_YouTube_streams_from_video | 4d56789d3f020cdfd478521faa061d8937badd44 | 515be49b7f14b628ff21a76391f67219f40da18c | refs/heads/master | 2021-04-08T21:36:33.154382 | 2020-03-20T17:25:22 | 2020-03-20T17:25:22 | 248,811,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,845 | py | """
Name: download_and_mux_YouTube_streams_from_video
Purpose: Lets you choose a video and an audio stream from a YouTube video link and muxes them together using ffmpeg
Author: iorga.ton@gmail.com
Revision: 20.03.2020 - initial version
Python Version: 3.8.2
Windows Version: 10.0.17763
Dependencies: pytube (pip install pytube3)
ffmpeg
"""
from pytube import YouTube
import os
import sys
import time
def get_input():
    """Prompt the user for a YouTube link and return the raw string."""
    return input("\nEnter the Youtube link here and press ENTER: ")
def choose_muxed_stream():
    """Ask for the itag of a pre-muxed stream and return it as a string."""
    return input("\nChoose an itag to download and press ENTER to start downloading: ")
def choose_video_audio_stream():
    """Prompt for separate video/audio itags and an audio-only choice.

    Returns a ``(video_itag, audio_itag, audio_only)`` tuple. ``audio_only``
    is True only when the user answers "y"; any unrecognised answer falls
    back to False after a warning.
    """
    chosen_video = input("\nChoose an itag for the VIDEO stream and press ENTER: ")
    chosen_audio = input("\nChoose an itag for the AUDIO stream and press ENTER: ")
    answer = input("\nWould you like audio only? (y/n)")
    if answer == "y":
        audio_only = True
    else:
        if answer != "n":
            print("Invalid input! I'm assuming you don't care, we'll do both then")
        audio_only = False
    return (chosen_video, chosen_audio, audio_only)
def display_muxed_streams(video):
    """Print one line per progressive (pre-muxed audio+video) stream."""
    print("\nThese are all the available muxed streams:\n")
    streams = video.streams.filter(progressive=True)
    print(*streams, sep="\n")
def display_video_streams(video):
    """Print one line per video-only (DASH) stream."""
    print("\nThese are all the available video streams:\n")
    streams = video.streams.filter(only_video=True)
    print(*streams, sep="\n")
def display_audio_streams(video):
    """Print one line per audio-only (DASH) stream."""
    print("\nThese are all the available audio streams:\n")
    streams = video.streams.filter(only_audio=True)
    print(*streams, sep="\n")
def download_streams():
    """Interactively pick and download the streams for one YouTube link.

    Shows all muxed/video/audio streams, asks the user for itags, then
    downloads the chosen streams to the working directory.

    Returns a tuple ``(video_path, audio_path, title, audio_only)``;
    ``video_path`` is None when the user asked for audio only.
    """
    link = get_input()
    video = YouTube(link)
    display_muxed_streams(video)
    display_video_streams(video)
    display_audio_streams(video)
    video_tag, audio_tag, only_audio = choose_video_audio_stream()
    output_path_video = None
    if not only_audio:
        video_stream = video.streams.get_by_itag(video_tag)
        # filename_prefix keeps the two temporary downloads distinguishable
        output_path_video = video_stream.download(filename_prefix="video_")
    audio_stream = video.streams.get_by_itag(audio_tag)
    output_path_audio = audio_stream.download(filename_prefix="audio_")
    full_video_name = video.title
    return(output_path_video, output_path_audio, full_video_name, only_audio)
def add_doublequotes(command):
    """Return str(command) wrapped in literal double quotes."""
    return '"%s"' % (command,)
def file_muxer(video_file, audio_file, full_name):
    """Mux a video-only and an audio-only file into one output via ffmpeg.

    Parameters
    ----------
    video_file, audio_file : str
        Paths of the two downloaded streams.
    full_name : str
        Video title used for the output file name; the video file's
        extension is kept.

    Both streams are stream-copied (no re-encode). Exits the process with
    status 1 if ffmpeg cannot be started or fails.
    """
    import subprocess
    try:
        video_file_name = os.path.basename(video_file)
        audio_file_name = os.path.basename(audio_file)
        _, video_file_extension = os.path.splitext(video_file_name)
        print("Video file is: ", video_file_name)
        print("Audio file is: ", audio_file_name)
        out_file = full_name + video_file_extension
        # Build the command as an argument list: unlike the previous
        # os.system() string, this needs no manual quoting and cannot be
        # broken (or exploited) by quotes/shell metacharacters in the
        # untrusted YouTube title or file paths.
        mux_command = [
            "ffmpeg",
            "-i", video_file,
            "-i", audio_file,
            "-vcodec", "copy",
            "-acodec", "copy",
            "-map", "0:0",
            "-map", "1:0",
            out_file,
        ]
        print(" ".join(mux_command))
        subprocess.run(mux_command, check=True)
    except (OSError, subprocess.CalledProcessError) as err:
        # The old handler printed err.reason, an attribute OSError does not
        # have, which raised a second error; print the exception itself.
        print(err)
        sys.exit(1)
if __name__ == '__main__':
    # downloads the two streams:
    video_path, audio_path, full_name, only_audio = download_streams()
    # muxes the two streams that were downloaded into a new file and deletes the original files
    if not only_audio:
        file_muxer(video_path, audio_path, full_name)
        os.remove(video_path)
        os.remove(audio_path)
    # In audio-only mode nothing is muxed; the downloaded "audio_*" file is
    # left on disk as the result.
    input("\n\nDone! Press any key to exit!")
"noreply@github.com"
] | cristi-iorga.noreply@github.com |
eb239b21952da625554fc6c3c1b389fd1c3d1bfe | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03293/s450382229.py | 6fbb1ed548a962cfbf8b0a5267d97027111aea93 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | S,T=input(),input()
# Decide whether S (read above) can be turned into T by cyclic rotation.
for i in range(len(T)):
    # Compare, then rotate S right by one (last character to the front).
    if S==T:print('Yes');exit()
    S=S[-1]+S[0:-1]
print('No')
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
082339a9172c6247b17243465962f4b8cea0e12c | 45469b00d86555aa419f1cf015bde4cf3e62d8d4 | /scales.py | 1421e3ac8f4e154c27d283116b63e28283b825fb | [] | no_license | Airflame/improviser | 76eac4c2390c0ebdaf65e6796b56217f02421fa4 | 06ac76f96c26bfab95d9d57031dd77165dc3ce09 | refs/heads/master | 2020-05-05T09:23:06.311063 | 2019-04-07T13:11:21 | 2019-04-07T13:11:21 | 173,586,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | KEYS = ('A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#')
CHROMATIC = ('a2', 'a#2', 'b2', 'c3', 'c#3', 'd3', 'd#3', 'e3', 'f3', 'f#3', 'g3', 'g#3',
'a3', 'a#3', 'b3', 'c', 'c#', 'd', 'd#', 'e', 'f', 'f#', 'g', 'g#',
'a', 'a#', 'b', 'c5', 'c#5', 'd5', 'd#5', 'e5', 'f5', 'f#5', 'g5', 'g#5',
'a5', 'a#5', 'b5', 'c6', 'c#6', 'd6', 'd#6', 'e6', 'f6', 'f#6', 'g6', 'g#6')
PENTATONIC = (2, 2, 3, 2, 3)
BLUES = (3, 2, 1, 1, 3, 2)
MAJOR = (2, 2, 1, 2, 2, 2, 1)
MINOR = (2, 1, 2, 2, 1, 2, 2)
| [
"airflame@wp.pl"
] | airflame@wp.pl |
95b54f2914a61f9a045c2fd26d9d46b9767a42c4 | 0b953c73458679beeef3b95f366601c834cff9b4 | /hunter/longest palindrome substring within string.py | 9f907b18b130617c943cd267d9545d83c25ece09 | [] | no_license | Sravaniram/Python-Programming | 41531de40e547f0f461e77b88e4c0d562faa041c | f6f2a4e3a6274ecab2795062af8899c2a06c9dc1 | refs/heads/master | 2020-04-11T12:49:18.677561 | 2018-06-04T18:04:13 | 2018-06-04T18:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | a=raw_input()
l=[]
m=[]
for i in range(0,len(a)):
for j in range(i,len(a)):
z=a[i:j+1]
y=z[::-1]
if z==y:
l.append(z)
m.append(len(z))
y=max(m)
for i in range(0,len(a)):
if m[i]==y:
print l[i]
break
| [
"noreply@github.com"
] | Sravaniram.noreply@github.com |
ccbd04c0b681691aa8c803aef2597a9db732015b | 8a5be45f90ca7e7538afbf005c912bfb3d9e083e | /datacleaningScripts/removeTitles.py | 69b3a14a0ad888efc73494b7410ad21e6658aad4 | [] | no_license | dylanharden3/Mock-Streaming-Company-Database | 69c2487612362b674e9cad1f64ff31423f5bb159 | 2a7ce32c6ab5f8625bac5e553c0d531639a1540a | refs/heads/master | 2023-08-25T01:16:22.755935 | 2021-10-12T22:25:55 | 2021-10-12T22:25:55 | 416,107,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | # titles intput format (no header): (title ID) (title type) (original title) (runtime minutes) (genres) (year) (average rating) (num votes)
# customer_ratings input format (no header): (customer ID) (rating) (date) (title Id)
# principals input format (no header): (title ID) (nconst) (category)
# removes titles from principalsNew and customer_ratingsNew that have been removed from titles
# keeps same format of files input
# reading through titlesNewNew (most recent version of titles) and inserting all titles into map for ease of access
titles = {}
# getting file names for input and output
print("NOTE: put path of file from data directory")
titlesInput = input("input name for titles input file: ")
customerInput = input("input name for customer_ratings input file: ")
customerOutput = input("input name for customer_ratings output file: ")
principalInput = input("input name for principals input file: ")
principalOutput = input("input name for principals output file: ")
# loading titles and years into dictionary
with open("../" + titlesInput, "r") as fileRead:
for line in fileRead.readlines():
line = line.split("\t") # spliting line by tabs
# title id is first entry in titles file
titles[line[0]] = line[5] # just need title id, year (cannot have review in year before movie was released)
# reading through customerRatings, removing lines where title is not present and
# year of review is less than year movie was made
with open("../" + customerInput, "r") as fileRead:
with open("../" + customerOutput, "w") as fileWrite:
for line in fileRead.readlines():
# if title and year are good >> write to newnew
line = line.split() # splitting line by whitespace
if (line[3] in titles):
# check year of review (movie year <= review year)
if (int(titles[line[3]]) <= int(line[2][0:4])):
# need to add
fileWrite.write("\t".join(line) + "\n")
# reading through principals, removing lines where title id is not in titlesNewNew
with open("../" + principalInput, "r") as fileRead:
with open("../" + principalOutput, "w") as fileWrite:
for line in fileRead.readlines():
# if title is in titles >> add to new file
line = line.split() # splitting by whitespace
if (line[0] in titles):
# have title >> can write to new file
fileWrite.write("\t".join(line) + "\n") | [
"dylanharden3@tamu.edu"
] | dylanharden3@tamu.edu |
ba1dcd4b1351805b84b78d9f79143556400138fe | ca357b1ff16c62151f35573be3a2cc447d564323 | /tests/test_generators.py | 79a349ceedade6a180b79b6c18d247833a5be1a0 | [
"Apache-2.0"
] | permissive | opsdroid/opsdroid-audio | fad62453d6d474e59818c7a881184cd11d01e5b8 | 943d181489fee1a3ebed99f271e5cf814c0172f3 | refs/heads/master | 2021-12-30T08:49:51.127080 | 2020-07-22T17:24:29 | 2020-07-22T17:24:29 | 83,927,663 | 5 | 10 | Apache-2.0 | 2022-01-04T10:51:37 | 2017-03-04T22:04:15 | Python | UTF-8 | Python | false | false | 352 | py | import unittest
from opsdroidaudio import generators
class TestGenerators(unittest.TestCase):
    """Unit tests for the opsdroidaudio generators module."""
    def test_prepare_url(self):
        """prepare_url should describe a URL by its bare domain."""
        result = generators.prepare_url('https://www.youtube.com')
        self.assertEqual('a link to youtube.com', result)
| [
"jacobtomlinson@users.noreply.github.com"
] | jacobtomlinson@users.noreply.github.com |
e28937a960ba2e01fca8963a93fe93be28a5275a | f2fd1dd56806165622b8fc741c4e2247c48c505f | /productSearch2.py | 9a1fef691a955ba12eb52a47405e2452cef98812 | [] | no_license | professorr-x/archive.za | 6dcee8d1a046dbafcae541e0460641cee67e8e90 | deee5fe1bbae22733a7f0cb34875b8e59ab6f151 | refs/heads/master | 2022-11-29T23:42:13.632474 | 2020-08-06T14:57:31 | 2020-08-06T14:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | import string
import requests
import json
from datetime import datetime
import threading
import time
import os
from discord import *
def removeFile(filename):
if os.path.exists(filename):
os.remove(filename)
else:
print("File doesn't exists")
alpha = list(string.ascii_uppercase)
numb = list(range(0, 10))
base = '060601A'
productIds = []
for i in range(0,3):
for k in range(len(alpha)):
for y in range(len(numb)):
for j in range(len(alpha)):
productId = base + alpha[i] + alpha[k] + alpha[j] + str(numb[y])
productIds.append(productId)
def productSearch(prodId):
try:
client = requests.Session()
r = client.get('https://www.archivestore.co.za/product/generateProductJSON.jsp?productId={}'.format(prodId))
time.sleep(1)
productData = json.loads(r.text)
if len(productData) > 0 and 'NRG' in productData['name']:
name = productData['name']
with open('productList.csv', 'a') as f:
f.write('{}|{} \n'.format(prodId, name))
f.close()
print('{} - {}'.format(prodId, name))
sendHook(name,'https://www.archivestore.co.za' + productData['pdpURL'], 'size', productData['images'][0]['thumb'])
elif len(productData) > 0:
name = productData['name']
print('{} - {}'.format(prodId, name))
else:
print('{}'.format(prodId))
except requests.exceptions.ConnectionError:
print('Unable to find product at ID: {}'.format(prodId))
with open('emptyID.txt', 'a') as f:
f.write('{} \n'.format(prodId))
f.close()
threads = []
split = 1000
x=0
with open('productList.csv', 'w') as f:
f.write('product_id|product_name \n')
f.close
removeFile('productList.csv')
removeFile('emptyID.txt')
while x<115:
for i in range(len(productIds[(x*176):(x+1)*176])):
t = threading.Thread(target=productSearch, args=(productIds[(x*176):(x+1)*176][i],))
threads.append(t)
t.start()
for one_thread in threads:
one_thread.join()
x += 1
dateTimeObj = datetime.now()
print(str(dateTimeObj))
print(productIds[(x*176):(x+1)*176])
| [
"hussaiy1@gmail.com"
] | hussaiy1@gmail.com |
db479a4504dbd92c606e11ca2cddea4c860fc035 | 7d4bff60046d7254606adce1323c57ff890e7830 | /sigin_tests.py | b0cf35474ac807b5e026c1a27f3e4e91f0d11b9c | [] | no_license | mmoh4187/assn1 | 78d7d4e4cac610c3fc145b335cfb82b5512e077d | 471bc4468d5c2edcee8c1c0195618d1cf50dae7c | refs/heads/master | 2016-09-12T23:27:37.689774 | 2016-04-11T10:57:53 | 2016-04-11T10:57:53 | 55,957,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | import signin
import unittest
class UserLoginTestCase(unittest.TestCase):
    """Tests for the /login endpoint of the signin Flask app."""
    def setUp(self):
        # Use Flask's test client so no real server is started.
        signin.app.config['TESTING'] = True
        self.app = signin.app.test_client()
    def login(self, username, password):
        """POST credentials to /login, following any redirects."""
        return self.app.post('/login', data=dict(
            username = username,
            password=password
        ),follow_redirects=True)
    def test_login_ok(self):
        # Known-good credentials should land on a page containing "Success".
        rv = self.login("test", "test123")
        assert b'Success' in rv.data
    def test_login_not_ok(self):
        # Wrong password for a known user.
        rv = self.login("test", "test")
        assert b'Fail' in rv.data
    def test_login_unknown_user(self):
        # Correct-looking password but unknown username.
        rv = self.login("batman", "test123")
        assert b'Fail' in rv.data
if __name__ == '__main__':
    unittest.main()
| [
"mmoh4187@uni.sydney.edu.au"
] | mmoh4187@uni.sydney.edu.au |
21e9a8b18a7f5dfb4dd96d245f7e3f6aa402db76 | 2a1f85ec2bf3a43f789676d4391b3cd187e81648 | / darth-enoch/iat/actions.py | e05dcac2b9b5035e6ad380ea149e0f1424289d89 | [] | no_license | pardus-anka/contrib | 6e5acba2ff11b388594092187de8ca523d935683 | 773f0917b6b44bfe8056bda1d9f2b29299f127e4 | refs/heads/master | 2016-09-02T00:36:28.051867 | 2013-03-05T15:47:06 | 2013-03-05T15:47:06 | 8,203,958 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
    """Configure the source tree with the default ./configure flags."""
    autotools.configure()
def build():
    """Compile with make."""
    autotools.make()
def install():
    """Install into the package image and ship the standard doc files."""
    autotools.install()
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "NEWS", "README")
| [
"yaralikurt15@hotmail.com"
] | yaralikurt15@hotmail.com |
9173d3039691ddd66032180f3338f138b8bae29c | 87da37fab5769ca8b26078d06fe8d57bfe9be0f4 | /ZeroMq_Python/application/application.py | 22c8e2914304531759999a1ea65226683d863e46 | [] | no_license | SonnevilleJ/ZeroMq | 95c6c976c0e9087f86fa6a4da8677e657c2d9ef7 | 87fd921270259c780d6aa18e0ff7f0ea8f888565 | refs/heads/master | 2022-07-06T03:37:07.685502 | 2014-09-23T14:57:33 | 2014-09-23T16:43:13 | 247,728,066 | 0 | 0 | null | 2022-06-22T22:53:37 | 2020-03-16T14:42:57 | C# | UTF-8 | Python | false | false | 611 | py | from flask import Flask
from flask import render_template
from monitorZmq import *
from autoDiscovery import *
app = Flask(__name__)
ip_of_server = auto_discover_server();
@app.route('/monitor/')
def queue_monitor():
    """Render the monitor page with the current ZeroMQ queue depth."""
    model = {}
    model["queueDepth"] = int(monitor_messages(ip_of_server))
    return render_template('monitor.html', model=model)
@app.route('/currentqueuedepth/')
def current_queue_depth():
    """Plain-text current queue depth (polled by the monitor page)."""
    return monitor_messages(ip_of_server)
@app.route('/currentconsumercount/')
def current_consumer_count():
    """Plain-text current consumer count (polled by the monitor page)."""
    return monitor_consumers(ip_of_server)
if __name__ == '__main__':
    app.run(debug=True)
| [
"wisnowskidavid@johndeere.com"
] | wisnowskidavid@johndeere.com |
e906f07c23f23c8b524db511e17761a4e3bdba53 | 54cc761fe47833e8004647c516e328fc860b4e78 | /2019/flareon6/7 - wopr/_wopr.exe.extracted/b/c/e/PYC138.pyc.py | 81ec5fecae9490c3982b3a7a2f292ddc275297dd | [] | no_license | enderdzz/ReverseThings | c8023fe3095617ae8b0be71b0f10f19579e7aec5 | fe2a3ee8d3b1b018eebc57ebd31fb715bc89958e | refs/heads/master | 2023-08-14T09:11:36.708079 | 2023-07-19T06:49:03 | 2023-07-19T06:49:03 | 67,817,669 | 5 | 0 | null | 2023-07-19T06:49:05 | 2016-09-09T17:13:22 | HTML | UTF-8 | Python | false | false | 3,722 | py | # uncompyle6 version 3.3.5
# Python bytecode 3.7 (3394)
# Decompiled from: Python 2.7.16 (default, Mar 4 2019, 09:02:22)
# [GCC 4.2.1 Compatible Apple LLVM 10.0.0 (clang-1000.11.45.5)]
# Embedded file name: email\errors.py
"""email package exception classes."""
class MessageError(Exception):
    """Base class for errors in the email package."""

class MessageParseError(MessageError):
    """Base class for message parsing errors."""

class HeaderParseError(MessageParseError):
    """Error while parsing headers."""

class BoundaryError(MessageParseError):
    """Couldn't find terminating boundary."""

class MultipartConversionError(MessageError, TypeError):
    """Conversion to a multipart is prohibited."""

class CharsetError(MessageError):
    """An illegal charset was given."""

class MessageDefect(ValueError):
    """Base class for a message defect."""

    def __init__(self, line=None):
        # The defective line is optional; when given it also becomes the
        # ValueError message.
        if line is not None:
            super().__init__(line)
        self.line = line

class NoBoundaryInMultipartDefect(MessageDefect):
    """A message claimed to be a multipart but had no boundary parameter."""

class StartBoundaryNotFoundDefect(MessageDefect):
    """The claimed start boundary was never found."""

class CloseBoundaryNotFoundDefect(MessageDefect):
    """A start boundary was found, but not the corresponding close boundary."""

class FirstHeaderLineIsContinuationDefect(MessageDefect):
    """A message had a continuation line as its first header line."""

class MisplacedEnvelopeHeaderDefect(MessageDefect):
    """A 'Unix-from' header was found in the middle of a header block."""

class MissingHeaderBodySeparatorDefect(MessageDefect):
    """Found line with no leading whitespace and no colon before blank line."""

# Backward-compatible alias kept from older versions of the email package.
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect

class MultipartInvariantViolationDefect(MessageDefect):
    """A message claimed to be a multipart but no subparts were found."""

class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
    """An invalid content transfer encoding was set on the multipart itself."""

class UndecodableBytesDefect(MessageDefect):
    """Header contained bytes that could not be decoded"""

class InvalidBase64PaddingDefect(MessageDefect):
    """base64 encoded sequence had an incorrect length"""

class InvalidBase64CharactersDefect(MessageDefect):
    """base64 encoded sequence had characters not in base64 alphabet"""

class InvalidBase64LengthDefect(MessageDefect):
    """base64 encoded sequence had invalid length (1 mod 4)"""
class HeaderDefect(MessageDefect):
    """Base class for a header defect."""

    def __init__(self, *args, **kw):
        # The decompiled form called (super().__init__)(*args, **kw);
        # this is the plain equivalent.
        super().__init__(*args, **kw)

class InvalidHeaderDefect(HeaderDefect):
    """Header is not valid, message gives details."""

class HeaderMissingRequiredValue(HeaderDefect):
    """A header that must have a value had none"""

class NonPrintableDefect(HeaderDefect):
    """ASCII characters outside the ascii-printable range found"""

    def __init__(self, non_printables):
        super().__init__(non_printables)
        self.non_printables = non_printables

    def __str__(self):
        return 'the following ASCII non-printables found in header: {}'.format(self.non_printables)

class ObsoleteHeaderDefect(HeaderDefect):
    """Header uses syntax declared obsolete by RFC 5322"""

class NonASCIILocalPartDefect(HeaderDefect):
    """local_part contains non-ASCII characters"""
"e@man1ac.me"
] | e@man1ac.me |
34c74b16d3dacd0b0bdc29e67b46cbf0d0df9207 | 6323d2118a2e0986ec1ec3c021b56ffa792c3083 | /src/main/java/com/jueee/learnspark/dataanalysis/chapter04/P5DataPartitioning.py | 3bb681c030222d28e0cf24c895a2a6cb26443958 | [] | no_license | weixiaodai1/SparkFastDataAnalysis | 73620d0ca982ff8dc0361fb61756d198813288ab | d7cb6a24bf88cf31b54fe7b497659643bc87812a | refs/heads/master | 2022-12-30T05:15:10.514506 | 2020-10-21T11:06:29 | 2020-10-21T11:06:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py |
# Python 自定义分区方式
import urllib.parse
def hash_domain(url):
return hash(urllib.parse.urlparse(url).netloc)
# 使用
from pyspark import SparkConf,SparkContext
conf = SparkConf().setMaster("local").setAppName("MY App")
sc = SparkContext(conf = conf)
lines = sc.textFile("README.md")
lines.partitionBy(20,hash_domain) # 创建20个分区 | [
"hzweiyongqiang@corp.netease.com"
] | hzweiyongqiang@corp.netease.com |
7f67a514b7783a72479b109643a245885ec88de4 | 73d511f98dac31ecbc90c7df9af092cd9b4f4421 | /webapp/ENV/lib/python3.6/site-packages/dask/dataframe/utils.py | 95ac23b77f4587d15314d5b78a40a6ee56be2825 | [
"MIT"
] | permissive | linkehub/linkehub_api | f704f6272b5d103d6d833eae1a11fbb72f0b8233 | b5579a6156d6ae01f0cbd8526c8ed8264b5deeb5 | refs/heads/master | 2022-10-11T14:32:37.548985 | 2018-05-29T11:57:29 | 2018-05-29T16:59:54 | 135,220,244 | 0 | 1 | MIT | 2022-09-23T21:51:08 | 2018-05-29T00:05:57 | Python | UTF-8 | Python | false | false | 25,266 | py | from __future__ import absolute_import, division, print_function
import re
import textwrap
from distutils.version import LooseVersion
from collections import Iterator
import sys
import traceback
from contextlib import contextmanager
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.api.types import is_categorical_dtype, is_scalar
try:
from pandas.api.types import is_datetime64tz_dtype
except ImportError:
# pandas < 0.19.2
from pandas.core.common import is_datetime64tz_dtype
from ..core import get_deps
from ..local import get_sync
from ..utils import asciitable, is_arraylike
PANDAS_VERSION = LooseVersion(pd.__version__)
def shard_df_on_index(df, divisions):
    """ Shard a DataFrame by ranges on its index

    Yields ``len(divisions) + 1`` frames after sorting by index: the rows
    before the first division, the rows between each consecutive pair, and
    the rows from the last division onwards. With no divisions, yields the
    whole frame unchanged.

    Examples
    --------
    >>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
    >>> shards = list(shard_df_on_index(df, [2, 4]))
    >>> shards[0]
        a  b
    0   0  5
    1  10  4
    >>> shards[1]
        a  b
    2  20  3
    3  30  2
    >>> shards[2]
        a  b
    4  40  1
    """
    if isinstance(divisions, Iterator):
        divisions = list(divisions)
    if not len(divisions):
        yield df
    else:
        divisions = np.array(divisions)
        df = df.sort_index()
        index = df.index
        # searchsorted needs an ordering; plain categoricals are unordered,
        # so promote them before searching.
        if is_categorical_dtype(index):
            index = index.as_ordered()
        indices = index.searchsorted(divisions)
        yield df.iloc[:indices[0]]
        for i in range(len(indices) - 1):
            yield df.iloc[indices[i]: indices[i + 1]]
        yield df.iloc[indices[-1]:]
# Fragments spliced into function docstrings by
# insert_meta_param_description(): the parameter's type line and its
# wrapped description body.
_META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional"
_META_DESCRIPTION = """\
An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and
column names of the output. This metadata is necessary for many algorithms
in dask dataframe to work. For ease of use, some alternative inputs are
also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``
or iterable of ``(name, dtype)`` can be provided. Instead of a series, a
tuple of ``(name, dtype)`` can be used. If not provided, dask will try to
infer the metadata. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
"""
def insert_meta_param_description(*args, **kwargs):
    """Replace `$META` in docstring with param description.

    If pad keyword is provided, will pad description by that number of
    spaces (default is 8).
    """
    # Called with keyword arguments only -> act as a decorator factory.
    if not args:
        return lambda f: insert_meta_param_description(f, **kwargs)
    f = args[0]
    indent = " " * kwargs.get('pad', 8)
    body = textwrap.wrap(_META_DESCRIPTION, initial_indent=indent,
                         subsequent_indent=indent, width=78)
    descr = '{0}\n{1}'.format(_META_TYPES, '\n'.join(body))
    if f.__doc__:
        if '$META' in f.__doc__:
            f.__doc__ = f.__doc__.replace('$META', descr)
        else:
            # Put it at the end of the parameters section
            parameter_header = 'Parameters\n%s----------' % indent[4:]
            first, last = re.split('Parameters\\n[ ]*----------', f.__doc__)
            parameters, rest = last.split('\n\n', 1)
            f.__doc__ = '{0}{1}{2}\n{3}{4}\n\n{5}'.format(first, parameter_header,
                                                          parameters, indent[4:],
                                                          descr, rest)
    return f
@contextmanager
def raise_on_meta_error(funcname=None, udf=False):
    """Reraise errors in this block to show metadata inference failure.

    Parameters
    ----------
    funcname : str, optional
        If provided, will be added to the error message to indicate the
        name of the method that failed.
    udf : bool, optional
        If True, extend the message with a hint that a user-supplied
        function caused the failure and that ``meta=`` should be passed.
    """
    try:
        yield
    except Exception as e:
        # Capture the inner traceback so it can be embedded in the message;
        # the original exception is replaced by a ValueError below.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = ''.join(traceback.format_tb(exc_traceback))
        msg = "Metadata inference failed{0}.\n\n"
        if udf:
            msg += ("You have supplied a custom function and Dask is unable to \n"
                    "determine the type of output that that function returns. \n\n"
                    "To resolve this please provide a meta= keyword.\n"
                    "The docstring of the Dask function you ran should have more information.\n\n")
        msg += ("Original error is below:\n"
                "------------------------\n"
                "{1}\n\n"
                "Traceback:\n"
                "---------\n"
                "{2}")
        msg = msg.format(" in `{0}`".format(funcname) if funcname else "", repr(e), tb)
        raise ValueError(msg)
# Sentinel category used to mark categoricals whose categories are unknown.
UNKNOWN_CATEGORIES = '__UNKNOWN_CATEGORIES__'
def has_known_categories(x):
    """Returns whether the categories in `x` are known.

    Parameters
    ----------
    x : Series or CategoricalIndex
    """
    # Dask objects carry their pandas metadata on `_meta`; unwrap if present.
    obj = getattr(x, '_meta', x)
    if isinstance(obj, pd.Series):
        categories = obj.cat.categories
    elif isinstance(obj, pd.CategoricalIndex):
        categories = obj.categories
    else:
        raise TypeError("Expected Series or CategoricalIndex")
    return UNKNOWN_CATEGORIES not in categories
def strip_unknown_categories(x):
    """Replace any unknown categoricals with empty categoricals.

    Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results.
    Operates on a copy for Series/DataFrame inputs; non-categorical input
    is returned unchanged.
    """
    if isinstance(x, (pd.Series, pd.DataFrame)):
        x = x.copy()
        if isinstance(x, pd.DataFrame):
            cat_mask = x.dtypes == 'category'
            if cat_mask.any():
                cats = cat_mask[cat_mask].index
                for c in cats:
                    if not has_known_categories(x[c]):
                        x[c].cat.set_categories([], inplace=True)
        elif isinstance(x, pd.Series):
            if is_categorical_dtype(x.dtype) and not has_known_categories(x):
                x.cat.set_categories([], inplace=True)
        # The index is handled for both Series and DataFrame inputs.
        if (isinstance(x.index, pd.CategoricalIndex) and not
                has_known_categories(x.index)):
            x.index = x.index.set_categories([])
    elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x):
        x = x.set_categories([])
    return x
def clear_known_categories(x, cols=None, index=True):
    """Set categories to be unknown.

    Parameters
    ----------
    x : DataFrame, Series, Index
    cols : iterable, optional
        If x is a DataFrame, set only categoricals in these columns to unknown.
        By default, all categorical columns are set to unknown categoricals
    index : bool, optional
        If True and x is a Series or DataFrame, set the clear known categories
        in the index as well.
    """
    if isinstance(x, (pd.Series, pd.DataFrame)):
        x = x.copy()
        if isinstance(x, pd.DataFrame):
            mask = x.dtypes == 'category'
            if cols is None:
                cols = mask[mask].index
            elif not mask.loc[cols].all():
                raise ValueError("Not all columns are categoricals")
            for c in cols:
                # Replacing the categories with the sentinel marks them unknown.
                x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
        elif isinstance(x, pd.Series):
            if is_categorical_dtype(x.dtype):
                x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
            if index and isinstance(x.index, pd.CategoricalIndex):
                x.index = x.index.set_categories([UNKNOWN_CATEGORIES])
    elif isinstance(x, pd.CategoricalIndex):
        x = x.set_categories([UNKNOWN_CATEGORIES])
    return x
def _empty_series(name, dtype, index=None):
    # Build a zero-length series of the requested dtype. A 'category' dtype
    # gets the UNKNOWN_CATEGORIES sentinel so its categories read as unknown.
    if isinstance(dtype, str) and dtype == 'category':
        return pd.Series(pd.Categorical([UNKNOWN_CATEGORIES]),
                         name=name, index=index).iloc[:0]
    return pd.Series([], dtype=dtype, name=name, index=index)
def make_meta(x, index=None):
    """Create an empty pandas object containing the desired metadata.

    Parameters
    ----------
    x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
        To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
        an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
        tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
        should match the desired output. If a dtype or scalar, a scalar of the
        same dtype is returned.
    index : pd.Index, optional
        Any pandas index to use in the metadata. If none provided, a
        `RangeIndex` will be used.

    Examples
    --------
    >>> make_meta([('a', 'i8'), ('b', 'O')])
    Empty DataFrame
    Columns: [a, b]
    Index: []
    >>> make_meta(('a', 'f8'))
    Series([], Name: a, dtype: float64)
    >>> make_meta('i8')
    1
    """
    # Dask collections already carry their metadata.
    if hasattr(x, '_meta'):
        return x._meta
    # Pandas / array-like objects: an empty slice preserves dtypes and names.
    if isinstance(x, (pd.Series, pd.DataFrame)):
        return x.iloc[0:0]
    elif isinstance(x, pd.Index):
        return x[0:0]
    elif is_arraylike(x):
        return x[:0]
    index = index if index is None else index[0:0]
    if isinstance(x, dict):
        return pd.DataFrame({c: _empty_series(c, d, index=index)
                             for (c, d) in x.items()}, index=index)
    if isinstance(x, tuple) and len(x) == 2:
        return _empty_series(x[0], x[1], index=index)
    elif isinstance(x, (list, tuple)):
        if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
            raise ValueError("Expected iterable of tuples of (name, dtype), "
                             "got {0}".format(x))
        return pd.DataFrame({c: _empty_series(c, d, index=index) for (c, d) in x},
                            columns=[c for c, d in x], index=index)
    elif not hasattr(x, 'dtype') and x is not None:
        # could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implictly converted to `dtype('f8')`, which we don't
        # want here.
        try:
            dtype = np.dtype(x)
            return _scalar_from_dtype(dtype)
        except Exception:
            # Continue on to next check
            pass
    if is_scalar(x):
        return _nonempty_scalar(x)
    raise TypeError("Don't know how to create metadata from {0}".format(x))
# pd.UInt64Index only exists from pandas 0.20 onwards, so include it
# conditionally.
if PANDAS_VERSION >= "0.20.0":
    _numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)
else:
    _numeric_index_types = (pd.Int64Index, pd.Float64Index)
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in _numeric_index_types:
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(['a', 'b'], name=idx.name)
elif typ is pd.DatetimeIndex:
start = '1970-01-01'
# Need a non-monotonic decreasing index to avoid issues with
# partial string indexing see https://github.com/dask/dask/issues/2389
# and https://github.com/pandas-dev/pandas/issues/16515
# This doesn't mean `_meta_nonempty` should ever rely on
# `self.monotonic_increasing` or `self.monotonic_decreasing`
data = [start, '1970-01-02'] if idx.freq is None else None
return pd.DatetimeIndex(data, start=start, periods=2, freq=idx.freq,
tz=idx.tz, name=idx.name)
elif typ is pd.PeriodIndex:
return pd.PeriodIndex(start='1970-01-01', periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, 'D')
data = [start, start + 1] if idx.freq is None else None
return pd.TimedeltaIndex(data, start=start, periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.CategoricalIndex:
if len(idx.categories) == 0:
data = _nonempty_index(idx.categories)
cats = None
else:
data = _nonempty_index(_nonempty_index(idx.categories))
cats = idx.categories
return pd.CategoricalIndex(data, categories=cats,
ordered=idx.ordered, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(l) for l in idx.levels]
labels = [[0, 0] for i in idx.levels]
return pd.MultiIndex(levels=levels, labels=labels, names=idx.names)
raise TypeError("Don't know how to handle index of "
"type {0}".format(type(idx).__name__))
# Map a numpy ``dtype.kind`` character to a representative fake scalar of
# that kind; consumed by _scalar_from_dtype when fabricating nonempty
# metadata values.
_simple_fake_mapping = {
    'b': np.bool_(True),
    'V': np.void(b' '),
    'M': np.datetime64('1970-01-01'),
    'm': np.timedelta64(1),
    'S': np.str_('foo'),  # NOTE(review): bytes kind maps to a unicode scalar; presumably coerced downstream -- confirm
    'a': np.str_('foo'),
    'U': np.unicode_('foo'),
    'O': 'foo'
}
def _scalar_from_dtype(dtype):
if dtype.kind in ('i', 'f', 'u'):
return dtype.type(1)
elif dtype.kind == 'c':
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ('m', 'M') else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
def _nonempty_scalar(x):
if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)):
return x
elif np.isscalar(x):
dtype = x.dtype if hasattr(x, 'dtype') else np.dtype(type(x))
return _scalar_from_dtype(dtype)
else:
raise TypeError("Can't handle meta of type "
"'{0}'".format(type(x).__name__))
def _nonempty_series(s, idx):
dtype = s.dtype
if is_datetime64tz_dtype(dtype):
entry = pd.Timestamp('1970-01-01', tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = None
data = pd.Categorical(data, categories=cats,
ordered=s.cat.ordered)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
return pd.Series(data, name=s.name, index=idx)
def meta_nonempty(x):
    """Create a nonempty pandas object from the given metadata.

    Returns a pandas DataFrame, Series, or Index that contains two rows
    of fake data.
    """
    if isinstance(x, pd.Index):
        return _nonempty_index(x)
    if isinstance(x, pd.Series):
        return _nonempty_series(x, _nonempty_index(x.index))
    if isinstance(x, pd.DataFrame):
        fake_idx = _nonempty_index(x.index)
        # Build columns by position first (labels may be duplicated), then
        # restore the original labels afterwards.
        cols = {pos: _nonempty_series(x.iloc[:, pos], fake_idx)
                for pos, _ in enumerate(x.columns)}
        frame = pd.DataFrame(cols, index=fake_idx,
                             columns=np.arange(len(x.columns)))
        frame.columns = x.columns
        return frame
    if is_scalar(x):
        return _nonempty_scalar(x)
    raise TypeError("Expected Index, Series, DataFrame, or scalar, "
                    "got {0}".format(type(x).__name__))
def check_meta(x, meta, funcname=None, numeric_equal=True):
    """Check that the dask metadata matches the result.

    If metadata matches, ``x`` is passed through unchanged. A nice error is
    raised if metadata doesn't match.

    Parameters
    ----------
    x : DataFrame, Series, or Index
    meta : DataFrame, Series, or Index
        The expected metadata that ``x`` should match
    funcname : str, optional
        The name of the function in which the metadata was specified. If
        provided, the function name will be included in the error message to be
        more helpful to users.
    numeric_equal : bool, optional
        If True, integer and floating dtypes compare equal. This is useful due
        to panda's implicit conversion of integer to floating upon encountering
        missingness, which is hard to infer statically.
    """
    # Use a real (empty) set here; the previous ``{}`` was an empty dict,
    # which happened to work for membership tests but was the wrong type.
    eq_types = {'i', 'f'} if numeric_equal else set()

    def equal_dtypes(a, b):
        if is_categorical_dtype(a) != is_categorical_dtype(b):
            return False
        # '-' is the fillna() placeholder for a column missing on one side;
        # a dtype never matches a missing column.  Compare with ``==``:
        # the previous ``is`` compared string identity and only worked
        # because CPython happens to intern short literals.
        if (a == '-' or b == '-'):
            return False
        if is_categorical_dtype(a) and is_categorical_dtype(b):
            # Pandas 0.21 CategoricalDtype compat
            if (PANDAS_VERSION >= '0.21.0' and
                    (UNKNOWN_CATEGORIES in a.categories or
                     UNKNOWN_CATEGORIES in b.categories)):
                return True
            return a == b
        return (a.kind in eq_types and b.kind in eq_types) or (a == b)

    if not isinstance(meta, (pd.Series, pd.Index, pd.DataFrame)):
        raise TypeError("Expected partition to be DataFrame, Series, or "
                        "Index, got `%s`" % type(meta).__name__)

    if type(x) != type(meta):
        errmsg = ("Expected partition of type `%s` but got "
                  "`%s`" % (type(meta).__name__, type(x).__name__))
    elif isinstance(meta, pd.DataFrame):
        dtypes = pd.concat([x.dtypes, meta.dtypes], axis=1)
        bad = [(col, a, b) for col, a, b in dtypes.fillna('-').itertuples()
               if not equal_dtypes(a, b)]
        if not bad:
            return x
        errmsg = ("Partition type: `%s`\n%s" %
                  (type(meta).__name__,
                   asciitable(['Column', 'Found', 'Expected'], bad)))
    else:
        if equal_dtypes(x.dtype, meta.dtype):
            return x
        errmsg = ("Partition type: `%s`\n%s" %
                  (type(meta).__name__,
                   asciitable(['', 'dtype'], [('Found', x.dtype),
                                              ('Expected', meta.dtype)])))

    raise ValueError("Metadata mismatch found%s.\n\n"
                     "%s" % ((" in `%s`" % funcname if funcname else ""),
                             errmsg))
def index_summary(idx, name=None):
    """One-line summary of an Index: "<name>: <n> entries[, first to last]".

    ``name`` defaults to the index's class name.
    """
    if name is None:
        name = idx.__class__.__name__
    count = len(idx)
    tail = ', {} to {}'.format(idx[0], idx[-1]) if count else ''
    return "{}: {} entries{}".format(name, count, tail)
###############################################################
# Testing
###############################################################
def _check_dask(dsk, check_names=True, check_dtypes=True, result=None):
    """Compute a dask collection and assert its ``_meta`` agrees with the result.

    Recurses into the index of Series/DataFrame collections.  Returns the
    computed result so callers can reuse it; plain (non-dask) objects are
    passed through unchanged.
    """
    import dask.dataframe as dd
    if hasattr(dsk, 'dask'):
        if result is None:
            result = dsk.compute(get=get_sync)
        if isinstance(dsk, dd.Index):
            assert isinstance(result, pd.Index), type(result)
            assert isinstance(dsk._meta, pd.Index), type(dsk._meta)
            if check_names:
                assert dsk.name == result.name
                assert dsk._meta.name == result.name
                if isinstance(result, pd.MultiIndex):
                    assert result.names == dsk._meta.names
            if check_dtypes:
                assert_dask_dtypes(dsk, result)
        elif isinstance(dsk, dd.Series):
            assert isinstance(result, pd.Series), type(result)
            assert isinstance(dsk._meta, pd.Series), type(dsk._meta)
            if check_names:
                assert dsk.name == result.name, (dsk.name, result.name)
                assert dsk._meta.name == result.name
            if check_dtypes:
                assert_dask_dtypes(dsk, result)
            # also validate the index against the computed index
            _check_dask(dsk.index, check_names=check_names,
                        check_dtypes=check_dtypes, result=result.index)
        elif isinstance(dsk, dd.DataFrame):
            assert isinstance(result, pd.DataFrame), type(result)
            assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
            assert isinstance(dsk._meta, pd.DataFrame), type(dsk._meta)
            if check_names:
                tm.assert_index_equal(dsk.columns, result.columns)
                tm.assert_index_equal(dsk._meta.columns, result.columns)
            if check_dtypes:
                assert_dask_dtypes(dsk, result)
            # also validate the index against the computed index
            _check_dask(dsk.index, check_names=check_names,
                        check_dtypes=check_dtypes, result=result.index)
        elif isinstance(dsk, dd.core.Scalar):
            assert (np.isscalar(result) or
                    isinstance(result, (pd.Timestamp, pd.Timedelta)))
            if check_dtypes:
                assert_dask_dtypes(dsk, result)
        else:
            msg = 'Unsupported dask instance {0} found'.format(type(dsk))
            raise AssertionError(msg)
        return result
    return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if isinstance(a, pd.DataFrame):
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def assert_eq(a, b, check_names=True, check_dtypes=True,
              check_divisions=True, check_index=True, **kwargs):
    """Assert that two dask/pandas objects compute to equivalent results.

    Validates divisions, key names, and metadata before comparing the
    concrete values with pandas' testing utilities.
    """
    if check_divisions:
        assert_divisions(a)
        assert_divisions(b)
        if hasattr(a, 'divisions') and hasattr(b, 'divisions'):
            # division boundary *types* must agree after numpy -> python
            # scalar conversion
            at = type(np.asarray(a.divisions).tolist()[0])
            bt = type(np.asarray(b.divisions).tolist()[0])
            assert at == bt, (at, bt)
    assert_sane_keynames(a)
    assert_sane_keynames(b)
    a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)
    b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)
    if not check_index:
        a = a.reset_index(drop=True)
        b = b.reset_index(drop=True)
    if isinstance(a, pd.DataFrame):
        tm.assert_frame_equal(_maybe_sort(a), _maybe_sort(b), **kwargs)
    elif isinstance(a, pd.Series):
        tm.assert_series_equal(_maybe_sort(a), _maybe_sort(b),
                               check_names=check_names, **kwargs)
    elif isinstance(a, pd.Index):
        tm.assert_index_equal(a, b, **kwargs)
    else:
        # scalar results: exact equality, NaN == NaN, or numeric closeness
        if a == b:
            return True
        if np.isnan(a):
            assert np.isnan(b)
        else:
            assert np.allclose(a, b)
    return True
def assert_dask_graph(dask, label):
    """Assert that some key in the dask graph starts with ``label``."""
    if hasattr(dask, 'dask'):
        dask = dask.dask
    assert isinstance(dask, dict)
    for key in dask:
        # tuple keys are (name, index, ...); the name carries the label
        name = key[0] if isinstance(key, tuple) else key
        if name.startswith(label):
            return True
    raise AssertionError("given dask graph doesn't contain label: {label}"
                         .format(label=label))
def assert_divisions(ddf):
    """Check that every partition's index values fall within its divisions.

    Silently returns for objects that are not dask collections or whose
    divisions are unknown.
    """
    if not hasattr(ddf, 'divisions') or not hasattr(ddf, 'index'):
        return
    if not ddf.known_divisions:
        return

    def first_level(x):
        # for a DataFrame/Series look at (the first level of) its index
        if isinstance(x, pd.Index):
            return x
        return x.index.get_level_values(0)

    parts = get_sync(ddf.dask, ddf.__dask_keys__())
    for i, part in enumerate(parts[:-1]):
        if len(part):
            assert first_level(part).min() >= ddf.divisions[i]
            assert first_level(part).max() < ddf.divisions[i + 1]
    last = parts[-1]
    if len(last):
        # the final division boundary is inclusive
        assert first_level(last).min() >= ddf.divisions[-2]
        assert first_level(last).max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
    """Check that every task-key name in the graph is a sane identifier."""
    if not hasattr(ddf, 'dask'):
        return
    for key in ddf.dask.keys():
        # peel nested tuple keys down to the bare name string
        while isinstance(key, tuple):
            key = key[0]
        assert isinstance(key, (str, bytes))
        assert len(key) < 100
        assert ' ' not in key
        if sys.version_info[0] >= 3:
            # the part before the token hash must be a valid identifier
            assert key.split('-')[0].isidentifier()
def assert_dask_dtypes(ddf, res, numeric_equal=True):
    """Check that the dask metadata matches the result.

    If `numeric_equal`, integer and floating dtypes compare equal. This is
    useful due to the implicit conversion of integer to floating upon
    encountering missingness, which is hard to infer statically."""
    eq_types = {'O', 'S', 'U', 'a'}  # treat object and strings alike
    if numeric_equal:
        eq_types.update(('i', 'f'))

    def compatible(a, b):
        return (a.kind in eq_types and b.kind in eq_types) or (a == b)

    if isinstance(res, pd.DataFrame):
        both = pd.concat([ddf._meta.dtypes, res.dtypes], axis=1)
        for _, a, b in both.itertuples():
            assert compatible(a, b)
    elif isinstance(res, (pd.Series, pd.Index)):
        assert compatible(ddf._meta.dtype, res.dtype)
    elif hasattr(ddf._meta, 'dtype'):
        if hasattr(res, 'dtype'):
            b = res.dtype
        else:
            # bare python scalar: compare against its inferred numpy dtype
            assert np.isscalar(res)
            b = np.dtype(type(res))
        assert compatible(ddf._meta.dtype, b)
    else:
        assert type(ddf._meta) == type(res)
def assert_max_deps(x, n, eq=True):
    """Assert the widest dependency fan-in in ``x``'s graph is == (or <=) ``n``."""
    dependencies, _ = get_deps(x.dask)
    widest = max(map(len, dependencies.values()))
    if eq:
        assert widest == n
    else:
        assert widest <= n
| [
"diego.marcolino.silva@gmail.com"
] | diego.marcolino.silva@gmail.com |
aaa1bebc04e41b15d7bbd59b3e874ecfad08e1e6 | ebde1fadfbe336fa52bc20c8a2f74de8d1d90cf3 | /src/moca_modules/moca_share/__init__.py | 53ea1bfc999d299bbc568895421ca67f221548ec | [
"MIT"
] | permissive | el-ideal-ideas/MocaTwitterUtils | be2481ce9eb0f9e53e8e0bd54b1b265c80e4f959 | 544a260600ade1b8cd4e0a2d2967c2fb6a8f38d3 | refs/heads/master | 2023-02-18T23:27:31.056121 | 2021-01-23T07:41:16 | 2021-01-23T07:41:16 | 321,014,400 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # -- Imports --------------------------------------------------------------------------
from .MocaMultiProcessLock import MocaMultiProcessLock
from .MocaSharedMemory import MocaSharedMemory
# -------------------------------------------------------------------------- Imports --
"""
This module can share data between processes.
Requirements
------------
None
""" | [
"el.idealideas@gmail.com"
] | el.idealideas@gmail.com |
21f9593176f7176406f6f4e661783ae4fb1f099c | 15a269d168041169148391bf7a3964b797403738 | /src/internalconf.py | 3166a8becb773e8022bfd8290737c0e6fd60197f | [] | no_license | playbutton-media/publisher | e5344be02942ae9f878d72a2e8b7af52df3ac9ac | f4e53d0110833b0e72ccf07f3b6bfdad45d8abf8 | refs/heads/master | 2020-05-31T10:40:26.505414 | 2019-06-04T17:09:05 | 2019-06-04T17:09:05 | 190,244,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py | import os
import re
SCHEME_FILTERS = ["slug"]
CONFIG_TYPES = {
"paths" : {
"renamed": {
"videos" : str,
"audios" : str,
"covers" : str,
"covers-lowres": str,
"project-files": str,
"descriptions" : str,
},
"files" : {
"videos" : str,
"audios" : str,
"covers" : str,
"covers-lowres": str,
"project-files": str,
"descriptions" : str,
},
"misc" : {
"track_data_file": str
},
"ftp" : {
"videos": str,
"audios": str,
"covers": str
}
},
"titles" : {
"track" : str,
"single": str,
"remix" : str,
"ep" : str,
"album" : str,
"videos": str
},
"defaults" : {
"artist" : str,
"covers-description": str
},
"options" : {
"automatic" : {
"recover" : bool,
"open-dirs" : bool,
"create-dirs": bool,
},
"show-help" : bool,
"contract-rename-map": bool,
"confirm" : {
"track-title" : bool,
"track-number" : bool,
'rename-tracks' : bool,
"apply-metadata": bool,
}
},
"description-languages": list,
"various-artists" : {
"threshold" : str,
"separator" : str,
"default-name": str,
"ask" : bool
}
}
# cfgwiz = config wizard
CFGWIZ_TRUES = 'True true yes on'.split(' ')
CFGWIZ_FALSES = 'False false no off'.split(' ')
CFGWIZ_LISTS = re.compile('\[?(?:([^,]+,))+\]?')
LOG_FORMATS = {
'extended': "[{levelname:>8}@{module}.{funcName}:{lineno}] {message}",
'basic' : "[{levelname:^8}] {message}"
}
COVER_ART_FORMATS = ['wide', 'square']
NUMERIC_LOG_LEVELS = {
0:'FATAL',
1:'ERROR',
2:'WARNING',
3:'INFO',
4:'DEBUG',
}
LATEST_TRACKDATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'latest.json') | [
"caseyoneill78@hotmail"
] | caseyoneill78@hotmail |
81b6659ce41232ce1546045cddc849edadb44f22 | 3a2af7b4b801d9ba8d78713dcd1ed57ee35c0992 | /zerver/webhooks/errbit/view.py | a47ccae2f0fc9f5a3b1841a1b5be747b0a7ea1b3 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | timabbott/zulip | 2b69bd3bb63539adbfc4c732a3ff9d52657f40ac | 42f239915526180a1a0cd6c3761c0efcd13ffe6f | refs/heads/master | 2023-08-30T21:45:39.197724 | 2020-02-13T23:09:22 | 2020-06-25T21:46:33 | 43,171,533 | 6 | 9 | Apache-2.0 | 2020-02-24T20:12:52 | 2015-09-25T19:34:16 | Python | UTF-8 | Python | false | false | 1,333 | py | from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
ERRBIT_TOPIC_TEMPLATE = '{project_name}'
ERRBIT_MESSAGE_TEMPLATE = '[{error_class}]({error_url}): "{error_message}" occurred.'
@api_key_only_webhook_view('Errbit')
@has_request_variables
def api_errbit_webhook(request: HttpRequest, user_profile: UserProfile,
                       payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    # Entry point for Errbit's webhook: derive a topic and message body from
    # the JSON payload and forward them to the user's configured stream.
    subject = get_subject(payload)
    body = get_body(payload)
    check_send_webhook_message(request, user_profile, subject, body)
    return json_success()
def get_subject(payload: Dict[str, Any]) -> str:
    """Topic name: "<app name> / <environment>" of the failing application."""
    problem = payload['problem']
    project = '{} / {}'.format(problem['app_name'], problem['environment'])
    return ERRBIT_TOPIC_TEMPLATE.format(project_name=project)
def get_body(payload: Dict[str, Any]) -> str:
    """Message body linking the error class/message to its Errbit URL."""
    problem = payload['problem']
    return ERRBIT_MESSAGE_TEMPLATE.format(
        error_url=problem['url'],
        error_class=problem['error_class'],
        error_message=problem['message'],
    )
| [
"tabbott@zulipchat.com"
] | tabbott@zulipchat.com |
cba2a83f5faa58b5979086c4da00a35ba0cca5fc | 53be75de6517563894e58744c63679044d7333c1 | /ICS3U/Unit 3/Nicholas_Snair_ConditionalAssignment1.0.py | 40dda965e4534070d9ae69aa3d0cbc8702cf13e2 | [] | no_license | mhsNSnair/My-Work | a3a9277822b0af7ef619598c87d4346e02cbc331 | ec06c8ba52ef6722d533af5a7d1c3c3fc1c34db1 | refs/heads/master | 2022-10-17T07:04:52.396458 | 2020-06-15T22:19:44 | 2020-06-15T22:19:44 | 271,899,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,140 | py | """
course: ICS3U
filename: Nicholas_Conditional_Assignment
date: 06/03/20
name: Nicholas Snair
description: *****
"""
print('\033[1;39;40m'"Welcome to the Merivale Computers R Us. We look forward to helping you create the perfect computer for your needs. \n\n First, you will need a monitor. Here are two options:\n\n 1 - 27 inch Dell S2719DGF. This widescreen monitor is nice for watching widescreen movies. The cost is $275.99 \n\n 2 - 19 inch Dell P910S. This smaller screen has an aspect ratio of 5:4 and is more economical. The cost is $149.99 \n\n Please input your selection (1 or 2):")
moniter = int(input('\033[1;31;40m'))
if moniter == 1:
moniter = 275.99
moniterType = 'Dell S2719DGF'
elif moniter == 2:
moniter = 149.99
moniterType = "Dell P910S"
print('\033[1;39;40m', "\n ....\n\nThe CPU is considered the brain of the computer. Here are two CPU options: \n\n1 - The AMD Ryzen 5 3600 is great for gaming and doesn’t overheat. The Ryzen 5 costs $149.99\n\n2 - The AMD Ryzen 3 2200G is well suited to home office applications such as Microsoft Office. The Ryzen 3 costs $84.99\n\nPlease input your selection (1 or 2):")
cpu = int(input('\033[1;31;40m'))
if cpu == 1:
cpu = 149.99
cpuType = 'AMD Ryzen 5 3600'
elif cpu == 2:
cpu = 84.99
cpuType = 'AMD Ryzen 3 2200G'
print('\033[1;39;40m', "\n ....\n\nThe CPU is considered the brain of the computer. Here are two CPU options: \n\n1 - The AMD Ryzen 5 3600 is great for gaming and doesn’t overheat. The Ryzen 5 costs $149.99\n\n2 - The AMD Ryzen 3 2200G is well suited to home office applications such as Microsoft Office. The Ryzen 3 costs $84.99\n\nPlease input your selection (1 or 2):")
keyboard = int(input('\033[1;31;40m'))
if keyboard == 1:
keyboard = 149.99
keyboardType = 'AMD Ryzen 5 3600'
elif keyboard == 2:
keyboard = 84.99
keyboardType = 'AMD Ryzen 3 2200G'
print('\033[1;39;40m', "\n ....\n\nThe CPU is considered the brain of the computer. Here are two CPU options: \n\n1 - The AMD Ryzen 5 3600 is great for gaming and doesn’t overheat. The Ryzen 5 costs $149.99\n\n2 - The AMD Ryzen 3 2200G is well suited to home office applications such as Microsoft Office. The Ryzen 3 costs $84.99\n\nPlease input your selection (1 or 2):")
motherboard = int(input('\033[1;31;40m'))
if motherboard == 1:
motherboard = 149.99
motherboardType = 'AMD Ryzen 5 3600'
elif motherboard == 2:
motherboard = 84.99
motherboardType = 'AMD Ryzen 3 2200G'
print('\033[1;39;40m', "\n ....\n\nThe CPU is considered the brain of the computer. Here are two CPU options: \n\n1 - The AMD Ryzen 5 3600 is great for gaming and doesn’t overheat. The Ryzen 5 costs $149.99\n\n2 - The AMD Ryzen 3 2200G is well suited to home office applications such as Microsoft Office. The Ryzen 3 costs $84.99\n\nPlease input your selection (1 or 2):")
storage = int(input('\033[1;31;40m'))
if storage == 1:
storage = 149.99
storageType = 'AMD Ryzen 5 3600'
elif storage == 2:
storage = 84.99
storageType = 'AMD Ryzen 3 2200G'
print('\033[1;39;40m', "\n ....\n\nThe CPU is considered the brain of the computer. Here are two CPU options: \n\n1 - The AMD Ryzen 5 3600 is great for gaming and doesn’t overheat. The Ryzen 5 costs $149.99\n\n2 - The AMD Ryzen 3 2200G is well suited to home office applications such as Microsoft Office. The Ryzen 3 costs $84.99\n\nPlease input your selection (1 or 2):")
case = int(input('\033[1;31;40m'))
if case == 1:
case = 149.99
caseType = 'AMD Ryzen 5 3600'
elif case == 2:
case = 84.99
caseType = 'AMD Ryzen 3 2200G'
print('\033[1;39;40m', "\n ....\n\nThe CPU is considered the brain of the computer. Here are two CPU options: \n\n1 - The AMD Ryzen 5 3600 is great for gaming and doesn’t overheat. The Ryzen 5 costs $149.99\n\n2 - The AMD Ryzen 3 2200G is well suited to home office applications such as Microsoft Office. The Ryzen 3 costs $84.99\n\nPlease input your selection (1 or 2):")
powerSupply = int(input('\033[1;31;40m'))
if powerSupply == 1:
powerSupply = 149.99
powerSupplyType = 'AMD Ryzen 5 3600'
elif powerSupply == 2:
powerSupply = 84.99
powerSupplyType = 'AMD Ryzen 3 2200G'
print('\033[1;39;40m','...\n\nYour computer build is complete! Here’s a reminder of your selections:\n\n')
print(format("Component","15"), format("Selction",'15'), 'Cost\n'
,format("Moniter","15"), format(moniterType,'15'), '$',float(moniter)
,'\n',format("CPU","15"), format(cpuType,'15'), '$',float(cpu)
,'\n',format("Keyboard","15"), format(keyboardType,'15'), '$',float(keyboard)
,'\n',format("Motherboard","15"), format(motherboardType,'15'), '$',float(motherboard)
,'\n',format("Storage","15"), format(storageType,'15'), '$',float(storage)
,'\n',format("Case","15"), format(caseType,'15'), '$',float(case)
,'\n',format("Power Supply","15"), format(powerSupplyType,'15'), '$',float(powerSupply))
totalCost = moniter + cpu + keyboard + motherboard + storage + case + powerSupply
print(format('Total cost before tax:','30'),'$',totalCost)
finalCost = totalCost*1.13
print(format('Total cost after 13% HST:','30'),'$', finalCost)
| [
"noreply@github.com"
] | mhsNSnair.noreply@github.com |
fad439a94c81108870dc075e57cfbefa4bf596ac | 342af79039d05a91a9bf1494572c86449955b5c5 | /opcua/common/node.py | f31c38ba3a3407988430b8909fe38a69d5817ec0 | [] | no_license | fabio6/freeopcua-ARD-UI | 142de2588c110176384219cc5e710848af27a1e0 | fac72f6a9d0559e19e20c7d8f5f2c460397279f8 | refs/heads/master | 2021-01-19T05:25:52.597471 | 2016-07-21T19:29:49 | 2016-07-21T19:29:49 | 63,883,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,979 | py | """
High level node object, to access node attribute
and browse address space
"""
from opcua import ua
from opcua.common import events
class Node(object):
"""
High level node object, to access node attribute,
browse and populate address space.
Node objects are usefull as-is but they do not expose the entire
OPC-UA protocol. Feel free to look at the code of this class and call
directly UA services methods to optimize your code
"""
def __init__(self, server, nodeid):
self.server = server
self.nodeid = None
if isinstance(nodeid, Node):
self.nodeid = nodeid.nodeid
elif isinstance(nodeid, ua.NodeId):
self.nodeid = nodeid
elif type(nodeid) in (str, bytes):
self.nodeid = ua.NodeId.from_string(nodeid)
elif isinstance(nodeid, int):
self.nodeid = ua.NodeId(nodeid, 0)
else:
raise ua.UaError("argument to node must be a NodeId object or a string defining a nodeid found {} of type {}".format(nodeid, type(nodeid)))
def __eq__(self, other):
if isinstance(other, Node) and self.nodeid == other.nodeid:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "Node({})".format(self.nodeid)
__repr__ = __str__
def __hash__(self):
return self.nodeid.__hash__()
def get_browse_name(self):
"""
Get browse name of a node. A browse name is a QualifiedName object
composed of a string(name) and a namespace index.
"""
result = self.get_attribute(ua.AttributeIds.BrowseName)
return result.Value.Value
def get_display_name(self):
"""
get description attribute of node
"""
result = self.get_attribute(ua.AttributeIds.DisplayName)
return result.Value.Value
def get_data_type(self):
"""
get data type of node
"""
result = self.get_attribute(ua.AttributeIds.DataType)
return result.Value.Value
def get_node_class(self):
"""
get node class attribute of node
"""
result = self.get_attribute(ua.AttributeIds.NodeClass)
return result.Value.Value
def get_description(self):
"""
get description attribute class of node
"""
result = self.get_attribute(ua.AttributeIds.Description)
return result.Value.Value
def get_value(self):
"""
Get value of a node as a python type. Only variables ( and properties) have values.
An exception will be generated for other node types.
"""
result = self.get_data_value()
return result.Value.Value
def get_data_value(self):
"""
Get value of a node as a DataValue object. Only variables (and properties) have values.
An exception will be generated for other node types.
DataValue contain a variable value as a variant as well as server and source timestamps
"""
return self.get_attribute(ua.AttributeIds.Value)
def set_array_dimensions(self, value):
"""
Set attribute ArrayDimensions of node
make sure it has the correct data type
"""
v = ua.Variant(value, ua.VariantType.UInt32)
self.set_attribute(ua.AttributeIds.ArrayDimensions, ua.DataValue(v))
def get_array_dimensions(self):
"""
Read and return ArrayDimensions attribute of node
"""
res = self.get_attribute(ua.AttributeIds.ArrayDimensions)
return res.Value.Value
def set_value_rank(self, value):
"""
Set attribute ArrayDimensions of node
"""
v = ua.Variant(value, ua.VariantType.Int32)
self.set_attribute(ua.AttributeIds.ValueRank, ua.DataValue(v))
def get_value_rank(self):
"""
Read and return ArrayDimensions attribute of node
"""
res = self.get_attribute(ua.AttributeIds.ValueRank)
return res.Value.Value
def set_value(self, value, varianttype=None):
"""
Set value of a node. Only variables(properties) have values.
An exception will be generated for other node types.
value argument is either:
* a python built-in type, converted to opc-ua
optionnaly using the variantype argument.
* a ua.Variant, varianttype is then ignored
* a ua.DataValue, you then have full control over data send to server
"""
datavalue = None
if isinstance(value, ua.DataValue):
datavalue = value
elif isinstance(value, ua.Variant):
datavalue = ua.DataValue(value)
else:
datavalue = ua.DataValue(ua.Variant(value, varianttype))
self.set_attribute(ua.AttributeIds.Value, datavalue)
set_data_value = set_value
def set_writable(self, writable=True):
"""
Set node as writable by clients.
A node is always writable on server side.
"""
if writable:
self.set_attr_bit(ua.AttributeIds.AccessLevel, ua.AccessLevel.CurrentWrite)
self.set_attr_bit(ua.AttributeIds.UserAccessLevel, ua.AccessLevel.CurrentWrite)
else:
self.unset_attr_bit(ua.AttributeIds.AccessLevel, ua.AccessLevel.CurrentWrite)
self.unset_attr_bit(ua.AttributeIds.UserAccessLevel, ua.AccessLevel.CurrentWrite)
def set_attr_bit(self, attr, bit):
val = self.get_attribute(attr)
val.Value.Value = ua.set_bit(val.Value.Value, bit)
self.set_attribute(attr, val)
def unset_attr_bit(self, attr, bit):
val = self.get_attribute(attr)
val.Value.Value = ua.unset_bit(val.Value.Value, bit)
self.set_attribute(attr, val)
def set_read_only(self):
"""
Set a node as read-only for clients.
A node is always writable on server side.
"""
return self.set_writable(False)
def set_attribute(self, attributeid, datavalue):
"""
Set an attribute of a node
attributeid is a member of ua.AttributeIds
datavalue is a ua.DataValue object
"""
attr = ua.WriteValue()
attr.NodeId = self.nodeid
attr.AttributeId = attributeid
attr.Value = datavalue
params = ua.WriteParameters()
params.NodesToWrite = [attr]
result = self.server.write(params)
result[0].check()
def get_attribute(self, attr):
"""
Read one attribute of a node
result code from server is checked and an exception is raised in case of error
"""
rv = ua.ReadValueId()
rv.NodeId = self.nodeid
rv.AttributeId = attr
params = ua.ReadParameters()
params.NodesToRead.append(rv)
result = self.server.read(params)
result[0].StatusCode.check()
return result[0]
def get_attributes(self, attrs):
"""
Read several attributes of a node
list of DataValue is returned
"""
params = ua.ReadParameters()
for attr in attrs:
rv = ua.ReadValueId()
rv.NodeId = self.nodeid
rv.AttributeId = attr
params.NodesToRead.append(rv)
results = self.server.read(params)
return results
def get_children(self, refs=ua.ObjectIds.HierarchicalReferences, nodeclassmask=ua.NodeClass.Unspecified):
"""
Get all children of a node. By default hierarchical references and all node classes are returned.
Other reference types may be given:
References = 31
NonHierarchicalReferences = 32
HierarchicalReferences = 33
HasChild = 34
Organizes = 35
HasEventSource = 36
HasModellingRule = 37
HasEncoding = 38
HasDescription = 39
HasTypeDefinition = 40
GeneratesEvent = 41
Aggregates = 44
HasSubtype = 45
HasProperty = 46
HasComponent = 47
HasNotifier = 48
HasOrderedComponent = 49
"""
return self.get_referenced_nodes(refs, ua.BrowseDirection.Forward, nodeclassmask)
def get_properties(self):
"""
return properties of node.
properties are child nodes with a reference of type HasProperty and a NodeClass of Variable
"""
return self.get_children(refs=ua.ObjectIds.HasProperty, nodeclassmask=ua.NodeClass.Variable)
def get_children_descriptions(self, refs=ua.ObjectIds.HierarchicalReferences, nodeclassmask=ua.NodeClass.Unspecified, includesubtypes=True):
return self.get_references(refs, ua.BrowseDirection.Forward, nodeclassmask, includesubtypes)
def get_references(self, refs=ua.ObjectIds.References, direction=ua.BrowseDirection.Both, nodeclassmask=ua.NodeClass.Unspecified, includesubtypes=True):
"""
returns references of the node based on specific filter defined with:
refs = ObjectId of the Reference
direction = Browse direction for references
nodeclassmask = filter nodes based on specific class
includesubtypes = If true subtypes of the reference (ref) are also included
"""
desc = ua.BrowseDescription()
desc.BrowseDirection = direction
desc.ReferenceTypeId = ua.TwoByteNodeId(refs)
desc.IncludeSubtypes = includesubtypes
desc.NodeClassMask = nodeclassmask
desc.ResultMask = ua.BrowseResultMask.All
desc.NodeId = self.nodeid
params = ua.BrowseParameters()
params.View.Timestamp = ua.win_epoch_to_datetime(0)
params.NodesToBrowse.append(desc)
results = self.server.browse(params)
return results[0].References
def get_referenced_nodes(self, refs=ua.ObjectIds.References, direction=ua.BrowseDirection.Both, nodeclassmask=ua.NodeClass.Unspecified, includesubtypes=True):
"""
returns referenced nodes based on specific filter
Paramters are the same as for get_references
"""
references = self.get_references(refs, direction, nodeclassmask, includesubtypes)
nodes = []
for desc in references:
node = Node(self.server, desc.NodeId)
nodes.append(node)
return nodes
def get_type_definition(self):
"""
returns type definition of the node.
"""
references = self.get_references(refs=ua.ObjectIds.HasTypeDefinition, direction=ua.BrowseDirection.Forward)
if len(references) == 0:
return ua.ObjectIds.BaseObjectType
return references[0].NodeId.Identifier
def get_parent(self):
"""
returns parent of the node.
"""
refs = self.get_references(refs=ua.ObjectIds.HierarchicalReferences, direction=ua.BrowseDirection.Inverse)
return Node(self.server, refs[0].NodeId)
def get_child(self, path):
"""
get a child specified by its path from this node.
A path might be:
* a string representing a qualified name.
* a qualified name
* a list of string
* a list of qualified names
"""
if type(path) not in (list, tuple):
path = [path]
rpath = self._make_relative_path(path)
bpath = ua.BrowsePath()
bpath.StartingNode = self.nodeid
bpath.RelativePath = rpath
result = self.server.translate_browsepaths_to_nodeids([bpath])
result = result[0]
result.StatusCode.check()
# FIXME: seems this method may return several nodes
return Node(self.server, result.Targets[0].TargetId)
def _make_relative_path(self, path):
rpath = ua.RelativePath()
for item in path:
el = ua.RelativePathElement()
el.ReferenceTypeId = ua.TwoByteNodeId(ua.ObjectIds.HierarchicalReferences)
el.IsInverse = False
el.IncludeSubtypes = True
if isinstance(item, ua.QualifiedName):
el.TargetName = item
else:
el.TargetName = ua.QualifiedName.from_string(item)
rpath.Elements.append(el)
return rpath
    def read_raw_history(self, starttime=None, endtime=None, numvalues=0):
        """
        Read raw history of a node.
        The result code from the server is checked and an exception is raised
        in case of error.
        If numvalues is > 0 and the number of events in the period is
        > numvalues, then the result will be truncated.

        starttime/endtime: optional datetime bounds. When omitted,
        ua.DateTimeMinValue is sent — presumably interpreted by the server
        as an open bound (TODO confirm against the server implementation).
        """
        details = ua.ReadRawModifiedDetails()
        details.IsReadModified = False  # plain raw read, not read-modified
        if starttime:
            details.StartTime = starttime
        else:
            details.StartTime = ua.DateTimeMinValue
        if endtime:
            details.EndTime = endtime
        else:
            details.EndTime = ua.DateTimeMinValue
        details.NumValuesPerNode = numvalues
        details.ReturnBounds = True
        result = self.history_read(details)
        return result.HistoryData.DataValues
def history_read(self, details):
"""
Read raw history of a node, low-level function
result code from server is checked and an exception is raised in case of error
"""
valueid = ua.HistoryReadValueId()
valueid.NodeId = self.nodeid
valueid.IndexRange = ''
params = ua.HistoryReadParameters()
params.HistoryReadDetails = details
params.TimestampsToReturn = ua.TimestampsToReturn.Both
params.ReleaseContinuationPoints = False
params.NodesToRead.append(valueid)
result = self.server.history_read(params)[0]
return result
    def read_event_history(self, starttime=None, endtime=None, numvalues=0, evtype=ua.ObjectIds.BaseEventType):
        """
        Read event history of a source node.
        The result code from the server is checked and an exception is raised
        in case of error.
        If numvalues is > 0 and the number of events in the period is
        > numvalues, then the result will be truncated.

        starttime/endtime: optional datetime bounds; ua.DateTimeMinValue is
        sent when a bound is omitted.
        evtype: node id of the event type used to build the select clauses.
        Returns a list of events.EventResult objects.
        """
        # FIXME event filter must be supplied externally, the problem is the node class doesn't have a way to get
        # FIXME another node from the address space as these methods are at the server level, therefore there is
        # FIXME no way to build an event filter here (although it could be nicer for a user who doesn't want a filter)
        details = ua.ReadEventDetails()
        if starttime:
            details.StartTime = starttime
        else:
            details.StartTime = ua.DateTimeMinValue
        if endtime:
            details.EndTime = endtime
        else:
            details.EndTime = ua.DateTimeMinValue
        details.NumValuesPerNode = numvalues
        # Build the event filter from the requested event type node.
        evfilter = events.get_filter_from_event_type(Node(self.server, evtype))
        details.Filter = evfilter
        result = self.history_read_events(details)
        event_res = []
        # Map each raw event-field list back onto the filter's select clauses.
        for res in result.HistoryData.Events:
            event_res.append(events.EventResult.from_event_fields(evfilter.SelectClauses, res.EventFields))
        return event_res
def history_read_events(self, details):
"""
Read event history of a node, low-level function
result code from server is checked and an exception is raised in case of error
"""
valueid = ua.HistoryReadValueId()
valueid.NodeId = self.nodeid
valueid.IndexRange = ''
params = ua.HistoryReadParameters()
params.HistoryReadDetails = details
params.TimestampsToReturn = ua.TimestampsToReturn.Both
params.ReleaseContinuationPoints = False
params.NodesToRead.append(valueid)
result = self.server.history_read(params)[0]
return result
# Hack for convenience methods
# local import is ugly but necessary for python2 support
    # feel free to propose something better but I want to split all those
# create methods from Node
    def add_folder(*args, **kwargs):
        # Convenience method delegating to manage_nodes.create_folder.
        # No explicit `self`: when called as a method, args[0] is this node.
        from opcua.common import manage_nodes
        return manage_nodes.create_folder(*args, **kwargs)
    def add_object(*args, **kwargs):
        # Convenience method delegating to manage_nodes.create_object.
        # No explicit `self`: when called as a method, args[0] is this node.
        from opcua.common import manage_nodes
        return manage_nodes.create_object(*args, **kwargs)
    def add_variable(*args, **kwargs):
        # Convenience method delegating to manage_nodes.create_variable.
        # No explicit `self`: when called as a method, args[0] is this node.
        from opcua.common import manage_nodes
        return manage_nodes.create_variable(*args, **kwargs)
    def add_property(*args, **kwargs):
        # Convenience method delegating to manage_nodes.create_property.
        # No explicit `self`: when called as a method, args[0] is this node.
        from opcua.common import manage_nodes
        return manage_nodes.create_property(*args, **kwargs)
    def add_method(*args, **kwargs):
        # Convenience method delegating to manage_nodes.create_method.
        # No explicit `self`: when called as a method, args[0] is this node.
        from opcua.common import manage_nodes
        return manage_nodes.create_method(*args, **kwargs)
    def add_subtype(*args, **kwargs):
        # Convenience method delegating to manage_nodes.create_subtype.
        # No explicit `self`: when called as a method, args[0] is this node.
        from opcua.common import manage_nodes
        return manage_nodes.create_subtype(*args, **kwargs)
    def call_method(*args, **kwargs):
        # Convenience method delegating to methods.call_method.
        # No explicit `self`: when called as a method, args[0] is this node.
        from opcua.common import methods
        return methods.call_method(*args, **kwargs)
| [
"fabio.6.fernandez@gmail.com"
] | fabio.6.fernandez@gmail.com |
41fa610eb87a4b6cabdcaea79fabebde5ae61343 | 790eee283f432809dd16f143155184147d0c600d | /MuonDecay/data_analyze/Analyze/analysis.py | d076cd3f2ae356f2651f596af90e7bfcec46e94e | [] | no_license | afauth/mudecay | 09c594eb5567ea812e452fe62d7a2c72445f94a8 | 47b6eb70f7236bc13fab5c05562b0b9dd085de51 | refs/heads/master | 2023-07-11T04:16:59.888186 | 2021-08-30T20:56:16 | 2021-08-30T20:56:16 | 272,067,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,577 | py | # # Imports
# #==========================================================================================================
import pathlib
import pandas as pd
from data_analyze.Preliminaries.concat_csv_files import concat_csv
from data_analyze.Preliminaries.read_output_file import trigger_acquisition, retrieve_y_to_volts
from data_analyze.Spectrums.integral import simpson_integral_df
from acquisition.DataAcquisition.Conversion_Values import convert_y_to_units, trigger_slope_value
from data_analyze.FindPeaks.peaks import peaks_single_muon, peaks_muon_decay
from data_analyze.Spectrums.contours import contours_single_muon, contours_muon_decay
# .
#==========================================================================================================
def Analysis_SingleMuon(folder):
    """
    Run the single-muon analysis pipeline on an acquisition folder:
    concatenate the sub-CSV files, estimate the baseline and trigger,
    locate peaks, extract contours and integrals, and write the results
    under ``{folder}/results``.

    Parameters
    ----------
    folder: string
        This is the main folder that contains the sub_files of the acquisition and the output file.
        Example: '../documents/single_muon/1619201634.9231706'
        Please, note that the '..' is used to access a parent folder.
    """
    print('\n============\nAnalysis Single Muon...\n')
    '''
    Concatenate all sub-csv files
    '''
    df = concat_csv(path=folder)
    waveform = df[1:] #eliminate the row of the time_epoch data
    '''
    Retrieve base line and trigger, for the analysis
    '''
    baseLine = waveform.iloc[:130].mean().mean() #assume that the peaks occours until x=150; then, the baseLine is the general mean
    print(f'baseLine = {baseLine}')
    trigger_in_mV, slope_string = trigger_acquisition(folder) #reads the trigger on the output.txt file; trigger is in mV
    converter = retrieve_y_to_volts(folder)
    '''Convert trigger to units and slope to a number'''
    trigger_in_units = convert_y_to_units(value_in_volts=trigger_in_mV/1000, converter_df=converter)
    slope_number = trigger_slope_value(slope_string)
    '''
    Find peaks and problems; save to csv
    '''
    peaks, problems_peaks = peaks_single_muon(df=waveform, height=trigger_in_units, slope=slope_number, first_peak_loc=100)
    pathlib.Path(f"{folder}/results").mkdir(parents=True, exist_ok=True) #create folder to store the results
    peaks.to_csv(f"{folder}/results/peaks.csv")
    problems_peaks.to_csv(f"{folder}/results/problems.csv") #saves only the peaks problem, in case of unexpected error
    '''
    Find contours, problems and calculate integrals; save to csv
    '''
    contours, problems_contour = contours_single_muon(waveform=waveform, peak=peaks, random_left=10, random_right=15)
    # Baseline-subtract before integrating so the integral reflects peak area only.
    integral = simpson_integral_df(contours - baseLine)
    contours.to_csv(f"{folder}/results/contours.csv")
    integral.to_csv(f"{folder}/results/integral.csv")
    '''
    Concat both problem-catchers in one single csv file; rewrite previous csv file
    '''
    problems = pd.concat([problems_peaks, problems_contour])
    problems.to_csv(f"{folder}/results/problems.csv")
    print('\nending analysis...\n============\n')
# .
#==========================================================================================================
def Analysis_MuonDecay(folder):
    """
    Run the muon-decay analysis pipeline on an acquisition folder:
    concatenate the sub-CSV files, estimate the baseline and trigger,
    locate the two peaks of a decay event, extract both contours and
    integrals, and write the results under ``{folder}/results``.

    Parameters
    ----------
    folder: string
        This is the main folder that contains the sub_files of the acquisition and the output file.
        Example: '../documents/single_muon/1619201634.9231706'
        Please, note that the '..' is used to access a parent folder.
    """
    '''
    Concatenate all sub-csv files
    '''
    df = concat_csv(path=folder)
    waveform = df[1:] #eliminate the row of the time_epoch data
    '''
    Retrieve base line and trigger, for the analysis
    '''
    baseLine = waveform.iloc[:130].mean().mean() #assume that the peaks occours until x=150; then, the baseLine is the general mean
    trigger_in_mV, slope_string = trigger_acquisition(folder) #reads the trigger on the output.txt file; trigger is in mV
    converter = retrieve_y_to_volts(folder)
    '''Convert trigger to units and slope to a number'''
    trigger_in_units = convert_y_to_units(trigger_in_mV/1000, converter)
    slope_number = trigger_slope_value(slope_string)
    '''
    Find peaks and problems; save to csv
    '''
    peaks, problems_peaks = peaks_muon_decay(df=waveform, height=trigger_in_units, slope=slope_number, first_peak_loc=100)
    pathlib.Path(f"{folder}/results").mkdir(parents=True, exist_ok=True) #create folder to store the results
    peaks.to_csv(f"{folder}/results/peaks.csv")
    problems_peaks.to_csv(f"{folder}/results/problems.csv") #saves only the peaks problem, in case of unexpected error
    '''
    Find contours, problems and calculate integrals; save to csv
    '''
    # Two contour sets: presumably one per peak of the decay event (muon
    # arrival and decay electron) — confirm against contours_muon_decay.
    contours_0, contours_1, problems_contour = contours_muon_decay(waveform=waveform, peak=peaks, random_left=10, random_right=15)
    integral_0 = simpson_integral_df(contours_0 - baseLine)
    integral_1 = simpson_integral_df(contours_1 - baseLine)
    contours_0.to_csv(f"{folder}/results/contours_0.csv")
    integral_0.to_csv(f"{folder}/results/integral_0.csv")
    contours_1.to_csv(f"{folder}/results/contours_1.csv")
    integral_1.to_csv(f"{folder}/results/integral_1.csv")
    '''
    Concat both problem-catchers in one single csv file; rewrite previous csv file
    '''
    problems = pd.concat([problems_peaks, problems_contour])
    problems.to_csv(f"{folder}/results/problems.csv")
| [
"63481188+jhapreis@users.noreply.github.com"
] | 63481188+jhapreis@users.noreply.github.com |
22de5652b278417998165e7e22fcea54256b722c | e2571605a6fd778a3e9d99a5cd2de2d908509222 | /productapp/views.py | 5fb55c14b00e3cd06e4a081f64b2622cba2db868 | [] | no_license | roshi26/store | 53bb699e9153601658ce7f9443a7cc0126604952 | 301208b3de48920e8cbbeaaef47181722fddf529 | refs/heads/master | 2023-01-01T20:26:10.141855 | 2020-10-26T07:51:51 | 2020-10-26T07:51:51 | 305,916,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | from django.shortcuts import render,redirect
from django.http import HttpResponseRedirect
from django.views.generic import View,ListView,DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from productapp.models import Product
from productapp.forms import ProductForm
from django.http import HttpResponse
from django.urls import reverse_lazy
# Create your views here.
class ProductView(CreateView):
    """Create view for Product: renders the form and saves valid submissions."""
    template_name='productapp/product_form.html'
    form_class=ProductForm
    def post(self,request):
        # NOTE(review): request.method is always "POST" inside post(), so the
        # else branch below is dead code; this override also bypasses
        # CreateView's built-in form handling.
        if request.method=="POST":
            form=ProductForm(request.POST)
            if form.is_valid():
                form.save()
                # Redirect to the product list after a successful save.
                return HttpResponseRedirect('/product/list')
        else:
            form=ProductForm()
        # Re-render the form (bound, with validation errors) on failure.
        return render(request,'productapp/product_form.html',{'form':form})
class ProductListView(ListView):
    """List the products created by the currently logged-in user."""
    model=Product
    template_name='productapp/product_list.html'
    # NOTE(review): 'fields' is a form-view option, not a ListView one —
    # presumably leftover; confirm before removing.
    fields=['name','created_at', 'price', 'quality','issue_by', 'issue_to', 'brand', 'quantity', 'coupon', 'discount']
    context_object_name="object_list"
    def get_queryset(self):
        # Restrict the listing to products owned by the requesting user.
        return Product.objects.filter(created_by=self.request.user)
class ProductDetailView(DetailView):
    """Show a single product; available in the template as ``ob``."""
    model=Product
    template_name='productapp/product_detail.html'
    context_object_name="ob"
class ProductUpdateView(UpdateView):
    """Edit an existing product, then redirect to the product list."""
    model=Product
    # NOTE(review): 'issue_to' appears twice and 'quantity' is missing here,
    # unlike the creation form — confirm whether this is intentional.
    fields=['name','created_at', 'price','quality', 'issue_by', 'issue_to','brand','coupon','issue_to','discount']
    template_name='productapp/product_form.html'
    success_url=reverse_lazy('product_list')
class ProductDeleteView(DeleteView):
    """Confirm and delete a product, then redirect to the product list."""
    model=Product
    template_name='productapp/product_delete.html'
    success_url=reverse_lazy('product_list')
| [
"ridubey@isystango.com"
] | ridubey@isystango.com |
161f685e3e2bd99967d8044a6dd3af0c1fdbdef2 | e1947a375b03a2e296f7188791ad6267ce2648e1 | /tests/test_utils.py | 1add0fb06708c631db4d2ff0fe3d70f0d94f9838 | [] | no_license | alexdmoss/anti-preempter | 79d7f7826a173353dad58428712ba4fc5670ad1f | 1230fe714a0bfa21d5198e73c3c50194cf616fe1 | refs/heads/master | 2022-12-23T11:23:48.025954 | 2021-06-02T04:07:22 | 2021-06-06T18:49:52 | 194,923,794 | 0 | 0 | null | 2022-12-09T05:24:28 | 2019-07-02T19:34:19 | Python | UTF-8 | Python | false | false | 424 | py | import logging
import pytest
from anti_preempter import utils
logger = logging.getLogger(__name__)
def test_get_env_variable_is_set(monkeypatch):
    # When the variable is present, its value is returned unchanged.
    monkeypatch.setenv('TEST_ENV_VAR', 'some-magic-value')
    assert utils.get_env_variable('TEST_ENV_VAR') == 'some-magic-value'
def test_get_env_variable_not_set():
    # A missing variable is expected to raise EnvironmentError.
    with pytest.raises(EnvironmentError):
        utils.get_env_variable('NOT_SET_VAR')
| [
"alex@mosstech.io"
] | alex@mosstech.io |
56cfbe221dfb2598ec1bb22b29b6fd613bae35d9 | a380f0622720a1df66ee3ca621275068ba7e501c | /Online/Neuracle/__init__.py | 7eb1fa6d91d34891bd4c798736d781f3a70c62b6 | [
"Apache-2.0"
] | permissive | HongLabTHU/Dual-mVEPs-Speller | 41d9dd450123e90e653c8b76525ec24b900fb3a2 | b18ad9f57deed183beab033a0d96a09fa3d743fe | refs/heads/master | 2022-07-18T08:36:51.321990 | 2022-06-21T15:06:55 | 2022-06-21T15:06:55 | 208,050,740 | 7 | 4 | Apache-2.0 | 2019-12-10T17:14:30 | 2019-09-12T12:53:23 | Jupyter Notebook | UTF-8 | Python | false | false | 72 | py | from .DataClient import Neuracle
from .TriggerBox import TriggerNeuracle | [
"liudkun15@gmail.com"
] | liudkun15@gmail.com |
a3fde335a693371ad5703b8b32009283f4e5e61c | 96dc0dbe7b558ec617823c4fcd4219a3a24fcafd | /inserter.py | 4c095adc22ae463d0e8873dc644677ae81650767 | [] | no_license | smooshie/Work | a503a8a3d05653b019245ba00d2cc941feff7c38 | 4aa6eeaa34d01d6452cb31be3bba8aa38c3f2d9e | refs/heads/master | 2020-04-05T22:58:07.789460 | 2013-08-15T13:31:14 | 2013-08-15T13:31:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | '''
Created on 11.1.2013
@author:
::.
(\./) .-""-.
`\'-'` \
'.___,_^__/
* Whale whale whale, what have we here?
'''
def rc(reversable):
    """Return the reverse complement of a DNA string (A<->T, C<->G)."""
    pairs = {"A": "T", "T": "A", "C": "G", "G": "C"}
    return "".join(pairs[base] for base in reversed(reversable))
def main():
    # Reads a FASTA genome, and every 500000 bases cuts a window and inserts
    # the reverse complement of the 5 bases at that position further back.
    # Several operations below look suspect — flagged inline, left unchanged.
    seq = []
    with open("human_relative.fas", 'rU') as f:
        for line in f:
            line.rstrip('\r\n')  # NOTE(review): result discarded — this is a no-op
            if line[0] != ">":  # skip FASTA header lines
                for i in line:
                    if i != '\n':
                        seq.append(i)
    indexes = open("indexes.txt", "w")
    for i in range (1, len(seq)):
        if i % 500000 == 0:
            reversd = rc(seq[i:i+5])
            # NOTE(review): str(seq[i:i+5]) writes the Python list repr
            # (e.g. "['A', 'T', ...]"), not the plain base string.
            indexes.write("from " + str(i) + " took " + str(seq[i:i+5]) + " --> " + str(i-20) + "-" + str(i-18) + " inserted " + reversd + " \n")
            # NOTE(review): each pop() shifts the list, so these seven pops
            # remove alternating positions (i-20, i-18, ..., i-8), not the
            # contiguous run i-20..i-14 the indices suggest — confirm intent.
            seq.pop(i-20)
            seq.pop(i-19)
            seq.pop(i-18)
            seq.pop(i-17)
            seq.pop(i-16)
            seq.pop(i-15)
            seq.pop(i-14)
            # NOTE(review): inserts the 5-char string as ONE list element, so
            # later per-character indexing treats it as a single item.
            seq.insert(i-20, reversd)
    done = open("human_altered.fas", "w")
    done.write(''.join(seq))
    # NOTE(review): 'indexes' and 'done' are never closed explicitly.
main()
| [
"timere@gmail.com"
] | timere@gmail.com |
b157b3943a5da0075b79e5476fd9dc13cb5f888d | f0e25779a563c2d570cbc22687c614565501130a | /Think_Python/rotate.py | 88a2a43db71c667c9424a08799bd16968e7efbd5 | [] | no_license | XyK0907/for_work | 8dcae9026f6f25708c14531a83a6593c77b38296 | 85f71621c54f6b0029f3a2746f022f89dd7419d9 | refs/heads/master | 2023-04-25T04:18:44.615982 | 2021-05-15T12:10:26 | 2021-05-15T12:10:26 | 293,845,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import string
def rotate_letter(letter, n):
    """Rotates a letter by n places. Does not change other chars.

    letter: single-letter string
    n: int

    Returns: single-letter string
    """
    if letter.isupper():
        base = ord('A')
    elif letter.islower():
        base = ord('a')
    else:
        return letter
    offset = (ord(letter) - base + n) % 26
    return chr(base + offset)
def rotate_word(word, n):
    """Rotates a word by n places (Caesar cipher); non-letters are kept.

    word: string
    n: integer

    Returns: string
    """
    rotated = []
    for ch in word:
        if ch.isupper():
            base = ord('A')
        elif ch.islower():
            base = ord('a')
        else:
            rotated.append(ch)
            continue
        rotated.append(chr((ord(ch) - base + n) % 26 + base))
    return ''.join(rotated)
if __name__ == '__main__':
    # Demo: 'cheer'+7 -> 'jolly', 'melon'-10 -> 'cubed', 'sleep'+9 -> 'bunny'.
    print(rotate_word('cheer', 7))
    print(rotate_word('melon', -10))
    print(rotate_word('sleep', 9))
"cherry.kong0907@gmail.com"
] | cherry.kong0907@gmail.com |
4b9c7ed4e2417f85945ab2ccf1de824b3529bad9 | 87ebe3ef1bc075b8a3528cf631b2619ba22b9e60 | /seeded_assn.py | 65928d7dda5bd8bdedb6b23476f0ed0bd018f6af | [] | no_license | ClipCaslCollab/ArabicPreprocessingScripts | 30c8bfcf079f8f4ff697c34084c59a0bf67f18b6 | a5464612c9c595528c4c788b36a419138f109bb9 | refs/heads/master | 2021-01-22T09:33:38.259813 | 2014-08-20T02:23:13 | 2014-08-20T02:23:13 | 19,685,669 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """seeded_assn.py
For converting ITM's model.states file to SHLDA's seeded assignment input format.
"""
import sys
def main(argv):
    """Convert an ITM model.states file to SHLDA seeded-assignment format.

    Each input line consists of whitespace-separated "token:state" pairs;
    only the token before the first ':' of each pair is printed.

    argv: command-line arguments; argv[0] is the input file path.
    """
    if len(argv) < 1:
        print("USAGE: python seeded_assn.py ITM_MODEL_STATES")
        # Bug fix: previously fell through and crashed with IndexError on
        # argv[0] after printing the usage message.
        return
    with open(argv[0]) as f:
        for line in f:
            print(" ".join(x.split(":")[0] for x in line.split()))

if __name__ == '__main__':
    main(sys.argv[1:])
| [
"slunk@umd.edu"
] | slunk@umd.edu |
21c19c391582cf6d0aae0447fae6b34ea1c9bc6b | 113da1ed728a63c915cc01fe06dca5869bdf788f | /Project_2/test.py | 1dc0a9e57f7f50289c3c090d6a0c6b741266c4ca | [] | no_license | sitio-couto/machine-learning | e9e43517377c60b3cf43001ea8acf308ca38dcf1 | 79f1aae5803e533f7d398b973419ef5513f90d0f | refs/heads/master | 2020-07-08T04:28:22.223316 | 2019-11-23T18:08:05 | 2019-11-23T18:08:05 | 203,564,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,736 | py |
#### DISCLAIMER ####
# Code provided by Orhan Gazi Yalçın (used solely for comparison)
# https://towardsdatascience.com/image-classification-in-10-minutes-with-mnist-dataset-54c35b77a38d
import numpy as np
import tensorflow as tf
import normalization as norm
# Load the train/validation splits (32x32 RGB images + integer labels).
train = np.load('Dataset/train.npz')
valid = np.load('Dataset/val.npz')
x_train, y_train = train['xs'].astype('float32') , train['ys'].astype('int8')
x_test, y_test = valid['xs'].astype('float32') , valid['ys'].astype('int8')
# x_train = norm.monochrome(x_train, 1024, ch_axis=1)
# x_test = norm.monochrome(x_test, 1024, ch_axis=1)
# Reshaping the array to 4-dims so that it can work with the Keras API
x_train = x_train.reshape(x_train.shape[0], 32, 32, 3)
x_test = x_test.reshape(x_test.shape[0], 32, 32, 3)
input_shape = (32, 32, 3)
# Making sure that the values are float so that we can get decimal points
# after division (NOTE: redundant — already cast to float32 at load time).
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalizing the RGB codes by dividing by the max RGB value.
x_train /= 255
x_test /= 255
# Importing the required Keras modules containing model and layers
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
# Creating a Sequential Model and adding the layers
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # Flattening the 2D arrays for fully connected layers
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(10,activation=tf.nn.softmax))
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x=x_train,y=y_train, epochs=10)
# Prints [validation loss, validation accuracy].
print(model.evaluate(x_test, y_test))
#### COLORED RESULTS ####
# E 1 - 66s - L: 1.6789 - Acc: 0.3905
# E 2 - 62s - L: 1.4593 - Acc: 0.4764
# E 3 - 60s - L: 1.3519 - Acc: 0.5146
# E 4 - 58s - L: 1.2635 - Acc: 0.5486
# E 5 - 64s - L: 1.1819 - Acc: 0.5770
# E 6 - 63s - L: 1.1133 - Acc: 0.6023
# E 7 - 64s - L: 1.0415 - Acc: 0.6284
# E 8 - 64s - L: 0.9838 - Acc: 0.6476
# E 9 - 65s - L: 0.9212 - Acc: 0.6718
# E 10 - 65s - L: 0.8610 - Acc: 0.6937
# Val => L: 1.5908 - Acc: 0.5149
#### GRAYSCALE RESULTS ####
# E 1/10 - 55s - L: 1.9341 - Acc: 0.2959
# E 2/10 - 62s - L: 1.7526 - Acc: 0.3659
# E 3/10 - 60s - L: 1.6729 - Acc: 0.3955
# E 4/10 - 59s - L: 1.6111 - Acc: 0.4210
# E 5/10 - 58s - L: 1.5491 - Acc: 0.4431
# E 6/10 - 61s - L: 1.4894 - Acc: 0.4678
# E 7/10 - 60s - L: 1.4287 - Acc: 0.4897
# E 8/10 - 65s - L: 1.3685 - Acc: 0.5108
# E 9/10 - 63s - L: 1.3043 - Acc: 0.5360
# E 10/10 - 61s - L: 1.2493 - Acc: 0.5552
# Val => L: 1.7758 - Acc: 0.3969 | [
"vinicius.c.e@hotmail.com"
] | vinicius.c.e@hotmail.com |
77ab9cecf9571229a858bc319ec4530650f8d96c | 4a48593a04284ef997f377abee8db61d6332c322 | /python/opencv/opencv_2/gui/opencv_with_tkinter.py | c38c3d8a121d82026b7644085f0fe74574998ae3 | [
"MIT"
] | permissive | jeremiedecock/snippets | 8feaed5a8d873d67932ef798e16cb6d2c47609f0 | b90a444041c42d176d096fed14852d20d19adaa7 | refs/heads/master | 2023-08-31T04:28:09.302968 | 2023-08-21T07:22:38 | 2023-08-21T07:22:38 | 36,926,494 | 26 | 9 | MIT | 2023-06-06T02:17:44 | 2015-06-05T10:19:09 | Python | UTF-8 | Python | false | false | 3,408 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Trackbar widget.
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_trackbar/py_trackbar.html#trackbar
WARNING: Tkinter doesn't work if it's run outside the main thread!
See: http://stackoverflow.com/questions/10556479/running-a-tkinter-form-in-a-separate-thread
"Tkinter isn't thread safe, and the general consensus is that Tkinter
doesn't work in a non-main thread. If you rewrite your code so that Tkinter
runs in the main thread, you can have your workers run in other threads."
"""
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import Tkinter as tk
import threading
def trackbar1_cb(x):
    # Placeholder trackbar callback; main() polls the Tk scale instead.
    pass
def trackbar2_cb(x):
    # Placeholder trackbar callback; main() polls the Tk scale instead.
    pass
#def scale_cb(ev=None):
# print(scale.get())
def main():
    # Parse the program options (camera device id) ############################
    parser = argparse.ArgumentParser(description='An opencv snippet.')
    parser.add_argument("--cameraid", "-i", help="The camera ID number (default: 0)", type=int, default=0, metavar="INTEGER")
    args = parser.parse_args()
    device_number = args.cameraid
    # TkInter #################################################################
    root = tk.Tk()
    root.geometry("500x75")   # Set the size of the "root" window
    # See: http://effbot.org/tkinterbook/scale.htm
    scale = tk.Scale(root, from_=0, to=255, orient=tk.HORIZONTAL)
    #scale = tk.Scale(root, from_=0, to=255, orient=tk.HORIZONTAL, command=scale_cb)
    scale.pack(fill=tk.X, expand=1)
    # OpenCV ##################################################################
    video_capture = cv.VideoCapture(device_number)
    # Create a window
    window_name = "Threshold Bin"
    cv.namedWindow(window_name)
    print("Press q to quit.")
    def opencv_main_loop():
        while(True):
            # Capture frame-by-frame.
            # 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
            # 'img_np' is an numpy array.
            ret, img_bgr = video_capture.read()
            # IMAGE PROCESSING ################################
            # Convert BGR color space to Grayscale
            img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
            # Threshold the Grayscale image: dst_i = (src_i > threshold_value) ? max_val : 0
            # NOTE(review): scale.get() is called from this worker thread while
            # Tk runs in the main thread; the module docstring warns Tkinter is
            # not thread-safe — works in practice here, but verify.
            threshold_value = scale.get()
            max_val = 255
            ret, img_threshold_bin = cv.threshold(img_gray, threshold_value, max_val, cv.THRESH_BINARY)
            # DISPLAY IMAGES ##################################
            # Display the resulting frame (BGR)
            cv.imshow('BGR (orignal)', img_bgr)
            # Display the resulting frames (Threshold)
            cv.imshow(window_name, img_threshold_bin)
            # KEYBOARD LISTENER ###############################
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
        video_capture.release()
        cv.destroyAllWindows()
    # Run the OpenCV main loop in a separate thread
    thread_cv = threading.Thread(target=opencv_main_loop)
    thread_cv.start()
    # Run the tkinter main loop (must stay in the main thread)
    root.mainloop()
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"jd.jdhp@gmail.com"
] | jd.jdhp@gmail.com |
97c0403f90bd2513f21d8492e08e0d6ff0eb0194 | 8d2817cf3b5f4480da485ec39231d1e6b4b5b544 | /regions/io/crtf/read.py | 3287161c8b42a6d10ce5efc6384ba5aa9fd06961 | [] | no_license | sushobhana/regions | 9a11f4c9a409140146567ea6c18f351edb36d386 | 1896f7e1040bcdb7d8b318ed2135a88475be3673 | refs/heads/master | 2018-10-20T04:03:19.442300 | 2018-08-13T18:07:13 | 2018-08-13T18:07:13 | 120,472,441 | 1 | 0 | null | 2018-02-06T14:45:13 | 2018-02-06T14:45:12 | null | UTF-8 | Python | false | false | 18,492 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import re
import copy
import itertools
from warnings import warn
from astropy import units as u
from astropy import coordinates
from astropy.extern import six
from .core import CRTFRegionParserError, CRTFRegionParserWarning, valid_symbols
from ..core import Shape, ShapeList, reg_mapping
__all__ = ['read_crtf', 'CRTFParser', 'CRTFRegionParser']
# All CASA files start with '#CRTF' . It may also include the version number like '#CRTFv0' .
regex_begin = re.compile(r'^#CRTFv?[\d]?$')
# Comment format: any full line starting with '#'.
regex_comment = re.compile(r'^#.*$')
# Identifies the global attributes Format.
regex_global = re.compile(r'^global\s+(?P<parameters>.*)?')
# Coordinate Format : "[x, y]"
regex_coordinate = re.compile(r'\[([\w.+-:]*?)\s*[,]\s*([\w.+-:]*?)\]')
# Single length Format. For Ex : helps us to extract the radius of a circle.
regex_length = re.compile(r'(?:\[[^=]*\])+[,]\s*([^\[]*)\]')
# Extracts each 'parameter=value' pair.
regex_meta = re.compile(r'(?:(\w+)\s*=[\s\'\"]*([^,\[\]]+?)[\'\",]+)|(?:(\w+)\s*=\s*\[(.*?)\])')
# Region format which segregates include('+'|'-') parameter, kind of definition ('ann' for annotations | '' for regions)
# and region type.
regex_region = re.compile(r'(?P<include>[+-])?(?P<type>ann(?=\s))?\s*(?P<regiontype>[a-z]*?)\[[^=]*]')
# Line format which checks the validity of the line and segregates the meta attributes from the region format.
regex_line = re.compile(r'(?P<region>[+-]?(?:ann(?=\s))?\s*[a-z]+?\[[^=]+\])(?:\s*[,]\s*(?P<parameters>.*))?')
def read_crtf(filename, errors='strict'):
    """
    Reads a CRTF region file and returns a list of region objects.
    Parameters
    ----------
    filename : `str`
        The file path
    errors : ``warn``, ``ignore``, ``strict``, optional
        The error handling scheme to use for handling parsing errors.
        The default is 'strict', which will raise a `~regions.CRTFRegionParserError`.
        ``warn`` will raise a `~regions.CRTFRegionParserWarning`, and ``ignore`` will do nothing
        (i.e., be silent).
    Returns
    -------
    regions : `list`
        Python `list` of `~regions.Region` objects.
    Examples
    --------
    >>> from regions import read_crtf
    >>> from astropy.utils.data import get_pkg_data_filename
    >>> file = get_pkg_data_filename('data/CRTFgeneral.crtf', package='regions.io.crtf.tests')
    >>> regs = read_crtf(file, errors='warn')
    >>> print(regs[0])
    Region: CircleSkyRegion
    center: <SkyCoord (FK4: equinox=B1950.000, obstime=B1950.000): (ra, dec) in deg
        (273.1, -23.18333333)>
    radius: 2.3 arcsec
    >>> print(regs[0].meta)
    {'frame': 'BARY', 'corr': ['I', 'Q'], 'include': True, 'type': 'ann'}
    >>> print(regs[0].visual)
    {'color': 'blue'}
    """
    with open(filename) as fh:
        # The first line must be the '#CRTF' (optionally versioned) header.
        header = fh.readline()
        if not regex_begin.search(header):
            raise CRTFRegionParserError('Every CRTF Region must start with "#CRTF" ')
        parser = CRTFParser(fh.read(), errors)
        return parser.shapes.to_regions()
class CRTFParser(object):
    """
    Parses a CRTF string.
    This class transforms a CRTF string to a `~regions.io.core.ShapeList`. The
    result is stored as ``shapes`` attribute.
    Each line is tested for either containing a region with meta attributes or global parameters.
    If global parameters are found then, it is stored in the ``global_meta`` attribute.
    If a region is found the `~regions.CRTFRegionParser` is invoked to transform the line into a
    `~regions.io.core.Shape` object.
    Parameters
    ----------
    region_string : `str`
        CRTF region string
    errors : ``warn``, ``ignore``, ``strict``, optional
        The error handling scheme to use for handling parsing errors.
        The default is 'strict', which will raise a `~regions.CRTFRegionParserError`.
        ``warn`` will raise a `~regions.CRTFRegionParserWarning`, and ``ignore`` will do nothing
        (i.e., be silent).
    Examples
    --------
    >>> from regions import CRTFParser
    >>> reg_str = "ann circle[[18h12m24s, -23d11m00s], 2.3arcsec], coord=B1950, frame=BARY, corr=[I, Q], color=blue"
    >>> parser = CRTFParser(reg_str, errors='warn')
    >>> regs = parser.shapes.to_regions()
    >>> print(regs[0])
    Region: CircleSkyRegion
    center: <SkyCoord (FK4: equinox=B1950.000, obstime=B1950.000): (ra, dec) in deg
        (273.1, -23.18333333)>
    radius: 2.3 arcsec
    >>> print(regs[0].meta)
    {'frame': 'BARY', 'corr': ['I', 'Q'], 'include': True, 'type': 'ann'}
    >>> print(regs[0].visual)
    {'color': 'blue'}
    """
    # Tuple of valid definition (region, annotation) types.
    valid_definition = ('box', 'centerbox', 'rotbox', 'poly', 'circle', 'annulus', 'ellipse',
                        'line', 'vector', 'text', 'symbol')
    # Tuple of valid names of the parameters (attributes).
    valid_global_keys = ('coord', 'frame', 'corr', 'veltype', 'restfreq', 'linewidth', 'linestyle', 'symsize',
                         'symthick', 'color', 'font', 'fontsize', 'fontstyle', 'usetex', 'labelpos', 'labelcolor',
                         'labeloff', 'range')
    def __init__(self, region_string, errors='strict'):
        if errors not in ('strict', 'ignore', 'warn'):
            msg = "``errors`` must be one of strict, ignore, or warn; is {}"
            raise ValueError(msg.format(errors))
        self.region_string = region_string
        self.errors = errors
        # Global states
        self.global_meta = {}
        # Results
        self.shapes = ShapeList()
        self.run()
    def __str__(self):
        ss = self.__class__.__name__
        ss += '\nErrors: {}'.format(self.errors)
        ss += '\nGlobal meta: {}'.format(self.global_meta)
        ss += '\nShapes: {}'.format(self.shapes)
        ss += '\n'
        return ss
    def parse_line(self, line):
        """
        Parses a single line.
        """
        # Skip blanks
        if line == '':
            return
        # Skip comments
        if regex_comment.search(line):
            return
        # Special case / header: parse global parameters into metadata
        global_parameters = regex_global.search(line)
        if global_parameters:
            self.parse_global_meta(global_parameters.group('parameters'))
            return
        # Tries to check the validity of the line.
        crtf_line = regex_line.search(line)
        if crtf_line:
            # Tries to parse the line.
            # Finds info about the region.
            region = regex_region.search(crtf_line.group('region'))
            type_ = region.group('type') or 'reg'
            include = region.group('include') or '+'
            region_type = region.group('regiontype').lower()
            if region_type in self.valid_definition:
                helper = CRTFRegionParser(self.global_meta, include, type_, region_type,
                                          *crtf_line.group('region', 'parameters'))
                self.shapes.append(helper.shape)
            else:
                self._raise_error("Not a valid CRTF Region type: '{0}'.".format(region_type))
        else:
            self._raise_error("Not a valid CRTF line: '{0}'.".format(line))
        return
    def _raise_error(self, msg):
        # Route parse problems through the configured error policy.
        if self.errors == 'warn':
            warn(msg, CRTFRegionParserWarning)
        elif self.errors == 'strict':
            raise CRTFRegionParserError(msg)
    def run(self):
        """
        Run all the steps.
        Splits the regions into lines and calls ``parse_line`` for each line.
        """
        for line in self.region_string.split('\n'):
            self.parse_line(line)
    def parse_global_meta(self, global_meta_str):
        """
        Parses the line starting with global to extract all the valid meta key/value pairs.
        """
        if global_meta_str:
            global_meta_str = regex_meta.findall(global_meta_str + ',')
            if global_meta_str:
                for par in global_meta_str:
                    # Each match populates either groups (0, 1) or (2, 3),
                    # depending on whether the value was bracketed.
                    # Bug fix: the original used `par[0] is not ''`, an
                    # identity comparison on a literal, instead of equality.
                    if par[0] != '':
                        val1 = par[0].lower()
                        val2 = par[1]
                    else:
                        val1 = par[2].lower()
                        val2 = par[3]
                    val1 = val1.strip()
                    val2 = val2.strip()
                    if val1 in self.valid_global_keys:
                        if val1 in ('range', 'corr', 'labeloff'):
                            # List-valued parameters are split on commas.
                            val2 = val2.split(",")
                            val2 = [x.strip() for x in val2 if x]
                        self.global_meta[val1] = val2
                    else:
                        self._raise_error("'{0}' is not a valid global meta key".format(val1))
class CRTFRegionParser(object):
    """
    Parse a CRTF region string.

    This will turn a line containing a CRTF region into a `~regions.Shape` object.

    Parameters
    ----------
    global_meta : `dict`
        Global meta data of the CRTF file which is used as default meta values for regions
    include : `str` {'+', '-'}
        Flag at the beginning of the line
    type_ : `str` {'reg', 'ann'}
        Kind of the region definition
    region_type : `str`
        Region type
    reg_str : `str`
        Region string to parse
    meta_str : `str`
        Meta string to parse
    errors : ``warn``, ``ignore``, ``strict``, optional
        The error handling scheme to use for handling parsing errors.
        The default is 'strict', which will raise a `~regions.CRTFRegionParserError`.
        ``warn`` will raise a `~regions.CRTFRegionParserWarning`, and
        ``ignore`` will do nothing (i.e., be silent).
    """

    # List of valid coordinate system
    # TODO : There are still many reference systems to support
    coordinate_systems = ['j2000', 'icrs', 'galactic', 'supergal', 'image', 'ecliptic']

    # Maps CASA coordinate frame to appropriate astropy coordinate frames.
    coordsys_mapping = dict(zip(coordinates.frame_transform_graph.get_names(),
                                coordinates.frame_transform_graph.get_names()))
    coordsys_mapping['j2000'] = 'fk5'
    coordsys_mapping['b1950'] = 'fk4'
    coordsys_mapping['supergal'] = 'supergalactic'
    coordsys_mapping['ecliptic'] = 'geocentrictrueecliptic'

    # CRTF Format specifications. This define how a certain region is read.
    # 'c' denotes a coordinates, 'l' denotes a length, 'pl' denotes a pair of lengths,
    # 's' denotes a string(generally a text or symbol)
    language_spec = {'circle': ['c', 'l'],
                     'box': ['c', 'c'],
                     'centerbox': ['c', 'pl'],
                     'rotbox': ['c', 'pl', 'l'],
                     'poly': itertools.cycle('c'),
                     'annulus': ['c', 'pl'],
                     'ellipse': ['c', 'pl', 'l'],
                     'line': ['c', 'c'],
                     'vector': ['c', 'c'],
                     'symbol': ['c', 's'],
                     'text': ['c', 's']}

    def __init__(self, global_meta, include, type_, region_type, reg_str, meta_str, errors='strict'):
        self.global_meta = global_meta
        self.reg_str = reg_str
        self.meta_str = meta_str
        self.errors = errors
        self.coord = None
        self.coordsys = None
        self.coord_str = None
        self.type_ = type_
        self.region_type = region_type
        # Region meta starts from the global defaults and is overridden locally.
        self.meta = copy.deepcopy(global_meta)
        self.shape = None
        self.include = include or '+'
        self.parse()

    def _raise_error(self, msg):
        if self.errors == 'warn':
            warn(msg, CRTFRegionParserWarning)
        elif self.errors == 'strict':
            raise CRTFRegionParserError(msg)

    def parse(self):
        """
        Starting point to parse the CRTF region string.
        """
        self.convert_meta()
        self.coordsys = self.meta.get('coord', 'image').lower()
        self.set_coordsys()
        self.convert_coordinates()
        self.make_shape()

    def set_coordsys(self):
        """
        Mapping to astropy's coordinate system name.

        # TODO: needs expert attention (Most reference systems are not mapped)
        """
        if self.coordsys.lower() in self.coordsys_mapping:
            self.coordsys = self.coordsys_mapping[self.coordsys.lower()]

    def convert_coordinates(self):
        """
        Convert coordinate string to `~astropy.coordinates.Angle` or `~astropy.units.quantity.Quantity` objects
        """
        coord_list_str = regex_coordinate.findall(self.reg_str) + regex_length.findall(self.reg_str)
        coord_list = []
        if self.region_type == 'poly':
            if len(coord_list_str) < 4:
                self._raise_error('Not in proper format: {} polygon should have > 4 coordinates'.format(self.reg_str))
            if coord_list_str[0] != coord_list_str[-1]:
                self._raise_error("Not in proper format: '{0}', "
                                  "In polygon, the last and first coordinates should be same".format(self.reg_str))
        else:
            if len(coord_list_str) != len(self.language_spec[self.region_type]):
                self._raise_error("Not in proper format: '{0}', "
                                  "Does not contain expected number of parameters for the region '{1}'"
                                  .format(self.reg_str, self.region_type))
        # Walk the format spec and the captured groups in lockstep, converting
        # each captured group according to its spec code.
        for x, y in zip(self.language_spec[self.region_type], coord_list_str):
            if x == 'c':
                if len(y) == 2 and y[1] != '':
                    coord_list.append(CoordinateParser.parse_coordinate(y[0]))
                    coord_list.append(CoordinateParser.parse_coordinate(y[1]))
                else:
                    self._raise_error("Not in proper format: {0} should be a coordinate".format(y))
            if x == 'pl':
                if len(y) == 2 and y[1] != '':
                    coord_list.append(CoordinateParser.parse_angular_length_quantity(y[0]))
                    coord_list.append(CoordinateParser.parse_angular_length_quantity(y[1]))
                else:
                    self._raise_error("Not in proper format: {0} should be a pair of length".format(y))
            if x == 'l':
                if isinstance(y, six.string_types):
                    coord_list.append(CoordinateParser.parse_angular_length_quantity(y))
                else:
                    self._raise_error("Not in proper format: {0} should be a single length".format(y))
            if x == 's':
                if self.region_type == 'symbol':
                    if y in valid_symbols:
                        self.meta['symbol'] = y
                    else:
                        self._raise_error("Not in proper format: '{0}' should be a symbol".format(y))
                elif self.region_type == 'text':
                    # Strip the surrounding quote characters.
                    self.meta['text'] = y[1:-1]
        self.coord = coord_list

    def convert_meta(self):
        """
        Parses the meta_str to python dictionary and stores in ``meta`` attribute.
        """
        if self.meta_str:
            self.meta_str = regex_meta.findall(self.meta_str + ',')
        if self.meta_str:
            for par in self.meta_str:
                # Bug fix: use '!=' instead of "is not" — identity comparison
                # with a string literal is unreliable (SyntaxWarning on 3.8+).
                if par[0] != '':
                    val1 = par[0]
                    val2 = par[1]
                else:
                    val1 = par[2]
                    val2 = par[3]
                val1 = val1.strip()
                val2 = val2.strip()
                if val1 in CRTFParser.valid_global_keys or val1 == 'label':
                    if val1 in ('range', 'corr', 'labeloff'):
                        val2 = val2.split(',')
                        val2 = [x.strip() for x in val2]
                    self.meta[val1] = val2
                else:
                    self._raise_error("'{0}' is not a valid meta key".format(val1))
        self.meta['include'] = self.include != '-'
        self.include = self.meta['include']
        if 'range' in self.meta:
            self.meta['range'] = [u.Quantity(x) for x in self.meta['range']]
        self.meta['type'] = self.type_

    def make_shape(self):
        """
        Make shape object
        """
        if self.region_type == 'ellipse':
            # CRTF stores semi-axes; Shape expects full axes.
            self.coord[2:] = [x * 2 for x in self.coord[2:]]
            if len(self.coord) % 2 == 1:  # This checks if the angle is present.
                self.coord[-1] /= 2
        self.meta.pop('coord', None)
        self.shape = Shape(coordsys=self.coordsys,
                           region_type=reg_mapping['CRTF'][self.region_type],
                           coord=self.coord,
                           meta=self.meta,
                           composite=False,
                           include=self.include
                           )
class CoordinateParser(object):
    """
    Helper class to structure coordinate parsing.
    """

    @staticmethod
    def parse_coordinate(string_rep):
        """
        Parse a single coordinate string into an `~astropy.coordinates.Angle`
        or a dimensionless `~astropy.units.Quantity` (for pixel coordinates).
        """
        # Any CRTF coordinate representation (sexagesimal or degrees)
        if 'pix' in string_rep:
            # e.g. '100pix' -> dimensionless pixel coordinate
            return u.Quantity(string_rep[:-3], u.dimensionless_unscaled)
        if 'h' in string_rep or 'rad' in string_rep:
            return coordinates.Angle(string_rep)
        if len(string_rep.split('.')) >= 3:
            # Sexagesimal written with dots (dd.mm.ss.s): convert the first
            # two dots into ':' so Angle can parse it.
            string_rep = string_rep.replace('.', ':', 2)
        return coordinates.Angle(string_rep, u.deg)

    @staticmethod
    def parse_angular_length_quantity(string_rep):
        """
        Given a string that is a number and a unit, return a
        Quantity of that string. Raise an Error If there is no unit. e.g.:
            50" -> 50*u.arcsec
            50 -> CRTFRegionParserError : Units must be specified for 50
        """
        unit_mapping = {
            'deg': u.deg,
            'rad': u.rad,
            'arcmin': u.arcmin,
            'arcsec': u.arcsec,
            'pix': u.dimensionless_unscaled,
            '"': u.arcsec,
            "'": u.arcmin,
        }
        length_regex = re.compile(r'([0-9+,-.]*)(.*)')
        # Renamed from 'str': the original shadowed the builtin str type.
        match = length_regex.search(string_rep)
        unit = match.group(2)
        if unit:
            if unit in unit_mapping:
                return u.Quantity(match.group(1), unit=unit_mapping[unit])
            return u.Quantity(match.group(1))
        else:
            raise CRTFRegionParserError('Units must be specified for {0} '.format(string_rep))
| [
"sushobhanapatra@gmail.com"
] | sushobhanapatra@gmail.com |
1c738821805cbc87233602c63211afc8c571f98f | 5936cd96a9aaf90d3f10600e7358d8baf4ca8732 | /junk.py | 1cc2f6852f9694eff73a54c089c1b1447bb5fdf2 | [] | no_license | isukrit/models_genesis_brain | 3d892ab7b1ce7ca93ae9398955b7802ce0fc6f19 | 2a10e64a9c845fb2864a8d96dfc79ac7cdb74158 | refs/heads/master | 2021-01-07T01:49:24.047464 | 2020-02-29T06:53:26 | 2020-02-29T06:53:26 | 241,543,397 | 10 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,769 | py | from tensorflow.keras import losses
from tensorflow.keras.layers import Input, GRU, Dense, Concatenate, TimeDistributed, LSTM, Add
from tensorflow.keras.models import Model
from attention_tanh import AttentionLayerTanh
from attention_base import AttentionLayerBase
from tensorflow.keras import optimizers
from tensorflow.python.keras import backend as K
# from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.keras import optimizers
def custom_loss(y_true, y_pred):
    """Categorical cross-entropy computed directly on logits (no softmax applied beforehand)."""
    loss = K.categorical_crossentropy(y_true, y_pred, from_logits=True)
    return loss
def define_attn_model(hidden_size, batch_size, en_timesteps, en_vsize, fr_timesteps, fr_vsize, attn_layer_type=0):
    """Build a seq2seq (encoder/decoder LSTM) model with attention.

    Parameters
    ----------
    hidden_size : LSTM state size shared by encoder and decoder.
    batch_size : fixed batch size for the training model, or None for a
        variable batch dimension.  The inference models always use batch 1.
    en_timesteps, en_vsize : source sequence length and vocabulary size.
    fr_timesteps, fr_vsize : target sequence length and vocabulary size.
    attn_layer_type : 0 -> AttentionLayerBase, 1 -> AttentionLayerTanh.

    Returns
    -------
    (full_model, encoder_model, decoder_model, attn_layer, attn_states)
        The compiled training model plus separate encoder and decoder models
        for step-by-step inference; the shared layers are reused, so the
        inference models see the training weights.
    """
    # Define an input sequence and process it.
    if batch_size:
        encoder_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inputs')
        decoder_inputs = Input(batch_shape=(batch_size, fr_timesteps, fr_vsize), name='decoder_inputs')
    else:
        encoder_inputs = Input(shape=(en_timesteps, en_vsize), name='encoder_inputs')
        decoder_inputs = Input(shape=(fr_timesteps, fr_vsize), name='decoder_inputs')
    # Encoder LSTM
    encoder_lstm = LSTM(hidden_size, return_sequences=True, return_state=True, name='encoder_lstm')
    encoder_out, enc_state_h, enc_state_c = encoder_lstm(encoder_inputs)
    encoder_state = [enc_state_h, enc_state_c]
    #print ('K.shape', K.shape(enc_state_h), K.shape(enc_state_c))
    # Set up the decoder LSTM, using `encoder_states` as initial state.
    decoder_lstm = LSTM(hidden_size, return_sequences=True, return_state=True, name='decoder_lstm')
    decoder_out, dec_state_h, dec_state_c = decoder_lstm(decoder_inputs, initial_state=encoder_state)
    # Attention layer
    if attn_layer_type == 0:
        attn_layer = AttentionLayerBase(name='attention_layer')
    elif attn_layer_type == 1:
        attn_layer = AttentionLayerTanh(name='attention_layer')
    attn_out, attn_states = attn_layer([encoder_out, decoder_out])
    # print(decoder_out.shape)
    # print(attn_out.shape)
    # Concat attention input and decoder LSTM output
    decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])
    # tile attention input and decoder outputs
    # attn_out_tiled = K.tile(attn_out, attn_states.shape[1])
    #decoder_concat_input = Add(name='addition_layer')([decoder_out, attn_out])
    # decoder_concat_input = decoder_out + attn_out
    # Dense layer
    # Linear activation: custom_loss applies softmax via from_logits=True.
    dense = Dense(fr_vsize, activation='linear', name='softmax_layer')
    dense_time = TimeDistributed(dense, name='time_distributed_layer')
    decoder_pred = dense_time(decoder_concat_input)
    # Full model
    optimizer = optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, amsgrad=False)
    full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)
    full_model.compile(optimizer= optimizer , loss=custom_loss, metrics=['accuracy'])
    full_model.summary(line_length=200)
    """ Inference model """
    batch_size = 1
    """ Encoder (Inference) model """
    encoder_inf_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inf_inputs')
    encoder_inf_out, enc_inf_state_h, enc_inf_state_c = encoder_lstm(encoder_inf_inputs)
    encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, enc_inf_state_h, enc_inf_state_c])
    """ Decoder (Inference) model """
    # One target step at a time; encoder outputs and previous decoder state
    # are fed in explicitly on every step.
    decoder_inf_inputs = Input(batch_shape=(batch_size, 1, fr_vsize), name='decoder_word_inputs')
    encoder_inf_states = Input(batch_shape=(batch_size, en_timesteps, hidden_size), name='encoder_inf_states')
    decoder_init_state_h = Input(batch_shape=(batch_size, hidden_size), name='decoder_init_h')
    decoder_init_state_c = Input(batch_shape=(batch_size, hidden_size), name='decoder_init_c')
    decoder_inf_out, decoder_inf_state_h, decoder_inf_state_c = decoder_lstm(decoder_inf_inputs, initial_state=[decoder_init_state_h, decoder_init_state_c])
    attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])
    #decoder_inf_concat = Add(name='addition_layer')([decoder_inf_out, attn_inf_out])
    decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])
    decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)
    decoder_model = Model(inputs=[encoder_inf_states, decoder_init_state_h, decoder_init_state_c, decoder_inf_inputs],
                          outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state_h, decoder_inf_state_c])
    return full_model, encoder_model, decoder_model, attn_layer, attn_states
if __name__ == '__main__':
    """ Checking nmt model for toy examples """
    # Bug fix: the function defined above is ``define_attn_model`` —
    # ``define_nmt`` does not exist in this file and raised a NameError.
    define_attn_model(64, None, 20, 30, 20, 20)
| [
"noreply@github.com"
] | isukrit.noreply@github.com |
0dd5d12a45bb2be62d25a40e85870d5b26d72ad5 | df7731c6b41f7a32420bfcbc73f42d8470604393 | /SNN/archives/behavior_SNN_Ambiance_Backup.py | 6aeb94b185e0e537e6e492c18985beb1f868817a | [] | no_license | jsdessureault/spike-old | 97b4f00dc2cb33ac0a309bd6e4c798fe7d1c9189 | 431a4cf9d4866c23411cfb001f6e59d45a1da0b0 | refs/heads/master | 2021-07-22T04:13:58.773242 | 2017-11-02T18:06:05 | 2017-11-02T18:06:05 | 52,386,331 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,972 | py | #!/usr/bin/env python
import rospy
import numpy as np
#import time
#import random
from brian2 import *
from std_msgs.msg import String
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# --- ROS node setup and module-level configuration (Python 2 / rospy) ---
rospy.init_node('node_spiking_neural_networks', anonymous=True)
rospy.loginfo("Gestion des reseaux de neurones a decharge.")
verbose = True
# Raw audio frames received from the subscriber, and their normalised copy.
frames = []
norm = []
NBFRAMESORIGINAL = 800
ECHANTILLON = 8
# Number of down-sampled frames (integer division under Python 2).
NBFRAMES = NBFRAMESORIGINAL / ECHANTILLON
# Adjustment - BUG (original author's note: magic offset to work around a frame-count mismatch)
NBFRAMES = NBFRAMES - 15
print "NBFRAMES: " + str(NBFRAMES)
CHUNK = 16
# Matrix dimensions for the normalised sound data; N = total input neurons.
NB_LIGNES = NBFRAMES - 1
NB_COLONNES = CHUNK
N = NB_LIGNES * NB_COLONNES
# ROS parameter used as the shared frame counter with the callback.
rospy.set_param("no_frame_SNN", 1)
def plotSignalSonore():
    """Plot the raw audio frames accumulated in the module-level ``frames`` list."""
    # Tutorial: matplotlib.org/users/beginner.html
    plt.plot(frames)
    plt.ylabel('Spectre sonore')
    plt.show()
def animate(i):
    # FuncAnimation frame callback: refresh the plotted voltage trace.
    # NOTE(review): relies on module-level ``line``, ``statemon`` and
    # ``neurone``, none of which are defined at module scope in this file
    # (``statemon`` is local to SNN()) — this callback looks broken; confirm.
    line.set_ydata(statemon.v[neurone])
    return line
def init():
    # FuncAnimation init callback: draw the initial voltage trace.
    # NOTE(review): same undefined-global issue as animate() — ``line``,
    # ``statemon`` and ``neurone`` are not module-level names here; confirm.
    line.set_ydata(statemon.v[neurone])
    return line
def plotAnimationVoltTemps(statemon):
    """Animated plot of the voltage trace of neuron 0 from ``statemon``.

    Bug fix: ``FuncAnimation`` requires a ``Figure`` as its first argument,
    but the original code passed the list returned by ``plt.plot``.
    NOTE(review): the ``animate``/``init`` callbacks still reference
    module-level globals that are never set — confirm before relying on this.
    """
    neurone = 0
    fig = plt.figure()
    plt.plot(statemon.t/ms, statemon.v[neurone])
    ani = animation.FuncAnimation(fig, animate, 20, init_func=init, interval=25, blit=False)
    plt.show()
def plotVoltTemps(statemon):
    """Plot membrane voltage against time for neurons ``debut`` .. ``fin``-1."""
    debut, fin = 0, 1
    title("Voltage en fonction du temps (Neurones de " + str(debut) + " a " + str(fin) + ")")
    for idx in range(debut, fin):
        plt.plot(statemon.t / ms, statemon.v[idx])
    plt.ylabel('voltage')
    plt.xlabel('Temps m/s')
    plt.show()
def plotOutputNeurons(stateOutput):
    """Plot the voltage traces recorded for output neurons ``debut`` .. ``fin``-1."""
    debut, fin = 0, 2
    title("Voltage final des neurones de sortie")
    for idx in range(debut, fin):
        plt.plot(stateOutput.t / ms, stateOutput.v[idx])
    plt.ylabel('voltage')
    plt.xlabel('Neurones de sortie')
    plt.show()
def plotSpikeTemps(spikemon):
    """Raster plot: neuron index of each recorded spike versus time."""
    plt.plot(spikemon.t / ms, spikemon.i, '.k')
    plt.ylabel('Spikes')
    plt.xlabel('Temps m/s')
    plt.show()
def plotPopulationRate(popratemon):
    """Plot the population firing rate (Hz) recorded by ``popratemon`` over time."""
    plt.plot(popratemon.t / ms, popratemon.rate / Hz)
    plt.xlabel('Temps m/s')
    plt.ylabel('Rate/Hz')
    plt.show()
def plotConnectivity(S):
    """Visualise the synapse object ``S``: the left panel draws one line per
    source->target connection, the right panel scatters (source, target)
    index pairs.

    Uses the stateful pylab interface (``figure``/``subplot``/``plot`` are
    presumably star-imported via brian2 — confirm), so call order matters.
    """
    Ns = len(S.source)
    Nt = len(S.target)
    figure(figsize=(10,4))
    subplot(121)
    # Two vertical columns of markers: sources at x=0, targets at x=1.
    plot(np.zeros(Ns), arange(Ns), 'ok', ms=10)
    plot(np.ones(Nt), arange(Nt), 'ok', ms=10)
    # One line segment per connection.
    for i,j in zip(S.i, S.j):
        plot([0,1], [i,j], '-k')
    xticks([0,1], ['Source', 'Target'])
    ylabel("Neuron index")
    xlim(-0.1, 1.1)
    ylim(-1, max(Ns, Nt))
    subplot(122)
    plot(S.i, S.j, 'ok')
    xlim(-1, Ns)
    ylim(-1, Nt)
    xlabel('Source neuron index')
    ylabel('Target neuron index')
    plt.show()
def echantillonFrames():
print "Echantillons FRAMES"
print type(norm[5][5])
print norm[5][5]
#print type(frames[5])
#print frames[5]
print norm
def callbackRecoitDonnees(data):
    """ROS subscriber callback: decode one audio frame and store it.

    ``data.data`` carries the raw Int16 samples of one frame.  The decoded
    frame is appended to the module-level ``frames`` list, a matching row of
    CHUNK zeros is appended to ``norm`` (filled later by
    ``normalize2DVector``), and the shared ``no_frame_SNN`` ROS parameter is
    incremented as a frame counter.
    """
    no_frame = rospy.get_param("no_frame_SNN")
    # np.frombuffer replaces the deprecated numpy.fromstring (same values,
    # no deprecation warning).
    decoded = np.frombuffer(data.data, dtype=np.int16)
    #if verbose:
    #    rospy.loginfo(rospy.get_caller_id() + "Le callback a recu: %s", decoded)
    frames.append(decoded)
    # Placeholder row of CHUNK zeros, overwritten by normalize2DVector().
    norm.append([0] * CHUNK)
    rospy.set_param("no_frame_SNN", no_frame + 1)
def normalize2DVector():
print "Normalize"
pluspetit = 99999
plusgrand = -99999
for l in range(0, NB_LIGNES-1):
for c in range(0, NB_COLONNES-1):
nombre = frames[l][c]
if nombre < pluspetit:
pluspetit = nombre
if nombre > plusgrand:
plusgrand = nombre
ecart = plusgrand - pluspetit
print "plusgrand: %i" % plusgrand
print "pluspetit: %i" % pluspetit
print "ecart: %i" % ecart
for l in range(0, NB_LIGNES-1):
for c in range(0, NB_COLONNES-1):
ancien = frames[l][c]
norm[l][c] = float(float(ancien - pluspetit)) / float(ecart)
#print "Donnee: %i Donnee normalisee: %.5f" % (ancien, norm[l][c])
#def normalize1DVector():
# norm = [float(i)/max(frames) for i in frames]
@check_units(l=1, c=1, result=0)
def intensite(l, c):
    """Return the normalised intensity stored at row *l*, column *c* of ``norm``."""
    return norm[l][c]
def SNN():
    """Build and run the spiking neural network on the normalised audio data.

    Creates a 4-layer feed-forward network (N input neurons, two hidden
    layers of 50, 3 output neurons) with fully-connected synapses, drives the
    input layer from the ``norm`` matrix via ``intensite``, runs a 500 ms
    brian2 simulation and (when ``verbose``) plots the output-neuron traces.
    """
    print "SNN"
    start_scope()
    # Variable definitions
    print "Nombre de neurones entrees: " + str(N)
    tau = 10*ms
    eqs = '''
    dv/dt = (1 - v) /tau : 1 (unless refractory)
    '''
    # Build the SNN
    # Input neurons
    InputGroup = NeuronGroup(N, eqs, threshold='v> 0.8', reset='v = 0', refractory=5*ms)
    HiddenGroup1 = NeuronGroup(50, eqs, threshold='v> 0.8', reset='v = 0', refractory=5*ms)
    HiddenGroup2 = NeuronGroup(50, eqs, threshold='v> 0.8', reset='v = 0', refractory=5*ms)
    OutputGroup = NeuronGroup(3, eqs, threshold='v> 0.8', reset='v = 0', refractory=5*ms)
    # Synapses (kept as module globals so brian2 does not garbage-collect them)
    global ItoH1
    ItoH1 = Synapses(InputGroup, HiddenGroup1, 'w:1', on_pre='v_post += w')
    ItoH1.connect()
    ItoH1.w = 'j*0.2'
    global H1toH2
    H1toH2 = Synapses(HiddenGroup1, HiddenGroup2, 'w:1', on_pre='v_post += w')
    H1toH2.connect()
    H1toH2.w = 'j*0.2'
    global H2toO
    H2toO = Synapses(HiddenGroup2, OutputGroup, 'w:1', on_pre='v_post += w')
    H2toO.connect()
    H2toO.w = 'j*0.2'
    # Assign the input neurons from the normalised sound matrix
    for j in range(0,NB_LIGNES-1):
        for k in range(0,NB_COLONNES-1):
            noNeurone = (j*NB_COLONNES) + k
            InputGroup.v[noNeurone] = intensite(j,k)
            if verbose:
                if noNeurone%100 == 0:
                    print "neurone : " + str(noNeurone) + " voltage: " + str(InputGroup.v[noNeurone])
    # Create the monitors
    #global statemon
    statemon = StateMonitor(InputGroup, 'v', record=0)
    #global stateOutput
    stateOutput = StateMonitor(OutputGroup, 'v', record=True)
    #global spikemon
    spikemon = SpikeMonitor(HiddenGroup1)
    #global popratemon
    popratemon = PopulationRateMonitor(HiddenGroup1)
    # Run the simulation
    run(500*ms, report='text', report_period=1*second)
    # Retrieve the output neurons
    # Publish the signal on a topic to be read by the attention module.
    # Display the plots
    if verbose:
        print "Affichage des graphiques..."
        #plotSignalSonore()
        #echantillonFrames()
        #plotVoltTemps(statemon)
        #plotSpikeTemps(spikemon)
        #plotAnimationVoltTemps(statemon)
        #plotConnectivity(S)
        #plotPopulationRate(popratemon)
        plotOutputNeurons(stateOutput)
# --- Main flow: subscribe to the audio topic, wait for all frames, then run the SNN ---
if verbose:
    print("Souscrit au callback...")
rospy.Subscriber("topic_son_ambiant", String, callbackRecoitDonnees)
if verbose:
    print("Boucle sur les frames...")
frame = rospy.get_param("no_frame_SNN")
# NOTE(review): busy-wait polling the ROS parameter server until the callback
# has counted NBFRAMES frames — consider rospy.sleep() in the loop; confirm.
while frame <= NBFRAMES:
    # Print progress every 10 frames.
    frame = rospy.get_param("no_frame_SNN")
    if frame%10 == 0:
        print "Frame # " + str(frame) + "/" + str(NBFRAMES)
normalize2DVector()
SNN()
| [
"sdessureault@videotron.ca"
] | sdessureault@videotron.ca |
05569309e30bae8fa01d77141b06eb6f922b24e6 | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/tests/os_test.py | cb10509f1d7cdb4b47c62f144aadf5f27e252502 | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 673 | py | #
# currently broken:
# import os.path
import os
r1 = os.urandom(8)
r2 = os.urandom(8)
print len(r1), len(r2), type(r1), type(r2), r1 == r2
print type(os.stat("/dev/null"))
print os.path.expanduser("~") == os.environ["HOME"]
print os.path.isfile("/dev/null")
print os.path.isfile("/should_not_exist!")
e = OSError(1, 2, 3)
print e
print e.errno
print e.strerror
print e.filename
print OSError(1, 2).filename
try:
os.execvp("aoeuaoeu", ['aoeuaoeu'])
except OSError, e:
print e
# Changes to os.environ should show up in subprocesses:
import subprocess
env = os.environ
env["PYTHONPATH"] = "."
subprocess.check_call("echo PYTHONPATH is $PYTHONPATH", shell=1)
| [
"kmod@dropbox.com"
] | kmod@dropbox.com |
4c75e2f57de88f5b09a8af1dc38d30330111ec12 | bfc38084cbcbfb5d25629db634abe1ce4d6694ee | /P0E4/Solve_float_Caso2/S_F_C2.py | 760c7dfc6e1ebeb19d04ffd72d51a9b026e30d4a | [] | no_license | JoaquinGuzmanO/MCOC2021-P0 | a54c25bf78edd3cc107d3c39fe4a9ed6c124521e | 58cf8a7e3936740a4793ee92c5444744330a619d | refs/heads/main | 2023-07-16T18:37:25.610107 | 2021-09-04T01:33:39 | 2021-09-04T01:33:39 | 392,008,328 | 0 | 0 | null | 2021-08-02T15:51:06 | 2021-08-02T15:51:05 | null | UTF-8 | Python | false | false | 1,229 | py | from time import perf_counter
from numpy.linalg import inv
from scipy.linalg import eigh, solve
from numpy import double, zeros, eye, ones, float32
def laplaciana(N,tipo):
e = eye(N)-eye(N,N,1)
return tipo(e+e.T)
# --- Timing harness: run the solver sweep 10 times and write per-size means ---
# Each repetition times scipy.linalg.solve on Laplacian systems of increasing
# size; the output file gets one "<size> <mean seconds>" line per size.
ns = [2, 5, 10, 20, 50, 100, 500, 1000, 2000, 5000]
tiempos = []
for _ in range(10):
    t = []
    for N in ns:
        A = laplaciana(N, float32)  # dtype under test
        b = ones(N)
        t1 = perf_counter()
        s = solve(A, b)             # solver under test
        t2 = perf_counter()
        t.append(t2 - t1)
    tiempos.append(t)
# Column-wise mean over the repetitions (replaces the original copy-pasted
# p1..p10 accumulators, which only worked for exactly 10 sizes).
promedios = [sum(col) / len(tiempos) for col in zip(*tiempos)]
# 'with' guarantees the file is closed even if writing fails.
with open("rendimiento_SFC2.txt", "w") as text:
    for size, promedio in zip(ns, promedios):
        text.write(f"{size} {promedio} \n")
"jaguzman1@miuandes.cl"
] | jaguzman1@miuandes.cl |
4b12cb36cca7db69add9afd812f75a2819c4b7f7 | 2ec26d004a653c0576594e48ac13dd71f539b30a | /crikey/conditional_audio/fruit_binned_slow_mse/fruitspeecher_binned_slow_mse.py | c5d16460509e10da90e3e3c6c64df5e5c6b1c737 | [] | no_license | kastnerkyle/research_megarepo | 6aca5b2c3b2413e0def1093b23f2826e3e7e5e97 | ab182667650fd59b99f75d4b599d7ace77a3f30b | refs/heads/master | 2021-01-17T20:31:52.250050 | 2016-12-27T01:28:54 | 2016-12-27T01:28:54 | 68,341,074 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 24,969 | py | import numpy as np
import theano
from theano import tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from scipy.io import wavfile
import os
import sys
from kdllib import load_checkpoint, theano_one_hot, concatenate
from kdllib import fetch_fruitspeech_spectrogram, list_iterator
from kdllib import np_zeros, GRU, GRUFork, dense_to_one_hot
from kdllib import make_weights, make_biases, relu, run_loop
from kdllib import as_shared, adam, gradient_clipping
from kdllib import get_values_from_function, set_shared_variables_in_function
from kdllib import soundsc, categorical_crossentropy
from kdllib import sample_binomial, sigmoid
if __name__ == "__main__":
import argparse
speech = fetch_fruitspeech_spectrogram()
X = speech["data"]
y = speech["target"]
vocabulary = speech["vocabulary"]
vocabulary_size = speech["vocabulary_size"]
reconstruct = speech["reconstruct"]
fs = speech["sample_rate"]
X = np.array([x.astype(theano.config.floatX) for x in X])
y = np.array([yy.astype(theano.config.floatX) for yy in y])
minibatch_size = 1
n_epochs = 200 # Used way at the bottom in the training loop!
checkpoint_every_n = 10
cut_len = 41 # Used way at the bottom in the training loop!
random_state = np.random.RandomState(1999)
train_itr = list_iterator([X, y], minibatch_size, axis=1,
stop_index=105, randomize=True, make_mask=True)
valid_itr = list_iterator([X, y], minibatch_size, axis=1,
start_index=80, randomize=True, make_mask=True)
X_mb, X_mb_mask, c_mb, c_mb_mask = next(train_itr)
train_itr.reset()
n_hid = 256
att_size = 10
n_proj = 256
n_v_proj = 5
n_bins = 10
input_dim = X_mb.shape[-1]
n_pred_proj = 1
n_feats = X_mb.shape[-1]
n_chars = vocabulary_size
# n_components = 3
# n_density = 2 * n_out * n_components + n_components
desc = "Speech generation"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-s', '--sample',
help='Sample from a checkpoint file',
default=None,
required=False)
parser.add_argument('-p', '--plot',
help='Plot training curves from a checkpoint file',
default=None,
required=False)
parser.add_argument('-w', '--write',
help='The string to write out (default first minibatch)',
default=None,
required=False)
def restricted_int(x):
if x is None:
# None makes it "auto" sample
return x
x = int(x)
if x < 1:
raise argparse.ArgumentTypeError("%r not range [1, inf]" % (x,))
return x
parser.add_argument('-sl', '--sample_length',
help='Number of steps to sample, default is automatic',
type=restricted_int,
default=None,
required=False)
parser.add_argument('-c', '--continue', dest="cont",
help='Continue training from another saved model',
default=None,
required=False)
args = parser.parse_args()
if args.plot is not None or args.sample is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if args.sample is not None:
checkpoint_file = args.sample
else:
checkpoint_file = args.plot
if not os.path.exists(checkpoint_file):
raise ValueError("Checkpoint file path %s" % checkpoint_file,
" does not exist!")
print(checkpoint_file)
checkpoint_dict = load_checkpoint(checkpoint_file)
train_costs = checkpoint_dict["train_costs"]
valid_costs = checkpoint_dict["valid_costs"]
plt.plot(train_costs)
plt.plot(valid_costs)
plt.savefig("costs.png")
X_mb, X_mb_mask, c_mb, c_mb_mask = next(valid_itr)
valid_itr.reset()
prev_h1, prev_h2, prev_h3 = [np_zeros((minibatch_size, n_hid))
for i in range(3)]
prev_kappa = np_zeros((minibatch_size, att_size))
prev_w = np_zeros((minibatch_size, n_chars))
if args.sample is not None:
predict_function = checkpoint_dict["predict_function"]
attention_function = checkpoint_dict["attention_function"]
sample_function = checkpoint_dict["sample_function"]
if args.write is not None:
sample_string = args.write
print("Sampling using sample string %s" % sample_string)
oh = dense_to_one_hot(
np.array([vocabulary[c] for c in sample_string]),
vocabulary_size)
c_mb = np.zeros(
(len(oh), minibatch_size, oh.shape[-1])).astype(c_mb.dtype)
c_mb[:len(oh), :, :] = oh[:, None, :]
c_mb = c_mb[:len(oh)]
c_mb_mask = np.ones_like(c_mb[:, :, 0])
if args.sample_length is None:
raise ValueError("NYI - use -sl or --sample_length ")
else:
fixed_steps = args.sample_length
completed = []
init_x = np.zeros_like(X_mb[0])
for i in range(fixed_steps):
rvals = sample_function(init_x, c_mb, c_mb_mask, prev_h1, prev_h2,
prev_h3, prev_kappa, prev_w)
sampled, h1_s, h2_s, h3_s, k_s, w_s, stop_s, stop_h = rvals
completed.append(sampled)
# cheating sampling...
#init_x = X_mb[i]
init_x = sampled
prev_h1 = h1_s
prev_h2 = h2_s
prev_h3 = h3_s
prev_kappa = k_s
prev_w = w_s
cond = c_mb
print("Completed sampling after %i steps" % fixed_steps)
completed = np.array(completed).transpose(1, 0, 2)
rlookup = {v: k for k, v in vocabulary.items()}
all_strings = []
for yi in y:
ex_str = "".join([rlookup[c]
for c in np.argmax(yi, axis=1)])
all_strings.append(ex_str)
for i in range(len(completed)):
ex = completed[i]
ex_str = "".join([rlookup[c]
for c in np.argmax(cond[:, i], axis=1)])
s = "gen_%s_%i.wav" % (ex_str, i)
ii = reconstruct(ex)
wavfile.write(s, fs, soundsc(ii))
if ex_str in all_strings:
inds = [n for n, s in enumerate(all_strings)
if ex_str == s]
ind = inds[0]
it = reconstruct(X[ind])
s = "orig_%s_%i.wav" % (ex_str, i)
wavfile.write(s, fs, soundsc(it))
valid_itr.reset()
print("Sampling complete, exiting...")
sys.exit()
else:
print("No plotting arguments, starting training mode!")
X_sym = tensor.tensor3("X_sym")
X_sym.tag.test_value = X_mb
X_mask_sym = tensor.matrix("X_mask_sym")
X_mask_sym.tag.test_value = X_mb_mask
c_sym = tensor.tensor3("c_sym")
c_sym.tag.test_value = c_mb
c_mask_sym = tensor.matrix("c_mask_sym")
c_mask_sym.tag.test_value = c_mb_mask
init_h1 = tensor.matrix("init_h1")
init_h1.tag.test_value = np_zeros((minibatch_size, n_hid))
init_h2 = tensor.matrix("init_h2")
init_h2.tag.test_value = np_zeros((minibatch_size, n_hid))
init_h3 = tensor.matrix("init_h3")
init_h3.tag.test_value = np_zeros((minibatch_size, n_hid))
init_kappa = tensor.matrix("init_kappa")
init_kappa.tag.test_value = np_zeros((minibatch_size, att_size))
init_w = tensor.matrix("init_w")
init_w.tag.test_value = np_zeros((minibatch_size, n_chars))
params = []
biases = []
cell1 = GRU(input_dim, n_hid, random_state)
cell2 = GRU(n_hid, n_hid, random_state)
cell3 = GRU(n_hid, n_hid, random_state)
params += cell1.get_params()
params += cell2.get_params()
params += cell3.get_params()
inp_to_h1 = GRUFork(input_dim, n_hid, random_state)
inp_to_h2 = GRUFork(input_dim, n_hid, random_state)
inp_to_h3 = GRUFork(input_dim, n_hid, random_state)
att_to_h1 = GRUFork(n_chars, n_hid, random_state)
att_to_h2 = GRUFork(n_chars, n_hid, random_state)
att_to_h3 = GRUFork(n_chars, n_hid, random_state)
h1_to_h2 = GRUFork(n_hid, n_hid, random_state)
h1_to_h3 = GRUFork(n_hid, n_hid, random_state)
h2_to_h3 = GRUFork(n_hid, n_hid, random_state)
params += inp_to_h1.get_params()
params += inp_to_h2.get_params()
params += inp_to_h3.get_params()
params += att_to_h1.get_params()
params += att_to_h2.get_params()
params += att_to_h3.get_params()
params += h1_to_h2.get_params()
params += h1_to_h3.get_params()
params += h2_to_h3.get_params()
biases += inp_to_h1.get_biases()
biases += inp_to_h2.get_biases()
biases += inp_to_h3.get_biases()
biases += att_to_h1.get_biases()
biases += att_to_h2.get_biases()
biases += att_to_h3.get_biases()
biases += h1_to_h2.get_biases()
biases += h1_to_h3.get_biases()
biases += h2_to_h3.get_biases()
# 3 to include groundtruth, pixel RNN style
outs_to_v_h1 = GRUFork(3, n_v_proj, random_state)
params += outs_to_v_h1.get_params()
biases += outs_to_v_h1.get_biases()
v_cell1 = GRU(n_v_proj, n_v_proj, random_state)
params += v_cell1.get_params()
h1_to_att_a, h1_to_att_b, h1_to_att_k = make_weights(n_hid, 3 * [att_size],
random_state)
h1_to_outs, = make_weights(n_hid, [n_proj], random_state)
h2_to_outs, = make_weights(n_hid, [n_proj], random_state)
h3_to_outs, = make_weights(n_hid, [n_proj], random_state)
params += [h1_to_att_a, h1_to_att_b, h1_to_att_k]
params += [h1_to_outs, h2_to_outs, h3_to_outs]
pred_proj, = make_weights(n_v_proj, [n_pred_proj], random_state)
pred_b, = make_biases([n_pred_proj])
params += [pred_proj, pred_b]
biases += [pred_b]
inpt = X_sym[:-1]
target = X_sym[1:]
mask = X_mask_sym[1:]
context = c_sym * c_mask_sym.dimshuffle(0, 1, 'x')
inp_h1, inpgate_h1 = inp_to_h1.proj(inpt)
inp_h2, inpgate_h2 = inp_to_h2.proj(inpt)
inp_h3, inpgate_h3 = inp_to_h3.proj(inpt)
u = tensor.arange(c_sym.shape[0]).dimshuffle('x', 'x', 0)
u = tensor.cast(u, theano.config.floatX)
def calc_phi(k_t, a_t, b_t, u_c):
a_t = a_t.dimshuffle(0, 1, 'x')
b_t = b_t.dimshuffle(0, 1, 'x')
ss1 = (k_t.dimshuffle(0, 1, 'x') - u_c) ** 2
ss2 = -b_t * ss1
ss3 = a_t * tensor.exp(ss2)
ss4 = ss3.sum(axis=1)
return ss4
def step(xinp_h1_t, xgate_h1_t,
xinp_h2_t, xgate_h2_t,
xinp_h3_t, xgate_h3_t,
h1_tm1, h2_tm1, h3_tm1,
k_tm1, w_tm1, ctx):
attinp_h1, attgate_h1 = att_to_h1.proj(w_tm1)
h1_t = cell1.step(xinp_h1_t + attinp_h1, xgate_h1_t + attgate_h1,
h1_tm1)
h1inp_h2, h1gate_h2 = h1_to_h2.proj(h1_t)
h1inp_h3, h1gate_h3 = h1_to_h3.proj(h1_t)
a_t = h1_t.dot(h1_to_att_a)
b_t = h1_t.dot(h1_to_att_b)
k_t = h1_t.dot(h1_to_att_k)
a_t = tensor.exp(a_t)
b_t = tensor.exp(b_t)
k_t = k_tm1 + tensor.exp(k_t)
ss4 = calc_phi(k_t, a_t, b_t, u)
ss5 = ss4.dimshuffle(0, 1, 'x')
ss6 = ss5 * ctx.dimshuffle(1, 0, 2)
w_t = ss6.sum(axis=1)
attinp_h2, attgate_h2 = att_to_h2.proj(w_t)
attinp_h3, attgate_h3 = att_to_h3.proj(w_t)
h2_t = cell2.step(xinp_h2_t + h1inp_h2 + attinp_h2,
xgate_h2_t + h1gate_h2 + attgate_h2, h2_tm1)
h2inp_h3, h2gate_h3 = h2_to_h3.proj(h2_t)
h3_t = cell3.step(xinp_h3_t + h1inp_h3 + h2inp_h3 + attinp_h3,
xgate_h3_t + h1gate_h3 + h2gate_h3 + attgate_h3,
h3_tm1)
return h1_t, h2_t, h3_t, k_t, w_t
init_x = tensor.fmatrix()
init_x.tag.test_value = np_zeros((minibatch_size, n_feats)).astype(theano.config.floatX)
srng = RandomStreams(1999)
# Used to calculate stopping heuristic from sections 5.3
u_max = 0. * tensor.arange(c_sym.shape[0]) + c_sym.shape[0]
u_max = u_max.dimshuffle('x', 'x', 0)
u_max = tensor.cast(u_max, theano.config.floatX)
def sample_step(x_tm1, h1_tm1, h2_tm1, h3_tm1, k_tm1, w_tm1, ctx):
    # One sampling-time step: same recurrence as step(), plus the inner
    # per-feature prediction RNN that turns the summary into the next
    # output frame, and the section-5.3 stopping heuristic (sh_t).
    xinp_h1_t, xgate_h1_t = inp_to_h1.proj(x_tm1)
    xinp_h2_t, xgate_h2_t = inp_to_h2.proj(x_tm1)
    xinp_h3_t, xgate_h3_t = inp_to_h3.proj(x_tm1)
    attinp_h1, attgate_h1 = att_to_h1.proj(w_tm1)
    h1_t = cell1.step(xinp_h1_t + attinp_h1, xgate_h1_t + attgate_h1,
                      h1_tm1)
    h1inp_h2, h1gate_h2 = h1_to_h2.proj(h1_t)
    h1inp_h3, h1gate_h3 = h1_to_h3.proj(h1_t)
    a_t = h1_t.dot(h1_to_att_a)
    b_t = h1_t.dot(h1_to_att_b)
    k_t = h1_t.dot(h1_to_att_k)
    a_t = tensor.exp(a_t)
    b_t = tensor.exp(b_t)
    k_t = k_tm1 + tensor.exp(k_t)
    ss_t = calc_phi(k_t, a_t, b_t, u)
    # calculate and return stopping criteria
    sh_t = calc_phi(k_t, a_t, b_t, u_max)
    ss5 = ss_t.dimshuffle(0, 1, 'x')
    ss6 = ss5 * ctx.dimshuffle(1, 0, 2)
    w_t = ss6.sum(axis=1)
    attinp_h2, attgate_h2 = att_to_h2.proj(w_t)
    attinp_h3, attgate_h3 = att_to_h3.proj(w_t)
    h2_t = cell2.step(xinp_h2_t + h1inp_h2 + attinp_h2,
                      xgate_h2_t + h1gate_h2 + attgate_h2, h2_tm1)
    h2inp_h3, h2gate_h3 = h2_to_h3.proj(h2_t)
    h3_t = cell3.step(xinp_h3_t + h1inp_h3 + h2inp_h3 + attinp_h3,
                      xgate_h3_t + h1gate_h3 + h2gate_h3 + attgate_h3,
                      h3_tm1)
    # Summary vector: skip connections from all three layers.
    out_t = h1_t.dot(h1_to_outs) + h2_t.dot(h2_to_outs) + h3_t.dot(
        h3_to_outs)
    theano.printing.Print("out_t.shape")(out_t.shape)
    out_t_shape = out_t.shape
    # Shuffle so the inner scan below iterates over features.
    x_tm1_shuf = x_tm1.dimshuffle(1, 0, 'x')
    vinp_t = out_t.dimshuffle(1, 0, 'x')
    theano.printing.Print("x_tm1.shape")(x_tm1.shape)
    theano.printing.Print("vinp_t.shape")(vinp_t.shape)
    init_pred = tensor.zeros((vinp_t.shape[1],), dtype=theano.config.floatX)
    init_hidden = tensor.zeros((x_tm1_shuf.shape[1], n_v_proj),
                               dtype=theano.config.floatX)
    def sample_out_step(x_tm1_shuf, vinp_t, pred_fm1, v_h1_tm1):
        # Condition each feature on the previous frame's feature, the RNN
        # summary, and the previously predicted feature (autoregressive).
        j_t = concatenate((x_tm1_shuf, vinp_t,
                           pred_fm1.dimshuffle(0, 'x')),
                          axis=-1)
        theano.printing.Print("j_t.shape")(j_t.shape)
        vinp_h1_t, vgate_h1_t = outs_to_v_h1.proj(j_t)
        v_h1_t = v_cell1.step(vinp_h1_t, vgate_h1_t, v_h1_tm1)
        theano.printing.Print("v_h1_t.shape")(v_h1_t.shape)
        pred_f = v_h1_t.dot(pred_proj) + pred_b
        theano.printing.Print("pred_f.shape")(pred_f.shape)
        # [:, 0] keeps the recurrent output a vector (avoids scan's
        # col/matrix type-mismatch error).
        return pred_f[:, 0], v_h1_t
    r, isupdates = theano.scan(
        fn=sample_out_step,
        sequences=[x_tm1_shuf, vinp_t],
        outputs_info=[init_pred, init_hidden])
    (pred_t, v_h1_t) = r
    theano.printing.Print("pred_t.shape")(pred_t.shape)
    theano.printing.Print("v_h1_t.shape")(v_h1_t.shape)
    #pred_t = sigmoid(pre_pred_t)
    #x_t = sample_binomial(pred_t, n_bins, srng)
    # MSE
    x_t = pred_t
    return x_t, h1_t, h2_t, h3_t, k_t, w_t, ss_t, sh_t, isupdates
# Build the one-step sampling graph (compiled into sample_function below).
(sampled, h1_s, h2_s, h3_s, k_s, w_s, stop_s, stop_h, supdates) = sample_step(
    init_x, init_h1, init_h2, init_h3, init_kappa, init_w, c_sym)
sampled = sampled.dimshuffle(1, 0)
theano.printing.Print("sampled.shape")(sampled.shape)
# Unroll the training recurrence over the whole (cut) sequence.
(h1, h2, h3, kappa, w), updates = theano.scan(
    fn=step,
    sequences=[inp_h1, inpgate_h1,
               inp_h2, inpgate_h2,
               inp_h3, inpgate_h3],
    outputs_info=[init_h1, init_h2, init_h3, init_kappa, init_w],
    non_sequences=[context])
# Summary vectors for every timestep, skip connections from all layers.
outs = h1.dot(h1_to_outs) + h2.dot(h2_to_outs) + h3.dot(h3_to_outs)
outs_shape = outs.shape
theano.printing.Print("outs.shape")(outs.shape)
# Rearrange so the per-feature prediction scan iterates over features,
# with (time * minibatch) flattened into one axis.
outs = outs.dimshuffle(2, 1, 0)
vinp = outs.reshape((outs_shape[2], -1, 1))
theano.printing.Print("vinp.shape")(vinp.shape)
shp = vinp.shape
shuff_inpt_shapes = inpt.shape
theano.printing.Print("inpt.shape")(inpt.shape)
shuff_inpt = inpt.dimshuffle(2, 1, 0)
theano.printing.Print("shuff_inpt.shape")(shuff_inpt.shape)
shuff_inpt = shuff_inpt.reshape((shuff_inpt_shapes[2],
                                 shuff_inpt_shapes[1] * shuff_inpt_shapes[0],
                                 1))
theano.printing.Print("shuff_inpt.shape")(shuff_inpt.shape)
theano.printing.Print("vinp.shape")(vinp.shape)
# input from previous time, pred from previous feature
"""
dimshuffle hacks and [:, 0] to avoid this error:
TypeError: Inconsistency in the inner graph of scan 'scan_fn' : an input
and an output are associated with the same recurrent state and should have
the same type but have type 'TensorType(float32, col)' and
'TensorType(float32, matrix)' respectively.
"""
def out_step(shuff_inpt_tm1, vinp_t, pred_fm1, v_h1_tm1):
    # Training-time twin of sample_out_step: predicts each feature from the
    # raw input feature, the RNN summary, and the previously predicted feature.
    j_t = concatenate((shuff_inpt_tm1, vinp_t, pred_fm1.dimshuffle(0, 'x')),
                      axis=-1)
    theano.printing.Print("j_t.shape")(j_t.shape)
    vinp_h1_t, vgate_h1_t = outs_to_v_h1.proj(j_t)
    v_h1_t = v_cell1.step(vinp_h1_t, vgate_h1_t, v_h1_tm1)
    theano.printing.Print("v_h1_t.shape")(v_h1_t.shape)
    pred_f = v_h1_t.dot(pred_proj) + pred_b
    theano.printing.Print("pred_f.shape")(pred_f.shape)
    # [:, 0] keeps the recurrent output a vector (avoids scan's
    # col/matrix type-mismatch error described in the string above).
    return pred_f[:, 0], v_h1_t
init_pred = tensor.zeros((vinp.shape[1],), dtype=theano.config.floatX)
init_hidden = tensor.zeros((shuff_inpt.shape[1], n_v_proj),
                           dtype=theano.config.floatX)
theano.printing.Print("init_pred.shape")(init_pred.shape)
theano.printing.Print("init_hidden.shape")(init_hidden.shape)
# NOTE(review): this rebinds `updates`, discarding the training scan's
# updates bound above -- presumably intentional (that scan carries no
# random streams); confirm before refactoring.
r, updates = theano.scan(
    fn=out_step,
    sequences=[shuff_inpt, vinp],
    outputs_info=[init_pred, init_hidden])
(pred, v_h1) = r
theano.printing.Print("pred.shape")(pred.shape)
# Undo the feature-major flattening: back to (time, minibatch, features).
pred = pred.dimshuffle(1, 0, 'x')
shp = pred.shape
theano.printing.Print("pred.shape")(pred.shape)
pred = pred.reshape((minibatch_size, shp[0] // minibatch_size,
                     shp[1], shp[2]))
theano.printing.Print("pred.shape")(pred.shape)
pred = pred.dimshuffle(1, 0, 2, 3)
theano.printing.Print("pred.shape")(pred.shape)
pred = pred[:, :, :, 0]
theano.printing.Print("pred.shape")(pred.shape)
theano.printing.Print("target.shape")(target.shape)
# binomial
#pred = sigmoid(pre_pred.reshape((shp[0], shp[1], -1)))
#cost = target * tensor.log(pred) + (n_bins - target) * tensor.log(1 - pred)
# MSE
cost = (pred - target) ** 2
# Mask out padded timesteps before reducing.
cost = cost * mask.dimshuffle(0, 1, 'x')
# sum over sequence length and features, mean over minibatch
cost = cost.dimshuffle(0, 2, 1)
cost = cost.reshape((-1, cost.shape[2]))
cost = cost.sum(axis=0).mean()
# L2 weight decay on all non-bias parameters.
l2_penalty = 0
for p in list(set(params) - set(biases)):
    l2_penalty += (p ** 2).sum()
cost = cost + 1E-3 * l2_penalty
grads = tensor.grad(cost, params)
grads = gradient_clipping(grads, 10.)
learning_rate = 1E-4
opt = adam(params, learning_rate)
# Final rebind of `updates`: the adam parameter updates used for training.
updates = opt.updates(params, grads)
# Either reload compiled functions from a checkpoint, or compile them fresh.
if args.cont is not None:
    print("Continuing training from saved model")
    continue_path = args.cont
    if not os.path.exists(continue_path):
        raise ValueError("Continue model %s, path not "
                         "found" % continue_path)
    saved_checkpoint = load_checkpoint(continue_path)
    checkpoint_dict = saved_checkpoint
    train_function = checkpoint_dict["train_function"]
    cost_function = checkpoint_dict["cost_function"]
    predict_function = checkpoint_dict["predict_function"]
    attention_function = checkpoint_dict["attention_function"]
    sample_function = checkpoint_dict["sample_function"]
    # The string below is dead weight-restoration code kept for reference.
    """
    trained_weights = get_values_from_function(
        saved_checkpoint["train_function"])
    set_shared_variables_in_function(train_function, trained_weights)
    """
else:
    # train_function applies the adam updates; cost_function is identical
    # but side-effect free (used for validation).
    train_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
                                      init_h1, init_h2, init_h3, init_kappa,
                                      init_w],
                                     [cost, h1, h2, h3, kappa, w],
                                     updates=updates)
    cost_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
                                     init_h1, init_h2, init_h3, init_kappa,
                                     init_w],
                                    [cost, h1, h2, h3, kappa, w])
    predict_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
                                        init_h1, init_h2, init_h3, init_kappa,
                                        init_w],
                                       [outs],
                                       on_unused_input='warn')
    attention_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
                                          init_h1, init_h2, init_h3, init_kappa,
                                          init_w],
                                         [kappa, w], on_unused_input='warn')
    # One-step sampler; supdates carries the inner scan's updates.
    sample_function = theano.function([init_x, c_sym, c_mask_sym, init_h1, init_h2,
                                       init_h3, init_kappa, init_w],
                                      [sampled, h1_s, h2_s, h3_s, k_s, w_s,
                                       stop_s, stop_h],
                                      on_unused_input="warn",
                                      updates=supdates)
print("Beginning training loop")
# Rebuild the checkpoint dict so whichever functions are live (fresh or
# reloaded) are what gets serialized at each checkpoint.
checkpoint_dict = {}
checkpoint_dict["train_function"] = train_function
checkpoint_dict["cost_function"] = cost_function
checkpoint_dict["predict_function"] = predict_function
checkpoint_dict["attention_function"] = attention_function
checkpoint_dict["sample_function"] = sample_function
def _loop(function, itr):
    # Run one minibatch through `function`, truncating BPTT into cut_len
    # chunks and threading the recurrent state (h1-h3, kappa, w) across
    # chunks.  Returns the list of per-chunk costs.
    prev_h1, prev_h2, prev_h3 = [np_zeros((minibatch_size, n_hid))
                                 for i in range(3)]
    prev_kappa = np_zeros((minibatch_size, att_size))
    prev_w = np_zeros((minibatch_size, n_chars))
    X_mb, X_mb_mask, c_mb, c_mb_mask = next(itr)
    n_cuts = len(X_mb) // cut_len + 1
    partial_costs = []
    for n in range(n_cuts):
        start = n * cut_len
        stop = (n + 1) * cut_len
        if len(X_mb[start:stop]) < cut_len:
            # Zero-pad the final partial chunk (data and mask) up to cut_len.
            new_len = cut_len - len(X_mb) % cut_len
            zeros = np.zeros((new_len, X_mb.shape[1],
                              X_mb.shape[2]))
            zeros = zeros.astype(X_mb.dtype)
            mask_zeros = np.zeros((new_len, X_mb_mask.shape[1]))
            mask_zeros = mask_zeros.astype(X_mb_mask.dtype)
            X_mb = np.concatenate((X_mb, zeros), axis=0)
            X_mb_mask = np.concatenate((X_mb_mask, mask_zeros), axis=0)
            assert len(X_mb[start:stop]) == cut_len
            assert len(X_mb_mask[start:stop]) == cut_len
        rval = function(X_mb[start:stop],
                        X_mb_mask[start:stop],
                        c_mb, c_mb_mask,
                        prev_h1, prev_h2, prev_h3, prev_kappa, prev_w)
        current_cost = rval[0]
        prev_h1, prev_h2, prev_h3 = rval[1:4]
        # Carry only the last timestep's state into the next chunk.
        prev_h1 = prev_h1[-1]
        prev_h2 = prev_h2[-1]
        prev_h3 = prev_h3[-1]
        prev_kappa = rval[4][-1]
        prev_w = rval[5][-1]
        partial_costs.append(current_cost)
    return partial_costs
# Train/validate via the shared driver; checkpoints every checkpoint_every_n.
run_loop(_loop, train_function, train_itr, cost_function, valid_itr,
         n_epochs=n_epochs, checkpoint_dict=checkpoint_dict,
         checkpoint_every_n=checkpoint_every_n, skip_minimums=True)
| [
"kastnerkyle@gmail.com"
] | kastnerkyle@gmail.com |
781a83a87d5fb9e980be34d090ce68cf1aba93a2 | 66c3ff83c3e3e63bf8642742356f6c1817a30eca | /.vim/tmp/neocomplete/buffer_cache/=+home=+dante=+proyectos=+django-1.9=+sermul=+manage.py | dca2c396a7c3a45e15f9cbfa9f80d467b50c38e8 | [] | no_license | pacifi/vim | 0a708e8bc741b4510a8da37da0d0e1eabb05ec83 | 22e706704357b961acb584e74689c7080e86a800 | refs/heads/master | 2021-05-20T17:18:10.481921 | 2020-08-06T12:38:58 | 2020-08-06T12:38:58 | 30,074,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | {'usr', 'bin', 'env', 'python', 'import', 'os', 'sys', 'if', '__name__', '__main__', 'environ', 'setdefault', 'DJANGO_SETTINGS_MODULE', 'sermul', 'settings', 'from', 'django', 'core', 'management', 'execute_from_command_line', 'argv'}
| [
"pacifi.bnr@gmail.com"
] | pacifi.bnr@gmail.com |
ac00ac4bb96ebe184493e06849d1d2e99492b860 | 2f96d0e69ce3d6b1ea4623ed5b4c1741d9634ea9 | /tests/dummy_repo/tvm/python/tvm/hybrid/util.py | 556ede1519e92fb2666ef894fd89ca5bfffa2590 | [
"Apache-2.0"
] | permissive | tqchen/ffi-navigator | ae1e8923e4d5be589beabfadba91f4a3b39e03dd | 46b0d0c6bce388a8e1e2cb7ed28062e889e4596c | refs/heads/main | 2023-02-06T22:32:54.214871 | 2023-02-05T16:25:16 | 2023-02-05T16:25:16 | 230,478,838 | 217 | 24 | Apache-2.0 | 2023-02-05T16:25:18 | 2019-12-27T16:44:58 | Python | UTF-8 | Python | false | false | 921 | py | import ast
import inspect
import logging
import sys
import numpy
from .. import api as _api
from .. import make as _make
from .. import expr as _expr
from .. import stmt as _stmt
from .._ffi.base import numeric_types
from ..tensor import Tensor
from ..container import Array
def replace_io(body, rmap):
    """Replace tensor reads/writes inside *body* according to *rmap*.

    Every Provide (write) or Halide Call (read) whose function appears as
    a key in *rmap* is redirected to the corresponding buffer value.
    """
    from .. import ir_pass

    def _rewrite(op):
        # Writes: redirect Provide nodes whose producer is remapped.
        if isinstance(op, _stmt.Provide) and op.func in rmap:
            target = rmap[op.func]
            return _make.Provide(target.op, op.value_index, op.value, op.args)
        # Reads: redirect Halide Call nodes whose producer is remapped.
        if isinstance(op, _expr.Call) and op.func in rmap:
            target = rmap[op.func]
            return _make.Call(target.dtype, target.name, op.args,
                              _expr.Call.Halide, target.op, target.value_index)
        # Any other node is left untouched.
        return None

    return ir_pass.IRTransform(body, None, _rewrite, ['Provide', 'Call'])
| [
"noreply@github.com"
] | tqchen.noreply@github.com |
7469ee162d8ac9192020e9c5adfd03a95ac054e4 | a6607f113e8dbf501e016c5fcc2fa54ff9373448 | /mutationfuzzer.py | d29a03c7ca3f8b9f6f98d8d36319f864639898fb | [] | no_license | CyberDomeBlue/Python-Iterating-Fuzzer | c8119ec14a87342a6314bcf03aeff232fe67c1ac | ea1582933ec7a604efed10ca7c7ada8fe5eb6b1b | refs/heads/master | 2020-04-27T23:06:15.940387 | 2019-03-10T00:53:23 | 2019-03-10T00:53:23 | 174,762,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,597 | py | import socket, secrets, sys, time
from time import strftime, localtime
def mutationfuzzer():
    """Interactively fuzz a TCP service with random printable payloads.

    Loops forever: prompts for host, port, fuzz-string size and a command
    prefix, then sends ``command + random_string`` over a fresh TCP socket.
    A refused connection is treated as a crash of the target; a reset
    connection is reported and the function returns.  Console and network
    I/O only; returns nothing.
    """
    try:
        while 1:
            print("++===========================+ +")
            print("| | MutationFuzzer | |")
            print("| |==========================| |")
            host = input("Host to Connect to:" )
            port = input("Port to connect to:" )
            fuzzsize = input("Size of fuzz string:" )
            commandtofuzz = input("Command to Fuzz:" )
            started = time.strftime("%d %b %Y %H:%M:%S", localtime())
            currenttime = time.strftime("%d %b %Y %H:%M:%S", localtime())
            testcase = ("++==========================================================================+ +" + "\n"
                        "| |BEGINNING FUZZ TEST | |" + "\n"
                        "++==========================================================================+ +" + "\n"
                        "| | STARTTIME | | FUZZSTRING MAX SIZE " + "\n"
                        "| |" + started + '| | ' + fuzzsize + "\n"
                        "++==========================================================================+ +" + "\n"
                        )
            print(testcase)
            # NOTE(review): seeding with ' ' makes the payload fuzzsize+1
            # characters long -- kept as-is to preserve existing behavior.
            fuzzstring = ' '
            for i in range(int(fuzzsize)):
                fuzzstring += secrets.choice('abcdefghijklmnopqrstuvwxyz1234567890!~`@#$%^&*()_-+=[]{}\|/?<>,.;:')
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((host, int(port)))
            fuzzstringinfo = len(fuzzstring)
            encoded = commandtofuzz.encode() + fuzzstring.encode()
            time.sleep(0.25)
            s.send(encoded)
            s.close()
            currenttime = time.strftime("%d %b %Y %H:%M:%S", localtime())
            userinfo = ("FUZZ SIZE:" + str(fuzzstringinfo))
            print(userinfo)
            # BUG FIX: the original assigned the prompt string itself to
            # `question` instead of asking the user, so the exit branch
            # below was unreachable.
            question = input("Are you done fuzzing:" )
            if question == ("yes"):
                sys.exit(0)
            else:
                pass
    except ConnectionRefusedError:
        print("SUCCESS HOST CRASHED")
        # Heuristic crash length: subtracts a fixed 200-byte overhead from
        # the last payload length -- TODO confirm where 200 comes from.
        crashvalue = int(fuzzstringinfo) - (200)
        crashinfo = (currenttime + ' ' + "LENGTH TO CAUSE CRASH:" + ' ' + (str(crashvalue)))
        print(crashinfo)
    except ConnectionResetError:
        print("CONNECTION WAS RESET")
mutationfuzzer()
| [
"noreply@github.com"
] | CyberDomeBlue.noreply@github.com |
4d13e10726f8c0ffe263a134f51bdaa594e56873 | 74ba4479584876e6f731e18e4b811db8fdc4b026 | /setup.py | 80f50ab119675e9bf978d7606a58548190bdc08e | [] | no_license | lewisrodgers/configloader | def347d4538b609f6bc668fb71fb77b7af4c0740 | 57b2bc891f0d32c7f79e0784d69daa9374907700 | refs/heads/master | 2023-03-24T04:32:12.113161 | 2021-03-18T19:26:44 | 2021-03-18T19:26:44 | 347,147,077 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | import os
from setuptools import setup
# Read the package version from the adjacent VERSION file.
version_path = os.path.join(".", "VERSION")
with open(version_path) as fh:
    version = fh.read().strip()

setup(
    name="configloader",
    version=version,
    description="Configuration loader",
    author="Lewis Rodgers",
    author_email="lrodgers04@gmail.com",
    packages=["configloader"],
)
| [
"lewis.rodgers@accenture.com"
] | lewis.rodgers@accenture.com |
3b25372db706c47033a08f0ac49b19f3a91727f9 | e77a6be9492afa0dc956d0485f643f755ef8aab0 | /smartdoc/views.py | 609d7fcddf00f4dc385c69a7fa7e5a25a01f5bee | [] | no_license | kingosky/jichu | 388e552e9a1b55d7d3973a7da102393963cd1fd2 | 567f5459d29f5fe6c1a110009fb0c1ab505efdc2 | refs/heads/master | 2022-12-15T03:14:45.705598 | 2019-07-17T02:55:54 | 2019-07-17T02:55:54 | 197,301,535 | 0 | 0 | null | 2022-12-08T01:22:43 | 2019-07-17T02:38:14 | JavaScript | UTF-8 | Python | false | false | 5,610 | py | from django.shortcuts import render
from django.views.generic import DetailView, ListView, UpdateView
from django.views.generic.edit import CreateView
from .models import Product, Category, Document
from django.db.models import Q
from .forms import ProductForm, CategoryForm, DocumentForm
from django.contrib.auth.decorators import login_required, permission_required
from django.utils.decorators import method_decorator
from django.http import Http404
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
import json
import datetime
@csrf_exempt
def document_search(request):
    """Render the search page, filtering documents by title/product on ``q``."""
    query = request.GET.get('q', None)
    if not query:
        # No (or empty) query: render the page without results.
        return render(request, 'smartdoc/document_search.html')
    matches = Document.objects.filter(
        Q(title__icontains=query) |
        Q(product__name__icontains=query) |
        Q(product__code__icontains=query))
    return render(request, 'smartdoc/document_search.html',
                  {'document_list': matches})
class MyEncoder(json.JSONEncoder):
    """JSON encoder that renders dates and datetimes as 'YYYY-MM-DD'."""

    def default(self, obj):
        # datetime.datetime is a subclass of datetime.date, and both
        # branches of the original used the same format string, so a
        # single isinstance check covers them.
        if isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.strftime('%Y-%m-%d')
        return json.JSONEncoder.default(self, obj)
@csrf_exempt
def doc_ajax_search(request):
    """Return document search results as JSON for AJAX requests.

    Filters documents whose title, product name or product code contains
    the ``q`` GET parameter.  BUG FIX: the original implicitly returned
    ``None`` when ``q`` was missing or empty, which is a server error in
    Django; an empty JSON list is returned instead.
    """
    q = request.GET.get('q', None)
    if not q:
        return HttpResponse(json.dumps([]))
    document_list = Document.objects.filter(Q(title__icontains=q) |
                                            Q(product__name__icontains=q) |
                                            Q(product__code__icontains=q))
    data = []
    for document in document_list:
        data.append({"title": document.title, "product_name": document.product.name,
                     "category_name": document.category.name,
                     # File extension of the stored document, upper-cased.
                     "format": document.doc_file.url.split('.')[-1].upper(),
                     "size": "{:.1f}KB".format(document.doc_file.size/1024),
                     "version": document.version_no, "date": document.mod_date,
                     "product_id": document.product.id, "id": document.id,
                     "url": document.doc_file.url,
                     })
    # MyEncoder serializes the date/datetime fields as YYYY-MM-DD.
    json_data = json.dumps(data, cls=MyEncoder)
    return HttpResponse(json_data)
class ProductList(ListView):
    # Generic list view over all products.
    model = Product
class ProductDetail(DetailView):
    # Generic detail view for a single product.
    model = Product
@method_decorator(login_required, name='dispatch')
@method_decorator(permission_required('smartdoc.add_product', raise_exception=True), name='dispatch')
class ProductCreate(CreateView):
    """Create a product; requires login plus the add_product permission."""
    model = Product
    template_name = 'smartdoc/form.html'
    form_class = ProductForm
    # Associate form.instance.user with self.request.user
    def form_valid(self, form):
        # Stamp the logged-in user as the product's author before saving.
        form.instance.author = self.request.user
        return super().form_valid(form)
@method_decorator(login_required, name='dispatch')
@method_decorator(permission_required('smartdoc.change_product', raise_exception=True), name='dispatch')
class ProductUpdate(UpdateView):
    """Edit a product; only its author may update it (others get 404)."""
    model = Product
    template_name = 'smartdoc/form.html'
    form_class = ProductForm
    def get_object(self, queryset=None):
        obj = super().get_object(queryset=queryset)
        # 404 (not 403) when the requester is not the author.
        if obj.author != self.request.user:
            raise Http404()
        return obj
class CategoryList(ListView):
    # Generic list view over all categories.
    model = Category
class CategoryDetail(DetailView):
    # Generic detail view for a single category.
    model = Category
@method_decorator(login_required, name='dispatch')
@method_decorator(permission_required('smartdoc.add_category', raise_exception=True), name='dispatch')
class CategoryCreate(CreateView):
    """Create a category; requires login plus the add_category permission."""
    model = Category
    template_name = 'smartdoc/form.html'
    form_class = CategoryForm
    # Associate form.instance.user with self.request.user
    def form_valid(self, form):
        # Stamp the logged-in user as the category's author before saving.
        form.instance.author = self.request.user
        return super().form_valid(form)
@method_decorator(login_required, name='dispatch')
@method_decorator(permission_required('smartdoc.change_category', raise_exception=True), name='dispatch')
class CategoryUpdate(UpdateView):
    """Edit a category; only its author may update it (others get 404)."""
    model = Category
    template_name = 'smartdoc/form.html'
    form_class = CategoryForm
    def get_object(self, queryset=None):
        obj = super().get_object(queryset=queryset)
        # 404 (not 403) when the requester is not the author.
        if obj.author != self.request.user:
            raise Http404()
        return obj
class DocumentList(ListView):
    # Generic list view over all documents.
    model = Document
class DocumentDetail(DetailView):
    # Generic detail view for a single document.
    model = Document
@method_decorator(login_required, name='dispatch')
@method_decorator(permission_required('smartdoc.add_document', raise_exception=True), name='dispatch')
class DocumentCreate(CreateView):
    """Create a document under the product given by the URL's pk kwarg."""
    model = Document
    template_name = 'smartdoc/form.html'
    form_class = DocumentForm
    # Associate form.instance.user with self.request.user
    def form_valid(self, form):
        # Stamp author and parent product (from the URL) before saving.
        form.instance.author = self.request.user
        form.instance.product = Product.objects.get(id=self.kwargs['pk'])
        return super().form_valid(form)
@method_decorator(login_required, name='dispatch')
@method_decorator(permission_required('smartdoc.change_document', raise_exception=True), name='dispatch')
class DocumentUpdate(UpdateView):
    """Edit a document; only its author may update it (others get 404)."""
    model = Document
    template_name = 'smartdoc/form.html'
    form_class = DocumentForm
    def get_object(self, queryset=None):
        obj = super().get_object(queryset=queryset)
        # 404 (not 403) when the requester is not the author.
        if obj.author != self.request.user:
            raise Http404()
        return obj
| [
"kingosky.163.com"
] | kingosky.163.com |
69611b00a486d48c9a5aee89ea19ec98246b1ab6 | d3d0841cf891228912adfa48879b3b2bffa1c170 | /Douyin/tools/Encrypt.py | f6c2f7dad13db20dbb4804b8e9cf076e00fd96a4 | [] | no_license | forcemeter/DouYin-1 | bd35b9d1687a66b81890e3cdac9d9b6080ca97b4 | 642607f6ca0449bd996db146313fac94376dcf9a | refs/heads/master | 2022-09-10T03:12:27.961253 | 2018-09-06T07:33:11 | 2018-09-06T07:33:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | # -*- coding: utf-8 -*-
# @Time : 2018/5/26 11:52
# @Author : Hunk
# @Email : qiang.liu@ikooo.cn
# @File : Encrypt.py
# @Software: PyCharm
import time
import json
import requests
import hashlib
from urllib.parse import urlparse, parse_qs, urlencode
def get_aweme_token(url):
    """Re-sign a Douyin API URL with fresh as/cp/mas tokens.

    Strips the stale signature parameters from *url*, refreshes the
    timestamp fields, asks the remote signing service for new tokens,
    and returns the URL rebuilt with the updated query string.
    Performs a network request.
    """
    now = time.time()
    sign_service = "http://47.97.186.56:4570/getascpmas41d8224167374f85994185cd7d68be88"
    # parse_qs yields lists; keep only the last value of each parameter.
    query = parse_qs(urlparse(url).query, keep_blank_values=True)
    data = {name: values[-1] for name, values in query.items()}
    # Drop the stale signature fields before re-signing.
    for stale in ("mas", "cp", "as"):
        data.pop(stale)
    data["_rticket"] = str(round(now * 1000))
    data["ts"] = str(int(now))
    salted = (str(int(now)) + "504c53f18b834e8b9b853cc64628cd12").encode()
    payload = {
        "dic": data,
        "device_id": data["device_id"],
        "ts_short": int(now),
        "mykey": hashlib.md5(salted).hexdigest(),
    }
    token = requests.post(sign_service, data=json.dumps(payload)).json()
    data["as"] = token["As"]
    data["mas"] = token["Mas"]
    data["cp"] = token["Cp"]
    return url.split("?")[0] + "?" + urlencode(data)
# BUG FIX: the original used `__name__ in "__main__"`, which is a substring
# test (e.g. it is also True for ""), not an equality check.
if __name__ == "__main__":
    url = "https://api.amemv.com/aweme/v1/aweme/favorite/?user_id=88832667622&max_cursor=0&count=20&retry_type=no_retry&iid=43398130756&device_id=57259297041&ac=wifi&channel=aweGW&aid=1128&app_name=aweme&version_code=183&version_name=1.8.3&device_platform=android&ssmix=a&device_type=MuMu&device_brand=Android&language=zh&os_api=23&os_version=6.0.1&uuid=008796758836908&openudid=14c5f0e306271ae&manifest_version_code=183&resolution=1024*576&dpi=192&update_version_code=1832&_rticket=1536213417686&ts=1536213420&as=a1b5ec697cea1ba1e04355&cp=c2a5b657cf099f1be1Uc]g&mas=008435bbf9b3a897df221b6a7f86e9c1e8acaccc2c0ca68c86468c"
    print(get_aweme_token(url))
| [
"773369248@qq.com"
] | 773369248@qq.com |
a2de85bc8ad72ace7a5ffdb484c0071bf807bf99 | 05c206c66cd3068c6024213838bcb9c695f25e4b | /pre_processing.py | 31d597afbdffbcb6d1e79545e8a54c72fa4a11bd | [] | no_license | Promech2020/Covid-19_Social_Distance_Maintainer | d67626c8042f34daf18c8e02bf50b52e9dfe501c | b8da65d21c95a316897066eac78048c2a0b564bc | refs/heads/master | 2023-01-05T22:16:28.584268 | 2020-11-03T02:49:52 | 2020-11-03T02:49:52 | 305,307,490 | 0 | 0 | null | 2020-10-22T02:53:04 | 2020-10-19T08:01:50 | null | UTF-8 | Python | false | false | 1,374 | py | from perform_sdc import call_perform_sdc
import scipy.io.wavfile as wav
import time
import sys
import os
import cv2
def pre_process(v_path, wait_time_before, a_path):
    """Resolve the inputs, measure the warning clip length, start detection."""
    video_path = select_video(v_path)
    seconds = wait_to_play_warning(wait_time_before)
    soundfile = select_audio(a_path)
    # Clip duration in seconds = number of samples / sample rate.
    source_rate, source_sig = wav.read(soundfile)
    audio_file_length = len(source_sig) / float(source_rate)
    call_perform_sdc(video_path, seconds, soundfile, audio_file_length)
#########################################
# Select the video #
#########################################
def select_video(video_name):
    """Map the user's video choice onto a capture source string."""
    if video_name == "WebCam":
        return "0"  # camera index 0, passed through as a string
    if video_name == "":
        return r"./input_video/GridTest5.mp4"  # bundled default clip
    return video_name
#########################################
# Time to wait #
#########################################
def wait_to_play_warning(sec):
    """Return the warning delay as an integer number of seconds."""
    return int(sec)
#########################################
# Select Audio File #
#########################################
def select_audio(audio):
    """Return the warning sound path, defaulting to the bundled clip."""
    return audio if audio != "" else r"./sound/covid_msg.wav"
| [
"promechminds@gmail.com"
] | promechminds@gmail.com |
f0080a7074834fceb382f01bbc98da1cf2cd6fa8 | a07c179df81f27fe2022855e8d6b91b1c2da73f8 | /Advanced_python/object_oriented_programming/Inheritance/Single/PersonEmployee.py | 2ec64e9da52353202ac7bcede3cde0a72f3268e8 | [] | no_license | HarithaPS21/Luminar_Python | da36849ce0a6d92b9cf54ee7401ffdb3fa6e80d3 | d6057b1bfb1bb1293b1564fa766f0cca822e0642 | refs/heads/master | 2023-07-08T05:15:52.303951 | 2021-08-11T04:14:49 | 2021-08-11T04:14:49 | 385,459,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # Single Inheritance - inheriting attributes and methods of a single class
class Person:  # base (parent / super) class
    """Stores and prints a person's basic details."""

    def pdetails(self, name, age, adrs):
        # Record the details on the instance, then echo them.
        self.name = name
        self.age = age
        self.adrs = adrs
        print(self.name, self.age, self.adrs)
class Employee(Person):  # derived (child / sub) class
    """Extends Person with employment details."""

    def empdetails(self, empid, dept, salary):
        self.empid = empid
        self.dept = dept
        self.salary = salary
        print("Employee ID: ", empid, "\nDepartment :", dept, "\nSalary :", salary)
        # Relies on pdetails() having been called first to set self.name.
        print("salary of ", self.name, "is ", self.salary)
# Demo: single inheritance in action.
pe=Person()
pe.pdetails("Akhil",25,"Kollam") # own methods
emp=Employee()
emp.pdetails("Akhil",25,"Pathanamthitta") # using methods of parent class
emp.empdetails(2,"testing",25000)
"harithaps59@gmail.com"
] | harithaps59@gmail.com |
9840e6a469243c62f1b31dd12a64e6f336996205 | 9d2c9101541eb0a184a0fbd90da5ceed7bad8220 | /src/health_records_system.py | 1d031c0fcd8adb390e191c2952bb2444dd67b861 | [] | no_license | cpappas18/Health-Records-System | c1e05f5d6733f7891af3debf6f19611c66e07787 | f25f7f9e5a5aad5901e1d237235b42a6da50107b | refs/heads/master | 2023-07-02T18:28:45.180964 | 2021-08-08T22:58:04 | 2021-08-08T22:58:04 | 361,300,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,235 | py | import copy
class HealthRecordsSystem(object):
    """
    Electronic Health Records System that stores patient information.

    Singleton: only one live instance is allowed (obtain it through
    ``get_instance``).  Acts as the Receiver in the Command design pattern.
    """

    __instance = None  # the single shared instance, or None

    @staticmethod
    def get_instance():
        """Return the shared instance, creating it on first access."""
        if HealthRecordsSystem.__instance is None:
            HealthRecordsSystem()
        return HealthRecordsSystem.__instance

    def __init__(self):
        if HealthRecordsSystem.__instance is not None:
            raise Exception("HealthRecordsSystem class is a Singleton.")
        HealthRecordsSystem.__instance = self
        self._patients = {}  # maps patient ID -> patient

    @staticmethod
    def _reset():
        """Drop the Singleton instance so a fresh one can be created."""
        HealthRecordsSystem.__instance = None

    def get_patient(self, id):
        """Return the patient with the given ID, or None if absent."""
        patient = self._patients.get(id)
        if patient is None:
            print(f"Patient #{id} does not exist in the System.")
        return patient

    def add_patient(self, patient):
        """Add *patient*; if the ID exists, ask (Y/N) before overwriting."""
        if patient.id in self._patients:
            overwrite = input(f"Patient #{patient.id} already exists. Overwrite? Y/N ")
            if overwrite != "Y":
                print(f"Failure: Patient #{patient.id} was not added to the system.")
                return
        self._patients[patient.id] = patient
        print(f"Patient #{patient.id} successfully added to the system.")

    def remove_patient(self, id):
        """Remove and return the patient with *id*, or None if absent."""
        if id not in self._patients:
            print(f"Patient #{id} does not exist in the system.")
            return None
        patient = self._patients.pop(id)
        print(f"Patient #{id} successfully removed from the system.")
        return patient
class Patient(object):
    """A patient record: identity details plus medication and test results."""

    def __init__(self, id, name, age, phone_number):
        self._id = id
        self._name = name
        self._age = age
        self._phone_number = phone_number
        self._medication = {}    # medication name -> medication object
        self._test_results = {}  # (test name, date) -> result

    @property
    def id(self):
        return self._id

    @property
    def name(self):
        return self._name

    @property
    def age(self):
        return self._age

    @property
    def phone_number(self):
        return self._phone_number

    @property
    def medication(self):
        return self._medication

    @property
    def test_results(self):
        return self._test_results

    def get_medication(self, med_name):
        """Return the medication named *med_name*, or None if absent."""
        if med_name not in self._medication:
            print(f"{med_name} does not exist in the patient's record.")
            return None
        return self._medication[med_name]

    def add_medication(self, med):
        """Add *med*; if already present, ask (Y/N) before overwriting."""
        if med.name in self._medication:
            answer = input(f"Patient #{self._id} is already taking this medication. Overwrite dosage and frequency? Y/N ")
            if answer == "Y":
                self._medication[med.name] = med
                print(f"Medication successfully updated in patient #{self._id}'s record.")
            else:
                print(f"Failure: Medication was not updated in patient #{self._id}'s record.")
        else:
            self._medication[med.name] = med
            print(f"Medication successfully added to patient #{self._id}'s record.")

    def remove_medication(self, med_name):
        """Remove and return the medication named *med_name*, or None."""
        if med_name not in self._medication:
            print(f"{med_name} does not exist in the patient's record.")
            return None
        med = self._medication.pop(med_name)
        print(f"{med_name} successfully removed from the patient's record.")
        return med

    def clear_medication(self):
        """Drop every medication from the record."""
        self._medication.clear()

    def get_test_results(self, name, date):
        """Return the result of test *name* on *date* (DD/MM/YYYY), or None."""
        if (name, date) not in self._test_results:
            print(f"Test for {name} does not exist in the patient's record on {date}.")
            return None
        return self._test_results[(name, date)]

    def add_test_results(self, name, date, result):
        """Record *result* for test *name* on *date*; ask before overwrite."""
        if (name, date) in self._test_results:
            answer = input(f"A result for this test on {date} has already been recorded. Overwrite test result? Y/N ")
            if answer == "Y":
                self._test_results[(name, date)] = result
                print(f"Test result successfully updated in patient #{self._id}'s record.")
            else:
                print(f"Failure: Test result was not updated in patient #{self._id}'s record.")
        else:
            self._test_results[(name, date)] = result
            print(f"Test result successfully added to patient #{self._id}'s record.")

    def clear_test_results(self):
        """Drop every test result from the record."""
        self._test_results.clear()
class Medication(object):
    """A prescribed medication: name, dosage, and dosing frequency.

    All three fields are exposed as read/write properties.
    """

    def __init__(self, name, dosage, frequency):
        self._name = name
        self._dosage = dosage
        self._frequency = frequency

    def __repr__(self):
        # Added for debuggability; no caller depends on the previous
        # default object repr.
        return (f"{type(self).__name__}(name={self._name!r}, "
                f"dosage={self._dosage!r}, frequency={self._frequency!r})")

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def dosage(self):
        return self._dosage

    @dosage.setter
    def dosage(self, value):
        self._dosage = value

    @property
    def frequency(self):
        return self._frequency

    @frequency.setter
    def frequency(self, value):
        self._frequency = value
| [
"chloeoliviapappas@gmail.com"
] | chloeoliviapappas@gmail.com |
2cea099a4ac565251c302b64d8ff402daf562e91 | 183e0a63af083221ca557b0e0e9badf9a4a83ad0 | /tokenstorage/urls.py | 35e41c8e4f40cdeaed8b36b67d6a3a2644224a1d | [] | no_license | Discovery-VSTS/settingmanagement | 3a998a5c8bdc3bc88e1aca90e130bad9263706e4 | ca8d5aae09f8e5fe175555ebc1a18854f0649563 | refs/heads/master | 2021-01-20T03:59:20.690027 | 2017-03-23T14:37:41 | 2017-03-23T14:37:41 | 84,086,008 | 0 | 1 | null | 2017-03-23T14:37:42 | 2017-03-06T14:58:17 | Python | UTF-8 | Python | false | false | 515 | py | from django.conf.urls import url
from .views import TokenStorage, TokenStorageViewSet, clear_database
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'^tokenstorage/$', view=TokenStorage.as_view()),
url(r'^tokenstorage/reset/$', view=clear_database),
url(r'^tokenstorage/all/$', view=TokenStorageViewSet.as_view({'get': 'list'})),
url(r'^tokenstorage/(?P<instance_id>\w{1,50})/$', view=TokenStorage.as_view())
]
urlpatterns = format_suffix_patterns(urlpatterns)
| [
"minhlong.langos@gmail.com"
] | minhlong.langos@gmail.com |
f93d1698b5027bfb81cceb0b635441fddee194d0 | c9835b59806be834f647ac821c1672a6889a24c2 | /agent/yz_agent/serialize/json_msg.py | d161ea18a7c6fec0074dcda8d61fd9934b493c3e | [] | no_license | CannedFish/yz_data_collector | cd294061a38307699a6c6024843900f9250213b4 | 5629110923558680a7f6bdb0f845362b0e91c419 | refs/heads/master | 2020-04-05T13:07:35.781753 | 2017-07-07T09:46:00 | 2017-07-07T09:46:00 | 95,100,970 | 0 | 1 | null | 2017-07-07T09:46:00 | 2017-06-22T09:52:38 | Python | UTF-8 | Python | false | false | 826 | py | # -*- coding: utf-8 -*-
import json
class MsgBody(object):
_id = ''
_data = ''
_type = ''
_usage = ''
@property
def id(self):
return self._id
@id.setter
def id(self, v):
self._id = v
@property
def data(self):
return self._data
@data.setter
def data(self, v):
self._data = v
@property
def type(self):
return self._type
@type.setter
def type(self, v):
self._type = v
@property
def usage(self):
return self._usage
@usage.setter
def usage(self, v):
self._usage = v
def SerializeToString(self):
return json.dumps({
'id': self._id,
'data': json.loads(self._data),
'type': self._type,
'usage': self._usage
})
| [
"lianggy0719@126.com"
] | lianggy0719@126.com |
083ff4ceb28cf80e378d3051c65ce382c21f075c | 8ea561caaa632cb648e1ca4bb2c1eef6172c46c0 | /app/forms.py | 767e4041283bbbad874cbcace11aad861aa0016f | [] | no_license | taiwan-hero/carlton | 32342031375aa94e5c06e306c68fb974538c72e5 | ba2696031e05c1f0820a3accc13fab5b09c0a2d3 | refs/heads/master | 2020-04-02T03:51:16.470959 | 2016-07-24T04:40:54 | 2016-07-24T04:40:54 | 63,676,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # -*- encoding: utf-8 -*-
"""
Python Aplication Template
Licence: GPLv3
"""
from flask.ext.wtf import Form, TextField, TextAreaField, DateTimeField, PasswordField
from flask.ext.wtf import Required
class ExampleForm(Form):
title = TextField(u'Title', validators = [Required()])
content = TextAreaField(u'Content')
date = DateTimeField(u'Date', format='%d/%m/%Y %H:%M')
#recaptcha = RecaptchaField(u'Recaptcha')
class SignupForm(Form):
user = TextField(u'User', validators = [Required()])
password = PasswordField(u'Password', validators = [Required()])
class LoginForm(Form):
user = TextField(u'User', validators = [Required()])
password = PasswordField(u'Password', validators = [Required()])
| [
"tim@getlocalmeasure.com"
] | tim@getlocalmeasure.com |
04010f1316e22a96c0450ba8d41a3a66b39563ee | 5355abeac6fd30ecc908619e3aaf5c44ee2389b3 | /client.py | 80809b613a8e2155f3b33673fe960e0472532ff6 | [] | no_license | fanlai0990/paxos | 4edb782b7de59737cb5697c6df53c36166557aed | 18fc905c3c3efff401eaf07e06bea53aa4b1cd8e | refs/heads/master | 2021-04-28T16:15:40.096791 | 2018-02-27T02:32:06 | 2018-02-27T02:32:06 | 122,011,766 | 0 | 0 | null | 2018-02-19T02:43:30 | 2018-02-19T02:43:30 | null | UTF-8 | Python | false | false | 1,030 | py | import sys;
from utils import *;
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor;
class ClientDatagramProtocol(DatagramProtocol):
def __init__(self, host, port):
self.host = host;
self.port = port;
def startProtocol(self):
self.transport.connect(self.host, self.port);
print("Client has connected to the host %s %d" % (self.host,self.port));
self.transport.write('hello'.encode());
def datagramReceived(self, data, from_address):
print("received %r from %s:%d" % (data, from_address[0], from_address[1]));
def main():
argv = process_argv(sys.argv);
host = "127.0.0.1"
port = 9100;
if "port" in argv and argv["port"].isdigit():
port = int(argv["port"]);
if "host" in argv and is_valid_ip(argv["host"]):
host = argv["host"];
reactor.listenUDP(port, ClientDatagramProtocol(host,9200));
reactor.callWhenRunning(main)
reactor.run();
if __name__ == "__main__":
main(); | [
"hasanal@umich.edu"
] | hasanal@umich.edu |
6f6476757e06d7a487ecf584035e507e47e98cb6 | 9e9d23e7a57c46da27a491a61f19c7239d066bf8 | /biliup/__init__.py | e1ff55cbd324da2fcb10188ba6f6f304a81fa7ea | [
"MIT"
] | permissive | vmcole/bilibiliupload | f7c667927bfcc4a0c1c5eba96b674729ae776e62 | b5c416451f66c2ebe550694d4c4957129d0e966e | refs/heads/master | 2023-06-09T19:58:33.813073 | 2021-07-06T14:50:18 | 2021-07-06T14:50:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | import asyncio
from .common.reload import AutoReload
from .common.timer import Timer
from .engine.event import EventManager, Event
from .engine import config, invert_dict, Plugin
from . import plugins
__version__ = "0.0.8"
def create_event_manager():
streamer_url = {k: v['url'] for k, v in config['streamers'].items()}
inverted_index = invert_dict(streamer_url)
urls = list(inverted_index.keys())
pool1_size = config.get('pool1_size') if config.get('pool1_size') else 3
pool2_size = config.get('pool2_size') if config.get('pool2_size') else 3
# 初始化事件管理器
app = EventManager(config, pool1_size=pool1_size, pool2_size=pool2_size)
app.context['urls'] = urls
app.context['url_status'] = dict.fromkeys(inverted_index, 0)
app.context['checker'] = Plugin(plugins).sorted_checker(urls)
app.context['inverted_index'] = inverted_index
app.context['streamer_url'] = streamer_url
return app
event_manager = create_event_manager()
async def main():
from .handler import CHECK_UPLOAD, CHECK
event_manager.start()
async def check_timer():
event_manager.send_event(Event(CHECK_UPLOAD))
for k in event_manager.context['checker'].keys():
event_manager.send_event(Event(CHECK, (k,)))
wait = config.get('event_loop_interval') if config.get('event_loop_interval') else 40
# 初始化定时器
timer = Timer(func=check_timer, interval=wait)
interval = config.get('check_sourcecode') if config.get('check_sourcecode') else 15
# 模块更新自动重启
detector = AutoReload(event_manager, timer, interval=interval)
await asyncio.gather(detector.astart(), timer.astart(), return_exceptions=True)
| [
"34411314+ForgQi@users.noreply.github.com"
] | 34411314+ForgQi@users.noreply.github.com |
1988a8c3a0828ad4711f8691bc8c51c0d65d6d80 | fc721cd39c86a0773e7e4951605d94fe6dffba61 | /apps/user/models.py | 4b960de3e8a4644d886ffa60534c6a3b7605bdab | [] | no_license | maxingg/my_blog | 90a5f06a9c16df5ca5e66b22ab7b7ff2a007eb4e | 300e61b5540ddecdc88768aba2f56410df05011f | refs/heads/master | 2020-04-07T18:44:11.784282 | 2018-11-22T01:06:57 | 2018-11-22T01:06:57 | 158,621,571 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
__author__ = 'maxing'
__date__ = '2018/11/20 18:57'
class UserProfile(AbstractUser):
GENDER_CHOICES = (
("male", "男"),
("female", "女"),
)
# 昵称
nick_name = models.CharField(max_length=50, verbose_name="昵称", default="")
# 生日 可以为空
birthday = models.DateField(verbose_name="生日", null=True, blank=True)
hobby = models.CharField(default='', max_length=80, verbose_name='爱好')
# 性别
gender = models.CharField(
max_length=6,
choices=GENDER_CHOICES,
default="male",
verbose_name="性别",
)
# 地址
address = models.CharField(max_length=100, verbose_name="地址", default="")
# 电话
mobile = models.CharField(max_length=11, null=True, blank=True)
# 头像 默认用default.png
image = models.ImageField(
upload_to="images/portraits/%Y",
default="default.png",
max_length=100,
)
# meta信息,后台栏目名
class Meta:
verbose_name = "用户信息"
verbose_name_plural = verbose_name
def __str__(self):
return self.username | [
"1501901576@qq.com"
] | 1501901576@qq.com |
1bd44bb3dbda7bb6bcfbee28eabd8571e7d0c751 | bb25174ab60fff6e3e45d5ffab1bf993c94ab730 | /calculator/models.py | 93f0313ebbb7df89a22c72d75337687492ff8b5f | [] | no_license | yurasenchuk/seidel_calculator | 3f081bef7f8c58fca2fb28acc555a7959385b128 | bce537cdb65a72137d98736c86d9e422df59834d | refs/heads/master | 2023-03-01T03:48:01.510332 | 2021-02-13T20:56:24 | 2021-02-13T20:56:24 | 331,643,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,107 | py | import copy
import time
from concurrent.futures._base import LOGGER
from math import fabs
from django.contrib.postgres.fields import ArrayField
from django.db import models, IntegrityError, DataError
from user.models import CustomUser
class Calculator(models.Model):
size = models.IntegerField(default=0)
matrix_a = ArrayField(ArrayField(models.CharField(max_length=10, blank=True), size=0), size=0, blank=True)
vector_b = ArrayField(models.CharField(max_length=10, blank=True), size=0, blank=True)
result = ArrayField(models.CharField(max_length=10, blank=True), size=0, blank=True)
e = models.FloatField(default=0.001)
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
@staticmethod
def create(size, matrix_a, vector_b, e, user):
calculator = Calculator(size=size, matrix_a=matrix_a, vector_b=vector_b, e=e, user=user,
result=[0 for i in range(size)])
try:
calculator.save()
return calculator
except (IntegrityError, AttributeError, DataError, ValueError) as err:
LOGGER.error("Wrong attributes or relational integrity error")
def update(self, result):
self.result = result
self.save()
@staticmethod
def get_by_id(calculator_id):
try:
calculator = Calculator.objects.get(id=calculator_id)
return calculator
except CustomUser.DoesNotExist:
LOGGER.error("User does not exist")
return False
def to_dict(self):
return {
"id": self.id,
"matrix_a": self.matrix_a,
"vector_b": self.vector_b,
"result": self.result,
"e": self.e}
def calculate_seidel(self):
start = int(time.time())
x = [[0 for j in range(len(self.vector_b))] for i in range(1)]
matrix = [[float(self.matrix_a[i][j]) for j in range(self.size)] for i in range(self.size)]
vector = [float(self.vector_b[j]) for j in range(self.size)]
i = 0
while True:
if int(time.time()) - start > 30:
return False
x.append(self.seidel(x[i], matrix, vector))
i += 1
if len(x) >= 2 and max([fabs(x[i][j] - x[i - 1][j]) for j in range(len(x[0]))]) < self.e:
return x[i]
def seidel(self, x, matrix, vector):
n = len(matrix)
cur_x = copy.deepcopy(x)
for j in range(0, n):
d = copy.deepcopy(vector[j])
for i in range(0, n):
if j != i:
d -= matrix[j][i] * cur_x[i]
cur_x[j] = d / matrix[j][j]
return cur_x
@staticmethod
def results(user_id):
return list(Calculator.objects.all().filter(user_id=user_id))
@staticmethod
def delete_by_user_id(user_id):
try:
tasks = Calculator.results(user_id)
for i in tasks:
i.delete()
return True
except Calculator.DoesNotExist:
LOGGER.error("Task does not exist")
return False
| [
"63191681+yurasenchuk@users.noreply.github.com"
] | 63191681+yurasenchuk@users.noreply.github.com |
274332a28662cdd27514f4e4d6ea6d2fb35d89f7 | 82db461036ffb2adbf0424a6f0575cd9d24b48a8 | /main.py | aa2b3ceb4b62ba95ae0a6123184a319dd03db241 | [] | no_license | webclinic017/option_pdt | fdc559f02cc529b54278e90e04170713fe93684f | dd302c6b2661e26dbfcbea0384b99e85ae9584e1 | refs/heads/master | 2023-03-24T10:43:35.998775 | 2021-03-19T14:08:38 | 2021-03-19T14:08:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,828 | py | import sys
import os
parent_path = os.path.dirname(sys.path[0])
if parent_path not in sys.path:
sys.path.append(parent_path)
import json
import pickle
import logging
import pandas as pd
import numpy as np
from datetime import datetime
from library import get_strategy
from utils.util_func import *
from optparse import OptionParser
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = OptionParser()
parser.add_option('-f', '--file_name', action='store', type='string', default=None)
(opts, args) = parser.parse_args()
file_path = f'library/strategy/{opts.file_name}.json'
strategy_data_file = opts.file_name.split('_')[0]+"_data"
with open(file_path, 'r') as f:
options = json.load(f)
'''
from datetime import datetime
import pandas as pd
import pickle
positions = pd.read_csv("data/positions_s.csv")
positions['group'] = positions['group'].astype(str)
#hedge_positions = pd.read_csv("data/hedge_positions.csv",index_col=0)
#hedge_positions['group'] = hedge_positions['group'].astype(str)
strategy_data = {'hedge_time':datetime.now()}
with open(f'data/delta_data.pkl','wb') as fw:
pickle.dump(strategy_data, fw)
with open(f'data/customer_position.pkl','wb') as fw:
pickle.dump(positions, fw)
today = datetime.now()
cols = ['EXP_DATE','ask_price', 'bid_price', 'creation_timestamp','instrument_name', 'K','S','cp',
'interest_rate','open_interest','underlying_index', 'volume','TTM']
option_df = pd.read_csv("data/option_df.csv",index_col=0)
option_df = option_df[cols]
#option_df['TTM'] = [days_diff(exp_date,today) for exp_date in option_df['EXP_DATE']]
option_df = option_df[option_df['TTM']>0.1]
portfolio = sim_positions(option_df,6)
subscription_list = [symbol2subs(symbol,"%d%b%y") for symbol in portfolio['instrument_name']]
'''
with open(f'data/{strategy_data_file}.pkl','rb') as fw:
strategy_data = pickle.load(fw)
with open(f'data/customer_position.pkl','rb') as fw:
positions = pickle.load(fw)
positions,is_removed = remove_expired_positions(positions)
if is_removed:
with open(f'data/customer_position.pkl','wb') as fw:
pickle.dump(positions, fw)
hedge_time = strategy_data['hedge_time']
#hedge_positions = strategy_data['hedge_positions']
#positions = {key:{k:0 for k,v in values.items()} for key,values in positions.items()}
#subscription_list = [symbol2subs(symbol,"%Y%m%d") for symbol in positions.keys() if symbol!='BTCUSD']
subscription_list = []
subscription_list.append('Deribit|BTCUSD|perp|ticker')
subscription_list.append('Deribit|BTCUSD|option|summaryinfo')
options['subscription_list'] = list(set(subscription_list))
options['hedge_time'] = hedge_time
options['positions'] = positions
if strategy_data_file == "delta_data":
options['account_target'] = float(strategy_data['account_target'])
stratgy = options['file_name']
context = get_strategy(stratgy)
context.logger.info('Start trading..')
context.config_update(**options)
context.pre_start(**options)
context.start()
#instrument = 'Deribit|BTCUSD-20200925-7000-P|option'
#instrument = 'Deribit|BTCUSD|option|summaryinfo'
#instrument = 'Deribit|BTCUSD|perp'
#context.send_order(instrument, 'sell', 0.1200, 0.1, 'Limit')
#context.send_order(instrument, 'sell', 0.1, 0.1, 'Fak', delay=3000)
#context.send_order(instrument, 'sell', 9500.5, 1, 'Limit',note='maker')
#context.send_order(instrument, 'buy', 8100.5, 1, 'Market',note='taker')
#context.inspect_order(instrument,'3887280714')
#context.send_order(instrument,'buy',7084,0.0706,'Limit')
| [
"noreply@github.com"
] | webclinic017.noreply@github.com |
880346dbe517aa4c58ed8212c70a857065c4acd0 | f35d98d459494f3395cc6df67a16822f512a1fa9 | /utility/MiscTool.py | 5bc451c96009c97bc4ef059cf231ab054eb4305a | [] | no_license | BenjaminMesic/HbbAnalysis | ff7a91c87c59859a052ec0982524f936da8870bd | 6bab2780c703af418b4155a0910769e12b62f4ec | refs/heads/master | 2020-05-22T04:24:43.304282 | 2019-01-20T11:37:51 | 2019-01-20T11:37:51 | 65,304,761 | 0 | 0 | null | 2017-02-23T13:14:40 | 2016-08-09T15:08:01 | Python | UTF-8 | Python | false | false | 2,333 | py | import os
import sys
def Print(print_type, *text):
try:
if print_type == 'python_info': # Bright Yellow
print '\033[1;33;40m' + ''.join(text) + '\033[0m'
elif print_type == 'analysis_info': # Bright Cyan
print '\033[1;36;40m{0:30s}{1}\033[0m'.format(*text)
elif print_type == 'analysis_info_list': # Bright Cyan
print '\033[1;36;40m' + text[0] + '\033[0m'
for _l in text[1]:
print '\033[1;36;40m{0:30s}{1}\033[0m'.format('' , _l)
elif print_type == 'error': # Bright Red
print '\033[1;31;40m' + ''.join(text) + '\033[0m'
elif print_type == 'status': # Bright Green
print '\033[1;32;40m' + ''.join(text) + '\033[0m'
except Exception, e:
print text
def make_directory(directory):
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
if not os.path.isdir(directory):
raise
return directory
def get_analysis_name():
''' Just function which gets argv and return analysis_name. Need sys library'''
_analysis_name = ''
if len(sys.argv) == 1:
_analysis_name = 'Wlv'
Print('analysis_info','Missing analysis_name argument, using default: ', _analysis_name)
else:
_analysis_name = sys.argv[1]
return _analysis_name
def get_configuration_files(analysis_name):
import analysis
exec('from analysis.{0} import configuration'.format(analysis_name))
return configuration
def ID_sample_dictionary( IDs, samples_configuration):
'Give list of IDs and return corresponding list of samples'
_samples = []
if IDs == ['all']:
return samples_configuration.samples_list.keys()
else:
# Loop over all IDs
for _id in IDs:
_sample = ''
# Loop over all samples
for _s in samples_configuration.samples_list.keys():
# Check if ID match sample
if _id == samples_configuration.samples_list[_s]['ID']:
_sample = _s
break
# Check if ID match subsample
elif 'sub' in samples_configuration.samples_list[_s] and _id in samples_configuration.samples_list[_s]['sub']:
_sample = _s
break
if _sample == '':
Print('error', 'Check your ID {0}'.format(_id))
else:
_samples.append(_s)
# remove duplicates
return list(set(_samples))
| [
"benjamin.mesic@cern.ch"
] | benjamin.mesic@cern.ch |
43734458b16e1c03d37ba48e9f01579b1cea3b07 | a060c70f8fbacc8b2455efce7b08beeacc7e0e8a | /PythonCrashCourse/Chapter06/many_ysers.py | 4fde50a7a0d57199f8b7bfa944b529ddb91cb243 | [] | no_license | mingqin-joy/python-crash-course | 091cb36ffd838fb8e9a9555c442c3a6994bd92aa | 31363d91d5cb9f28f145b5cc583a354bc08419ba | refs/heads/master | 2020-05-15T02:37:50.092231 | 2019-04-29T06:35:56 | 2019-04-29T06:35:56 | 182,052,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | users = {
'GC': {
'Xing': 'Guan',
'Ming': 'Chao',
'location': 'Yangzhou',
},
'ZMQ': {
'Xing': 'Zhou',
'Ming': 'Mingqin',
'location': 'Chongqing',
},
}
for username, user_info in users.items():
print("\nUsername: " + username)
Xingming = user_info['Xing'] + " " + user_info['Ming']
location = user_info['location']
print("\tXing: " + Xingming)
print("\tfrom: " + location)
| [
"mingqin-joy@outlook.com"
] | mingqin-joy@outlook.com |
31234a88d755e9be7c22c2decfa0fa637786e8ae | 0c549aa4d4172e4ef45f803ac0fbe7903710460f | /guessing_game.py | dcb6d53280138567bbd81cabf7d8085b2bff0993 | [] | no_license | mcjczapiewski/guessing_game | 50d76e842e7278fa71bdde0a7656154b10429791 | 5ef6393da1b13bb5daf0d34c886ac6dba2dda6b9 | refs/heads/master | 2022-12-19T04:16:29.792506 | 2020-03-14T14:35:55 | 2020-03-14T14:35:55 | 299,722,391 | 0 | 1 | null | 2020-10-01T05:01:18 | 2020-09-29T19:48:57 | Python | UTF-8 | Python | false | false | 1,190 | py | import random
def generate_numbers(start, end, list_name):
for i in range(10):
list_name.append(random.randint(start, end))
def play_the_game(list_of_numbers, range_end):
for i in range(10):
user_number = get_user_input(range_end)
check_user_input(user_number, list_of_numbers, i, range_end)
def get_user_input(range_end):
user_number = int(
input("Enter an integer from 1 to " + str(range_end) + ": ")
)
return user_number
def check_user_input(user_number, list_of_numbers, i, range_end):
while list_of_numbers[i] != user_number:
if user_number < list_of_numbers[i]:
print("guess is low")
user_number = get_user_input(range_end)
elif user_number > list_of_numbers[i]:
print("guess is high")
user_number = get_user_input(range_end)
else:
break
print("you guessed it!")
random_ints_1to99 = []
generate_numbers(start=1, end=99, list_name=random_ints_1to99)
play_the_game(random_ints_1to99, range_end=99)
random_ints_1to49 = []
generate_numbers(start=1, end=49, list_name=random_ints_1to49)
play_the_game(random_ints_1to49, range_end=49)
| [
"mcjczapiewski@gmail.com"
] | mcjczapiewski@gmail.com |
e8d6832b01ddb153bea7721f9728d12768dc77a3 | 3c259a3755fa81dbaa5a33591c4bcedb79c20314 | /config/ssef/ssef_eval_cqg_masked_2015.config | 897b88fb2d2d418c25f44140fa30a4d2702f637a | [
"MIT"
] | permissive | djgagne/hagelslag | f96bea7395d2d967e1dc84faccf910e01b83157b | 17757de7b55737f65f615e5dccad379604961832 | refs/heads/master | 2023-07-24T20:13:07.659540 | 2023-07-13T17:02:00 | 2023-07-13T17:02:00 | 37,555,335 | 64 | 26 | MIT | 2023-07-13T17:02:01 | 2015-06-16T20:48:43 | Jupyter Notebook | UTF-8 | Python | false | false | 3,621 | config | #!/usr/bin/env python
from datetime import datetime
import numpy as np
ensemble_members = ["wrf-s3cn_arw"] + ["wrf-s3m{0:d}_arw".format(m) for m in range(3, 14)]
scratch_path = "/sharp/djgagne/"
experiment_name = "cqg_masked"
config = dict(ensemble_name="SSEF",
ensemble_members=ensemble_members,
start_date=datetime(2015, 5, 12),
end_date=datetime(2015, 6, 5),
start_hour=13,
end_hour=36,
window_sizes=[1, 3, 24],
time_skip=1,
model_names=dict(dist=["Random Forest", "Elastic Net", "Random Forest CV"],
condition=["Random Forest"]),
model_types=["dist", "condition"],
size_thresholds=[5, 25, 50],
condition_threshold=0.5,
dist_thresholds=np.arange(0, 200),
num_max_samples=1000,
forecast_json_path=scratch_path + "track_forecasts_spring2015_{0}_json/".format(experiment_name),
track_data_csv_path=scratch_path + "track_data_spring2015_{0}_csv/".format(experiment_name),
forecast_sample_path=scratch_path + "track_samples_spring2015_{0}/".format(experiment_name),
mrms_path=scratch_path + "mrms_spring2015/",
mrms_variable="MESH_Max_60min_00.50",
obs_mask=True,
mask_variable="RadarQualityIndex_00.00",
forecast_thresholds=np.concatenate(([0, 0.01, 0.02], np.arange(0.05, 1.1, 0.05))),
dilation_radius=13,
forecast_bins={"dist": np.array(["Shape_f", "Location_f", "Scale_f"]),
"condition": np.array(["ProbHail"]),
"translation-x":np.arange(-240000, 264000, 24000),
"translation-y":np.arange(-240000, 264000, 24000),
"start-time":np.arange(-6, 7, 1)
},
object_thresholds=[0, 25, 50],
out_path=scratch_path + "evaluation_data_spring2015_{0}/".format(experiment_name),
obj_scores_file="object_scores_ssef_2015_cqg_closest_",
grid_scores_file="grid_scores_ssef_2015_cqg_cloest.csv",
obs_thresholds=[5, 25, 50, 75],
ensemble_variables=["uh_max", "hailsz", "cqgmax", "r10cmx"],
neighbor_thresholds={"dist": [25, 50],
"uh_max": [25, 75, 150],
"hailsz": [5, 25, 50],
"cqgmax": [5, 25, 50],
"r10cmx": [40, 60]},
neighbor_path="/sharp/djgagne/hail_consensus_ssef_{0}_2015/".format(experiment_name),
neighbor_score_path="/sharp/djgagne/neighbor_scores_ssef_unique_2015/ssef_{0}_diss_".format(experiment_name),
neighbor_radii=[14, 28],
smoothing_radii=[14, 21, 28],
neighbor_radius=42,
neighbor_sigma=1,
ml_grid_path=scratch_path + "hail_forecasts_grib2_ssef_cqg_masked_2015/",
coarse_neighbor_out_path= scratch_path + "ssef_coarse_neighbor_eval_2015/",
map_file = "/home/djgagne/hagelslag/mapfiles/ssef2015.map",
us_mask_file="/home/djgagne/hagelslag/mapfiles/ssef_2015_us_mask.nc",
coordinate_file="/sharp/djgagne/ssef_2015_grid.nc",
lon_bounds=[-106,-80],
lat_bounds=[28,48],
stride=14,
ensemble_path=scratch_path + "spring2015_nc/",
single_step=False,
)
| [
"djgagne@ou.edu"
] | djgagne@ou.edu |
12712fe4e23a5c73bf59f892cdc1ef0041cd1ab4 | 5410700e83210d003f1ffbdb75499062008df0d6 | /leetcode/isHappy.py | 92bdf82a57b5d864724396b17b24897d123370fd | [] | no_license | lilyandcy/python3 | 81182c35ab8b61fb86f67f7796e057936adf3ab7 | 11ef4ace7aa1f875491163d036935dd76d8b89e0 | refs/heads/master | 2021-06-14T18:41:42.089534 | 2019-10-22T00:24:30 | 2019-10-22T00:24:30 | 144,527,289 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | class Solution:
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
looplist = []
num = n
while num != 1:
if num not in looplist:
looplist.append(num)
else:
return False
num = self.sumLocation(num)
return True
def sumLocation(self, num):
strnum = str(num)
sumnum = 0
for i in range(len(strnum)):
sumnum += int(strnum[i]) ** 2
return sumnum | [
"myyan_yan@msn.com"
] | myyan_yan@msn.com |
7d23c3b8aa5f990dec58d8de72256fe7d697d9fd | a302afe51dd821cbbf6bfa1ec1184cb7d5a61ab5 | /solutions/task_13/solution_13_1.py | 66947a84d36b24b2beee2e0a4828db7b00a962cd | [] | no_license | ivanveriga/project-euler | 0234552bdb16d2a5db7a446003a3d60dfd3b50ac | 447b078a2cf23efae2acc84051560a5844ab9f73 | refs/heads/master | 2023-04-10T10:18:49.025281 | 2021-04-27T18:40:16 | 2021-04-27T18:40:16 | 282,042,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,266 | py | #Work out the first ten digits of the sum of the following one-hundred 50-digit numbers.
numbers="""37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690"""
print(str(sum([int(i) for i in numbers.split('\n')]))[0:10]) | [
"56963098+ivanveriga@users.noreply.github.com"
] | 56963098+ivanveriga@users.noreply.github.com |
a0602524e8bd8ee7ffd9da50880916d0a4c0a3da | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/timers/spf/state/__init__.py | baee691cec8aaf70f22e9f69662878b17b753e95 | [
"Apache-2.0"
] | permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 31,796 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/timers/spf/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines state information for ISIS SPF timers.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__spf_hold_interval','__spf_first_interval','__spf_second_interval','__adaptive_timer',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__spf_hold_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
self.__spf_first_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
self.__adaptive_timer = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
self.__spf_second_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'global', u'timers', u'spf', u'state']
def _get_spf_hold_interval(self):
"""
Getter method for spf_hold_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_hold_interval (uint64)
YANG Description: SPF Hold Down time interval in milliseconds.
"""
return self.__spf_hold_interval
def _set_spf_hold_interval(self, v, load=False):
"""
Setter method for spf_hold_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_hold_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_hold_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_hold_interval() directly.
YANG Description: SPF Hold Down time interval in milliseconds.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_hold_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_hold_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_hold_interval(self):
self.__spf_hold_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_spf_first_interval(self):
"""
Getter method for spf_first_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_first_interval (uint64)
YANG Description: Time interval in milliseconds between the
detection of topology change and when the SPF algorithm runs.
"""
return self.__spf_first_interval
def _set_spf_first_interval(self, v, load=False):
"""
Setter method for spf_first_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_first_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_first_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_first_interval() directly.
YANG Description: Time interval in milliseconds between the
detection of topology change and when the SPF algorithm runs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_first_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_first_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_first_interval(self):
self.__spf_first_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_spf_second_interval(self):
"""
Getter method for spf_second_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_second_interval (uint64)
YANG Description: Time interval in milliseconds between the first and second
SPF calculation.
"""
return self.__spf_second_interval
def _set_spf_second_interval(self, v, load=False):
"""
Setter method for spf_second_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_second_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_second_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_second_interval() directly.
YANG Description: Time interval in milliseconds between the first and second
SPF calculation.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_second_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_second_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_second_interval(self):
self.__spf_second_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_adaptive_timer(self):
"""
Getter method for adaptive_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/adaptive_timer (oc-isis-types:adaptive-timer-type)
YANG Description: ISIS adaptive timer types (linear, exponential).
"""
return self.__adaptive_timer
def _set_adaptive_timer(self, v, load=False):
"""
Setter method for adaptive_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/adaptive_timer (oc-isis-types:adaptive-timer-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adaptive_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adaptive_timer() directly.
YANG Description: ISIS adaptive timer types (linear, exponential).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """adaptive_timer must be of a type compatible with oc-isis-types:adaptive-timer-type""",
'defined-type': "oc-isis-types:adaptive-timer-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)""",
})
self.__adaptive_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_adaptive_timer(self):
self.__adaptive_timer = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
spf_hold_interval = __builtin__.property(_get_spf_hold_interval)
spf_first_interval = __builtin__.property(_get_spf_first_interval)
spf_second_interval = __builtin__.property(_get_spf_second_interval)
adaptive_timer = __builtin__.property(_get_adaptive_timer)
_pyangbind_elements = {'spf_hold_interval': spf_hold_interval, 'spf_first_interval': spf_first_interval, 'spf_second_interval': spf_second_interval, 'adaptive_timer': adaptive_timer, }
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/timers/spf/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines state information for ISIS SPF timers.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__spf_hold_interval','__spf_first_interval','__spf_second_interval','__adaptive_timer',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__spf_hold_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
self.__spf_first_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
self.__adaptive_timer = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
self.__spf_second_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'global', u'timers', u'spf', u'state']
def _get_spf_hold_interval(self):
"""
Getter method for spf_hold_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_hold_interval (uint64)
YANG Description: SPF Hold Down time interval in milliseconds.
"""
return self.__spf_hold_interval
def _set_spf_hold_interval(self, v, load=False):
"""
Setter method for spf_hold_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_hold_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_hold_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_hold_interval() directly.
YANG Description: SPF Hold Down time interval in milliseconds.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_hold_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_hold_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_hold_interval(self):
self.__spf_hold_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_spf_first_interval(self):
"""
Getter method for spf_first_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_first_interval (uint64)
YANG Description: Time interval in milliseconds between the
detection of topology change and when the SPF algorithm runs.
"""
return self.__spf_first_interval
def _set_spf_first_interval(self, v, load=False):
"""
Setter method for spf_first_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_first_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_first_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_first_interval() directly.
YANG Description: Time interval in milliseconds between the
detection of topology change and when the SPF algorithm runs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_first_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_first_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_first_interval(self):
self.__spf_first_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_spf_second_interval(self):
"""
Getter method for spf_second_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_second_interval (uint64)
YANG Description: Time interval in milliseconds between the first and second
SPF calculation.
"""
return self.__spf_second_interval
def _set_spf_second_interval(self, v, load=False):
"""
Setter method for spf_second_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_second_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_second_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_second_interval() directly.
YANG Description: Time interval in milliseconds between the first and second
SPF calculation.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_second_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_second_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_second_interval(self):
self.__spf_second_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_adaptive_timer(self):
"""
Getter method for adaptive_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/adaptive_timer (oc-isis-types:adaptive-timer-type)
YANG Description: ISIS adaptive timer types (linear, exponential).
"""
return self.__adaptive_timer
def _set_adaptive_timer(self, v, load=False):
"""
Setter method for adaptive_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/adaptive_timer (oc-isis-types:adaptive-timer-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adaptive_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adaptive_timer() directly.
YANG Description: ISIS adaptive timer types (linear, exponential).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """adaptive_timer must be of a type compatible with oc-isis-types:adaptive-timer-type""",
'defined-type': "oc-isis-types:adaptive-timer-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)""",
})
self.__adaptive_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_adaptive_timer(self):
self.__adaptive_timer = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
spf_hold_interval = __builtin__.property(_get_spf_hold_interval)
spf_first_interval = __builtin__.property(_get_spf_first_interval)
spf_second_interval = __builtin__.property(_get_spf_second_interval)
adaptive_timer = __builtin__.property(_get_adaptive_timer)
_pyangbind_elements = {'spf_hold_interval': spf_hold_interval, 'spf_first_interval': spf_first_interval, 'spf_second_interval': spf_second_interval, 'adaptive_timer': adaptive_timer, }
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
994523ad13eaf886d1e9b898c2b4e1e3021ae3a6 | fac37d77a8d00e3d13106bcd728d51a455dd16f2 | /kmer.py | 2c016a97eb7bf7903ce31d36c4622ef1926e080c | [] | no_license | anu-bioinfo/rosalind-4 | c6a628bba94f647cf4a34bdf505f1527af4346a9 | 3ddc659d44298f4dd4b5dde66d7833b4d27a2580 | refs/heads/master | 2020-03-25T13:47:39.521215 | 2014-09-14T02:30:54 | 2014-09-14T02:30:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #!/usr/bin/env python
from __future__ import print_function
import os
from revp import read_fasta
from subs import substring_find
from lexf import lexf_order
def kmer_composition(dna_string):
    """Return the 4-mer composition of ``dna_string``.

    For every length-4 string over the alphabet 'ACGT', taken in
    lexicographic order, count the positions that ``substring_find``
    reports in ``dna_string``; each count is returned as a decimal
    string, one entry per 4-mer.
    """
    counts = []
    for chars in lexf_order(4, 'ACGT'):
        kmer = ''.join(chars)
        occurrences = sum(1 for _ in substring_find(dna_string, kmer))
        counts.append(str(occurrences))
    return counts
if __name__ == "__main__":
    # Load the Rosalind KMER dataset and print the 4-mer composition of
    # its first sequence as space-separated counts.
    with open(os.path.join('data', 'rosalind_kmer.txt')) as dataset:
        seqs = read_fasta(dataset)
    # read_fasta returns an ordered mapping; popitem(last=False) pops the
    # first (and, for this problem, only) record and [1] takes its sequence.
    dna_string = seqs.popitem(last=False)[1]
    print(*kmer_composition(dna_string))
| [
"luiz.irber@gmail.com"
] | luiz.irber@gmail.com |
1eb60c77791b51695ae3487f641618a22c7c6645 | d8c331c8081781cd287b0635d02cec51f57e38dc | /setup.py | 3d1b43c2810bc7a37ae20a8f34f254d206193b7e | [
"Apache-2.0"
] | permissive | vitthalpadwal/mypackage | 375d6e2a9a15935ec542dc39f68e530b16fa2cf3 | 5ba3ea8ea094b3a08aaa2137708b06f5429eecbd | refs/heads/master | 2020-03-16T17:31:09.917486 | 2018-05-10T03:21:41 | 2018-05-10T03:21:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | #!/usr/bin/env python
from setuptools import setup
setup(
    # pbr=True delegates all package metadata (name, version, requirements,
    # entry points, ...) to setup.cfg; only the bootstrap requirements for
    # running setup itself are declared here.
    setup_requires=['pbr>=1.9', 'setuptools>=0.9.8'],
    pbr=True,
)
| [
"vitthalpadwal89@gmail.com"
] | vitthalpadwal89@gmail.com |
3a197f1c510e52760e103125918c88a6d3f183dc | 248316612964863433ba8f3b39749acba2ee50eb | /Scripts/rm_outliers.py | 44456376663e63b453d17a5f305e745f9b34797d | [] | no_license | emdezla/BossomHigss | 5fa6c84f42e45a6e3c8c9d7e7d31bfaacf7c6112 | 08a99d4602b919cd0010fad02a70bd5203103b6b | refs/heads/master | 2022-10-01T20:44:22.865622 | 2020-06-09T10:52:37 | 2020-06-09T10:52:37 | 214,812,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | # -*- coding: utf-8 -*-
import numpy as np
def rm_outliers(x):
    """Split the samples of ``x`` into per-jet groups and compute their
    column-wise mean and standard deviation.

    NOTE(review): ``data_processing`` is neither defined nor imported in
    this file, so calling this function raises NameError as written —
    confirm which module it should come from.
    """
    # presumably partitions rows by the jet-number categorical feature — TODO confirm
    jet0, jet1, jet2, jet3 = data_processing(x)
    jets = np.array([jet0,jet1,jet2,jet3])
    for jet in jets:
        # NOTE(review): these statistics are overwritten each iteration and
        # never used or returned; the outlier-removal step looks unfinished.
        means = np.mean(jet,axis=0)
        stand_dev = np.std(jet,axis=0)
| [
"tylerbenkley@Tylers-MacBook-Air.local"
] | tylerbenkley@Tylers-MacBook-Air.local |
3f532246345c6898340e9b5f2125626a978ca0cf | fed6c6bdb6276d195bc565e527c3f19369d22b74 | /galaxy-galaxy lensing/prepare_cata/Fourier_Quad_cata/gather_raw_cata.py | 4e38e9d277633610cb84172ab6665238c0c69d4e | [] | no_license | hekunlie/astrophy-research | edbe12d8dde83e0896e982f08b463fdcd3279bab | 7b2b7ada7e7421585e8993192f6111282c9cbb38 | refs/heads/master | 2021-11-15T05:08:51.271669 | 2021-11-13T08:53:33 | 2021-11-13T08:53:33 | 85,927,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,291 | py | import matplotlib
matplotlib.use("Agg")
import os
my_home = os.popen("echo $MYWORK_DIR").readlines()[0][:-1]
from sys import path
path.append('%s/work/mylib/'%my_home)
import tool_box
import h5py
from mpi4py import MPI
import numpy
import time
from subprocess import Popen
import warnings
warnings.filterwarnings('error')
# The new Fourier_Quad catalog differs from the old version!!!
# collect: collect the data from the files of each field. It creates the "fourier_cata.hdf5" in
# the parent directory of the one contain the field catalog.
# If the catalog file doesn't exist, run it firstly !!!.
# It will add the redshift parameters from CFHT catalog into the finial catalog.
# MPI setup: the list of fields is split across ranks and each rank
# stacks the per-chip shear catalogs of its own fields.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
cpus = comm.Get_size()

data_path = "/mnt/perc/hklee/CFHT/catalog/fourier_cata_new/"
raw_cata_path = data_path + "raw_cata_new/"

# Field dictionary and field-name list from the catalog index file.
dicts, fields = tool_box.field_dict(data_path + "nname.dat")
# The subset of fields assigned to this MPI rank.
my_field = tool_box.allot(fields, cpus)[rank]

chip_num = 36  # chips per exposure

for field_nm in my_field:
    field_path = raw_cata_path + "%s/" % field_nm
    files = os.listdir(field_path)

    # Collect the distinct exposure labels from "<exp>p_<chip>_shear.dat" names.
    chip_exps = []
    for nm in files:
        if ".dat" in nm:
            exp_nm = nm.split("p")[0]
            if exp_nm not in chip_exps:
                chip_exps.append(exp_nm)
    chip_exps.sort()

    # Stack every readable chip catalog of this field into one array.
    file_count = 0
    for exp_nm in chip_exps:
        for i in range(1, chip_num + 1):
            chip_nm = "%sp_%d_shear.dat" % (exp_nm, i)
            chip_path = field_path + chip_nm
            if os.path.exists(chip_path):
                try:
                    temp = numpy.loadtxt(chip_path, skiprows=1)
                    if file_count == 0:
                        data = temp
                    else:
                        data = numpy.row_stack((data, temp))
                    file_count += 1
                except Exception:
                    # Zero-row files (or warnings promoted to errors by
                    # warnings.filterwarnings('error')) land here; report size.
                    # Narrowed from a bare "except:" so Ctrl-C still works.
                    file_size = os.path.getsize(chip_path) / 1024.
                    print("Empty: %s (%.3f KB)" % (chip_nm, file_size))
            else:
                # BUG FIX: chip_nm is a string, so the conversion must be %s.
                # The original "%d" raised TypeError whenever a chip was missing.
                print("Can't find %s" % chip_nm)

    if file_count > 0:
        final_path = data_path + "%s/%s_shear_raw.cat" % (field_nm, field_nm)
        # NOTE(review): the text catalog written by savetxt is immediately
        # overwritten by the HDF5 file of the same name — confirm intended.
        numpy.savetxt(final_path, data)
        h5f = h5py.File(final_path, "w")
        h5f["/data"] = data
        h5f.close()
| [
"hekun_lee@sjtu.edu.cn"
] | hekun_lee@sjtu.edu.cn |
982a99c492b0b5caf7805c3ab382472f7d731a0d | 855d64110052ab060333d5d2e8f1ada7c36579e7 | /Graphing/vol_cor/fileread.py | 8124e3d2cc2c1c0e1638c8aadf09a8ee20419f76 | [] | no_license | mwood95/stockPIcker | de40e77008f33e18c8c68a3ce19c42a149d05e5d | cd9b3e4248b5575ad0a4c241aec0d8d438336062 | refs/heads/master | 2022-02-25T11:27:03.001193 | 2022-02-13T21:49:40 | 2022-02-13T21:49:40 | 95,626,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | f = open("data.txt")
line = f.read()
line = str(line)
line = line.split(':')
print line
f.close()
| [
"mikewood95@gmail.com"
] | mikewood95@gmail.com |
038be3106c05dcfa1cf28d115152639a38956939 | aa2645c96047d775061e0443299c64fc5b255027 | /0405 if1.py | a77e5980ffceb18e44a2854875622938e9a1089f | [] | no_license | sunnyhyo/Problem-Solving-and-SW-programming | ca63b705b27ebb49d32a0a6591211250f213d019 | 8689b9728c028a870dfba7a4d16601a248c7e792 | refs/heads/master | 2021-03-30T21:07:27.276272 | 2018-06-14T15:27:22 | 2018-06-14T15:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py |
#조건문 1/4
score=input("점수입력")
score=int(score)
if score > 90:
print("합격!!!")
print("장학금도 받을 수 있음")
| [
"sunnyhyo77@gmail.com"
] | sunnyhyo77@gmail.com |
3ee2e5b8231c35ed791296508519b38ff68c5c91 | 063775cfd2642614feb1d340a19740d3be3b4239 | /rich/console.py | 774f1bcf16f2c27f2bd2a00e20e9bc16f4c9ddd5 | [
"MIT"
] | permissive | adamchainz/rich | 7e0a328a6a5d0673255aa7f364d22e802a51b3e3 | 7b00f0ecb15a4698931d49922a665a6f02782e29 | refs/heads/master | 2023-08-18T13:40:07.405137 | 2020-01-26T17:24:55 | 2020-01-26T17:24:55 | 236,697,550 | 0 | 0 | MIT | 2020-01-28T09:18:29 | 2020-01-28T09:18:28 | null | UTF-8 | Python | false | false | 32,567 | py | from collections import ChainMap
from collections.abc import Mapping, Sequence
from contextlib import contextmanager
from dataclasses import dataclass, replace
from enum import Enum
import inspect
from itertools import chain
import os
from operator import itemgetter
import re
import shutil
import sys
from typing import (
Any,
Callable,
Dict,
IO,
Iterable,
List,
Optional,
NamedTuple,
overload,
Tuple,
TYPE_CHECKING,
Union,
)
from typing_extensions import Protocol, runtime_checkable, Literal
from ._emoji_replace import _emoji_replace
from . import markup
from .render_width import RenderWidth
from ._log_render import LogRender
from .default_styles import DEFAULT_STYLES
from . import errors
from .color import ColorSystem
from .highlighter import NullHighlighter, ReprHighlighter
from .pretty import Pretty
from .style import Style
from .tabulate import tabulate_mapping
from . import highlighter
from . import themes
from .pretty import Pretty
from .theme import Theme
from .segment import Segment
if TYPE_CHECKING: # pragma: no cover
from .text import Text
HighlighterType = Callable[[Union[str, "Text"]], "Text"]
JustifyValues = Optional[Literal["left", "center", "right", "full"]]
CONSOLE_HTML_FORMAT = """\
<!DOCTYPE html>
<head>
<style>
{stylesheet}
body {{
color: {foreground};
background-color: {background};
}}
</style>
</head>
<html>
<body>
<code>
<pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
</code>
</body>
</html>
"""
@dataclass
class ConsoleOptions:
    """Options for __console__ method."""

    # Minimum width (in characters) a renderable may use.
    min_width: int
    # Maximum width (in characters) a renderable may use.
    max_width: int
    # True if the output device understands terminal control codes.
    is_terminal: bool
    # Encoding of the output file, e.g. "utf-8".
    encoding: str
    # Optional text justification ("left", "center", "right", "full").
    justify: Optional[JustifyValues] = None

    def update(
        self,
        width: int = None,
        min_width: int = None,
        max_width: int = None,
        justify: JustifyValues = None,
    ) -> "ConsoleOptions":
        """Update values, return a copy.

        Args:
            width (int, optional): Sets both min_width and max_width.
            min_width (int, optional): New minimum width.
            max_width (int, optional): New maximum width.
            justify (JustifyValues, optional): New justify value.

        Returns:
            ConsoleOptions: A copy of self with the given fields replaced.
        """
        options = replace(self)
        if width is not None:
            options.min_width = options.max_width = width
        if min_width is not None:
            options.min_width = min_width
        if max_width is not None:
            options.max_width = max_width
        if justify is not None:
            options.justify = justify
        return options
@runtime_checkable
class ConsoleRenderable(Protocol):
    """An object that supports the console protocol."""

    def __console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> Iterable[Union["ConsoleRenderable", Segment]]:  # pragma: no cover
        """Yield segments and/or further renderables for the given console."""
        ...
RenderableType = Union[ConsoleRenderable, Segment, str]
RenderResult = Iterable[Union[ConsoleRenderable, Segment]]
_null_highlighter = NullHighlighter()
class ConsoleDimensions(NamedTuple):
    """Size of the terminal."""

    # Width of the console in characters.
    width: int
    # Height of the console in lines.
    height: int
class StyleContext:
    """Context manager that temporarily pushes a style onto a console."""

    def __init__(self, console: "Console", style: Optional[Style]):
        self.console = console
        self.style = style

    def __enter__(self) -> "Console":
        """Apply the style (if any), start buffering, and return the console."""
        console = self.console
        style = self.style
        if style is not None:
            console.push_style(style)
        console._enter_buffer()
        return console

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        """Flush buffered output and restore the previous style."""
        console = self.console
        console._exit_buffer()
        if self.style is not None:
            console.pop_style()
# Map user-facing color system names to ColorSystem enum values.
COLOR_SYSTEMS = {
    "standard": ColorSystem.STANDARD,
    "256": ColorSystem.EIGHT_BIT,
    "truecolor": ColorSystem.TRUECOLOR,
}

# Reverse lookup: ColorSystem enum value -> user-facing name.
_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()}
class Console:
"""A high level console interface.
Args:
color_system (str, optional): The color system supported by your terminal,
either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect.
styles (Dict[str, Style], optional): An optional mapping of style name strings to :class:`~rich.style.Style` objects.
file (IO, optional): A file object where the console should write to. Defaults to stdoutput.
width (int, optional): The width of the terminal. Leave as default to auto-detect width.
height (int, optional): The height of the terminal. Leave as default to auto-detect height.
record (bool, optional): Boolean to enable recording of terminal output,
required to call :meth:`export_html` and :meth:`export_text`. Defaults to False.
markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True.
log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True.
log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True.
log_time_format (str, optional): Log time format if ``log_time`` is enabled. Defaults to "[%X] ".
highlighter(HighlighterType, optional): Default highlighter.
"""
    def __init__(
        self,
        color_system: Optional[
            Literal["auto", "standard", "256", "truecolor"]
        ] = "auto",
        styles: Dict[str, Style] = None,
        file: IO = None,
        width: int = None,
        height: int = None,
        record: bool = False,
        markup: bool = True,
        log_time: bool = True,
        log_path: bool = True,
        log_time_format: str = "[%X] ",
        highlighter: Optional["HighlighterType"] = ReprHighlighter(),
    ):
        """Build a console; see the class docstring for argument details."""
        # Layer user styles over (or instead of) the default style table.
        self._styles = ChainMap(DEFAULT_STYLES if styles is None else styles)
        self.file = file or sys.stdout
        # None means "auto-detect from the terminal" for width/height.
        self._width = width
        self._height = height
        self.record = record
        self._markup = markup

        if color_system is None:
            self._color_system = None
        elif color_system == "auto":
            self._color_system = self._detect_color_system()
        else:
            self._color_system = COLOR_SYSTEMS[color_system]

        # Pending segments; flushed when the buffer nesting level returns to 0.
        self.buffer: List[Segment] = []
        self._buffer_index = 0
        # Segments retained for export_text/export_html when record=True.
        self._record_buffer: List[Segment] = []

        default_style = Style()
        self.style_stack: List[Style] = [default_style]
        self.current_style = default_style

        self._log_render = LogRender(
            show_time=log_time, show_path=log_path, time_format=log_time_format
        )
        # NOTE(review): the mutable default ReprHighlighter() is shared across
        # all Console instances; harmless only if highlighters are stateless.
        self.highlighter: HighlighterType = highlighter or _null_highlighter
def __repr__(self) -> str:
return f"<console width={self.width} {str(self._color_system)}>"
def _detect_color_system(self,) -> Optional[ColorSystem]:
"""Detect color system from env vars."""
if not self.is_terminal:
return None
if os.environ.get("COLORTERM", "").strip().lower() == "truecolor":
return ColorSystem.TRUECOLOR
# 256 can be considered standard nowadays
return ColorSystem.EIGHT_BIT
    def _enter_buffer(self) -> None:
        """Enter in to a buffer context, and buffer all output."""
        # Output is only flushed when this nesting counter returns to zero.
        self._buffer_index += 1
    def _exit_buffer(self) -> None:
        """Leave buffer context, and render content if required."""
        self._buffer_index -= 1
        # Flushes to self.file only if the nesting counter reached zero.
        self._check_buffer()
    def __enter__(self) -> "Console":
        """Own context manager to enter buffer context."""
        self._enter_buffer()
        return self
    def __exit__(self, exc_type, exc_value, traceback) -> None:
        """Exit buffer context (flushes output when nesting reaches zero)."""
        self._exit_buffer()
    def push_styles(self, styles: Dict[str, Style]) -> None:
        """Merge set of styles with currently active styles.

        Args:
            styles (Dict[str, Style]): A mapping of style name to Style instance.

        NOTE(review): ChainMap consults appended maps *last*, so names in
        ``styles`` do not override existing styles — confirm this is intended.
        """
        self._styles.maps.append(styles)
@property
def color_system(self) -> Optional[str]:
"""Get color system string.
Returns:
Optional[str]: "standard", "256" or "truecolor".
"""
if self._color_system is not None:
return _COLOR_SYSTEMS_NAMES[self._color_system]
else:
return None
    @property
    def encoding(self) -> str:
        """Get the encoding of the console file, e.g. ``"utf-8"``.

        Returns:
            str: A standard encoding string ("utf-8" when the file object
                exposes no ``encoding`` attribute).
        """
        return getattr(self.file, "encoding", "utf-8")
@property
def is_terminal(self) -> bool:
"""Check if the console is writing to a terminal.
Returns:
bool: True if the console writting to a device capable of
understanding terminal codes, otherwise False.
"""
isatty = getattr(self.file, "isatty", None)
return False if isatty is None else isatty()
    @property
    def options(self) -> ConsoleOptions:
        """Get default console options (current width, encoding, terminal-ness)."""
        return ConsoleOptions(
            min_width=1,
            max_width=self.width,
            encoding=self.encoding,
            is_terminal=self.is_terminal,
        )
    @property
    def size(self) -> ConsoleDimensions:
        """Get the size of the console.

        Returns:
            ConsoleDimensions: A named tuple containing the dimensions.
        """
        # Explicit width and height take priority over terminal detection.
        if self._width is not None and self._height is not None:
            return ConsoleDimensions(self._width, self._height)
        width, height = shutil.get_terminal_size()
        # Mix detected values with any explicitly-set dimension.
        return ConsoleDimensions(
            width if self._width is None else self._width,
            height if self._height is None else self._height,
        )
@property
def width(self) -> int:
"""Get the width of the console.
Returns:
int: The width (in characters) of the console.
"""
width, _ = self.size
return width
    def line(self, count: int = 1) -> None:
        """Write new line(s).

        Args:
            count (int, optional): Number of new lines. Defaults to 1.
        """
        # NOTE(review): assert is stripped under "python -O"; validation only.
        assert count >= 0, "count must be >= 0"
        if count:
            self.buffer.append(Segment("\n" * count))
            self._check_buffer()
def _render(
self, renderable: RenderableType, options: Optional[ConsoleOptions]
) -> Iterable[Segment]:
"""Render an object in to an iterable of `Segment` instances.
This method contains the logic for rendering objects with the console protocol.
You are unlikely to need to use it directly, unless you are extending the library.
Args:
renderable (RenderableType): An object supporting the console protocol, or
an object that may be converted to a string.
options (ConsoleOptions, optional): An options objects. Defaults to None.
Returns:
Iterable[Segment]: An iterable of segments that may be rendered.
"""
render_iterable: Iterable[RenderableType]
render_options = options or self.options
if isinstance(renderable, Segment):
yield renderable
return
elif isinstance(renderable, ConsoleRenderable):
render_iterable = renderable.__console__(self, render_options)
elif isinstance(renderable, str):
from .text import Text
yield from self._render(Text(renderable), render_options)
return
else:
raise errors.NotRenderableError(
f"Unable to render {renderable!r}; "
"A str, Segment or object with __console__ method is required"
)
for render_output in render_iterable:
if isinstance(render_output, Segment):
yield render_output
else:
yield from self.render(render_output, render_options)
    def render(
        self, renderable: RenderableType, options: Optional[ConsoleOptions]
    ) -> Iterable[Segment]:
        """Render an object in to an iterable of `Segment` instances,
        applying the console's current style on top of each segment's style.

        You are unlikely to need to use it directly, unless you are
        extending the library.

        Args:
            renderable (RenderableType): An object supporting the console protocol, or
                an object that may be converted to a string.
            options (ConsoleOptions, optional): An options object, or None to use
                the console defaults.

        Returns:
            Iterable[Segment]: An iterable of segments that may be rendered.
        """
        yield from Segment.apply_style(
            self._render(renderable, options), self.current_style
        )
def render_all(
self, renderables: Iterable[RenderableType], options: Optional[ConsoleOptions]
) -> Iterable[Segment]:
"""Render a number of console objects.
Args:
renderables (Iterable[RenderableType]): Console objects.
options (Optional[ConsoleOptions]): Options for render.
Returns:
Iterable[Segment]: Segments to be written to the console.
"""
render_options = options or self.options
for renderable in renderables:
yield from self.render(renderable, render_options)
def render_lines(
self,
renderable: RenderableType,
options: Optional[ConsoleOptions],
style: Optional[Style] = None,
) -> List[List[Segment]]:
"""Render objects in to a list of lines.
The output of render_lines is useful when further formatting of rendered console text
is required, such as the Panel class which draws a border around any renderable object.
Args:
renderables (Iterable[RenderableType]): Any object or objects renderable in the console.
options (Optional[ConsoleOptions]): Console options used to render with.
Returns:
List[List[Segment]]: A list of lines, where a line is a list of Segment objects.
"""
render_options = options or self.options
with self.style(style or "none"):
_rendered = self.render(renderable, render_options)
lines = list(
Segment.split_and_crop_lines(
_rendered, render_options.max_width, style=style
)
)
return lines
    def render_str(self, text: str) -> "Text":
        """Convert a string to a Text instance.

        Args:
            text (str): Text to render.

        Returns:
            Text: A Text instance, with console markup interpreted when the
                console was created with ``markup=True``.
        """
        if self._markup:
            return markup.render(text)
        return markup.render_text(text)
def _get_style(self, name: str) -> Optional[Style]:
"""Get a named style, or `None` if it doesn't exist.
Args:
name (str): The name of a style.
Returns:
Optional[Style]: A Style object for the given name, or `None`.
"""
return self._styles.get(name, None)
    def get_style(
        self, name: Union[str, Style], *, default: Union[Style, str] = None
    ) -> Style:
        """Get a style by name, or parse a style definition string.

        Args:
            name (Union[str, Style]): The name of a style or a style definition.
                A Style instance is returned unchanged.
            default (Union[Style, str], optional): Fallback style (name or
                definition) used when *name* cannot be resolved. Defaults to None.

        Returns:
            Style: A Style object.

        Raises:
            errors.StyleSyntaxError: If *name* contains a space (i.e. looks like
                a definition) but fails to parse.
            MissingStyle: If no style could be resolved from a single-word name.
        """
        if isinstance(name, Style):
            return name

        try:
            # Named styles take priority; otherwise treat name as a definition.
            return self._styles.get(name) or Style.parse(name)
        except errors.StyleSyntaxError as error:
            if default is not None:
                return self.get_style(default)
            if " " in name:
                # Multi-word input was clearly meant as a style definition;
                # surface the syntax error rather than "missing style".
                raise
            raise errors.MissingStyle(f"No style named {name!r}; {error}")
    def push_style(self, style: Union[str, Style]) -> None:
        """Push a style on to the stack.

        The new style will be applied to all `write` calls, until
        `pop_style` is called.

        Args:
            style (Union[str, Style]): New style (or style name) to merge
                with the current style.
        """
        if isinstance(style, str):
            style = self.get_style(style)
        # The stack stores cumulative (already-merged) styles.
        self.current_style = self.current_style + style
        self.style_stack.append(self.current_style)
    def pop_style(self) -> Style:
        """Pop a style from the stack.

        This will revert to the style applied prior to the corresponding `push_style`.

        Returns:
            Style: The previously applied style.

        Raises:
            errors.StyleStackError: If only the default style remains on the stack.
        """
        # The bottom-most entry is the default style and must never be popped.
        if len(self.style_stack) == 1:
            raise errors.StyleStackError(
                "Can't pop the default style (check there is `push_style` for every `pop_style`)"
            )
        style = self.style_stack.pop()
        self.current_style = self.style_stack[-1]
        return style
    def style(self, style: Optional[Union[str, Style]]) -> StyleContext:
        """A context manager to apply a new style.

        Example:
            with context.style("bold red"):
                context.print("Danger Will Robinson!")

        Args:
            style (Optional[Union[str, Style]]): New style to apply, or None
                for a no-op style context.

        Returns:
            StyleContext: A style context manager.

        Raises:
            TypeError: If *style* is neither None, a str, nor a Style.
        """
        if style is None:
            return StyleContext(self, None)
        if isinstance(style, str):
            _style = self.get_style(style)
        else:
            if not isinstance(style, Style):
                raise TypeError(f"style must be a str or Style instance, not {style!r}")
            _style = style
        return StyleContext(self, _style)
def _collect_renderables(
self,
objects: Iterable[Any],
sep: str,
end: str,
emoji=True,
highlight: bool = True,
) -> List[ConsoleRenderable]:
"""Combined a number of renderables and text in to one renderable.
Args:
renderables (Iterable[Union[str, ConsoleRenderable]]): [description]
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\n".
emoji (bool): If True, emoji codes will be replaced, otherwise emoji codes will be left in.
highlight (bool, optional): Perform highlighting. Defaults to True.
Returns:
List[ConsoleRenderable]: A list of things to render.
"""
from .text import Text
sep_text = Text(sep)
end_text = Text(end)
renderables: List[ConsoleRenderable] = []
append = renderables.append
text: List[Text] = []
append_text = text.append
_highlighter: HighlighterType
if highlight:
_highlighter = self.highlighter
else:
_highlighter = _null_highlighter
def check_text() -> None:
if text:
if end:
append_text(end_text)
append(sep_text.join(text))
del text[:]
for renderable in objects:
if isinstance(renderable, ConsoleRenderable):
check_text()
append(renderable)
continue
console_str_callable = getattr(renderable, "__console_str__", None)
if console_str_callable is not None:
append_text(console_str_callable())
continue
if isinstance(renderable, str):
render_str = renderable
if emoji:
render_str = _emoji_replace(render_str)
render_text = self.render_str(render_str)
append_text(_highlighter(render_text))
elif isinstance(renderable, Text):
append_text(renderable)
elif isinstance(renderable, (int, float, bool, bytes, type(None))):
append_text(_highlighter(repr(renderable)))
elif isinstance(renderable, (Mapping, Sequence)):
check_text()
append(Pretty(renderable, highlighter=_highlighter))
else:
append_text(_highlighter(repr(renderable)))
check_text()
return renderables
def rule(self, title: str = "", character: str = "─") -> None:
"""Draw a line with optional centered title.
Args:
title (str, optional): Text to render over the rule. Defaults to "".
character (str, optional): Character to form the line. Defaults to "─".
"""
from .text import Text
width = self.width
if not title:
self.print(Text(character * width, "rule.line"))
else:
title_text = Text.from_markup(title, "rule.text")
if len(title_text) > width - 4:
title_text.set_length(width - 4)
rule_text = Text()
center = (width - len(title_text)) // 2
rule_text.append(character * (center - 1) + " ", "rule.line")
rule_text.append(title_text)
rule_text.append(
" " + character * (width - len(rule_text) - 1), "rule.line"
)
self.print(rule_text)
def print(
self,
*objects: Any,
sep=" ",
end="\n",
style: Union[str, Style] = None,
emoji=True,
highlight: bool = True,
) -> None:
r"""Print to the console.
Args:
objects (positional args): Objects to log to the terminal.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\n".
style (Union[str, Style], optional): A style to apply to output. Defaults to None.
emoji (bool): If True, emoji codes will be replaced, otherwise emoji codes will be left in.
highlight (bool, optional): Perform highlighting. Defaults to True.
"""
if not objects:
self.line()
return
renderables = self._collect_renderables(
objects, sep=sep, end=end, emoji=emoji, highlight=highlight,
)
render_options = self.options
extend = self.buffer.extend
render = self.render
with self.style(style):
for renderable in renderables:
extend(render(renderable, render_options))
def log(
self,
*objects: Any,
sep=" ",
end="\n",
highlight: bool = True,
log_locals: bool = False,
_stack_offset=1,
) -> None:
r"""Log rich content to the terminal.
Args:
objects (positional args): Objects to log to the terminal.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\n".
highlight (bool, optional): Perform highlighting. Defaults to True.
log_locals (bool, optional): Boolean to enable logging of locals where ``log()``
was called. Defaults to False.
_stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1.
"""
if not objects:
self.line()
return
renderables = self._collect_renderables(
objects, sep=sep, end=end, highlight=highlight
)
caller = inspect.stack()[_stack_offset]
path = caller.filename.rpartition(os.sep)[-1]
line_no = caller.lineno
if log_locals:
locals_map = {
key: value
for key, value in caller.frame.f_locals.items()
if not key.startswith("__")
}
renderables.append(tabulate_mapping(locals_map, title="Locals"))
with self:
self.buffer.extend(
self.render(
self._log_render(self, renderables, path=path, line_no=line_no),
self.options,
)
)
    def _check_buffer(self) -> None:
        """Check if the buffer may be rendered."""
        # Only flush once every nested buffer context has been exited.
        if self._buffer_index == 0:
            text = self._render_buffer()
            self.file.write(text)
    def _render_buffer(self) -> str:
        """Render buffered output, and clear buffer.

        Returns:
            str: The rendered text (with ANSI codes when segments carry styles).
        """
        output: List[str] = []
        append = output.append
        color_system = self._color_system
        # Work on a copy so the live buffer can be cleared immediately.
        buffer = self.buffer[:]
        if self.record:
            # Keep a copy of everything written for export_text/export_html.
            self._record_buffer.extend(buffer)
        del self.buffer[:]
        for line in Segment.split_and_crop_lines(buffer, self.width):
            for text, style in line:
                if style:
                    append(style.render(text, color_system=color_system, reset=True))
                else:
                    append(text)
            append("\n")
        rendered = "".join(output)
        return rendered
    def export_text(self, clear: bool = True, styles: bool = False) -> str:
        """Generate text from console contents (requires record=True argument in constructor).

        Args:
            clear (bool, optional): Set to ``True`` to clear the record buffer after exporting.
            styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
                Defaults to ``False``.

        Returns:
            str: String containing console contents.
        """
        assert (
            self.record
        ), "To export console contents set record=True in the constructor or instance"
        if styles:
            # Re-render each recorded segment with its ANSI style codes.
            text = "".join(
                (style.render(text, reset=True) if style else text)
                for text, style in self._record_buffer
            )
        else:
            text = "".join(text for text, _ in self._record_buffer)
        if clear:
            del self._record_buffer[:]
        return text
    def save_text(self, path: str, clear: bool = True, styles: bool = False) -> None:
        """Generate text from console and save to a given location (requires record=True argument in constructor).

        Args:
            path (str): Path to write text files.
            clear (bool, optional): Set to ``True`` to clear the record buffer after exporting.
            styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
                Defaults to ``False``.
        """
        text = self.export_text(clear=clear, styles=styles)
        # NOTE(review): opens with the platform default encoding; exported
        # text may contain non-ASCII box characters — consider UTF-8.
        with open(path, "wt") as write_file:
            write_file.write(text)
def export_html(
self,
theme: Theme = None,
clear: bool = True,
code_format: str = None,
inline_styles: bool = False,
) -> str:
"""Generate HTML from console contents (requires record=True argument in constructor).
Args:
theme (Theme, optional): Theme object containing console colors.
clear (bool, optional): Set to ``True`` to clear the record buffer after generating the HTML.
code_format (str, optional): Format string to render HTML, should contain {foreground}
{background} and {code}.
inline_styes (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
Defaults to False.
Returns:
str: String containing console contents as HTML.
"""
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
fragments: List[str] = []
append = fragments.append
_theme = theme or themes.DEFAULT
stylesheet = ""
def escape(text: str) -> str:
"""Escape html."""
return text.replace("&", "&").replace("<", "<").replace(">", ">")
render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format
if inline_styles:
for text, style in Segment.simplify(self._record_buffer):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
append(f'<span style="{rule}">{text}</span>' if rule else text)
else:
append(text)
else:
styles: Dict[str, int] = {}
for text, style in Segment.simplify(self._record_buffer):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
if rule:
style_number = styles.setdefault(rule, len(styles) + 1)
append(f'<span class="r{style_number}">{text}</span>')
else:
append(text)
else:
append(text)
stylesheet_rules: List[str] = []
stylesheet_append = stylesheet_rules.append
for style_rule, style_number in styles.items():
if style_rule:
stylesheet_append(f".r{style_number} {{{style_rule}}}")
stylesheet = "\n".join(stylesheet_rules)
rendered_code = render_code_format.format(
code="".join(fragments),
stylesheet=stylesheet,
foreground=_theme.foreground_color.hex,
background=_theme.background_color.hex,
)
if clear:
del self._record_buffer[:]
return rendered_code
    def save_html(
        self,
        path: str,
        theme: Theme = None,
        clear: bool = True,
        code_format=CONSOLE_HTML_FORMAT,
        inline_styles: bool = False,
    ) -> None:
        """Generate HTML from console contents and write to a file (requires record=True argument in constructor).

        Args:
            path (str): Path to write html file.
            theme (Theme, optional): Theme object containing console colors.
            clear (bool, optional): Set to True to clear the record buffer after generating the HTML.
            code_format (str, optional): Format string to render HTML, should contain {foreground}
                {background} and {code}.
            inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
                larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
                Defaults to False.
        """
        html = self.export_html(
            theme=theme,
            clear=clear,
            code_format=code_format,
            inline_styles=inline_styles,
        )
        # NOTE(review): written with the platform default encoding — consider UTF-8.
        with open(path, "wt") as write_file:
            write_file.write(html)
if __name__ == "__main__": # pragma: no cover
console = Console()
with console.style("dim on black"):
console.print("[b]Hello[/b], [i]World[/i]!")
console.print("Hello, *World*!")
console.log(
"JSONRPC *request*",
5,
1.3,
True,
False,
None,
{
"jsonrpc": "2.0",
"method": "subtract",
"params": {"minuend": 42, "subtrahend": 23},
"id": 3,
},
)
console.log("# Hello, **World**!")
console.log("Hello, World!", "{'a': 1}", repr(console))
console.log(
{
"name": None,
"empty": [],
"quiz": {
"sport": {
"answered": True,
"q1": {
"question": "Which one is correct team name in NBA?",
"options": [
"New York Bulls",
"Los Angeles Kings",
"Golden State Warriros",
"Huston Rocket",
],
"answer": "Huston Rocket",
},
},
"maths": {
"answered": False,
"q1": {
"question": "5 + 7 = ?",
"options": [10, 11, 12, 13],
"answer": 12,
},
"q2": {
"question": "12 - 8 = ?",
"options": [1, 2, 3, 4],
"answer": 4,
},
},
},
}
)
console.log("foo")
| [
"willmcgugan@gmail.com"
] | willmcgugan@gmail.com |
e651bd8c315d35488b06e41fba60525173633897 | 4ada73b1bcadc979fff2cde4fbc35916a4feae44 | /programs/bline.py | 3118994d6b2c8ec7320fa972245bc460005bd879 | [] | no_license | naveenkamalpv/computer-graphics-openGL | 1fa351b7543c8a358ac5487a2c6f7aa83264de90 | b26f8e3ce3670223de3aa16ef1ce0098d54c8fda | refs/heads/master | 2021-05-17T09:40:37.204884 | 2020-03-28T06:35:59 | 2020-03-28T06:35:59 | 250,728,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
def init():
    """Set up GL state: black background, 2D ortho projection, 3px points."""
    glClearColor(0.0,0.0,0.0,0.0)
    gluOrtho2D(-200.0,200.0,-200.0,200.0)
    glPointSize(3.0)
def Sign(x):
    """Return the sign of *x*: -1 for negative, 0 for zero, 1 for positive."""
    return (x > 0) - (x < 0)
def setPixel(xcoordinate,ycoordinate):
    """Draw a single point at the given integer coordinates and flush GL."""
    glBegin(GL_POINTS)
    glVertex2i(xcoordinate,ycoordinate)
    glEnd()
    glFlush()
def lineBressenham(x1,y1,x2,y2):
    """Rasterize a line from (x1, y1) to (x2, y2) with Bresenham's algorithm.

    Works in any octant: steps along the major axis, using s1/s2 as the step
    direction on each axis and the decision variable e to decide when to step
    on the minor axis.
    """
    x=x1
    y=y1
    dx=abs(x2-x1)
    dy=abs(y2-y1)
    s1=Sign(x2-x1)
    s2=Sign(y2-y1)
    Interchange=0
    # For steep lines (|dy| > |dx|) swap the roles of the axes so the loop
    # always iterates along the major axis.
    if dy>dx:
        Temp=dx
        dx=dy
        dy=Temp
        Interchange=1
    else:
        Interchange=0
    # Initial decision variable.
    e=(2*dy)-dx
    for i in range(1,dx+1):
        setPixel(x,y)
        # While the error says we've drifted, step along the minor axis.
        while e>0:
            if Interchange==1:
                x=x+s1
            else:
                y=y+s2
            e=e-(2*dx)
        # Always step along the major axis.
        if Interchange==1:
            y=y+s2
        else:
            x=x+s1
        e=e+(2*dy)
def intake():
    """Read the line endpoints as "x1 y1 x2 y2" from stdin into module globals."""
    global x1,y1,x2,y2
    x1,y1,x2,y2=map(int,input("Coordinate:").split())
def disp():
    """GLUT display callback: clear the window and draw the requested line."""
    glClear(GL_COLOR_BUFFER_BIT)
    lineBressenham(x1, y1, x2, y2)
def main():
    """Create the GLUT window, read input, and enter the GL main loop."""
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
    glutInitWindowSize(500,500)
    glutInitWindowPosition(50,50)
    glutCreateWindow("bressenham line")
    # Read endpoints before registering the display callback that uses them.
    intake()
    glutDisplayFunc(disp)
    init()
    glutMainLoop()
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | naveenkamalpv.noreply@github.com |
58f9152fddf03e8d20d8a7ec91a18b12835419f5 | a97834abe4ad70082fd3bd8c71af18dae9513323 | /core/utils/PredictionTimeExcel.py | 8eb92cfe50950ded3352928ddb44de39504c73a9 | [] | no_license | WarGen4ik/qorca | af9bfe17940ee7f614a227dc7298162eb6e70bc1 | c1482f27d6f56c706c2a892e2a457e800439fdb1 | refs/heads/master | 2022-12-14T16:32:13.338299 | 2018-06-20T09:14:12 | 2018-06-20T09:14:12 | 138,007,580 | 0 | 0 | null | 2022-12-08T02:11:10 | 2018-06-20T09:07:37 | Python | UTF-8 | Python | false | false | 9,892 | py | import openpyxl
from openpyxl.styles import *
from django.conf import settings
from django.utils.translation import gettext as _
from auth_main.models import User, Profile
from competition.utils import time_to_str
from core.models import CompetitionUser, CompetitionTeam, TeamRelationToUser, Distance, UserDistance
class PredictionTimeExcel:
alf = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    def __init__(self, competition):
        # Competition whose start-list workbook will be generated.
        self.competition = competition
    def create_excel(self, is_finished):
        """Build the predicted-times workbook, one sheet per competition day.

        For every distance and gender, swimmers are grouped into heats
        ("swims") of ``track_count`` entries, ordered by predicted time
        (descending) and name, and written with their lane assignments.

        :param is_finished: when True, only include UserDistance rows
            flagged as finished.
        :return: filesystem path of the saved .xlsx file.
        """
        wb = openpyxl.Workbook()
        ws = wb.active
        alignment = Alignment(horizontal='center')
        border = Border(
            left=Side(border_style="thin", color='000000'),
            right=Side(border_style="thin", color='000000'),
            top=Side(border_style="thin", color='000000'),
            bottom=Side(border_style="thin", color='000000'),
            outline=Side(border_style="thin", color='000000')
        )
        for day in range(self.competition.count_days):
            # Day 1 reuses the workbook's default sheet; later days get new ones.
            if day != 0:
                ws = wb.create_sheet()
            ws.title = _('Day ') + str(day+1)
            alf_index = 1
            index = 1
            columns = [_('Member'), _('Age group'), _('Team'), _('City'), _('Time'), _('Track')]
            distance_index = 1
            swim_index = 1
            for distance in Distance.objects.filter(competition=self.competition, day=day+1).all():
                # NOTE(review): alf_index is never modified, so the merged
                # header rows always span the same columns — confirm intended.
                char = self.get_char(alf_index - 1)
                next_char = self.get_char(alf_index + 4)
                for x in range(4):
                    ws.column_dimensions[self.get_char(x)].width = 15
                for gender in range(2, 0, -1):
                    if gender == 1:
                        gender_text = _('Males')
                    else:
                        gender_text = _('Females')
                    not_end = True
                    column_index = 1
                    distance_swim_index = 1
                    # Emit one heat per iteration until this distance/gender
                    # combination runs out of swimmers (exit via break below).
                    while not_end:
                        if is_finished:
                            users_distances = UserDistance.objects.filter(distance=distance, user__profile__gender=gender, is_finished=is_finished)\
                                .order_by('-pre_time', 'user__last_name', 'user__first_name')[(distance_swim_index - 1) * self.competition.track_count:distance_swim_index * self.competition.track_count]
                        else:
                            users_distances = UserDistance.objects.filter(distance=distance,
                                                                          user__profile__gender=gender) \
                                .order_by('-pre_time', 'user__last_name', 'user__first_name')[(distance_swim_index - 1) * self.competition.track_count:distance_swim_index * self.competition.track_count]
                        if not users_distances:
                            break
                        # NOTE(review): self-assignment below looks like a
                        # leftover debugger breakpoint anchor.
                        if swim_index == 14:
                            swim_index = swim_index
                        # Heat title row, e.g. "Swim №3 - Males".
                        ws.merge_cells('{}{}:{}{}'.format(char, index, next_char, index))
                        ws['{}{}'.format(char, index)].font = Font(size=14, bold=True)
                        ws['{}{}'.format(char, index)].alignment = alignment
                        ws['{}{}'.format(char, index)] = _('Swim ') + '№{} - {}'.format(swim_index, gender_text)
                        index += 1
                        # Distance subtitle row, e.g. "Distance №1 - 50 <type>".
                        ws.merge_cells('{}{}:{}{}'.format(char, index, next_char, index))
                        ws['{}{}'.format(char, index)].font = Font(size=12, bold=True)
                        ws['{}{}'.format(char, index)].alignment = alignment
                        ws['{}{}'.format(char, index)] = _('Distance ') + '№{}'.format(distance_index) + ' - ' + str(
                            distance.length) + ' ' + distance.get_type_display()
                        index += 2
                        # Spacer/merged row (content intentionally left empty).
                        ws.merge_cells('{}{}:{}{}'.format(char, index, next_char, index))
                        ws['{}{}'.format(char, index)].font = Font(size=12, bold=True)
                        ws['{}{}'.format(char, index)].alignment = alignment
                        # ws['{}{}'.format(char, index)].border = border
                        index += 1
                        # Column header row (Member / Age group / Team / City / Time / Track).
                        ws['{}{}'.format(self.get_char(column_index - 1), index)] = columns[column_index-1]
                        ws['{}{}'.format(self.get_char(column_index - 1), index)].alignment = alignment
                        ws['{}{}'.format(self.get_char(column_index - 1), index)].border = border
                        ws['{}{}'.format(self.get_char(column_index), index)] = columns[column_index]
                        ws['{}{}'.format(self.get_char(column_index), index)].alignment = alignment
                        ws['{}{}'.format(self.get_char(column_index), index)].border = border
                        ws['{}{}'.format(self.get_char(column_index + 1), index)] = columns[column_index+1]
                        ws['{}{}'.format(self.get_char(column_index + 1), index)].alignment = alignment
                        ws['{}{}'.format(self.get_char(column_index + 1), index)].border = border
                        ws['{}{}'.format(self.get_char(column_index + 2), index)] = columns[column_index+2]
                        ws['{}{}'.format(self.get_char(column_index + 2), index)].alignment = alignment
                        ws['{}{}'.format(self.get_char(column_index + 2), index)].border = border
                        ws['{}{}'.format(self.get_char(column_index + 3), index)] = columns[column_index+3]
                        ws['{}{}'.format(self.get_char(column_index + 3), index)].alignment = alignment
                        ws['{}{}'.format(self.get_char(column_index + 3), index)].border = border
                        ws['{}{}'.format(self.get_char(column_index + 4), index)] = columns[column_index+4]
                        ws['{}{}'.format(self.get_char(column_index + 4), index)].alignment = alignment
                        ws['{}{}'.format(self.get_char(column_index + 4), index)].border = border
                        index += 1
                        track_index = 1
                        # NOTE(review): with exactly 4 swimmers the rows are
                        # written in order 1,4,3,2 — presumably lane seeding;
                        # confirm against the competition rules.
                        if len(users_distances) == 4:
                            tracks = [index, index+3, index+2, index+1]
                        elif len(users_distances) == 3:
                            tracks = [index, index+1, index+2, index+3]
                        else:
                            tracks = [index, index+1, index+2, index+3]
                        for user_distance in users_distances:
                            ws['{}{}'.format(self.get_char(column_index - 1), tracks[track_index-1])] = user_distance.user.full_name
                            ws['{}{}'.format(self.get_char(column_index - 1), tracks[track_index-1])].border = border
                            ws['{}{}'.format(self.get_char(column_index), tracks[track_index-1])] = '{}({})'.format(user_distance.user.profile.get_age_group(), user_distance.user.profile.get_age_group_numbers())
                            ws['{}{}'.format(self.get_char(column_index), tracks[track_index-1])].border = border
                            try:
                                # Prefer the swimmer's team when it has a complete
                                # registration for this competition.
                                team = TeamRelationToUser.objects.filter(user=user_distance.user).first().team
                                CompetitionTeam.objects.get(team=team, competition=self.competition, is_complete=True)
                                team = team.name
                            except:
                                # No (complete) team relation: fall back to the
                                # profile's default team.
                                team = user_distance.user.profile.default_team
                            ws['{}{}'.format(self.get_char(column_index + 1), tracks[track_index-1])] = team
                            ws['{}{}'.format(self.get_char(column_index + 1), tracks[track_index-1])].border = border
                            ws['{}{}'.format(self.get_char(column_index + 2), tracks[track_index-1])] = user_distance.user.profile.city
                            ws['{}{}'.format(self.get_char(column_index + 2), tracks[track_index-1])].border = border
                            ws['{}{}'.format(self.get_char(column_index + 3), tracks[track_index-1])] = time_to_str(user_distance.pre_time)
                            ws['{}{}'.format(self.get_char(column_index + 3), tracks[track_index-1])].border = border
                            ws['{}{}'.format(self.get_char(column_index + 4), index + track_index -1)] = track_index
                            ws['{}{}'.format(self.get_char(column_index + 4), index + track_index -1)].border = border
                            track_index += 1
                        # Advance past the heat's rows plus a blank separator.
                        index += 5
                        swim_index += 1
                        distance_swim_index += 1
                distance_index += 1
        path = settings.BASE_DIR + "/media/predictions/" + str(self.competition.id) + ".xlsx"
        wb.save(path)
        return path
def is_group_exists(self, group, members):
for member in members:
if member.profile.get_age_group() == group:
return True
return False
def get_char(self, index):
try:
return self.alf[index]
except:
index -= 26
return 'A' + self.alf[index]
def get_all_members(self):
competition_rel_user = CompetitionUser.objects.filter(competition=self.competition).all()
single_members = list()
for rel in competition_rel_user:
single_members.append(rel.user)
teams = CompetitionTeam.objects.filter(competition=self.competition).all()
team_members = list()
for team in teams:
rels = TeamRelationToUser.objects.filter(team=team.team).all()
for rel in rels:
team_members.append(rel.user)
return single_members + list(set(team_members) - set(single_members))
| [
"illko73@mail.ru"
] | illko73@mail.ru |
48213817a016caeacd108b0ee68f0d38b928e671 | 35bcf49f196154dc5259efd1fb145cda0d3de1a3 | /Data-Structures/Week3/merging_tables.py | d6c1e62088e51dc3870bdef0d203e34cbd78eabb | [] | no_license | andyc1997/Data-Structures-and-Algorithms | 0113fb105c1f0022b1550f8186f146babacc1867 | bb715a3e2e0bb1699bd94708b3fe3b3227c8632a | refs/heads/main | 2023-06-18T01:26:12.229427 | 2021-07-17T03:30:42 | 2021-07-17T03:30:42 | 332,850,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,919 | py | # python3
# Task. There are n tables stored in some database. The tables are numbered from 1 to n. All tables share
# the same set of columns. Each table contains either several rows with real data or a symbolic link to
# another table. Initially, all tables contain data, and i-th table has r[i] rows. You need to perform 𝑚 of
# the following operations:
# 1. Consider table number destination[i] Traverse the path of symbolic links to get to the data. That is,
# while destination[i] contains a symbolic link instead of real data do
# destination[i] <- symlink(destination[i])
# 2. Consider the table number source[i] and traverse the path of symbolic links from it in the same manner as for destination[i].
# 3. Now, destination[i] and source[i] are the numbers of two tables with real data. If destination[i] != source[i]
# copy all the rows from table source[i] to table destination[i] then clear the table source[i]
# and instead of real data put a symbolic link to destination[i] into it.
# 4. Print the maximum size among all n tables (recall that size is the number of rows in the table).
# If the table contains only a symbolic link, its size is considered to be 0.
# Input Format. The first line of the input contains two integers n and m — the number of tables in the database and the number of merge queries to perform, respectively.
# The second line of the input contains n integers r[i] — the number of rows in the i-th table.
# Then follow m lines describing merge queries. Each of them contains two integers destination[i] and
# source[i] — the numbers of the tables to merge.
# Output Format. For each query print a line containing a single integer — the maximum of the sizes of all
# tables (in terms of the number of rows) after the corresponding operation.
class Database:
    """Disjoint-set forest over tables, tracking row counts at each root.

    ``row_counts[i]`` is the number of rows stored in table i (0 once the
    table has been replaced by a symbolic link), and ``max_row_count`` is
    the largest table size seen so far.
    """

    def __init__(self, row_counts):
        self.row_counts = row_counts
        self.max_row_count = max(row_counts)
        size = len(row_counts)
        # Union-by-rank bookkeeping: every table starts as its own root.
        self.ranks = [1] * size
        self.parents = list(range(size))

    def compare_max(self, k):
        """Refresh the running maximum with the row count of table ``k``."""
        if self.max_row_count < self.row_counts[k]:
            self.max_row_count = self.row_counts[k]

    def union(self, i, j):
        """Link roots ``i`` and ``j``; the shallower tree hangs under the deeper one."""
        if self.ranks[i] > self.ranks[j]:
            winner, loser = i, j
        else:
            winner, loser = j, i
        self.parents[loser] = winner
        # All rows migrate to the surviving root; the other table becomes a link.
        self.row_counts[winner] += self.row_counts[loser]
        self.row_counts[loser] = 0
        self.compare_max(winner)
        # Equal ranks: the merged tree just grew one level taller.
        if self.ranks[i] == self.ranks[j]:
            self.ranks[winner] += 1

    def merge(self, src, dst):
        """Union the sets holding ``src`` and ``dst``; False if already joined."""
        root_src = self.get_parent(src)
        root_dst = self.get_parent(dst)
        if root_src == root_dst:
            return False
        self.union(root_src, root_dst)
        return True

    def get_parent(self, table):
        """Return the root of ``table``, compressing the path along the way."""
        root = table
        while root != self.parents[root]:
            root = self.parents[root]
        # Second pass: point every node on the path directly at the root.
        while table != root:
            table, self.parents[table] = self.parents[table], root
        return root
def main():
    """Read table sizes and merge queries from stdin; after each merge,
    print the maximum table size across the whole database."""
    n_tables, n_queries = map(int, input().split())
    counts = list(map(int, input().split()))
    assert len(counts) == n_tables
    db = Database(counts)
    for i in range(n_queries):
        dst, src = map(int, input().split())
        # Queries are 1-based; Database works with 0-based indices.
        db.merge(src - 1, dst - 1)
        print(db.max_row_count)

if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | andyc1997.noreply@github.com |
9f5efcb0f9d51a2430adf33f838b10e9ea55edbc | fbeeffbc312adc489a60804a891474a6ff660052 | /castle.py | a244ed106b62d07a0ff1c0fb839b040827e78bfd | [] | no_license | harras/whitecliff | bdcd4ccf71a2202f6635af0db63e99b10531a582 | f06a7673ca599a351baa70897f9cfa173f25479f | refs/heads/master | 2021-01-22T06:06:38.281598 | 2017-10-20T14:31:43 | 2017-10-20T14:31:43 | 102,284,158 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,786 | py | import room
import npc
import random
castle ={}
rooms = []
npcs = {}
npcs_ = {}
monsters = {}
monsters_ = {}
# Initializing all the rooms
# Floor 1
rooms.append(room.Room("Front Porch", 1)) # 0
rooms.append(room.Room("Forier", 10)) # 1
rooms.append(room.Room("Closet", 1)) # 2
rooms.append(room.Room("Den", 2)) # 3
rooms.append(room.Room("Patio", 1)) # 4
rooms.append(room.Room("Hallway 1", 5)) # 5
rooms.append(room.Room("Bathroom 1", 1)) # 6
rooms.append(room.Room("Kitchen", 1)) # 7
rooms.append(room.Room("Dining Room", 3)) # 8
rooms.append(room.Room("Green House", 3)) # 9
rooms.append(room.Room("Garden Path", 1)) # 10
#Floor 2
rooms.append(room.Room("Guest Bed 1", 2)) # 11
rooms.append(room.Room("Guest Bed 2", 2)) # 12
rooms.append(room.Room("Tower Study", 1)) # 13
rooms.append(room.Room("Library", 2)) # 14
rooms.append(room.Room("Staircase", 4)) # 15
#Floor 3
rooms.append(room.Room("Hallway 2", 5)) # 16
rooms.append(room.Room("Attic", 1)) # 17
rooms.append(room.Room("Training Room", 1)) # 18
rooms.append(room.Room("Bathroom 2", 1)) # 19
rooms.append(room.Room("Ilta's Room", 2)) # 20
rooms.append(room.Room("Ilta's Balcony", 1))# 21
#Roof
rooms.append(room.Room("Roof", 1)) # 22
#Basement
rooms.append(room.Room("Toto's Chamber", 2))# 23
rooms.append(room.Room("Basement Hall", 8)) # 24
rooms.append(room.Room("Cave", 1)) # 25
rooms.append(room.Room("Cell 1", 1)) # 26
rooms.append(room.Room("Cell 2", 1)) # 27
rooms.append(room.Room("Cell 3", 1)) # 28
rooms.append(room.Room("Cell 4", 1)) # 29
rooms.append(room.Room("Cell 5", 1)) # 30
rooms.append(room.Room("Cell 6", 1)) # 31
rooms.append(room.Room("Mansion Core", 1)) # 32
# Creates default castle layout
# Painstaking... should've been a dict
# Human readability is important, dammit!
# This all looks like nonsense
def init_castle():
    """Wire up the default (hand-authored) door connections between rooms.

    Adjacency is stored on each room; connections are declared from both
    sides, so each pair of connected rooms lists the other.
    """
    rooms[0].add_adj(rooms[1])
    rooms[1].add_adj(rooms[0], rooms[5], rooms[8], rooms[2], rooms[3], rooms[13], rooms[14], rooms[15], rooms[11], rooms[12])
    rooms[2].add_adj(rooms[1])
    rooms[3].add_adj(rooms[1], rooms[4])
    rooms[4].add_adj(rooms[3])
    rooms[5].add_adj(rooms[6], rooms[7], rooms[9], rooms[8], rooms[1])
    rooms[6].add_adj(rooms[5])
    rooms[7].add_adj(rooms[5])
    rooms[8].add_adj(rooms[5], rooms[9], rooms[1])
    rooms[9].add_adj(rooms[10], rooms[8], rooms[5])
    rooms[10].add_adj(rooms[9])
    rooms[11].add_adj(rooms[1], rooms[12])
    rooms[12].add_adj(rooms[1], rooms[13])
    rooms[13].add_adj(rooms[1])
    rooms[14].add_adj(rooms[15], rooms[1])
    rooms[15].add_adj(rooms[14], rooms[1], rooms [16], rooms[22])
    rooms[16].add_adj(rooms[17], rooms[15], rooms[18], rooms[19], rooms[20])
    rooms[17].add_adj(rooms[16])
    rooms[18].add_adj(rooms[16])
    rooms[19].add_adj(rooms[16])
    rooms[20].add_adj(rooms[16], rooms[21])
    rooms[21].add_adj(rooms[20])
    rooms[22].add_adj(rooms[15])
    rooms[23].add_adj(rooms[25], rooms[24])
    rooms[24].add_adj(rooms[31], rooms[30], rooms[29], rooms[23], rooms[26], rooms[27], rooms[28], rooms[32])
    rooms[25].add_adj(rooms[23])
    rooms[26].add_adj(rooms[24])
    rooms[27].add_adj(rooms[24])
    rooms[28].add_adj(rooms[24])
    rooms[29].add_adj(rooms[24])
    rooms[30].add_adj(rooms[24])
    rooms[31].add_adj(rooms[24])
    rooms[32].add_adj(rooms[24])
# NPCs and Monsters
# Consider dropping the name attribute...
def init_npcs():
    """Register the starting NPCs and basement monsters at their home rooms.

    The numeric argument on monsters is passed to npc.NPC alongside a
    mobility flag — presumably a combat stat; confirm against npc.NPC.
    """
    npcs['witch1'] = npc.NPC("Witch 1", rooms[17], False)
    npcs['witch2'] = npc.NPC("Witch 2", rooms[3])
    npcs['shadow1'] = npc.NPC("Shadow 1", rooms[3])
    npcs['shadow2'] = npc.NPC("Shadow 2", rooms[3])
    npcs['shadow3'] = npc.NPC("Shadow 3", rooms[3])
    monsters['beast'] = npc.NPC("Displacer Beast", rooms[26], False, 4)
    monsters['cube'] = npc.NPC("Gelatinous Cube", rooms[27], False, 7)
    monsters['ithilid'] = npc.NPC("Ithilid", rooms[28], False, 2) # Int rolls?
    monsters['rust'] = npc.NPC("Rust Monster", rooms[29], False, 8) # Eats weapons
    monsters['grue'] = npc.NPC("Grue", rooms[30], False, 3) # Casts magical darkness, never seen
    monsters['xorn'] = npc.NPC("Xorn", rooms[31], False, 4) # Eats magic items
# Functions related to saving. Restructures rooms into a dict, no pointers
# Creates dict
def write_castle():
    """Serialize room adjacency into the castle dict as name lists (no object refs),
    so the layout can be saved without pickling Room instances."""
    for current in rooms:
        castle[current.name] = [current.adj[slot].name for slot in range(current.door_num)]
# Reads dict
def read_castle():
    """Rebuild Room adjacency objects from the name-based castle dict."""
    for current in rooms:
        current.clear()
    for parent_name, child_names in castle.items():
        parent = find_room(parent_name)
        for child_name in child_names:
            parent.adj.append(find_room(child_name))
# Save-game serialization helpers for NPCs.
def write_npcs():
    """Prepare npcs_/monsters_ for saving by replacing Room objects with room names."""
    # NOTE(review): npcs_[i] aliases npcs[i] (same object), so this also turns
    # npcs[i].location into a string until read_npcs() restores it — confirm
    # that no stepping happens between save and restore.
    for i in npcs:
        npcs_[i] = npcs[i]
        npcs_[i].location = npcs_[i].location.name
    for i in monsters:
        monsters_[i] = monsters[i]
        monsters_[i].location = monsters_[i].location.name
# Inverse of write_npcs: restore Room objects from their saved names.
def read_npcs():
    """Rehydrate npcs/monsters from npcs_/monsters_, resolving room names back to Room objects."""
    for i in npcs_:
        npcs[i] = npcs_[i]
        npcs[i].location = find_room(npcs[i].location)
    for i in monsters_:
        monsters[i] = monsters_[i]
        monsters[i].location = find_room(monsters[i].location)
# Helper: resolve a room by (case-insensitive) name.
def find_room(s):
    """Return the Room whose name matches ``s`` (case-insensitive), else None."""
    target = s.lower()
    for candidate in rooms:
        if candidate.name.lower() == target:
            return candidate
# Main algorithm. It works, but consider revising.
# An open list that shrinks when rooms become unavailable
# would save time complexity.
# Rereading this now, lol... concerns over runtime
def shuffle():
    """Randomly rewire the castle: clear all adjacency, then repeatedly pair
    rooms that still have free door slots until no room has any left."""
    open_list = list(rooms)
    for i in open_list:
        i.clear()
    # NOTE(review): the inner loop mutates open_list while iterating over it;
    # Python tolerates this but items may be skipped — confirm acceptable.
    while(open_list):
        for i in open_list:
            rand = int(random.random()*len(open_list))
            if(not i.is_avail()):
                # i has no free doors left; retire it from the pool.
                open_list.remove(i)
            elif(not open_list[rand].is_avail()):
                open_list.remove(open_list[rand])
            elif(i == open_list[rand]):
                # Self-connection only if the room still has 2+ free slots.
                if(i.counter + 1) < i.door_num:
                    i.add_adj(open_list[rand])
                    open_list[rand].add_adj(i)
                else:
                    continue
            else:
                i.add_adj(open_list[rand])
                open_list[rand].add_adj(i)
# Helper function for seeing the full range
# of rooms available to a player
def dfs(start):
    """Return the set of rooms reachable from ``start`` (iterative depth-first search)."""
    seen = set()
    pending = [start]
    while pending:
        node = pending.pop()
        if node in seen:
            continue
        seen.add(node)
        # Queue unseen neighbours; duplicates on the stack are filtered above.
        pending.extend(neighbour for neighbour in node.adj if neighbour not in seen)
    return seen
# Printing functions
def print_rooms():
    """Print the adjacency list of every room (delegates to Room.print_adj)."""
    for i in rooms:
        i.print_adj()
# Event functions and helper functions
# Tells DM if monsters are in the same room as NPCs
# figure this out in a bit
# I realize the doubling up for the two lists is bad code.
# I'm owning up to it... it makes it so I don't have to
# double up in main.
def step(npc_list = None):
    """Advance NPCs by one movement tick.

    When a specific dict of NPCs is supplied, only those are stepped;
    otherwise both global registries (npcs and monsters) are stepped.
    """
    if npc_list:
        for key in npc_list:
            npc_list[key].step()
        return
    for key in npcs:
        npcs[key].step()
    for key in monsters:
        monsters[key].step()
# Make 'open all' not affect this
# add 'flagged rooms' option
def throw_npc_exception(room=None, b=False, room_adj=None):
    """Report NPC/monster positions of interest to the DM (Python 2 prints).

    :param room: the players' current room, if any.
    :param b: when True, also report occupants of the "special" rooms and
        wake monsters that share a room with an NPC.
    :param room_adj: a room adjacent to the players, checked before entry.
    """
    # Rooms the DM always wants flagged.
    s = ['Green House', 'Training Room', 'Toto\'s Chamber', 'Library']
    if(b):
        for n in npcs:
            if npcs[n].location.name in s:
                print "* " + npcs[n].name + " is in (the) " + npcs[n].location.name
        for n in npcs:
            for m in monsters:
                if npcs[n].location.name == monsters[m].location.name:
                    # Sharing a room with an NPC wakes the monster.
                    monsters[m].set_mobility(True)
                    print("** " + npcs[n].name + " and the " + monsters[m].name + " are both in (the) " + npcs[n].location.name)
        for n in monsters:
            if monsters[n].location.name in s:
                print "* " + monsters[n].name + " is in (the) " + monsters[n].location.name
    for n in npcs:
        if(room_adj):
            if npcs[n].location.name == room_adj.name:
                print("! " + npcs[n].name + " is in that room")
        if(room):
            if npcs[n].location.name == room.name:
                print("!! " + npcs[n].name + " is in the room you're in")
    for n in monsters:
        if(room_adj):
            if monsters[n].location.name == room_adj.name:
                # Monster gets a free step when the players approach; only
                # report it if it is still in the adjacent room afterwards.
                monsters[n].set_mobility(True)
                monsters[n].step()
                if monsters[n].location.name == room_adj.name:
                    # NOTE(review): missing space after "The" in this message;
                    # string left as-is because it is runtime-visible.
                    print("!!! The" + monsters[n].name + " is in that room")
        if(room):
            if monsters[n].location.name == room.name:
                monsters[n].set_mobility(True)
                print("!!!! The " + monsters[n].name + " is in the room you're in")
# Registry lookup helpers.
def find_dict(s):
    """Return whichever registry (npcs or monsters) holds key ``s``, else None.

    Membership tests replace the original try/except-on-index pattern, which
    both swallowed unrelated errors (bare KeyError handling around a truthy
    test) and silently returned None when a stored NPC object was falsy.
    """
    if s in npcs:
        return npcs
    if s in monsters:
        return monsters
    return
def find_npc(s):
    """Return the NPC or monster registered under key ``s``, else None.

    Same fix as find_dict: direct membership tests instead of the
    try/except pattern that misbehaved for falsy registry values.
    """
    if s in npcs:
        return npcs[s]
    if s in monsters:
        return monsters[s]
    return
def print_npc_locations():
    """Print the current room of every NPC, then every monster (Python 2 prints)."""
    print
    for n in npcs:
        print(npcs[n].name + " is in (the) " + npcs[n].location.name)
    print
    for n in monsters:
        print(monsters[n].name + " is in (the) " + monsters[n].location.name)
def print_npc_vars():
    """Dump the internal state of every NPC and monster (delegates to print_vars)."""
    print ""
    for n in npcs:
        print ""
        npcs[n].print_vars()
        print "__________________"
    print ""
    for n in monsters:
        print ""
        monsters[n].print_vars()
# Events players trigger during the game
def event_1(current):
    """Shadow ambush in the players' current room.

    If Witch 2 is still registered she relocates here with one shadow;
    otherwise a larger group of shadows spawns instead.
    """
    # Use .get() so a missing/removed 'witch2' key falls through to the
    # shadow-swarm branch instead of raising KeyError (the original indexed
    # the dict directly, making the else branch unreachable once she's gone).
    if npcs.get('witch2'):
        npcs['witch2'].location = current
        npcs['shadow5'] = npc.NPC("Shadow 5", current)
    else:
        npcs['shadow5'] = npc.NPC("Shadow 5", current)
        npcs['shadow6'] = npc.NPC("Shadow 6", current)
        npcs['shadow7'] = npc.NPC("Shadow 7*", current)
        npcs['shadow8'] = npc.NPC("Shadow 8", current)
    throw_npc_exception(current, True)
def event_2(current):
    """Spawn Captain Highchurch and his soldiers at fixed posts around the mansion."""
    npcs['highchurch'] = npc.NPC("Captain Highchurch", rooms[8])
    npcs['soldier1'] = npc.NPC("Solider 1", rooms[8])
    npcs['soldier2'] = npc.NPC("Solider 2", rooms[8])
    npcs['soldier3'] = npc.NPC("Solider 3", rooms[8])
    npcs['soldier4'] = npc.NPC("Solider 4", rooms[9])
    npcs['soldier5'] = npc.NPC("Solider 5", rooms[10])
    npcs['soldier6'] = npc.NPC("Solider 6", rooms[1])
    npcs['soldier7'] = npc.NPC("Solider 7", rooms[1])
    npcs['soldier8'] = npc.NPC("Solider 8", rooms[1])
    npcs['soldier9'] = npc.NPC("Solider 9", rooms[15])
    npcs['soldier10'] = npc.NPC("Solider 10", rooms[15])
    # NOTE(review): display names use "Solider" (sic) throughout, and
    # soldier11/soldier12 both reuse "Solider 1" — looks like a copy-paste
    # slip; strings left untouched because they are runtime-visible.
    npcs['soldier11'] = npc.NPC("Solider 1", rooms[4])
    npcs['soldier12'] = npc.NPC("Solider 1", rooms[4])
    throw_npc_exception(current, True)
"nickharras@gmail.com"
] | nickharras@gmail.com |
a6971d41702409591fcad35aa105ef9816e13c4c | 8f8e2da05878feb1de9f0fc7e3e65b258088caa3 | /scripts/export.py | 7801ccd14edacac88317b6da0cf10a1faed8bbbe | [] | no_license | jvandew/wwww | dd8e5a41e65c462a0f97d0059d291bb5d8a728b5 | ac04e780541be960e2e5011969a922d0a22afd51 | refs/heads/master | 2021-06-23T16:00:35.796716 | 2019-04-25T04:03:09 | 2019-04-25T04:03:09 | 115,649,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | from argparse import ArgumentParser, FileType
from google.cloud import datastore
def parse_args():
parser = ArgumentParser('Export RSVP data to a .tsv file')
parser.add_argument('destination', type=FileType('w', encoding='UTF-8'))
return parser.parse_args()
def sanitize_string(string):
return string.replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t')
def main():
args = parse_args()
datastore_client = datastore.Client()
max_attendees = 0
lines = []
query = datastore_client.query(kind='rsvp')
for rsvp in query.fetch():
if 'going' in rsvp:
going = rsvp['going']
invited = ', '.join((
'{} {}'.format(
sanitize_string(invited['first_name']),
sanitize_string(invited['last_name']),
) for invited in rsvp['invited']
))
line = '{}\t{}'.format(going, invited)
if going:
count = len(rsvp['attending'])
email = sanitize_string(rsvp['email'])
other_notes = sanitize_string(rsvp['other_notes'])
line += '\t{}\t{}\t{}'.format(count, email, other_notes)
for attendee in rsvp['attending']:
line += '\t{}\t{}\t{}'.format(
sanitize_string(attendee['name']['first_name']),
sanitize_string(attendee['name']['last_name']),
sanitize_string(attendee['dietary_notes']),
)
max_attendees = max(max_attendees, count)
lines.append(line + '\n')
header = 'going\tinvited\tcount\temail(s)\tother_notes'
for i in range(1, max_attendees + 1):
header += '\tfirst_name {}\tlast_name {}\tdietary_notes {}'.format(i, i, i)
args.destination.write(header + '\n')
for line in lines:
args.destination.write(line)
if __name__ == '__main__':
main()
| [
"vandeweertj@gmail.com"
] | vandeweertj@gmail.com |
8addba9019db08f318b7a15e674840a95a4c3472 | 6cc66652dacf2e74387885c944cd5d12e461e1ca | /causeme/causeme_tcdf.py | a90506ba75433b62b30c6397b446382586d4f777 | [] | no_license | Gerdome/psda-group-4-assignment-4 | 30e10d9bc0918e94950542bc892c09fb9abc1f82 | e64e0c7f26221b7b4824c3bd1a461e0ef6278306 | refs/heads/master | 2022-11-30T11:06:20.333948 | 2020-07-20T14:23:32 | 2020-07-20T14:23:32 | 289,042,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,392 | py | """
This file must contain a function called my_method that triggers all the steps
required in order to obtain
*val_matrix: mandatory, (N, N) matrix of scores for links
*p_matrix: optional, (N, N) matrix of p-values for links; if not available,
None must be returned
*lag_matrix: optional, (N, N) matrix of time lags for links; if not available,
None must be returned
Zip this file (together with other necessary files if you have further handmade
packages) to upload as a code.zip. You do NOT need to upload files for packages
that can be imported via pip or conda repositories. Once you upload your code,
we are able to validate results including runtime estimates on the same machine.
These results are then marked as "Validated" and users can use filters to only
show validated results.
Shown here is a vector-autoregressive model estimator as a simple method.
"""
import numpy as np
import pandas as pd
import TCDF
cuda = False # 'Use CUDA (GPU) (default: False)')
#, help='Number of epochs (default: 1000)')
kernel_size = 6 #, help='Size of kernel, i.e. window size. Maximum delay to be found is kernel size - 1. Recommended to be equal to dilation coeffient (default: 4)')
#, help='Number of hidden layers in the depthwise convolution (default: 0)')
#, help='Learning rate (default: 0.01)')
optimizer = 'Adam' #, choices=['Adam', 'RMSprop'], help='Optimizer to use (default: Adam)')
log_interval = 500 #, help='Epoch interval to report loss (default: 500)')
seed = 1111 #, help='Random seed (default: 1111)')
dilation_coefficient = 6 #, help='Dilation coefficient, recommended to be equal to kernel size (default: 4)')
#, help="Significance number stating when an increase in loss is significant enough to label a potential cause as true (validated) cause. See paper for more details (default: 0.8)")
def runTCDF(data, hidden_layers, learning_rate, epochs, significance):
    """Loops through all variables in a dataset and return the discovered causes, time delays, losses, attention scores and variable names.

    Runs TCDF.findcauses once per column, treating that column as the target.
    Module-level constants (cuda, kernel_size, optimizer, seed, ...) supply
    the remaining hyperparameters.
    """
    df_data = pd.DataFrame(data)
    allcauses = dict()        # target column index -> list of cause indices
    alldelays = dict()        # (cause, effect) pair -> discovered time delay
    allreallosses=dict()      # target column index -> final training loss
    allscores=dict()          # target column index -> attention scores
    columns = list(df_data)
    for c in columns:
        idx = df_data.columns.get_loc(c)
        causes, causeswithdelay, realloss, scores = TCDF.findcauses(c, cuda=cuda, epochs=epochs,
            kernel_size=kernel_size, layers=hidden_layers+1, log_interval=log_interval,
            lr=learning_rate, optimizername=optimizer,
            seed=seed, dilation_c=dilation_coefficient, significance=significance, data=df_data, verbose=False)
        allscores[idx]=scores
        allcauses[idx]=causes
        alldelays.update(causeswithdelay)
        allreallosses[idx]=realloss
    return allcauses, alldelays, allreallosses, allscores, columns
# Your method must be called 'my_method'
# Describe all parameters (except for 'data') in the method registration on CauseMe
def my_method(data, hidden_layers = 0, learning_rate = 0.01, epochs = 1000, significance = 0.8):
    """Run TCDF on the dataset and convert its output to CauseMe matrices.

    :param data: array of shape (time, variables).
    :return: (val_matrix, p_matrix, lag_matrix) — entry [i, j] describes the
        link i --> j: a 0/1 confidence score, a 0/1 pseudo p-value, and the
        discovered time lag.
    """
    # Input data is of shape (time, variables)
    T, N = data.shape
    # Standardize data --> TODO: Not needed here?
    #data -= data.mean(axis=0)
    #data /= data.std(axis=0)
    allcauses, alldelays, allreallosses, allscores, columns = runTCDF(data, hidden_layers, learning_rate, epochs, significance)
    #return allcauses, alldelays, allreallosses, allscores, columns
    #########
    # CauseMe requires to upload a score matrix and
    # optionally a matrix of p-values and time lags where
    # the links occur
    # In val_matrix an entry [i, j] denotes the score for the link i --> j and
    # must be a non-negative real number with higher values denoting a higher
    # confidence for a link.
    # TCDF yields a hard decision per link, so scores are binary (0 or 1).
    val_matrix = np.zeros((N, N), dtype='float32')
    # Matrix of p-values (1 everywhere, 0 where TCDF validated a cause).
    p_matrix = np.ones((N, N), dtype='float32')
    # Matrix of time lags
    lag_matrix = np.zeros((N, N), dtype='uint8')
    for cause, lag in alldelays.items():
        i, j = cause
        val_matrix[i, j] = 1
        p_matrix[i, j] = 0
        lag_matrix[i, j] = lag
    return val_matrix, p_matrix, lag_matrix
"thassilo.helmold@student.kit.edu"
] | thassilo.helmold@student.kit.edu |
a4447efe94b092c2a01c2d6e6e1ece4a9215a5ea | 6c7b4ad829164c1c25387b645e60950946266a3f | /CameraLerpSystem.py | 9618ba01cba7ffbbc0cc64b7fdc7a8003b6358bb | [] | no_license | swssqewe12/Bubbles | 124bc59d56af3e303d7927917fc4cc77c3610b8a | ba748f7131bd9414c01fbc5c0941363024b0938e | refs/heads/master | 2021-07-24T03:41:46.673426 | 2020-04-19T16:32:51 | 2020-04-19T16:32:51 | 143,007,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | import esp, mathutils
class CameraLerpSystem(esp.Processor):
    """Processor that eases each registered camera toward its target zoom
    and position on every fixed-timestep update."""

    def __init__(self, cameras=None):
        # Default to None instead of a mutable [] literal: the original shared
        # one list object across every CameraLerpSystem instance (classic
        # mutable-default pitfall); each system now gets its own list.
        self.cameras = cameras if cameras is not None else []

    def fixed_update(self, dt):
        for camera in self.cameras:
            # lerp_speed <= 0 means the camera is not animating this frame.
            if camera.lerp_speed > 0:
                # Blend current -> target by lerp_speed, then clamp zoom to its limits.
                blended_zoom = mathutils.map_range(camera.lerp_speed, 0, 1, camera.zoom, camera.target_zoom)
                camera.zoom = max(min(blended_zoom, camera.max_zoom), camera.min_zoom)
                camera.pos.x = mathutils.map_range(camera.lerp_speed, 0, 1, camera.pos.x, camera.target_pos.x)
                camera.pos.y = mathutils.map_range(camera.lerp_speed, 0, 1, camera.pos.y, camera.target_pos.y)
""
] | |
42185505bd29c5ac54777505fbabd4ea3b4e1322 | e3488783b0cfc500a44361dc528ab93a88e923d9 | /normalizer/algorithms/norm_country.py | 56cee0402b629ff2f4cd822d78f348b7f72c2cba | [
"MIT"
] | permissive | JackonYang/paper-reactor | dce6ab5c34dd238d030cec81a915c6b6140152a4 | cfb327baa49e744b7a21c8b50df4bf612f4fb503 | refs/heads/master | 2023-03-09T10:41:32.143460 | 2021-02-13T10:29:24 | 2021-02-13T10:29:24 | 337,173,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,080 | py | import json
import os
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../'))
country_data_file = os.path.join(
PROJECT_ROOT, 'metadata/iso_3166_country_codes.json')
country_map = {}
# @API
def norm_country(addr):
    """Resolve a free-form address string to a canonical country name (None if unknown)."""
    # Lazily build the lookup table on first use.
    if not country_map:
        build_country_map()
    # The country is usually the last comma-separated component, so scan
    # the parts right-to-left.
    for part in reversed(addr.split(',')):
        key = part.strip(' .').lower()
        if key in country_map:
            return country_map[key]
    return None
country_map_extra = {
'UK': 'United Kingdom',
'Northern Ireland': 'United Kingdom', # part of the country
'Russia': 'Russian Federation',
'Korea': 'Korea (the Republic of)',
'South Korea': 'Korea (the Republic of)',
'S. Korea': 'Korea (the Republic of)',
'Republic of Korea': 'Korea (the Republic of)',
'Czech Republic': 'Czechia',
'The Netherlands': 'Netherlands',
'PR China': 'China',
'P. R. China': 'China',
"People's Republic of China": 'China',
"People’s Republic of China": 'China',
'Taiwan': 'Taiwan (Province of China)',
'México': 'Mexico',
'United States': 'United States of America',
'Napoli': 'Italy', # city of the country
'Iran': 'Iran (Islamic Republic of)',
'Kingdom of Saudi Arabia': 'Saudi Arabia',
'bâtiment A. Kastler': 'France', # city of the country
'UAE': 'United Arab Emirates',
'FI-40014': 'Finland',
'3112 Etcheverry Hall': 'United States of America', # location in the country
'School of Electrical and Computer Engineering': 'Brazil', # not sure
}
def build_country_map():
    """Populate the module-level country_map from the ISO 3166 data file,
    keyed by lowercase name/alpha-2/alpha-3, plus the manual aliases."""
    with open(country_data_file, 'r') as fr:
        data = json.load(fr)
    for item in data:
        name = item['country_name']
        # name and alias keys
        keys = ['country_name', 'alpha_2', 'alpha_3']
        for k in keys:
            country_map[item[k].lower()] = name
    # manually adding
    for k, v in country_map_extra.items():
        country_map[k.lower()] = v
country_map[k.lower()] = v
if __name__ == '__main__':
print(norm_country('Shanghai, China'))
print(norm_country('Shanghai China'))
| [
"i@jackon.me"
] | i@jackon.me |
8ca87d1f0a1907774df77a749ff725891e8cf4b2 | 8802fa5c4467fb6c783b6593ac2443f6a6e0a438 | /grupo03_site/grupo03_site/wsgi.py | ead079d8d623f82c195b9978b7a7c600939d522f | [] | no_license | rcastill/inf323-utfsm-grupo03 | 74cad25f97017270311780ddfe8009f20c564e42 | e018e15076e822a0c0b9e8bee6bc1177d728e4d4 | refs/heads/master | 2021-08-30T17:51:21.756030 | 2017-12-18T22:11:32 | 2017-12-18T22:11:32 | 103,285,728 | 0 | 0 | null | 2017-12-06T04:02:18 | 2017-09-12T15:07:11 | Python | UTF-8 | Python | false | false | 402 | py | """
WSGI config for grupo03_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Select the project's settings module before building the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "grupo03_site.settings")
application = get_wsgi_application()
| [
"diego.villegas.12@sansano.usm.cl"
] | diego.villegas.12@sansano.usm.cl |
3641a1a7abf5f59ae8d5f366b0ad5dc4e2e16061 | 9621f93999596a0f19df69aee91ad5b9b726c2d8 | /sLAB_All_0430/RMT_Main.py | 37a054056a2a844746699e0fa8f96da409fff2e4 | [] | no_license | Chia-Long/DQA_Wab | 57fcd4506d63d794ad67a0c9e0330aece5819ec1 | 9fe9eab675f03389101a7d561fa2df582f6e6562 | refs/heads/master | 2022-07-18T04:46:04.083682 | 2020-05-21T02:50:59 | 2020-05-21T02:50:59 | 261,126,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,243 | py | import dash
from dash.dependencies import Input, Output
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import random
import dash_bootstrap_components as dbc
#colors = ['E9C46A','F4A261','E76F51','00AEE0','04294F','672A4E','EF476F','FFD166','06D6A0','118AB2','073B4C','264653','2A9D8F','69306D','F2D7EE','D3BCC0']
# 讀取Memory代碼對應表(ID v.s. Memory Types)
df_comp_memory = pd.read_csv("data/component_memory.csv", index_col="ID")
memory_part = df_comp_memory['Part Number'].to_list()
memory_id = df_comp_memory.index.to_list()
#print(df_comp_memory)
df = pd.read_csv('data/RMT.csv')
orders = list(df.columns)
df_result = df.iloc[:,0:21]
result_orders = ['Test_ID', 'PCB_Name', 'Memory_Name', 'Memory_Frequency','Intel_code_name', 'Sample', 'RxDqs-', 'RxDqs+', 'RxV-', 'RxV+', 'TxDq-', 'TxDq+', 'TxV-', 'TxV+', 'Cmd-', 'Cmd+', 'CmdV-', 'CmdV+', 'Ctl-', 'Ctl+','User']
df_result = df_result[result_orders]
data_column_names = []
Graph_data_list = []
#app = dash.Dash(__name__)
app = dash.Dash(__name__,
meta_tags=[
{
'charset': 'utf-8',
},
{
'name': 'viewport',
'content': 'width=device-width, initial-scale=1, shrink-to-fit=no'
}
],
external_stylesheets=[dbc.themes.BOOTSTRAP])
# Page layout: header bar (logo, title, help/create/view links), hidden
# location + polling interval, the interactive results table, and three
# containers the callbacks below fill with graphs.
app.layout = html.Div([
    html.Div([
        html.A([html.Img(id='logo', src='/assets/Advantech-logo-200P-color.jpg', width='150', style={'display': 'inline-block', 'margin': '10px 15px'})], href='http://172.17.9.206/'),
        html.A([html.Label('CIoT DQA RMT Database', style={'display': 'inline-block', 'fontFamily': 'Arial', 'fontSize': '20pt', 'textAlign': 'center', 'verticalAlign': 'middle', 'margin': '0px'}), ], href='http://172.17.9.218:8000'),
        # right-floated quick links: user guide PDF, data creation, data viewer
        html.A([html.Img(src='/assets/faq.png', title='User Guide',
                         style={'height': '40px',
                                'width': '40px',
                                'display': 'inline-block',
                                'float': 'right',
                                #'position': 'relative',
                                'marginTop': '10px',
                                'marginRight': '20px'})
                ], href='/assets/NCG_DQA_Database_User_Guide_V01_03-Jan-2020_release.pdf', target='_blank'),
        html.A([html.Img(src='/assets/server.png', title='Create Data',
                         style={'height': '40px',
                                'width': '40px',
                                'display': 'inline-block',
                                'float': 'right',
                                #'position': 'relative',
                                'marginTop': '10px',
                                'marginRight': '10px'})
                ], href='http://172.17.9.218:8050/apps/Login', id='login'),
        html.A([html.Img(src='/assets/output.png', title='View Data',
                         style={'height': '40px',
                                'width': '40px',
                                'display': 'inline-block',
                                'float': 'right',
                                #'position': 'relative',
                                'marginTop': '10px',
                                'marginRight': '10px'})
                ], href='http://172.17.9.218:8050/apps/WebOutput'),
    ], style={'marginBottom': '10px'}),
    dcc.Location(id='url', refresh=True),
    dcc.Interval(id='RMT-interval', interval=10000, n_intervals=0),
    # Sortable/filterable table of RMT results; its derived (filtered) rows
    # drive the two graph callbacks below.
    dash_table.DataTable(
        id='datatable-interactivity',
        columns=[
            {"name": i, "id": i, "deletable": False, "selectable": False} for i in df_result.columns
        ],
        data=df.to_dict('records'),
        editable=False,
        filter_action="native",
        sort_action="native",
        sort_mode="multi",
        column_selectable="single",
        #row_selectable="multi",
        row_deletable=True,
        selected_columns=[],
        selected_rows=[],
        page_action="native",
        page_current=0,
        page_size=10,
        style_table={
            'height': '500px',
            'overflowY': 'scroll',
            'border': 'thin lightgrey solid'
        },
        style_cell_conditional=[{'textAlign': 'center'}]
    ),
    html.Div(id='datatable-interactivity-container'),
    html.Div(id='rmt-container'),
    html.Div(id='margin-container'),
])
@app.callback(Output('rmt-container', "children"),
              [Input('datatable-interactivity', "derived_virtual_data")])
def update_graphs(rows):
    """Render the 'worst case margin result vs guideline' bar chart.

    For every (filtered) table row, one bar trace of the measured results
    (columns 6:20) and one trace of the matching guideline values
    (columns 21:35) are added to a single dcc.Graph.
    """
    frame = pd.DataFrame(rows, columns=orders)
    measured = frame.iloc[:, 6:20]
    guideline = frame.iloc[:, 21:35]
    axis_labels = list(measured.columns)
    traces = []
    for row_idx in range(len(measured)):
        result_values = measured.loc[row_idx].values.tolist()
        series_name = frame.iloc[row_idx].iat[1] + '_' + frame.iloc[row_idx].iat[2]
        traces.extend([
            {'x': axis_labels, 'y': result_values, 'type': 'bar',
             'name': series_name, 'text': result_values, 'textposition': 'outside'},
            {'x': axis_labels, 'y': guideline.loc[row_idx].values.tolist(), 'type': 'markers',
             'name': frame.iloc[row_idx].iat[1] + '_Guideline', 'markers': '202'},
        ])
    return [
        dcc.Graph(
            id='rmt_column',
            figure={
                "data": traces,
                "layout": {
                    "title": {"text": 'Worst Case Margin Result vs Guideline'}
                },
            },
        )
    ]
@app.callback(Output('margin-container', "children"),
              [Input('datatable-interactivity', "derived_virtual_data")])
def update_graphs(rows):
    """Render the 'Margin' chart: measured result minus guideline, per column.

    NOTE(review): this shadows the module name of the previous callback;
    dash registers both at decoration time, so both still fire.
    """
    frame = pd.DataFrame(rows, columns=orders)
    measured = frame.iloc[:, 6:20]
    guideline = frame.iloc[:, 21:35]
    axis_labels = list(measured.columns)
    traces = []
    for row_idx in range(len(measured)):
        result_row = measured.loc[row_idx].values.tolist()
        guideline_row = guideline.loc[row_idx].values.tolist()
        # element-wise headroom of each measured column over its guideline
        margin = [res - ref for res, ref in zip(result_row, guideline_row)]
        series_name = frame.iloc[row_idx].iat[1] + '_' + frame.iloc[row_idx].iat[2]
        traces.append({'x': axis_labels, 'y': margin, 'type': 'bar',
                       'name': series_name, 'text': margin, 'textposition': 'outside'})
        # empty trace; appears to exist only so the legend lists a
        # guideline entry per board — confirm intent
        traces.append({'x': None, 'y': None, 'type': 'linear',
                       'name': frame.iloc[row_idx].iat[1] + '_Guideline'})
    return [
        dcc.Graph(
            id='rmt_column',
            figure={
                "data": traces,
                "layout": {
                    "title": {"text": 'Margin'}
                },
            },
        )
    ]
@app.callback(Output('datatable-interactivity', 'data'),
              [Input('url', 'pathname')])
def update_data(intervals):
    """Reload the RMT result table from disk and push it into the DataTable.

    Rebinds the module-level `df` so the graph callbacks see fresh data.
    The original also recomputed a *local* `df_result` that was never used
    (the module-level one feeds the column definitions); that dead code
    has been removed.
    """
    global df
    df = pd.read_csv('data/RMT.csv')
    data = df.to_dict('records')
    return data
if __name__ == '__main__':
    # Listen on all interfaces, port 8000, with debug disabled.
    app.run_server(debug=False, host='0.0.0.0', port=8000)
"kobe760903@gmail.com"
] | kobe760903@gmail.com |
5e85018f07593d4b7ddcf88e31a13a87a254e21f | 2eeacfa3de196f4186e7a4371201c16a4c4e9437 | /nodes/serializers/edge_serializer.py | bb77f0fd22df13824765b65c226bf7ad24e68929 | [] | no_license | yasser-aboelgheit/bfs | 59564b78560f1b37bad77f0425e7fc31888c87f3 | b72aab5be2f2d3e2c62bab4b8ed5cdb050b3bd1d | refs/heads/main | 2023-04-09T03:15:43.552522 | 2021-04-21T13:23:50 | 2021-04-21T13:23:50 | 358,872,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | from rest_framework import serializers
from nodes.models import Node, Edge
class EdgeSerializer(serializers.Serializer):
    """Serializer for an edge between two nodes, given by node names.

    Each endpoint name is resolved to a Node instance, creating the node
    on the fly when no node with that name exists yet. The validate_*
    methods therefore return Node objects, not strings.
    """
    node_from = serializers.CharField(required=True)
    node_to = serializers.CharField(required=True)

    @staticmethod
    def _resolve_node(name):
        # Most recent node with this name; created if absent. last() keeps
        # the original tolerance of duplicate names (get_or_create would not).
        node = Node.objects.filter(name=name).last()
        if not node:
            node = Node.objects.create(name=name)
        return node

    def validate_node_from(self, value):
        return self._resolve_node(value)

    def validate_node_to(self, value):
        return self._resolve_node(value)
| [
"yasseraboelgheit@weaccept.co"
] | yasseraboelgheit@weaccept.co |
9d54ff837c1a8f276a97e819ccf6c7a49e66713b | 24144f83276705fe2f4df295ee50199c2035ca7b | /active/theses-mainz.py | 0acd9b145b345b370518620e935b1280fb1eaed5 | [] | no_license | AcidBurn429/ejlmod | a2e4eb6bb28bcb6bbccc3d83e2e24f5aed23d4eb | dec50edbb14380686072d7311589a2363ef5cd00 | refs/heads/master | 2023-08-14T21:19:10.890194 | 2021-09-28T13:39:06 | 2021-09-28T13:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py | # -*- coding: utf-8 -*-
#harvest theses from Mainz U.
#FS: 2020-01-27
import getopt
import sys
import os
import urllib2
import urlparse
from bs4 import BeautifulSoup
import re
import ejlmod2
import codecs
import datetime
import time
import json
# Output locations and per-run constants for the harvest.
xmldir = '/afs/desy.de/user/l/library/inspire/ejl'
retfiles_path = "/afs/desy.de/user/l/library/proc/retinspire/retfiles"
now = datetime.datetime.now()
# Date stamp of this run, e.g. '2020-01-27'; becomes part of the file name.
stampoftoday = '%4d-%02d-%02d' % (now.year, now.month, now.day)
publisher = 'Mainz U.'
jnlfilename = 'THESES-MAINZ-%s' % (stampoftoday)
# Browser-like User-Agent header (defined but not used further below).
hdr = {'User-Agent' : 'Magic Browser'}
recs = []
# rpp: records per result page; pages: number of result pages to walk.
rpp = 40
pages = 3
# Walk the repository search result pages and collect one skeleton record
# (date, title, handle, detail link) per thesis row.
for page in range(pages):
    tocurl = 'https://openscience.ub.uni-mainz.de/simple-search?query=&filter_field_1=organisationalUnit&filter_type_1=equals&filter_value_1=FB+08+Physik%2C+Mathematik+u.+Informatik&filter_field_2=publicationType&filter_type_2=equals&filter_value_2=Dissertation&sort_by=dc.date.issued_dt&order=desc&rpp=' + str(rpp) + '&etal=0&start=' + str(page*rpp)
    print '==={ %i/%i }==={ %s }===' % (page+1, pages, tocurl)
    tocpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(tocurl))
    for tr in tocpage.body.find_all('tr'):
        rec = {'tc' : 'T', 'keyw' : [], 'jnl' : 'BOOK', 'note' : []}
        # column 't1' carries the issue date (used as both year and date)
        for td in tr.find_all('td', attrs = {'headers' : 't1'}):
            rec['year'] = td.text.strip()
            rec['date'] = td.text.strip()
        # column 't3' carries the linked title; derive handle + record URL
        for td in tr.find_all('td', attrs = {'headers' : 't3'}):
            for a in td.find_all('a'):
                rec['tit'] = a.text.strip()
                rec['hdl'] = re.sub('.*handle\/', '', a['href'])
                rec['artlink'] = 'https://openscience.ub.uni-mainz.de' + a['href']
        recs.append(rec)
    # be polite to the server between result pages
    time.sleep(10)
# Visit each collected record page and extract the full metadata.
i = 0
for rec in recs:
    i += 1
    print '---{ %i/%i }---{ %s }------' % (i, len(recs), rec['artlink'])
    try:
        artpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(rec['artlink']))
        time.sleep(4)
    except:
        # one retry after a long pause, then give up on this record
        try:
            print "retry %s in 180 seconds" % (rec['artlink'])
            time.sleep(180)
            artpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(rec['artlink']))
        except:
            print "no access to %s" % (rec['artlink'])
            continue
    for tr in artpage.body.find_all('tr'):
        # label cell of the metadata row (e.g. 'Authors:'); kept in tdt for
        # the sibling value cell handled just below.
        # NOTE(review): assumes every processed row has seen a label cell
        # first — tdt would be undefined otherwise; confirm page structure.
        for td in tr.find_all('td', attrs = {'class' : 'metadataFieldLabel'}):
            tdt = td.text.strip()
        for td in tr.find_all('td', attrs = {'class' : 'metadataFieldValue'}):
            #authors
            if tdt == 'Authors:':
                rec['autaff'] = [[ td.text.strip(), publisher ]]
            #language
            elif tdt == 'Language :':
                if td.text.strip() == 'german':
                    rec['language'] = 'German'
            #abstract
            elif tdt == 'Abstract:':
                rec['abs'] = td.text.strip()
            #license
            elif re.search('Information', tdt):
                for a in td.find_all('a'):
                    if re.search('creativecommons.org', a['href']):
                        rec['license'] = {'url' : a['href']}
            #pages
            elif tdt == 'Extent:':
                if re.search('\d\d', td.text):
                    rec['pages'] = re.sub('.*?(\d\d+).*', r'\1', td.text.strip())
            #DOI
            elif tdt == 'DOI:':
                for a in td.find_all('a'):
                    rec['doi'] = re.sub('.*org\/', '', a['href'])
        #FFT: fulltext link — exposed only when an open license was found,
        #otherwise stored as hidden
        for td in tr.find_all('td', attrs = {'class' : 'standard'}):
            for a in td.find_all('a'):
                if re.search('pdf$', a['href']):
                    if 'license' in rec.keys():
                        rec['FFT'] = 'https://openscience.ub.uni-mainz.de' + a['href']
                    else:
                        rec['hidden'] = 'https://openscience.ub.uni-mainz.de' + a['href']
    print ' ', rec.keys()
xmlf = os.path.join(xmldir, jnlfilename+'.xml')
xmlfile = codecs.EncodedFile(codecs.open(xmlf, mode='wb'), 'utf8')
ejlmod2.writenewXML(recs, xmlfile, publisher, jnlfilename)
xmlfile.close()
#retrival
retfiles_text = open(retfiles_path, "r").read()
line = jnlfilename+'.xml'+ "\n"
if not line in retfiles_text:
retfiles = open(retfiles_path, "a")
retfiles.write(line)
retfiles.close()
| [
"florian.schwennsen@desy.de"
] | florian.schwennsen@desy.de |
d49bcc85fb670923856b90cd4b3431c31b19fed9 | 8671856181ef218f147f23f367fd0b1dc7592e1a | /realtor/migrations/0020_auto_20190918_1213.py | 69d2a3d67932c1247662582520c4265d41e2eef5 | [] | no_license | Alishrf/Shop_Website | e4fef9618aec2db6f4a655ff643aa68cf42dbb68 | 971d4a2ff8b7a68a0157681ff26404fe403502e6 | refs/heads/master | 2020-08-11T06:03:47.642870 | 2019-10-14T14:29:30 | 2019-10-14T14:29:30 | 214,504,737 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # Generated by Django 2.2.4 on 2019-09-18 07:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Applies on top of the previous realtor migration.
    dependencies = [
        ('realtor', '0019_auto_20190918_1203'),
    ]

    operations = [
        migrations.AlterField(
            model_name='realtor',
            name='hire_date',
            # NOTE(review): the default is a *fixed* datetime captured when
            # makemigrations ran, not a callable like timezone.now — every
            # new row gets this exact timestamp; confirm this is intended.
            field=models.DateTimeField(default=datetime.datetime(2019, 9, 18, 12, 13, 29, 200152)),
        ),
    ]
| [
"a.sharifzadeh11@gmail.com"
] | a.sharifzadeh11@gmail.com |
4a622a2368284086785f3560694bbf3c5c1fa1b0 | b28c15914b3cd8df8442c5f13ac300957e442816 | /scriptprogramming/settings.py | 970be0a229ed137a0f38fb8eb0280cc3ef4072c4 | [] | no_license | rakkeshasa/scriptprogramming | ff72220904ba1732b2740d22253db2b5c0b05fe4 | f26cbaaec8eb07ae4ff8683a5c31c70bff5ad836 | refs/heads/main | 2023-05-06T04:57:09.147500 | 2021-05-21T06:37:39 | 2021-05-21T06:37:39 | 369,426,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,272 | py | """
Django settings for scriptprogramming project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 'django-insecure-r^8*z7@=y7n85-@yv8$oc_nm)*dgs-ym=he(ep38sl163ui*$r'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine while DEBUG=True; must list real hosts in production.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'scriptprogramming.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'scriptprogramming.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Default development database: SQLite file next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"rakkeshasa2@gmail.com"
] | rakkeshasa2@gmail.com |
df776be8e3c5a10d0ea0d5ac96bb71188cc0c541 | be5f4d79910e4a93201664270916dcea51d3b9ee | /fastdownward/experiments/issue627/merge-v3-v5.py | 100f1f2a1136f016756e9f799a53b284019dc988 | [
"MIT",
"GPL-1.0-or-later",
"GPL-3.0-or-later"
] | permissive | mehrdadzakershahrak/Online-Explanation-Generation | 17c3ab727c2a4a60381402ff44e95c0d5fd0e283 | e41ad9b5a390abdaf271562a56105c191e33b74d | refs/heads/master | 2022-12-09T15:49:45.709080 | 2019-12-04T10:23:23 | 2019-12-04T10:23:23 | 184,834,004 | 0 | 0 | MIT | 2022-12-08T17:42:50 | 2019-05-04T00:04:59 | Python | UTF-8 | Python | false | false | 1,574 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup_no_benchmarks import IssueConfig, IssueExperiment, get_script_dir
from relativescatter import RelativeScatterPlotReport
import os
def main(revisions=None):
    """Fetch issue627 v3/v5 evaluation data and emit relative scatter plots.

    For every configuration nick, two plots are produced (memory and
    total_time), comparing issue627-v3 against issue627-v5 and grouping
    points by domain. Baseline ('base') runs are filtered out.
    """
    exp = IssueExperiment(benchmarks_dir=".", suite=[])

    # Pull previously evaluated data for both revisions.
    for version in ("v3", "v5"):
        exp.add_fetcher(
            os.path.join(get_script_dir(), "data", "issue627-%s-eval" % version),
            filter=lambda run: "base" not in run["config"],
        )

    config_nicks = ['astar-blind', 'astar-lmcut', 'astar-ipdb',
                    'astar-cegar-original', 'astar-cegar-lm-goals']
    for config_nick in config_nicks:
        # one memory plot and one total_time plot per configuration
        for attribute in ("memory", "total_time"):
            exp.add_report(
                RelativeScatterPlotReport(
                    attributes=[attribute],
                    filter_config=["issue627-v3-%s" % config_nick,
                                   "issue627-v5-%s" % config_nick],
                    get_category=lambda run1, run2: run1.get("domain"),
                ),
                outfile='issue627_v3_v5_%s_%s.png' % (attribute, config_nick)
            )

    exp()

main(revisions=['issue627-v3', 'issue627-v5'])
| [
"zaker.mehrdad@gmail.com"
] | zaker.mehrdad@gmail.com |
bb73625db798cf09320a109e072bfb4f77b93ec6 | 72672d838747507ee297dafa819f0b1ed99c923b | /qt/gui/modales.py | e38d6b59148b12999e779a9dbd92f63aab0b759a | [] | no_license | yoann01/bullseye | df98971bd7fddf15f8cbcbad8359efcd70e11605 | 97af9831c3021983794a5fad7abbb93c49dfd88d | refs/heads/master | 2020-02-26T14:01:12.244018 | 2012-05-23T22:43:25 | 2012-05-23T22:43:25 | 38,533,771 | 1 | 0 | null | 2015-07-04T12:51:43 | 2015-07-04T12:51:43 | null | UTF-8 | Python | false | false | 21,620 | py | # -*- coding: utf-8 -*-
import os
from common import settings, xdg
from PySide import QtGui, QtCore
from qt.util import treemodel
from data import elements
class Criterion(QtGui.QWidget):
    """One editable filter criterion row: a field, an operator and a value.

    The row shows a line edit for textual fields and a spin box for the
    numeric fields ('playcount' and 'rating'), switching automatically
    when the selected field changes.
    """
    FIELDS = (('artist', _('Artist')), ('album', _('Album')), ('rating', _('Rating')), ('playcount', _('Playcount')), ('path', _('Path')))
    NUMERIC_OPERATORS = ((" = ", _("equals")), (" < ", _("is inferior to")), (" > ", _("is superior to")), (" <= ", _("is lower than")), (" >= ", _("is at least")))
    OPERATORS = ((" = ", _('is')), (" != ", _('is not'), ), (" LIKE ", _("like")), (" NOT LIKE ", _("not like")))

    # Shared two-column (key, translated label) models, built once per class.
    FIELDMODEL = QtGui.QStandardItemModel()
    for key, label in FIELDS:
        FIELDMODEL.appendRow([QtGui.QStandardItem(key), QtGui.QStandardItem(label)])
    OPERATORMODEL = QtGui.QStandardItemModel()
    for key, label in OPERATORS:
        OPERATORMODEL.appendRow([QtGui.QStandardItem(key), QtGui.QStandardItem(label)])
    NUMERIC_OPERATORSMODEL = QtGui.QStandardItemModel()
    for key, label in NUMERIC_OPERATORS:
        NUMERIC_OPERATORSMODEL.appendRow([QtGui.QStandardItem(key), QtGui.QStandardItem(label)])

    @staticmethod
    def _index_of(pairs, wanted):
        """Return the position of *wanted* among the first items of *pairs*, or None."""
        for pos, pair in enumerate(pairs):
            if pair[0] == wanted:
                return pos
        return None

    def __init__(self, config):
        '''
        config is a tuple containing a field, an operator and a value. eg ('artist', ' = ', 'AC/DC')
        Any element may be None for a brand new, unconfigured criterion.
        '''
        QtGui.QWidget.__init__(self)
        layout = QtGui.QHBoxLayout()
        self.fieldCB = QtGui.QComboBox()
        self.fieldCB.setModel(self.FIELDMODEL)
        self.fieldCB.setModelColumn(1)
        pos = self._index_of(self.FIELDS, config[0])
        if pos is not None:
            self.fieldCB.setCurrentIndex(pos)
        self.fieldCB.currentIndexChanged[int].connect(self.setUpAccordingToField)

        self.operatorCB = QtGui.QComboBox()
        self.entryWidget = QtGui.QLineEdit()
        self.spinBox = QtGui.QSpinBox()
        self.spinBox.setMinimum(0)
        deleteButton = QtGui.QPushButton(QtGui.QIcon.fromTheme('list-remove'), None)
        deleteButton.clicked.connect(self.deleteLater)

        layout.addWidget(self.fieldCB, 0)
        layout.addWidget(self.operatorCB, 0)
        layout.addWidget(self.entryWidget)
        layout.addWidget(self.spinBox)
        layout.addWidget(deleteButton, 0)
        self.setLayout(layout)
        self.setUpAccordingToField()

        # Pre-fill the value widget; guard against None (fresh criterion).
        if config[0] in ('playcount', 'rating'):
            operators = self.NUMERIC_OPERATORS
            if config[2] is not None:
                self.spinBox.setValue(config[2])
        else:
            operators = self.OPERATORS
            if config[2] is not None:
                self.entryWidget.setText(config[2])
        pos = self._index_of(operators, config[1])
        if pos is not None:
            self.operatorCB.setCurrentIndex(pos)

    def getConfig(self):
        """Return the currently configured (field_key, operator_key, value) tuple."""
        field = self.FIELDS[self.fieldCB.currentIndex()][0]
        operatorModel = self.operatorCB.model()
        # column 0 of the operator model holds the raw SQL operator key
        operatorKey = operatorModel.data(operatorModel.index(self.operatorCB.currentIndex(), 0))
        if field in ('playcount', 'rating'):
            value = self.spinBox.value()
        else:
            value = self.entryWidget.text()
        return field, operatorKey, value

    def setUpAccordingToField(self, pos=None):
        """Show the value widget matching the field type and swap operator models."""
        if pos is None:
            pos = self.fieldCB.currentIndex()
        if self.FIELDS[pos][0] in ('playcount', 'rating'):
            self.entryWidget.hide()
            self.spinBox.show()
            # NOTE(review): the model column is only switched to the label
            # column for textual operators, so numeric operators display
            # their raw symbol — confirm this is intended.
            self.operatorCB.setModel(self.NUMERIC_OPERATORSMODEL)
        else:
            self.entryWidget.show()
            self.spinBox.hide()
            self.operatorCB.setModel(self.OPERATORMODEL)
            self.operatorCB.setModelColumn(1)
class CriterionManager(QtGui.QWidget):
    """
    Widget to handle SQL query creation with a GUI.

    Hosts a vertical stack of Criterion rows plus two global options:
    'whatever' (OR instead of AND between criterions) and 'random order'.
    """
    def __init__(self):
        QtGui.QWidget.__init__(self)
        layout = QtGui.QVBoxLayout()
        BB = QtGui.QHBoxLayout()
        self.whatever = QtGui.QCheckBox(_("Whatever criterion matches"))
        BB.addWidget(self.whatever)
        self.randomOrder = QtGui.QCheckBox(_("Random order"))
        BB.addWidget(self.randomOrder)
        addButton = QtGui.QPushButton(QtGui.QIcon.fromTheme('list-add'), None)
        # NOTE(review): QPushButton.clicked passes a 'checked' bool, so
        # addCriterion may receive field=False here — harmless (treated
        # like an unset field), but confirm.
        addButton.clicked.connect(self.addCriterion)
        BB.addWidget(addButton)
        self.criterionBox = QtGui.QVBoxLayout()
        layout.addLayout(self.criterionBox)
        layout.addLayout(BB, 0)
        self.setLayout(layout)

    def addCriterion(self, field=None, operator=None, condition=None):
        '''
        Append a new (graphically configurable) selection criterion row;
        it is read back when the dialog is validated.
        '''
        self.criterionBox.addWidget(Criterion((field, operator, condition)), 0)

    def getConfig(self):
        """
        Return all parameters as a dict with keys 'random' (bool),
        'link' (' OR ' / ' AND ') and 'criterions' (list of tuples).
        """
        config = {}
        config['random'] = self.randomOrder.isChecked()
        if self.whatever.isChecked():
            config['link'] = ' OR '
        else:
            config['link'] = ' AND '
        config['criterions'] = []
        for i in range(self.criterionBox.count()):
            config['criterions'].append(self.criterionBox.itemAt(i).widget().getConfig())
        return config

    def loadCriterions(self, dic):
        """Populate the options and criterion rows from a getConfig()-style dict."""
        if dic['random']:
            self.randomOrder.setCheckState(QtCore.Qt.Checked)
        if dic['link'] == " OR ":
            self.whatever.setCheckState(QtCore.Qt.Checked)
        for criterion, operator, condition in dic['criterions']:
            self.addCriterion(criterion, operator, condition)

    def reset(self):
        """Uncheck both options and remove every criterion row."""
        self.randomOrder.setCheckState(QtCore.Qt.Unchecked)
        self.whatever.setCheckState(QtCore.Qt.Unchecked)
        # Detach rows from the layout immediately: deleteLater() alone
        # leaves them in the layout until the event loop runs, so a
        # getConfig() call right after reset() would still see old rows.
        while self.criterionBox.count():
            item = self.criterionBox.takeAt(0)
            widget = item.widget()
            if widget is not None:
                widget.deleteLater()
class DynamicPlaylistCreator(QtGui.QDialog):
    '''
    Dialog creating/editing a file of parameters from which an SQL query
    selecting matching tracks is later built (a "dynamic playlist").
    '''
    def __init__(self, name=None):
        """With name=None a new playlist is created; otherwise *name* is edited."""
        QtGui.QDialog.__init__(self)
        layout = QtGui.QVBoxLayout()
        nameLayout = QtGui.QHBoxLayout()
        self.nameEntry = QtGui.QLineEdit()
        nameLayout.addWidget(QtGui.QLabel(_('Name') + ' : '), 0)
        nameLayout.addWidget(self.nameEntry, 1)
        self.previousName = name
        if name is None:
            self.setWindowTitle(_("Add a dynamic playlist"))
        else:
            self.setWindowTitle(_("Edit a dynamic playlist"))
        self.criterionManager = CriterionManager()
        layout.addLayout(nameLayout, 0)
        layout.addWidget(self.criterionManager, 1)
        buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)
        layout.addWidget(buttonBox)
        if name is None:
            # fresh playlist: start with one empty criterion row
            self.criterionManager.addCriterion()
        else:
            self.nameEntry.setText(name)
            self.loadCriterions(name)
        self.setLayout(layout)

    def _playlistPath(self, name):
        """Absolute path of the dynamic-playlist file called *name*."""
        return os.path.join(xdg.get_data_home(), 'playlists', 'dynamic', name)

    def accept(self):
        """Persist the criterion configuration to disk, then close the dialog."""
        name = self.nameEntry.text()
        # 'with' guarantees the handle is closed (the original leaked it)
        with open(self._playlistPath(name), 'w') as f:
            f.write(str(self.criterionManager.getConfig()))
        QtGui.QDialog.accept(self)

    def exec_(self):
        """Run modally; return the playlist name if accepted, else None."""
        # QDialog.exec_() is truthy only when accepted; the original also
        # and-ed the constant QDialogButtonBox.Ok flag, which was a no-op.
        if QtGui.QDialog.exec_(self):
            return self.nameEntry.text()
        else:
            return None

    def loadCriterions(self, name):
        """Populate the editor from the saved playlist file *name*."""
        import ast
        with open(self._playlistPath(name), 'r') as f:
            data = f.read()
        # literal_eval only parses Python literals; eval() would execute
        # arbitrary code found in the playlist file.
        self.criterionManager.loadCriterions(ast.literal_eval(data))
class FilterManager(QtGui.QWidget):
    """
    Widget that handles filter management with a CriterionManager.

    A combo box selects a named filter from *config*; the criterion editor
    below shows/edits its criterions, and an 'enabled' checkbox toggles it.
    Edits to the displayed filter are written back into *config* when
    switching filters or when getConfig() is called.
    """
    def __init__(self, config):
        QtGui.QWidget.__init__(self)
        self.config = config
        model = QtGui.QStandardItemModel()
        for key in self.config.iterkeys():
            model.appendRow(QtGui.QStandardItem(key))
        self.filterCB = QtGui.QComboBox()
        self.filterCB.setModel(model)
        self.filterCB.currentIndexChanged[int].connect(self.loadFilter)
        addButton = QtGui.QPushButton(QtGui.QIcon.fromTheme('list-add'), None)
        addButton.clicked.connect(self.addFilter)
        deleteButton = QtGui.QPushButton(QtGui.QIcon.fromTheme('list-remove'), None)
        deleteButton.clicked.connect(self.deleteFilter)
        self.enabled = QtGui.QCheckBox(_('Enabled'))
        actionBox = QtGui.QHBoxLayout()
        actionBox.addWidget(self.enabled, 0)
        actionBox.addWidget(deleteButton, 0)
        actionBox.addWidget(self.filterCB, 1)
        actionBox.addWidget(addButton, 0)
        self.criterionManager = CriterionManager()
        layout = QtGui.QVBoxLayout()
        layout.addLayout(actionBox, 0)
        layout.addWidget(self.criterionManager, 1)
        self.setLayout(layout)
        # Always define activeFilter so later accesses cannot raise
        # AttributeError (the original's bare try/except hid that case).
        self.activeFilter = None
        if model.rowCount():
            self.filterCB.setCurrentIndex(0)
            self.activeFilter = self.filterCB.currentText()

    def addFilter(self):
        """Prompt for a name and append a new (empty) filter to the combo box."""
        # NOTE(review): a cancelled dialog still appends an empty-named
        # filter row — confirm whether this should be guarded.
        name = QtGui.QInputDialog.getText(self, _('New filter name'), _('Enter a name') + ' : ')[0]
        model = self.filterCB.model()
        model.appendRow(QtGui.QStandardItem(name))

    def deleteFilter(self):
        """Remove the selected filter from both the combo box and the config."""
        model = self.filterCB.model()
        filterName = self.filterCB.currentText()
        # pop() instead of del: a freshly added filter that was never saved
        # has no entry in self.config yet and must not raise KeyError.
        self.config.pop(filterName, None)
        self.activeFilter = None
        model.removeRows(self.filterCB.currentIndex(), 1)

    def getConfig(self):
        """Save the currently displayed filter into the config and return it."""
        filter = self.filterCB.currentText()
        if filter:  # skip when no filter is selected (empty text)
            self.config[filter] = self.criterionManager.getConfig()
            self.config[filter]['enabled'] = self.enabled.isChecked()
        return self.config

    def loadFilter(self):
        """Display the newly selected filter, saving the previous one first."""
        try:
            if self.activeFilter is not None:
                # persist edits made to the filter we are leaving
                self.config[self.activeFilter] = self.criterionManager.getConfig()
                self.config[self.activeFilter]['enabled'] = self.enabled.isChecked()
        except Exception:
            # Best-effort save: a criterion row already scheduled for
            # deletion can make getConfig() fail; never block switching.
            pass
        filter = self.filterCB.currentText()
        self.activeFilter = filter
        self.criterionManager.reset()
        self.enabled.setCheckState(QtCore.Qt.Unchecked)
        try:
            self.criterionManager.loadCriterions(self.config[filter])
            if self.config[filter]['enabled']:
                self.enabled.setCheckState(QtCore.Qt.Checked)
        except KeyError:
            # brand new filter: nothing saved for it yet
            pass
class SettingsEditor(QtGui.QDialog):
def __init__(self, section='general'):
QtGui.QDialog.__init__(self)
self.setWindowTitle(_('Settings editor'))
mainLayout = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
layout = QtGui.QHBoxLayout()
self.widgetLayout = layout
mainLayout.addLayout(layout)
mainLayout.addWidget(buttonBox)
# --- Sections selector
TreeView = QtGui.QTreeView(self)
TreeView.setMinimumWidth(200)
TreeView.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.sections = treemodel.SimpleTreeModel()
def addSection(key, text, iconPath=None, parent=None):
node = treemodel.SimpleTreeItem(parent, key, iconPath, text)
self.sections.append(parent, node)
return node
addSection('general', _('General'))
addSection('folders', _('Indexed folders'))
musicNode = addSection('music', _('Music'))
addSection('music_scrobbler', _('Audioscrobbler'), None, musicNode)
addSection('music_filters', _('Filters'), None, musicNode)
addSection('videos', _('Videos'))
TreeView.setModel(self.sections)
#TreeView.header().setSectionHidden(1, True)
TreeView.clicked.connect(self.sectionActivated)
layout.addWidget(TreeView, 0)
self.widgets = {}
generalLayout = QtGui.QFormLayout()
#self.CB_gui_framework = QtGui.QComboBox()
#self.CB_gui_framework.addItem('Gtk 2')
#self.CB_gui_framework.addItem('Qt 4')
#generalLayout.addRow(_('GUI framework' + ' : '), self.CB_gui_framework)
self.pictures_enabled = QtGui.QCheckBox(_('Enable pictures manager'))
self.pictures_enabled.setChecked(settings.get_option('pictures/enabled', False))
generalLayout.addRow(self.pictures_enabled)
generalWidget = QtGui.QWidget()
generalWidget.setLayout(generalLayout)
self.widgets['general'] = generalWidget
self.activeWidget = generalWidget
layout.addWidget(generalWidget, 1)
foldersLayout = QtGui.QVBoxLayout()
foldersView = QtGui.QTableView()
foldersView.setMinimumWidth(400)
foldersView.verticalHeader().hide()
self.foldersModel = QtGui.QStandardItemModel(0, 2)
self.foldersModel.setHorizontalHeaderLabels([_("Path"), _("Dig")])
for path, dig in settings.get_option('music/folders', []):
checkBox = QtGui.QStandardItem()
checkBox.setCheckable(True)
if dig:
checkBox.setCheckState(QtCore.Qt.Checked)
self.foldersModel.appendRow([QtGui.QStandardItem(path), checkBox])
foldersView.setModel(self.foldersModel)
foldersView.horizontalHeader().setResizeMode (0, QtGui.QHeaderView.Stretch)
foldersView.horizontalHeader().setResizeMode (1, QtGui.QHeaderView.Fixed)
addFolderButton = QtGui.QPushButton(QtGui.QIcon.fromTheme('list-add'), _('Add'))
def add_folder():
folderPath = QtGui.QFileDialog.getExistingDirectory(self)
checkBox = QtGui.QStandardItem()
checkBox.setCheckable(True)
self.foldersModel.appendRow([QtGui.QStandardItem(folderPath), checkBox])
addFolderButton.clicked.connect(add_folder)
removeFolderButton = QtGui.QPushButton(QtGui.QIcon.fromTheme('list-remove'), _('Remove'))
def remove_folder():
selected = foldersView.selectedIndexes()
selected.reverse()
for index in selected:
self.foldersModel.removeRows(index.row(), 1)
removeFolderButton.clicked.connect(remove_folder)
buttonsLayout = QtGui.QHBoxLayout()
buttonsLayout.addWidget(addFolderButton)
buttonsLayout.addWidget(removeFolderButton)
foldersLayout.addWidget(foldersView)
foldersLayout.addLayout(buttonsLayout)
foldersWidget = QtGui.QWidget()
foldersWidget.setLayout(foldersLayout)
self.widgets['folders'] = foldersWidget
foldersWidget.hide()
layout.addWidget(foldersWidget, 1)
# --- Music section ---
musicLayout = QtGui.QFormLayout()
self.CB_music_playback_lib = QtGui.QComboBox()
libs = {'GStreamer':0, 'MPlayer':1, 'VLC':1, 'Phonon':2}
self.CB_music_playback_lib.addItem('GStreamer')
#self.CB_music_playback_lib.addItem('MPlayer')
self.CB_music_playback_lib.addItem('VLC')
self.CB_music_playback_lib.addItem('Phonon')
self.CB_music_playback_lib.setCurrentIndex(libs[settings.get_option('music/playback_lib', 'Phonon')])
musicLayout.addRow(_('Playback library') + ' : ', self.CB_music_playback_lib)
self.CB_icon_size_panel_music = QtGui.QComboBox()
self.CB_icon_size_panel_music.addItem('16')
self.CB_icon_size_panel_music.addItem('24')
self.CB_icon_size_panel_music.addItem('32')
self.CB_icon_size_panel_music.addItem('48')
self.CB_icon_size_panel_music.addItem('64')
self.CB_icon_size_panel_music.setCurrentIndex(settings.get_option('music/panel_icon_size', 32) / 16)
musicLayout.addRow(_('Panel icon size') + ' : ', self.CB_icon_size_panel_music)
self.usePerformer = QtGui.QCheckBox()
if settings.get_option('music/use_performer', False):
self.usePerformer.setCheckState(QtCore.Qt.Checked)
musicLayout.addRow(_('Show performer instead of artist in library browser') + ' : ', self.usePerformer)
musicWidget = QtGui.QWidget()
musicWidget.setLayout(musicLayout)
self.widgets['music'] = musicWidget
musicWidget.hide()
layout.addWidget(musicWidget, 1)
# --- Audioscrobbler section ---
self.audioscrobbler_login = QtGui.QLineEdit()
self.audioscrobbler_login.setText(settings.get_option('music/audioscrobbler_login', ''))
self.audioscrobbler_password = QtGui.QLineEdit()
self.audioscrobbler_password.setEchoMode(QtGui.QLineEdit.Password)
self.audioscrobbler_password.setText(settings.get_option('music/audioscrobbler_password', ''))
scrobblerLayout = QtGui.QFormLayout()
scrobblerLayout.addRow(_('Login') + ' : ', self.audioscrobbler_login)
scrobblerLayout.addRow(_('Password') + ' : ', self.audioscrobbler_password)
self.addWidgetFor('music_scrobbler', scrobblerLayout)
# --- Music filters ---
mfiltersWidget = FilterManager(settings.get_option('music/filters', {}))
self.widgets['music_filters'] = mfiltersWidget
mfiltersWidget.hide()
layout.addWidget(mfiltersWidget, 1)
# --- Videos section ---
videosLayout = QtGui.QFormLayout()
self.CB_video_playback_lib = QtGui.QComboBox()
self.CB_video_playback_lib.addItem('GStreamer')
#self.CB_video_playback_lib.addItem('MPlayer')
self.CB_video_playback_lib.addItem('VLC')
self.CB_video_playback_lib.addItem('Phonon')
self.CB_video_playback_lib.setCurrentIndex(libs[settings.get_option('videos/playback_lib', 'Phonon')])
videosLayout.addRow(_('Playback library') + ' : ', self.CB_video_playback_lib)
self.addWidgetFor('videos', videosLayout)
self.loadSection(section)
self.setLayout(mainLayout)
def accept(self):
print 'TO COMPLETE'
folders = []
for i in range(self.foldersModel.rowCount()):
folders.append((self.foldersModel.item(i, 0).text(), self.foldersModel.item(i, 1).checkState() == QtCore.Qt.Checked))
settings.set_option('music/folders', folders)
# --- Music settings --- :
settings.set_option('music/playback_lib', self.CB_music_playback_lib.currentText())
settings.set_option('music/panel_icon_size', int(self.CB_icon_size_panel_music.currentText()))
settings.set_option('music/use_performer', self.usePerformer.isChecked())
settings.set_option('music/filters', self.widgets['music_filters'].getConfig())
#Audioscrobbler settings :
settings.set_option('music/audioscrobbler_login', self.audioscrobbler_login.text())
settings.set_option('music/audioscrobbler_password', self.audioscrobbler_password.text())
#Videos settings
settings.set_option('videos/playback_lib', self.CB_video_playback_lib.currentText())
#for UCmodule in ('videos',):
#settings.set_option(UCmodule + '/indexed_extensions', self.widgets[UCmodule].extensions.text())
#settings.set_option(UCmodule + '/preload', self.widgets[UCmodule].preload.isChecked())
#settings.set_option(UCmodule + '/panel_icon_size', int(self.widgets[UCmodule].CB_icon_size_panel.currentText()))
QtGui.QDialog.accept(self)
def addWidgetFor(self, section, layout):
widget = QtGui.QWidget()
widget.setLayout(layout)
self.widgets[section] = widget
widget.hide()
self.widgetLayout.addWidget(widget, 1)
def loadSection(self, section):
self.activeWidget.hide()
self.activeWidget = self.widgets[section]
self.activeWidget.show()
def sectionActivated(self, index):
section = index.internalPointer().key
self.loadSection(section)
class TagsEditor(QtGui.QDialog):
'''
Éditeur de tags de fichiers musicaux
TODO indicator to show what we're editing
'''
def __init__(self, data):
QtGui.QDialog.__init__(self)
self.setWindowTitle(_('Tags editor'))
mainLayout = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
layout = QtGui.QFormLayout()
self.formLayout = layout
mainLayout.addLayout(layout)
mainLayout.addWidget(buttonBox)
self.setLayout(mainLayout)
single = type(data).__name__ == 'int';
if single:
layout.addRow(QtGui.QLabel(_('Single file mod')))
else:
layout.addRow(QtGui.QLabel(_('Container mod')))
self.tagsEntries = {}
def add_line_for(tag, text=''):
self.tagsEntries[tag] = QtGui.QLineEdit()
self.tagsEntries[tag].setText(str(text))
layout.addRow(_(tag) + " : ", self.tagsEntries[tag])
if type(data).__name__=='dict':
self.incomingData = data
self.cm_manager = CriterionManager()
mainLayout.insertWidget(1, self.cm_manager)
else:
piste_ID = data
self.loadTrackData(piste_ID)
if(self.incomingData != None):
for key in self.incomingData.iterkeys():
add_line_for(key, self.incomingData[key])
def accept(self):
#FIXME optimize process by saving file only once (only where all tags are processed)
try:
matchingTracks = (self.track,)
except AttributeError:
matchingTracks = elements.bdd.get_tracks(self.incomingData, self.cm_manager.get_config())
for track in matchingTracks:
for key in self.incomingData.iterkeys():
if(self.tagsEntries[key].text() != self.incomingData[key]):
track.set_tag(key, self.tagsEntries[key].text())
QtGui.QDialog.accept(self)
def loadTrackData(self, piste_ID):
self.track = elements.Track(int(piste_ID))
fichier = self.track.path
self.incomingData = self.track.get_tags()
self.formLayout.addRow(QtGui.QLabel(_('Path') + " : " + fichier))
class UCStructureHelper(QtGui.QDialog):
def __init__(self, module):
QtGui.QDialog.__init__(self)
self.module = module
self.setWindowTitle(_("Preparing for files moving"))
layout = QtGui.QFormLayout()
self.folderButton = QtGui.QPushButton(settings.get_option(module + '/structure_folder', '~/' + module + '/Bullseye'))
layout.addRow(_('Structure root'), self.folderButton)
self.folderButton.clicked.connect(self.changeDirectory)
self.modeCB = QtGui.QComboBox()
layout.addRow(_('Main source'), self.modeCB)
self.showAntagonistc = QtGui.QCheckBox()
layout.addRow(_('Show antagonistic'), self.showAntagonistc)
mainLayout = QtGui.QVBoxLayout()
mainLayout.addLayout(layout, 1)
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
layout.addWidget(buttonBox)
self.setLayout(mainLayout)
def exec_(self):
if QtGui.QDialog.exec_(self) and QtGui.QDialogButtonBox.Ok:
folderPath = self.folderButton.text()
settings.set_option(self.module + '/structure_folder', folderPath)
return folderPath
else:
return None
def changeDirectory(self):
folderPath = QtGui.QFileDialog.getExistingDirectory(self)
self.folderButton.setText(folderPath) | [
"Maitre.Piccolo@gmail.com"
] | Maitre.Piccolo@gmail.com |
d9b2748d71c6625f047387b40d158afc6042ea83 | 8a780de2929d5d92bd9682afa0fcfdf867d9e11e | /py/hello.py | 188fa9e4203a8c60dea98d34b0e66a68ad3ec7bc | [] | no_license | jennylia/pythonData | 64a71a9ba0917913905113c68418bc71faef4ff1 | 6fa04add6c740c0499166e8d89bdee27343613ae | refs/heads/master | 2021-01-01T05:51:17.025900 | 2015-05-09T18:08:12 | 2015-05-09T18:08:12 | 35,136,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | print "hello"
import sys
import os
for i in range(3):
print i
# Let us try using the command argv like in c
#arg1 = sys.argv[1]
#arg2 = sys.argv[2]
#
#print arg1
#print arg2
x = raw_input("enter something")
print x
| [
"jenny.lian@ericsson.com"
] | jenny.lian@ericsson.com |
3ca976c315126de5355e9a6a4a798b0fc42eacf5 | 7e8b561ad29b18457ba1cba17bcfd98db84a5f96 | /limitRecursion.py | d3e06bec61f0e20e78ab222bf5ce50d6ae43baa5 | [] | no_license | ErikssonHerlo/JPR_Editor | 129c126a5bdac0400ef554f4fab66da60fd62e47 | a94339d75e7fed5fa31acbb3ec6d83b144799fe3 | refs/heads/main | 2023-05-31T18:00:00.434657 | 2021-07-05T05:56:56 | 2021-07-05T05:56:56 | 378,616,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | import sys
print(sys.getrecursionlimit())
sys.setrecursionlimit(1500)
print(sys.getrecursionlimit())
"""
Creditos:
Jose Francisco Puac - Repositorio del Curso
Se utilizo como una base para el proyecto
Eriksson Hernández - Desarollador
""" | [
"erikssonhernandez201830459@cunoc.edu.gt"
] | erikssonhernandez201830459@cunoc.edu.gt |
ccbdd6b5052306c828cb0f31d51e2593ac61e4d7 | ac98625836b837b6cf6c792b0c2af7aa7557dfee | /run_test.py | 4758c1eb3491cc98904687354ceaebb339cfe1d3 | [] | no_license | Vladi-shar/DataStracturesTests192 | 9ff22113ec517e3dc1a5bd4f5173c502c78b6132 | f5a8631bbb5ecd1ee321dee9d7a8c40434c1ba3c | refs/heads/master | 2020-05-19T09:04:26.083560 | 2019-05-06T05:55:43 | 2019-05-06T05:55:43 | 184,938,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,270 | py | #!/usr/bin/env python
import os
import shutil
import sys
def clear_dir(test_project):
for the_file in os.listdir(test_project):
file_path = os.path.join(test_project, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(e)
def replace_size_with_getSize():
os.rename("FloorsArrayList.java", "FloorsArrayListOriginal.java")
with open("FloorsArrayListOriginal.java") as f:
new_text = f.read().replace('size()', 'getSize()')
with open("FloorsArrayList.java" , "w") as f:
f.write(new_text)
def transfer_student_files(test_project):
curr_path = os.getcwd()
replace_size_with_getSize()
print("Copying student files into test project...")
for file in os.listdir(curr_path):
if file == "DynamicSet.java" or file == "FloorsArrayLink.java" or file == "FloorsArrayList.java":
shutil.copyfile(curr_path + "/" + file, test_project + "/" + file)
print("Finished copying files.")
def run_mvn_tests(tester, test_num, single_test):
curr_path = os.getcwd()
# print(curr_path)
os.chdir(tester)
# print("Running tests...")
if "Correctness" in tester:
if single_test:
test_name = get_test_name(test_num)
print("Running single correctness test: " + test_name + "...")
os.system("./mvnw -Dtest=" + test_name + " test > " + curr_path + "/correctness_" + test_name +"_testlog.txt")
else:
print("Running Correctness tests...")
os.system("./mvnw test > " + curr_path + "/correctness_testlog.txt")
elif "Runtime" in tester:
print("Running Runtime test...")
os.system("./mvnw test > " + curr_path + "/runtime_testlog.txt")
os.chdir(curr_path)
print("Finished running tests.")
print()
def print_aux_result():
passed = False
filepath = os.getcwd() + '/correctness_testlog.txt'
with open(filepath) as fp:
line = fp.readline()
while line:
if '[INFO] Results:' in line:
passed = True
line = fp.readline()
if "Tests run: 357" in line and passed:
passed = False
line = fp.readline()
if passed:
if "[ERROR]" in line:
print(line)
line = fp.readline()
# todo elif runtime message
def print_aux_result_runtime():
passed = False
filepath = os.getcwd() + '/runtime_testlog.txt'
with open(filepath) as fp:
line = fp.readline()
while line:
if "==RunTimeTest=====================================" in line:
passed = True
line = fp.readline()
if "==Done============================================" in line:
passed = False
line = fp.readline()
if passed:
print(line)
line = fp.readline()
def get_test_name(test_num):
print("test num: " + test_num)
switcher = {
'0': "LinkConstructorTest",
'1': "LinkSetterGetterTest",
'2': "ListConstructorTest",
'3': "ListLookupTest",
'4': "ListInsertTest",
'5': "ListRemoveTest",
'6': "ListSortedTest",
'7': "ListMinimumMaximumTest",
'8': "ListSuccessorPredecessorTest",
'9': "ListArraySizesInvariantTest",
}
ret = switcher.get(test_num)
return ret
def main():
tester = sys.argv[1]
test_project = tester + "/src/main/java"
clear_dir(test_project)
transfer_student_files(test_project)
if len(sys.argv) > 2:
if sys.argv[2] == "ordered":
# print ("Running tests in order")
for x in range(0, 10):
run_mvn_tests(tester, str(x), True)
else:
# print("single test")
run_mvn_tests(tester, sys.argv[2], True)
else:
run_mvn_tests(tester, 0, False)
if "Correctness" in test_project:
print_aux_result()
elif "Runtime" in test_project:
print_aux_result_runtime()
if __name__ == '__main__':
main()
| [
"shargvla@post.bgu.ac.il"
] | shargvla@post.bgu.ac.il |
df2ae295c54fb683b759cc4c1e136dd1eb1ff174 | f950df22b5b43c7c85890615fe0ac1b503d14cbc | /m2m/settings.py | a06fd24ba3477d2c62ad6c20a5a3370d5ddb3ca7 | [] | no_license | diegoalfonsodiaz/examenfinalPrograComercial | 40d68a39481f921223a49e71d667343e1c1f90c5 | a37e2b715ff78c509ca8c89061dc50a00fc6c4c6 | refs/heads/master | 2020-04-07T09:26:43.924011 | 2018-11-19T17:44:23 | 2018-11-19T17:44:23 | 158,252,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | """
Django settings for m2m project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6sux*i^0oe$mk8i#m8w-e91t(i7pw&-kb20s7ir+o5gt&%=4co'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'notas',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'm2m.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'm2m.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"diegoalfonsodiaz@gmail.com"
] | diegoalfonsodiaz@gmail.com |
2c5ca4c4b7688e09c4ba04af67b07d2117303eb6 | 5fcb4c418f0a5a1455b4a396d3d9c516f5f76ca6 | /黑鹰视频/venv/Scripts/django-admin.py | 084e68ed1c20a44f2e2350979fb2fc5dae611c4e | [] | no_license | ooyuanyu/blackeagle | ab984366f6b1895312bfa23f96b162c88505c3d4 | c16784d816444dd4d2a5a70f05151c71b64f4646 | refs/heads/master | 2020-03-29T21:31:18.970428 | 2018-10-08T07:40:22 | 2018-10-08T07:40:22 | 150,371,315 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | #!E:\项目\黑鹰视频\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"825031736@qq.com"
] | 825031736@qq.com |
c8de73340647586a8a6510b8ed4c23440264bcb1 | 424df0d19d35daa9ffe8c518308b250f6abbbf5a | /node_modules/sqlite3/build/config.gypi | 8d997eaae3149de11ae8d8e4f0b2c2fc3e852b14 | [
"BSD-3-Clause"
] | permissive | jmanuel1407/e-nutrition | 60e563a294407784c20f372edcf57ae8b9024ed1 | 90786196ed4fddf0aa2ea87b64d70e68a7f57bad | refs/heads/master | 2021-01-10T09:53:39.940095 | 2015-12-18T20:05:05 | 2015-12-18T20:05:05 | 44,704,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,450 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 48,
"host_arch": "ia32",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "ia32",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/jannet/.node-gyp/0.10.37",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/home/jannet/e-nutrition/node_modules/sqlite3/lib/binding/node-v11-linux-ia32/node_sqlite3.node",
"module_name": "node_sqlite3",
"module_path": "/home/jannet/e-nutrition/node_modules/sqlite3/lib/binding/node-v11-linux-ia32",
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/1.4.28 node/v0.10.37 linux ia32",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/jannet/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/jannet/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "2",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "0.10.37",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/jannet/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": "",
"spin": "true"
}
}
| [
"letty_217@hotmail.com"
] | letty_217@hotmail.com |
5dedf4b23a5cd72cdc5babcf443ed3b1082c6ca4 | 2ea7c15ee5ff6a4d1cbbe41c97c8ef2175c74f8f | /models/common/base.py | 9617cea6207d0d637fd35e393c1d031f2995f89a | [] | no_license | zhoupp0518/squad-gated-rep | 2eb63a5de1a296730c062ed4c9b7fa37b82235f3 | a085c25c8dc50340006cd597aef1a7609e1a7239 | refs/heads/master | 2020-09-10T01:31:21.638288 | 2017-10-03T14:25:31 | 2017-10-05T07:38:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,429 | py | import copy
import json
import os
from typing import Any, Dict, List, Tuple
import tensorflow as tf
import numpy as np
from common.data.dataset.proc_dataset import ProcDataset
from common.data.evaluate import evaluate
from common.data.pipeline.embedder import load_word_embedder, load_small_char_embedder, load_char_embedder
from common.data.pipeline.iterator import BatchIter, SampleBatch
from common.util.cache import cache
from common.util.printing import print_epoch_summary, ordinal
from common.util.time import now
class BaseModel(object):
def __init__(self, config: Dict[str, Any], datasets: List[ProcDataset]):
self.config = copy.deepcopy(config)
print('running with config: ', self.config)
self.conf_random_seed = int(self.config['random_seed'])
self.conf_filter_short_questions = bool(self.config['filter_short_questions'])
self.conf_save_meta_graph = bool(self.config['save_meta_graph'])
self.conf_patience = int(self.config['patience'])
self.conf_layer_size = int(self.config['layer_size'])
self.conf_att_size = int(self.config['att_size'])
self.conf_char_rnn_size = int(self.config['char_rnn_size'])
self.conf_dropout = float(self.config['dropout'])
self.conf_max_par_length = int(self.config['max_par_length'])
self.conf_max_qu_length = int(self.config['max_qu_length'])
self.conf_max_char_length = int(self.config['max_char_length'])
self.conf_rnn_parallelity = int(self.config['rnn_parallelity'])
self.conf_iter_bucket_size = int(self.config['iter_bucket_size'])
self.conf_batch_size = int(self.config['batch_size'])
self.conf_apply_grads_interval = int(self.config['apply_grads_interval'])
self.conf_optimizer = self.config['optimizer']
self.conf_opt_lr = float(self.config['opt_lr'])
self.conf_opt_epsilon = float(self.config['opt_epsilon'])
self.conf_opt_adam_beta1 = float(self.config['opt_adam_beta1'])
self.conf_opt_adam_beta2 = float(self.config['opt_adam_beta2'])
self.conf_opt_adadelta_rho = float(self.config['opt_adadelta_rho'])
self.conf_lr_reduction_criterion = self.config['lr_reduction_criterion'].upper()
if self.conf_lr_reduction_criterion.upper() != 'F1' and self.conf_lr_reduction_criterion != 'EM':
raise Exception('illegal value for parameter lr_reduction_criterion: {}, allowed values: F1, EM'
.format(self.conf_lr_reduction_criterion))
self.conf_cache_dir = self.config['cache_dir']
self.conf_weights_dir = self.config['weights_dir']
self.conf_placement = self.config['placement']
self.word_embedder = cache(lambda: load_word_embedder(datasets, self.config['data_dir']),
self.config['cache_dir'], '{}_{}_{}_{}_{}'.format(
len(datasets), len(datasets[0].documents), len(datasets[-1].documents),
datasets[0].documents[0].raw.title, datasets[-1].documents[-1].raw.title
))
if bool(self.config['load_embeddings_from_file']):
self.char_embedder = cache(lambda: load_char_embedder(self.config['data_dir']),
self.config['cache_dir'], 'char_embedder')
else:
self.char_embedder = cache(lambda: load_small_char_embedder(self.config['data_dir'], self.conf_layer_size),
self.config['cache_dir'], 'small_char_embedder')
self.graph = tf.Graph()
with self.graph.as_default():
self.num_resets = tf.Variable(initial_value=0, name='num_resets', trainable=False, dtype=tf.int32)
# child functions
self.dropout_keep_prob = None
self.predictions = None
self.loss = None
self.graph = self.build_graph(self.graph)
self.check_interface()
# Add all the other common code for the initialization here
gpu_options = tf.GPUOptions(allow_growth=True)
sess_config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
self.session = tf.Session(config=sess_config, graph=self.graph)
with self.graph.as_default():
self.bad_iter_count = tf.Variable(initial_value=0, name='bad_iter_count', trainable=False, dtype=tf.int32)
self.increment_bad_iter_count_op = tf.assign(self.bad_iter_count,
tf.add(self.bad_iter_count, tf.constant(1, dtype=tf.int32)))
self.best_f1_score = tf.Variable(initial_value=-1.0, name='best_f1_score', trainable=False, dtype=tf.float32)
self.best_em_score = tf.Variable(initial_value=-1.0, name='best_em_score', trainable=False, dtype=tf.float32)
self.prev_f1_score = tf.Variable(initial_value=-1.0, name='prev_f1_score', trainable=False, dtype=tf.float32)
self.prev_em_score = tf.Variable(initial_value=-1.0, name='prev_em_score', trainable=False, dtype=tf.float32)
self.best_epoch_id = tf.Variable(initial_value=1, name='best_epoch_id', trainable=False, dtype=tf.int32)
static_graph_vars, saveable_graph_vars, optimizer_vars = self.var_groups()
self.saver = tf.train.Saver(var_list=[*saveable_graph_vars, *optimizer_vars], save_relative_paths=True)
self.static_var_init_op = tf.variables_initializer(static_graph_vars)
self.saveable_var_init_op = tf.variables_initializer(saveable_graph_vars)
self.optimizer_init_op = tf.variables_initializer(optimizer_vars)
self.print_trainable_params()
self.loaded_epoch_id = self.init_weights()
def check_interface(self):
if self.dropout_keep_prob is None:
raise Exception('self.dropout_keep_prob is None, needs to be set in child model')
if self.predictions is None:
raise Exception('self.predictions is None, needs to be set in child model')
if self.loss is None:
raise Exception('self.loss is None, needs to be set in child model')
def build_graph(self, graph) -> tf.Graph:
raise NotImplementedError('The build_graph function must be overriden by the specialized model')
def build_inputs(self):
self.dropout_keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='dropout_keep_prob')
self.par_words = tf.placeholder(name='input_par_words', dtype=tf.int32,
shape=[self.conf_batch_size, None])
self.par_num_words = tf.placeholder(name='input_par_num_words', dtype=tf.int32,
shape=[self.conf_batch_size])
self.par_chars = tf.placeholder(name='input_par_chars', dtype=tf.int32,
shape=[self.conf_batch_size, None, None])
self.par_num_chars = tf.placeholder(name='input_par_num_chars', dtype=tf.int32,
shape=[self.conf_batch_size, None])
self.qu_words = tf.placeholder(name='input_qu_words', dtype=tf.int32,
shape=[self.conf_batch_size, None])
self.qu_num_words = tf.placeholder(name='input_qu_num_words', dtype=tf.int32,
shape=[self.conf_batch_size])
self.qu_chars = tf.placeholder(name='input_qu_chars', dtype=tf.int32,
shape=[self.conf_batch_size, None, None])
self.qu_num_chars = tf.placeholder(name='input_qu_num_chars', dtype=tf.int32,
shape=[self.conf_batch_size, None])
self.answer_labels = tf.placeholder(name='input_answer_labels', dtype=tf.int32,
shape=[self.conf_batch_size, 2])
def build_optimizer(self, learning_rate: float, reuse: bool=False) -> None:
print('building optimizer {} with learning rate {} and reuse {}'
.format(self.conf_optimizer, learning_rate, reuse))
with tf.variable_scope('OPTIMIZER', reuse=reuse):
if self.conf_optimizer == 'Adam':
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=self.conf_opt_adam_beta1, beta2=self.conf_opt_adam_beta2,
epsilon=self.conf_opt_epsilon)
elif self.conf_optimizer == 'Adadelta':
self.optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate,
rho=self.conf_opt_adadelta_rho,
epsilon=self.conf_opt_epsilon)
self.optimizer_op = self.optimizer.minimize(self.loss)
def reset_optimizer(self, learning_rate: float, hard_reset: bool) -> None:
print('resetting optimizer, with learning rate {}, hard reset: {}'.format(learning_rate, hard_reset))
with self.graph.as_default():
self.build_optimizer(learning_rate, True)
_, _, optimizer_vars = self.var_groups()
self.optimizer_init_op = tf.variables_initializer(optimizer_vars)
# relevant only for optimizers like Adam or Adadelta that keep track of moments per variable
if hard_reset:
self.session.run([self.optimizer_init_op])
def var_groups(self) -> Tuple[List[tf.Variable], List[tf.Variable], List[tf.Variable]]:
static_graph_vars = []
saveable_graph_vars = []
optimizer_vars = []
for var in tf.global_variables():
if 'OPTIMIZER' in var.name:
optimizer_vars.append(var)
elif 'word_embeddings' in var.name:
static_graph_vars.append(var)
else:
saveable_graph_vars.append(var)
return static_graph_vars, saveable_graph_vars, optimizer_vars
def print_trainable_params(self) -> None:
with self.graph.as_default():
total_params = 0
for var in tf.trainable_variables():
shape = var.get_shape()
var_params = 1
for dim in shape:
var_params *= dim.value
total_params += var_params
print('[{}] there are {} total trainable parameters in this model'.format(now(), total_params))
def to_feed_dict(self, batch: SampleBatch, use_dropout: bool) -> Tuple[List[str], Dict[tf.Variable, np.array]]:
if use_dropout:
dropout_keep_prob = self.conf_dropout
else:
dropout_keep_prob = 1.0
return batch.qu_ids, {
self.dropout_keep_prob: dropout_keep_prob,
self.par_words: batch.par_words,
self.par_num_words: batch.par_num_words,
self.par_chars: batch.par_chars,
self.par_num_chars: batch.par_num_chars,
self.qu_words: batch.qu_words,
self.qu_num_words: batch.qu_num_words,
self.qu_chars: batch.qu_chars,
self.qu_num_chars: batch.qu_num_chars,
self.answer_labels: batch.answer_labels,
}
def init_weights(self) -> int:
checkpoint = tf.train.get_checkpoint_state(self.conf_weights_dir)
with self.graph.as_default():
self.session.run(self.static_var_init_op)
if checkpoint is not None:
print('Loading the model {} from folder {}'.format(checkpoint.model_checkpoint_path, self.conf_weights_dir))
self.saver.restore(self.session, checkpoint.model_checkpoint_path)
self.reset_optimizer(self.conf_opt_lr / 2.0 ** (self.session.run(self.num_resets)), False)
return int(checkpoint.model_checkpoint_path.split('-')[-1].split('.')[0])
else:
print('No existing checkpoint found in folder: %s' % self.conf_weights_dir)
self.session.run([self.saveable_var_init_op, self.optimizer_init_op])
return -1
def load_weights(self, epoch_id: int) -> None:
save_path = os.path.join(self.conf_weights_dir, 'model-{}'.format(epoch_id))
print('Loading the model from file: {}'.format(save_path))
self.session.run(self.static_var_init_op)
self.saver.restore(self.session, save_path)
def train_epoch(self, dataset: ProcDataset, epoch_id: int) -> None:
batch_iter = self.create_iter(dataset, True)
num_batches = batch_iter.num_valid_batches()
info_interval = num_batches // 10
batch_counter, loss_sum = 0, 0.0
for batch_id in range(num_batches):
_, feed_dict = self.to_feed_dict(batch_iter.__next__(), True)
iterations = (batch_id + 1) * self.conf_batch_size
loss_val, _ = self.session.run([self.loss, self.optimizer_op], feed_dict=feed_dict)
batch_counter += 1
# noinspection PyTypeChecker
loss_sum += float(np.sum(loss_val, 0))
if (batch_id + 1) % info_interval == 0:
train_loss = loss_sum / batch_counter
print('[{} | {} | {}] train loss: {}'.format(now(), epoch_id, iterations, train_loss))
batch_counter, loss_sum = 0, 0.0
def train(self, train_dataset: ProcDataset, valid_dataset: ProcDataset, max_epochs=1000, patience=0) -> None:
    """Train until `max_epochs`, resetting with a halved LR when progress stalls.

    After each epoch the model is scored on `valid_dataset`. An improvement in
    the configured criterion ('F1' or 'EM') records a new best iteration;
    otherwise a bad-iteration counter grows, and once it exceeds `patience`
    the weights are restored from the last checkpoint and the optimizer is
    reset with the learning rate halved once per reset.

    Args:
        train_dataset: pre-processed training data.
        valid_dataset: pre-processed validation data used for EM/F1 scoring.
        max_epochs: upper bound on the number of epochs to run.
        patience: consecutive non-improving epochs tolerated before a reset.
    """
    for epoch_id in range(self.loaded_epoch_id + 1, max_epochs):
        print("[{} | {}] current best em: {}, current best f1: {}, current bad iter count {}, current num resets {}"
              .format(now(), epoch_id, *self.session.run([self.best_em_score, self.best_f1_score,
                                                          self.bad_iter_count, self.num_resets])))
        self.train_epoch(train_dataset, epoch_id)
        em_score, f1_score = evaluate(valid_dataset, self.infer(valid_dataset))
        print_epoch_summary(epoch_id, em_score, f1_score)
        self.session.run(self.increment_bad_iter_count_op)
        bad_iter_count, best_f1_score, best_em_score, best_epoch_id, num_resets = \
            self.session.run([self.bad_iter_count, self.best_f1_score, self.best_em_score,
                              self.best_epoch_id, self.num_resets])
        # NOTE(review): the tf.assign(...) calls below create new graph ops on
        # every epoch, slowly growing the graph — consider pre-building the
        # assign ops once at construction time. TODO confirm and refactor.
        if f1_score > best_f1_score and self.conf_lr_reduction_criterion == 'F1' or \
                em_score > best_em_score and self.conf_lr_reduction_criterion == 'EM':
            print('[{} | {}] new best iteration!'.format(now(), epoch_id))
            self.session.run([
                tf.assign(self.bad_iter_count, 0),
                tf.assign(self.best_f1_score, f1_score),
                tf.assign(self.best_em_score, em_score),
                tf.assign(self.best_epoch_id, epoch_id),
            ])
        elif bad_iter_count > patience:
            num_resets = self.session.run(self.num_resets)
            print('[{} | {}] resetting the {} time'.format(now(), epoch_id, ordinal(num_resets + 1)))
            # init_weights restores the best checkpoint saved so far.
            self.init_weights()
            self.session.run([
                tf.assign(self.bad_iter_count, 0),
                tf.assign(self.num_resets, num_resets + 1),
            ])
            self.reset_optimizer(self.conf_opt_lr / 2.0 ** (num_resets + 1), True)
        else:
            # Bug fix: the original message put bad_iter_count in the epoch
            # slot and omitted the placeholder for `patience` entirely.
            print('[{} | {}] bad iteration, not doing anything yet, curr_patience {}, patience {}'
                  .format(now(), epoch_id, bad_iter_count, patience))
        self.save(epoch_id)
def save(self, epoch_id) -> str:
    """Checkpoint the current session under the weights directory.

    Writes the model configuration to `config.json` on the first save only.

    Returns:
        The checkpoint path produced by the saver.
    """
    tf.logging.set_verbosity('DEBUG')
    ckpt_prefix = os.path.join(self.conf_weights_dir, 'model')
    print('[{}] Saving to {} with epoch_id {}'.format(now(), ckpt_prefix, epoch_id))
    written_path = self.saver.save(self.session, ckpt_prefix, epoch_id,
                                   write_meta_graph=self.conf_save_meta_graph)
    config_path = os.path.join(self.conf_weights_dir, 'config.json')
    # Persist the configuration once; later saves leave the existing file alone.
    if not os.path.isfile(config_path):
        with open(config_path, 'w') as fh:
            json.dump(self.config, fh)
    print('[{}] finished saving!'.format(now()))
    tf.logging.set_verbosity('WARN')
    return written_path
def infer(self, dataset: ProcDataset) -> Dict[str, str]:
    """Predict an answer span for every question and return id -> answer text."""
    print('[{}] starting inference ...'.format(now()))
    dataset_iter = self.create_iter(dataset, False)
    num_samples = dataset_iter.num_samples()
    index_results = {}
    # Step through enough full batches to cover all samples (the extra batch
    # absorbs any remainder that does not divide the batch size).
    for _start in range(0, num_samples + self.conf_batch_size, self.conf_batch_size):
        qu_ids, feed_dict = self.to_feed_dict(dataset_iter.__next__(), False)
        pred_val = self.session.run(self.predictions, feed_dict=feed_dict)
        for sample_idx in range(0, self.conf_batch_size):
            # noinspection PyTypeChecker
            span_start = int(np.argmax(pred_val[sample_idx, 0]))
            # noinspection PyTypeChecker
            span_end = int(np.argmax(pred_val[sample_idx, 1]))
            index_results[qu_ids[sample_idx]] = (span_start, span_end)
    # Map token-index spans back onto character offsets in the raw context.
    text_results = {}
    for doc in dataset.documents:
        for par in doc.paragraphs:
            for qu in par.questions:
                first_token_index, last_token_index = index_results[qu.raw.id]
                # Clamp both indices so padded predictions stay inside the paragraph.
                first_token_index = min([first_token_index, len(par.tokens) - 1])
                last_token_index = min([last_token_index, len(par.tokens) - 1])
                char_offset_start = par.tokens[first_token_index].char_offset
                char_offset_end = par.tokens[last_token_index].char_offset_end()
                text_results[qu.raw.id] = par.raw.context[char_offset_start:char_offset_end]
    return text_results
def create_iter(self, dataset: ProcDataset, discard_invalid_samples: bool) -> BatchIter:
    """Build a BatchIter over `dataset` using the model's configured limits."""
    return BatchIter(
        dataset,
        self.word_embedder,
        self.char_embedder,
        self.conf_batch_size,
        self.conf_max_par_length,
        self.conf_max_qu_length,
        self.conf_max_char_length,
        discard_invalid_samples,
        self.conf_filter_short_questions,
        self.conf_iter_bucket_size,
    )
def apply_dropout(self, var: tf.Variable) -> tf.Variable:
    """Wrap `var` in a dropout op driven by the model's keep-probability tensor."""
    dropped = tf.nn.dropout(var, self.dropout_keep_prob)
    return dropped
def apply_dropout_to_list(self, vars: List[tf.Variable]) -> List[tf.Variable]:
    """Apply dropout (see apply_dropout) to each variable, preserving order."""
    # NOTE(review): the parameter shadows the builtin `vars`; the name is kept
    # unchanged here for keyword-argument compatibility with existing callers.
    return list(map(self.apply_dropout, vars))
| [
"mail.duer@gmail.com"
] | mail.duer@gmail.com |
6a54266ad2841e62e545a21267385cd18a73cc11 | 339ece4ecc7abd42a11f1bd78bb695981dae3f59 | /circle.py | 40e1f69e528138111a33ebbc06a7617e1d2b7730 | [] | no_license | Somesh1501/Codes | 05c1d54f51ab8316de7784e08d5271e207088894 | 08592e06d2d7f10ccb32caf7c3e2915c996d98e8 | refs/heads/master | 2021-09-24T10:51:57.834752 | 2018-10-08T17:03:36 | 2018-10-08T17:03:36 | 109,552,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | import turtle
# Shared turtle instance used by every drawing helper below.
b=turtle.Turtle()
def de():
    """Draw one spiral arm: 50 segments of length 100, turning 31 deg left."""
    steps_drawn = 0
    while steps_drawn < 50:
        b.forward(100)
        b.left(31)
        steps_drawn += 1
for i in range(30):
for i in range(50):
b.fd(100)
b.lt(135)
def gh():
de()
dj()
def dj():
de()
b.fd(100)
ed()
def ds():
for i in range (2):
dj()
dj()
| [
"someshdk1501@gmail.com"
] | someshdk1501@gmail.com |
1a1b7696f4ce2e13094a1f79e092e53fcc9eb461 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j15376-1702/sdB_galex_j15376-1702_lc.py | e2d309905a69f149eca005da69c193f1c0718906 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from gPhoton.gAperture import gAperture
def main():
    """Run gPhoton NUV aperture photometry for sdB_galex_j15376-1702."""
    photometry_args = dict(
        band="NUV",
        skypos=[234.417917, -17.037508],
        stepsz=30.,
        csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_galex_j15376-1702/sdB_galex_j15376-1702_lc.csv",
        maxgap=1000.,
        overwrite=True,
        radius=0.00555556,
        annulus=[0.005972227, 0.0103888972],
        verbose=3,
    )
    gAperture(**photometry_args)
# Script entry point: run the photometry job only when executed directly.
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.