blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1f2bc27194e8f706625493989d95c5335783f9f | fc58366ed416de97380df7040453c9990deb7faa | /daoliagent/services/arp.py | 7d9cf1622fdcd08505553150ef2cdef052d75232 | [
"Apache-2.0"
] | permissive | foruy/openflow-multiopenstack | eb51e37b2892074234ebdd5b501b24aa1f72fb86 | 74140b041ac25ed83898ff3998e8dcbed35572bb | refs/heads/master | 2016-09-13T08:24:09.713883 | 2016-05-19T01:16:58 | 2016-05-19T01:16:58 | 58,977,485 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,005 | py | from oslo.config import cfg
from ryu.lib.packet import arp
from ryu.lib.packet import ethernet
from ryu.lib.packet import packet
from ryu.lib import addrconv
from ryu.ofproto import ether
from daoliagent.services.base import PacketBase
from daoliagent.lib import SMAX
from daoliagent.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class PacketARP(PacketBase):
    """ARP service for the OpenFlow agent.

    Proxy-answers ARP requests for overlay hosts and forwards
    gateway-related ARP traffic between the switch's internal,
    external, tap, and LOCAL ports.
    """

    # Dispatch priority among PacketBase services
    # (NOTE(review): relative ordering assumed; confirm in PacketBase).
    priority = 5

    def _arp(self, msg, dp, in_port, pkt_ether, pkt_arp, address):
        """Answer the ARP request: set the source MAC to ``address`` and
        echo the frame back out the ingress port, installing a matching
        flow so later identical requests bypass the controller."""
        ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)
        actions = [ofp_parser.OFPActionSetField(eth_src=address),
                   ofp_parser.OFPActionOutput(ofp.OFPP_IN_PORT)]
        match = ofp_parser.OFPMatch(
            in_port=in_port, eth_type=ether.ETH_TYPE_ARP,
            arp_spa=pkt_arp.src_ip, arp_tpa=pkt_arp.dst_ip)
        LOG.debug("arp response %(src_mac)s-%(src_ip)s -> %(dst_mac)s-%(dst_ip)s",
                  {'src_mac': address, 'src_ip': pkt_arp.dst_ip,
                   'dst_mac': pkt_arp.src_mac, 'dst_ip': pkt_arp.src_ip})
        self.add_flow(dp, match, actions)
        self.packet_out(msg, dp, actions)

    def _redirect(self, msg, dp, in_port, pkt_ether, pkt_arp, output):
        """Forward this ARP flow unchanged to ``output`` and install a
        flow entry for subsequent packets."""
        ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)
        actions = [ofp_parser.OFPActionOutput(output)]
        match = ofp_parser.OFPMatch(
            in_port=in_port, eth_type=ether.ETH_TYPE_ARP,
            arp_spa=pkt_arp.src_ip, arp_tpa=pkt_arp.dst_ip)
        self.add_flow(dp, match, actions)
        self.packet_out(msg, dp, actions)

    def run(self, msg, pkt_ether, pkt_arp, gateway, **kwargs):
        """Handle an ARP packet-in.

        Redirect gateway ARP traffic between ports, or proxy-answer the
        request; returns True in the redirect/missing-port paths.
        """
        dp = msg.datapath
        in_port = msg.match['in_port']
        ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)
        src_mac = pkt_arp.src_mac
        dst_ip = pkt_arp.dst_ip
        LOG.debug("arp request %(src_mac)s-%(src_ip)s -> %(dst_mac)s-%(dst_ip)s",
                  {'src_mac': src_mac, 'src_ip': pkt_arp.src_ip,
                   'dst_mac': pkt_arp.dst_mac, 'dst_ip': dst_ip})
        # Distinct internal/external devices: shuttle ARP between the
        # internal port and the tap (virtual internal) port.
        if gateway.int_dev != gateway.ext_dev:
            int_port = self.port_get(dp, devname=gateway.int_dev)
            tap_port = self.port_get(dp, devname=gateway.vint_dev)
            if not int_port or not tap_port:
                return True
            if in_port == int_port.port_no:
                if pkt_arp.dst_ip == gateway['int_ip']:
                    self._redirect(msg, dp, in_port, pkt_ether, pkt_arp, tap_port.port_no)
                    return True
            if in_port == tap_port.port_no:
                if pkt_arp.src_ip == gateway['int_ip']:
                    self._redirect(msg, dp, in_port, pkt_ether, pkt_arp, int_port.port_no)
                    return True
        # External device <-> switch LOCAL port handling.
        port = self.port_get(dp, devname=gateway['ext_dev'])
        if not port:
            return True
        if in_port == port.port_no:
            if pkt_arp.dst_ip == gateway['ext_ip']:
                self._redirect(msg, dp, in_port, pkt_ether, pkt_arp, ofp.OFPP_LOCAL)
                return True
        if in_port == ofp.OFPP_LOCAL:
            if pkt_arp.src_ip == gateway['ext_ip']:
                self._redirect(msg, dp, in_port, pkt_ether, pkt_arp, port.port_no)
                return True
        num_ip = addrconv.ipv4._addr(dst_ip).value
        if pkt_arp.opcode != arp.ARP_REQUEST:
            LOG.debug("unknown arp op %s", pkt_arp.opcode)
        elif (num_ip & 0x0000FFFF == SMAX - 1):
            # Target looks like the per-network gateway address: answer
            # with the virtual internal MAC.
            #br_port = self.port_get(dp, devname=gateway['vext_dev'])
            #self._arp(dp, in_port, pkt_ether, pkt_arp, br_port.hw_addr)
            self._arp(msg, dp, in_port, pkt_ether, pkt_arp, gateway['vint_mac'])
        else:
            # Answer with the destination server's real MAC when both
            # endpoints are known to the DB, else with the gateway MAC.
            servers = self.db.server_get_by_mac(src_mac, dst_ip, False)
            if servers['src'] and servers['dst']:
                self._arp(msg, dp, in_port, pkt_ether, pkt_arp, servers['dst'].mac_address)
            else:
                self._arp(msg, dp, in_port, pkt_ether, pkt_arp, gateway['vint_mac'])
| [
"wenxiang.wang1204@gmail.com"
] | wenxiang.wang1204@gmail.com |
7eba5f147fc621a898d8f0d6fad9215a40574729 | fc58b26212f6133839b12f8fd98cbd5e984629ba | /app/app.py | 12ce5d736fa08540be4030741477a3d72400ec53 | [] | no_license | nytr0gen/hts | e87062551f0003b40d2630d7186c7a32b019a18c | 2d7866707db6b61dacf42507bffca51f3096843b | refs/heads/master | 2020-06-27T03:41:17.491656 | 2017-07-18T05:36:11 | 2017-07-18T05:36:11 | 97,043,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | from flask import Flask, request, jsonify
from bson.json_util import dumps
from config import db, config
app = Flask('hts')
# main flask api route
# /items/?subreddit=<subreddit>&from=<t1>&to=<t2>
# /items/?subreddit=<subreddit>&from=<t1>&to=<t2>&keyword=<kw>
@app.route('/items/')
def items():
    """Return items for a subreddit created within [from, to].

    Responses: 200 with a JSON array (newest first), 400 on
    missing/invalid parameters, 404 when nothing matches.
    """
    subreddit = request.args.get('subreddit')
    q_from = request.args.get('from')
    q_to = request.args.get('to')
    # All three parameters are mandatory.
    if (subreddit is None or len(subreddit) == 0 or
            q_from is None or q_to is None):
        data = dict(error='Invalid query')
        return jsonify(data), 400
    # from/to must parse as integer timestamps.
    try:
        q_from = int(q_from)
        q_to = int(q_to)
    except ValueError:
        data = dict(error='Invalid from or to in query')
        return jsonify(data), 400
    # query filter by subreddit, q_from, q_to
    query = {
        'subreddit': subreddit,
        'created': {  # assumed inclusive interval [from, to]
            '$gte': q_from,
            '$lte': q_to,
        }
    }
    # add keyword to query filter
    keyword = request.args.get('keyword')  # optional
    if keyword is not None:
        # works only for complete words
        # if a sentence contains "word" and you look for "ord", it will result
        # only in items containing "ord".
        # $regex can be used for partial words
        query['$text'] = { '$search': keyword }
    # reverse order
    c_items = db.items.find(query).sort('created', -1)
    # subreddit not found
    if c_items.count() == 0:
        data = dict(error='Query Not Found')
        return jsonify(data), 404
    '''
    Data returned by the web server should be in JSON format, sorted in reverse chronological order.
    For simplicity, all parameters are mandatory, the server should respond with an error if they're missing.
    '''
    # TODO: implement pagination
    data = list(c_items)
    # bson.json_util.dumps serializes Mongo-specific types (ObjectId,
    # dates) that flask.jsonify cannot handle.
    return app.response_class(
        response=dumps(data),
        status=200,
        mimetype='application/json'
    )
if __name__ == "__main__":
    # Run the development server with host/port/debug taken from config.
    app.run(debug=config['app']['debug'],
            host=config['app']['host'],
            port=config['app']['port'])
| [
"nytr0gen.george@gmail.com"
] | nytr0gen.george@gmail.com |
89fc6dc680c2a7f8d24f9378f69e90655b89aa98 | aa08a55c48016ae948f3e4619b1d300914312f61 | /web_flask/6-number_odd_or_even.py | 2be8bbf65bc27a0e5484c5b0dd5cfc5c1c734f63 | [] | no_license | DiegoSusviela/AirBnB_clone_v2 | 5afda7dce3f57bf42dbf78129b0942e803c5ded6 | 87e4cc5c9affbe329bd8e816ddfc58e73387aaae | refs/heads/main | 2023-08-14T10:06:25.567140 | 2021-09-21T20:00:44 | 2021-09-21T20:00:44 | 405,736,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | #!/usr/bin/python3
"""starts flask"""
from flask import Flask, abort, render_template
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def hello_hbnb():
    """Display 'Hello HBNB!' at the root route."""
    greeting = 'Hello HBNB!'
    return greeting
@app.route('/hbnb', strict_slashes=False)
def hbnb():
    """Display 'HBNB'."""
    message = 'HBNB'
    return message
@app.route('/c/<text>', strict_slashes=False)
def cfun(text):
    """Display 'C <text>', rendering underscores as spaces."""
    cleaned = text.replace("_", " ")
    return 'C ' + cleaned
@app.route('/python', strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def pythoncool(text='is cool'):
    """Display 'Python <text>' (default text: 'is cool')."""
    cleaned = text.replace('_', ' ')
    return 'Python ' + cleaned
@app.route('/number/<n>', strict_slashes=False)
def numver(n):
    """Display '<n> is a number' only if ``n`` is an integer, else 404."""
    try:
        # Bug fix: '%d is a number'.format(...) has no '{}' placeholder,
        # so the literal string '%d is a number' was returned unchanged;
        # use %-interpolation so the value is actually substituted.
        # Also narrowed the bare ``except:`` to the int() failure.
        return '%d is a number' % int(n)
    except ValueError:
        abort(404)
@app.route('/number_template/<n>', strict_slashes=False)
def number_templates(n):
    """Display an HTML page only if ``n`` is an integer, else 404."""
    # Narrowed from a bare ``except:`` (which also swallowed
    # SystemExit/KeyboardInterrupt) to the int() failure we mean to catch.
    try:
        value = int(n)
    except ValueError:
        abort(404)
    return render_template('5-number.html', n=value)
@app.route('/number_odd_or_even/<n>', strict_slashes=False)
def numbersandevenness(n):
    """Display a HTML page only if n is an integer, else 404."""
    # Narrowed from a bare ``except:`` to the int() failure we mean to catch.
    try:
        value = int(n)
    except ValueError:
        abort(404)
    return render_template('6-number_odd_or_even.html', n=value)
if __name__ == "__main__":
    # Serve on all interfaces, port 5000.
    app.run(host='0.0.0.0', port=5000)
| [
"dieguitosus@hotmail.com"
] | dieguitosus@hotmail.com |
af881679204b1449f665ea8edb98563261127fb5 | c8d5c879b12e124c6840b90afbdc8b928227efa4 | /my_unet_v2_loc.py | 99b9a8217856fd8c0b3b839f8b8fc4a63f83af08 | [
"MIT"
] | permissive | yimejky/fiit-dp-unet-visualiser | 5828b52e8b0fd3c8fec4c940ba077d3414cdeab4 | 835190c771a1b4c0a1640b10f4f05dc1a129d706 | refs/heads/master | 2023-02-22T04:11:14.058425 | 2021-01-01T15:27:45 | 2021-01-01T15:27:45 | 268,133,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,721 | py |
import sys
sys.path.append('../')
from pycore.tikzeng import *
from pycore.blocks import *
def block(conv_num, pool_num, in_channels, out_channels, height, height_out, to=None, width=2, width_scaller=2):
    """Encoder stage: [conv, conv, pool] drawing commands for the diagram.

    When ``to`` is omitted, the stage anchors to the previous pool's east
    face with a positive x offset; an explicit anchor uses a negative one.
    """
    anchored = to is not None
    offset = "(-1,0,0)" if anchored else "(1,0,0)"
    if not anchored:
        to = f"(pool{pool_num - 1}-east)"
    first_conv = to_Conv(f"conv{conv_num}", '', in_channels, offset=offset,
                         to=to, height=height, depth=height, width=width)
    second_conv = to_Conv(f"conv{conv_num + 1}", '', out_channels, offset="(0,0,0)",
                          to=f"(conv{conv_num}-east)", height=height, depth=height,
                          width=width * width_scaller)
    pool = to_Pool(f"pool{pool_num}", offset="(0,0,0)",
                   to=f"(conv{conv_num + 1}-east)",
                   height=height_out, depth=height_out, width=1)
    return [first_conv, second_conv, pool]
def unblock(conv_num, pool_num, in_channels, out_channels, height, to=None, width=2, width_scaller=2):
    """Decoder stage: [unpool, conv, conv] drawing commands for the diagram."""
    if to is None:
        # Default anchor: east face of the previous decoder conv.
        to = f"(unconv{conv_num - 1}-east)"
    unpool = to_UnPool(f"unpool{pool_num}", offset="(1,0,0)", to=to,
                       height=height, depth=height, width=1)
    first_conv = to_Conv(f'unconv{conv_num}', '', in_channels, offset="(0,0,0)",
                         to=f"(unpool{pool_num}-east)", height=height,
                         depth=height, width=width * width_scaller)
    second_conv = to_Conv(f'unconv{conv_num + 1}', '', out_channels, offset="(0,0,0)",
                          to=f"(unconv{conv_num}-east)", height=height,
                          depth=height, width=width)
    return [unpool, first_conv, second_conv]
# Build the PlotNeuralNet command list: document header, then the network.
arch = [
    to_head( '..' ),
    to_cor(),
    to_begin(),
    to_input( './input.png' ),
]

# Base drawing parameters.
in_ch = 4            # channel label of the first encoder stage
width = 1            # base box width
max_num = 3          # number of encoder/decoder stages
width_scaller = 1.5  # width growth per stage
width_offset = 2     # constant padding added to every width
height_init = 8      # height step between stages

# Derived bottleneck/decoder sizes.
max_width = width * width_scaller ** max_num
max_in_ch = in_ch * 2 ** max_num
max_height = height_init * (max_num+1)

# Encoder: conv-conv-pool stages with doubling channels, shrinking height.
for i in range(max_num):
    j = i+1
    tmp_in_ch = in_ch * (2**(i))
    tmp_out_ch = in_ch * (2**(i+1))
    tmp_width = width * (width_scaller**i) + width_offset
    tmp_height = height_init*(max_num-i+1)
    to = '(0,0,0)' if i <= 0 else None
    print(f"done1 {i}")
    arch.extend(block(1 + i*2, j, tmp_in_ch, tmp_out_ch, height=tmp_height, height_out=tmp_height-height_init, to=to, width=tmp_width, width_scaller=width_scaller))

# Bottleneck convolutions.
arch.extend([
    to_Conv("middle_conv1", '', 32, offset="(1,0,0)", to=f"(pool3-east)", height=height_init, depth=height_init, width=max_width+width_offset),
    to_Conv("middle_conv2", '', 64, offset="(0,0,0)", to=f"(middle_conv1-east)", height=height_init, depth=height_init, width=max_width*width_scaller+width_offset)
])

# Decoder: unpool-conv-conv stages mirroring the encoder.
for i in range(max_num):
    j = i+1
    tmp_in_ch = max_in_ch // (2**i)
    tmp_out_ch = max_in_ch // (2**i)
    tmp_width = max_width / (width_scaller**i) + width_offset
    to = '(middle_conv2-east)' if i <= 0 else None
    print(f"done2 {i}")
    arch.extend(unblock(1 + i*2, j, tmp_in_ch, tmp_out_ch, height=height_init*(j+1), to=to, width=tmp_width, width_scaller=1))

# Output layer, inter-stage connections, U-Net skip connections, footer.
arch.extend([
    to_Conv("output", '', 1, offset="(1,0,0)", to=f"(unconv6-east)", height=max_height, depth=max_height, width=width + width_offset, caption="sigmoid"),
    # to_ConvSoftMax("output", 1, "(1,0,0)", to="(unconv6-east)", caption="Sigmoid", height=max_height, depth=max_height),
    to_connection("pool1", "conv3"),
    to_connection("pool2", "conv5"),
    to_connection("pool3", "middle_conv1"),
    to_connection("middle_conv2", "unpool1"),
    to_connection("unconv2", "unpool2"),
    to_connection("unconv4", "unpool3"),
    to_connection("unconv6", "output"),
    to_skip(of='conv2', to='unconv5', pos=1.25),
    to_skip(of='conv4', to='unconv3', pos=1.25),
    to_skip(of='conv6', to='unconv1', pos=1.25),
    to_end()
])
def main():
    """Render the architecture to '<script name>.tex'."""
    import os
    # Bug fix: sys.argv[0].split('.')[0] truncates at the FIRST dot, so a
    # relative path like './my_unet_v2_loc.py' yielded an empty name (and
    # any dotted directory broke it too). splitext strips only the final
    # extension.
    namefile = os.path.splitext(str(sys.argv[0]))[0]
    to_generate(arch, namefile + '.tex' )
if __name__ == '__main__':
    # Generate the .tex diagram when run as a script.
    main()
| [
"nikolas.tsk@gmail.com"
] | nikolas.tsk@gmail.com |
34e39d656595b9d37b2a3f10902af713720b6660 | 7bb8650c873c9c9122332890020df25da168ec63 | /Practice Problems and Exams/Lists and File Reading.py | f74ce374c2ce788d9c4ed0b7ad22ba213a01f5bb | [] | no_license | akhlaque-ak/pfun | b64e9f4e4f09fcfe3213ac21120e72ab463f9ca5 | 19b3e06261e76968220f84967bb477ac9688a17c | refs/heads/master | 2020-07-24T20:48:01.273396 | 2019-08-08T08:56:14 | 2019-08-08T08:56:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py | # ---------------
# Question 1 (15 Marks)
# ---------------
# Given two lists:
#
# subjects = ['Math', 'Science', 'Urdu', 'English', 'Computers']
# marks = [100, 78, 88, 75, 95]
#
# Write a function MarksPrinter that iterates through the lists using recursion
# and shows individual scores like this:
#
# Computers 95
# English 75
# Urdu 88
# Science 78
# Math 100
#
# It should work for lists of any length
subjects = ['Math', 'Science', 'Urdu', 'English', 'Computers']
marks = [100, 78, 88, 75, 95]


def MarksPrinter(subjects, marks, number):
    """Recursively print '<subject> <mark>' pairs from index *number* down to 0."""
    if number == -1:
        return
    print('{} {}'.format(subjects[number], marks[number]))
    MarksPrinter(subjects, marks, number - 1)


# Print the whole gradebook, last subject first.
MarksPrinter(subjects, marks, len(subjects) - 1)
# ---------------
# Question 2 (15 Marks)
# ---------------
#
# in order to get value of sin and cos, you need to import math file like this:
#
# import math
# print (math.sin(30*math.pi/180)) #for 30 degrees
#
# write a recursive function SineVales that writes all the sin values from 0 to 360 degrees
# line by line in a text file called sin.txt
import math
def SineValues(degrees, file):
    """Recursively write sin(d degrees) for d = degrees..360, one value per line.

    The base case stops after 360 (inclusive), so a call with degrees=0
    writes 361 values; recursion depth (~361) is well below Python's limit.
    """
    if degrees == 361:
        return
    txt = str(math.sin(degrees * math.pi / 180))
    file.write(txt + '\n')
    SineValues(degrees + 1, file)


# Fix: use a context manager so sin.txt is closed even if writing fails
# (the original open()/close() pair leaked the handle on error).
with open("sin.txt", "w") as file:
    SineValues(0, file)
# ---------------
# Question 3 (10 Marks)
# ---------------
#
# In question 2, you produce a file named sin.txt:
#
# create a function Reverse that that reads from sin.txt and writes another file reverse.txt
# but prints all the values in reverse order
# for example, if there are only 3 lines in sin.txt like this:
#
# 1
# 2
# 3
#
# then reverse.txt should contain
# 3
# 2
# 1
#
# Tip: Recursion not needed
def Reverse(file):
    """Read *file* and write its lines to reverse.txt in reverse order.

    Fixes two issues in the original: files are now closed via context
    managers, and splitlines() replaces split('\\n') so the trailing
    newline no longer produces a spurious leading blank line in the
    output.
    """
    with open(file, 'r') as filer:
        lines = filer.read().splitlines()
    with open('reverse.txt', 'w') as filew:
        for line in reversed(lines):
            filew.write(line + '\n')
Reverse("sin.txt")
| [
"noreply@github.com"
] | noreply@github.com |
85d269c58a8b916c4b0d941cf97263956df9043a | 12d7d62a81981d4854ebb352184ff2f855abda68 | /Sunday/sunday_0023.py | fd0e58e6ddb3f680a217285e1ad303d5666b5bf8 | [
"MIT"
] | permissive | Morek999/OMSCS_Taiwan_Leetcode | e200ccf25915164b1c34161f7d0c6a2572962579 | 8ec18e08e9313bc3326846ca6ef6e569380a133f | refs/heads/master | 2020-12-06T18:55:15.071029 | 2020-04-12T02:45:56 | 2020-04-12T02:45:56 | 232,529,877 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | # refer to https://leetcode.com/problems/merge-k-sorted-lists/solution/
#
# Time: O(N log k) (k is the number of lists)
# Space: O(1)
class Solution:
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
amount = len(lists)
interval = 1
while interval < amount:
for i in range(0, amount - interval, interval * 2):
lists[i] = self.merge2Lists(lists[i], lists[i + interval])
interval *= 2
return lists[0] if amount > 0 else lists
def merge2Lists(self, l1, l2):
head = point = ListNode(0)
while l1 and l2:
if l1.val <= l2.val:
point.next = l1
l1 = l1.next
else:
point.next = l2
l2 = l1
l1 = point.next.next
point = point.next
if not l1:
point.next=l2
else:
point.next=l1
return head.next
| [
"noreply@github.com"
] | noreply@github.com |
a32bc2aa2d40e3bc7216a0a034b9f3f89efa6c59 | b131df8cce5fa514a9849f8ced8fef1b7afd057b | /mlob.py | f4ccc184ecf057f67e9d21b2967e61916f8db5ea | [
"MIT"
] | permissive | eyaffe/moving_loads | a09c0c39c6bc35854c911b558c66355d75539afd | a83944a3f44d3072aac33e255ce47794b612feb7 | refs/heads/master | 2021-01-22T23:07:24.555568 | 2017-03-20T15:54:37 | 2017-03-20T15:54:37 | 85,613,348 | 0 | 0 | null | 2017-03-20T18:42:53 | 2017-03-20T18:42:52 | null | UTF-8 | Python | false | false | 11,200 | py | import pdb
import timeit
def analyze_vehicle(axle_spacing, axle_wt, span_length1, span_length2,
                    num_nodes, space_to_trailing_load,
                    distributed_load):
    """Run a moving-load analysis of the axle train over one or two spans.

    Steps the axle train across every node in both directions, enveloping
    shear and moment per node and the interior pier reaction globally.

    Returns (V_max1, V_min1, M_max1, V_max2, V_min2, M_max2, Rmax_pier).
    NOTE: mutates ``axle_spacing``/``axle_wt`` in place (trailing load
    appended, dummy leading spacing inserted).
    """
    V_max1 = []
    V_min1 = []
    M_max1 = []
    V_max2 = []
    V_min2 = []
    M_max2 = []

    span1_begin = 0.0
    span1_end = span_length1
    span2_begin = span_length1
    span2_end = span_length1 + span_length2

    node_loc_ltr = node_location(span1_begin, span1_end, span2_begin,
                                 span2_end, num_nodes)
    node_loc_rtl = list(reversed(node_loc_ltr))

    add_trailing_load(axle_spacing, axle_wt, space_to_trailing_load,
                      distributed_load, span1_begin, span2_end)
    axle_spacing.insert(0, 0.0)  # insert a dummy spacing for the first axle
    num_axles = len(axle_wt)
    axle_num = get_axle_num(num_axles)

    # Fixes: removed a leftover debugging pdb.set_trace() that halted every
    # run, dropped the unused ``start_pt`` variable, and hoisted the pier
    # reaction envelope out of the node loop -- it was reset per node, so
    # only the last node's maximum was returned.
    Rmax_pier = 0.0

    for node_loc, direction in zip([node_loc_ltr, node_loc_rtl], ["ltr", "rtl"]):
        for x in node_loc:
            # Per-node envelopes, reset for each node location.
            Vmax1 = 0.0
            Vmin1 = 0.0
            Mmax1 = 0.0
            Vmax2 = 0.0
            Vmin2 = 0.0
            Mmax2 = 0.0
            for axle_id in axle_num:
                # Place the current axle over the node; subsequent axles
                # are shifted from the previous placement.
                if axle_id == 1:
                    cur_axle_loc = get_abs_axle_location(axle_spacing, x,
                                                         direction)
                else:
                    prev_axle_loc = cur_axle_loc
                    cur_axle_loc = move_axle_loc(x, axle_spacing, axle_id,
                                                 prev_axle_loc, num_axles,
                                                 direction)
                Pt1, xt1, Pl1, xl1, Pr1, xr1 = calc_load_and_loc(
                    cur_axle_loc, axle_wt, x, span1_begin, span1_end, num_axles)
                Pt2, xt2, Pl2, xl2, Pr2, xr2 = calc_load_and_loc(
                    cur_axle_loc, axle_wt, x, span2_begin, span2_end, num_axles)
                Rpier = calc_pier_reaction(Pt1, xt1, Pt2, xt2, span1_begin,
                                           span1_end, span2_begin, span2_end)
                Rmax_pier = envelope_pier_reaction(Rmax_pier, Rpier)
                if x >= span1_begin and x <= span1_end:
                    Rb1, Re1 = calc_reactions(Pt1, xt1, span1_begin, span1_end)
                    Vb1, Ve1 = calc_shear(Rb1, Re1, Pr1, Pl1)
                    Vmax1, Vmin1 = envelope_shear(Vmax1, Vmin1, Vb1, Ve1)
                    M1 = calc_moment(x, xl1, span1_begin, Rb1, Pl1)
                    Mmax1 = envelope_moment(Mmax1, M1)
                if span_length2 != 0.0 and x >= span2_begin and x <= span2_end:
                    Rb2, Re2 = calc_reactions(Pt2, xt2, span2_begin, span2_end)
                    Vb2, Ve2 = calc_shear(Rb2, Re2, Pr2, Pl2)
                    Vmax2, Vmin2 = envelope_shear(Vmax2, Vmin2, Vb2, Ve2)
                    M2 = calc_moment(x, xl2, span2_begin, Rb2, Pl2)
                    Mmax2 = envelope_moment(Mmax2, M2)
            # Record the per-node envelopes once all axle positions at
            # this node have been considered.
            if x >= span1_begin and x <= span1_end:
                V_max1.append(Vmax1)
                V_min1.append(Vmin1)
                M_max1.append(Mmax1)
            if span_length2 != 0.0 and x >= span2_begin and x <= span2_end:
                V_max2.append(Vmax2)
                V_min2.append(Vmin2)
                M_max2.append(Mmax2)

    return V_max1, V_min1, M_max1, V_max2, V_min2, M_max2, Rmax_pier
def output(V_max1, V_min1, M_max1, V_max2, V_min2, M_max2, Rmax_pier,
           analysis_time):
    """Format and print output.

    Prints enveloped shear/moment extremes for span 1 (and span 2 if any),
    the pier reaction, and the runtime.
    NOTE(review): every result is divided by 2 before printing --
    presumably a per-rail split of the loading; confirm intent.
    """
    print max(V_max1)/2
    print min(V_min1)/2
    print max(M_max1)/2
    if V_max2 != []:
        print max(V_max2)/2
        print min(V_min2)/2
        print max(M_max2)/2
    print Rmax_pier/2
    print "Runtime: " + str(analysis_time) + " sec"
def calc_reactions(Pt, xt, span_begin, span_end):
    """Simple-span support reactions for total load Pt at centroid xt.

    Returns (Rb, Re), the begin- and end-support reactions. A zero-length
    span yields (0.0, 0.0) to avoid division by zero.
    """
    span_length = span_end - span_begin
    if span_length == 0.0:
        return 0.0, 0.0
    Rb = Pt * (span_end - xt) / span_length
    Re = Pt * (xt - span_begin) / span_length
    return Rb, Re
def calc_pier_reaction(Pt1, xt1, Pt2, xt2, span1_begin, span1_end, span2_begin,
                       span2_end):
    """Interior pier (floorbeam) reaction from the loads on both spans.

    Returns 0.0 when there is no second span.
    """
    span_length1 = span1_end - span1_begin
    span_length2 = span2_end - span2_begin
    if span_length2 == 0.0:
        return 0.0
    share1 = Pt1 * (xt1 - span1_begin) / span_length1
    share2 = Pt2 * (span2_end - xt2) / span_length2
    return share1 + share2
def envelope_pier_reaction(Rmax_pier, Rpier):
    """Running maximum of the interior pier (floorbeam) reaction."""
    return Rpier if Rpier > Rmax_pier else Rmax_pier
def calc_shear(Rb, Re, Pr, Pl):
    """Shear just after (Vb) and just before (Ve) the node."""
    return Re - Pr, Pl - Rb
def envelope_shear(Vmax, Vmin, Vb, Ve):
    """Fold the two node shears into the running (Vmax, Vmin) envelope.

    Negative shears update the minimum; non-negative ones the maximum.
    """
    for shear in (Vb, Ve):
        if shear < 0:
            Vmin = min(Vmin, shear)
        else:
            Vmax = max(Vmax, shear)
    return Vmax, Vmin
def calc_moment(x, xl, span_begin, Rb, Pl):
    """Bending moment at node x from begin reaction Rb and the resultant
    Pl of loads left of x (centroid xl)."""
    reaction_arm = x - span_begin
    load_arm = x - xl
    return Rb * reaction_arm - Pl * load_arm
def envelope_moment(Mmax, M):
    """Running maximum positive moment at the node."""
    return M if M > Mmax else Mmax
def get_axle_num(num_axles):
    """Return the 1-based axle numbers [1, ..., num_axles]."""
    return list(range(1, num_axles + 1))
def get_abs_axle_location(axle_spacing, start_pt, direction):
    """Absolute axle positions with the left support as the origin.

    'ltr' travel decreases positions from ``start_pt``; 'rtl' increases
    them.
    """
    positions = []
    loc = start_pt
    for spacing in axle_spacing:
        if direction == "ltr":
            loc -= spacing
        elif direction == "rtl":
            loc += spacing
        positions.append(loc)
    return positions
def move_axle_loc(x, axle_spacing, axle_id, prev_axle_loc,
                  num_axles, direction):
    """Shift the whole train by axle ``axle_id``'s spacing so that axle
    sits over the current node.

    Note: parameter ``x`` is unused; it is kept for interface
    compatibility with callers.
    """
    step = axle_spacing[axle_id - 1]
    moved = []
    for i in range(num_axles):
        if direction == "ltr":
            pos = prev_axle_loc[i] + step
        elif direction == "rtl":
            pos = prev_axle_loc[i] - step
        moved.append(pos)
    return moved
def calc_load_and_loc(cur_axle_loc, axle_wt, x, begin_span, end_span, num_axles):
    """Resultant loads on the span and their centroids.

    Returns (Pt, xt, Pl, xl, Pr, xr): the total load on
    [begin_span, end_span] and its centroid, then the same for the loads
    in [begin_span, x] (left of the node) and [x, end_span] (right of the
    node). A centroid defaults to 0 when its load is zero.
    """
    Pt = Pl = Pr = 0.0
    sum_Ptx = sum_Plx = sum_Prx = 0.0
    for i in range(num_axles):
        pos = cur_axle_loc[i]
        wt = axle_wt[i]
        if begin_span <= pos <= end_span:
            Pt += wt
            sum_Ptx += pos * wt
        if begin_span <= pos <= x:
            Pl += wt
            sum_Plx += pos * wt
        if x <= pos <= end_span:
            Pr += wt
            sum_Prx += pos * wt
    # Guard each centroid against a zero resultant.
    xt = sum_Ptx / Pt if Pt != 0 else 0
    xl = sum_Plx / Pl if Pl != 0 else 0
    xr = sum_Prx / Pr if Pr != 0 else 0
    return Pt, xt, Pl, xl, Pr, xr
def add_trailing_load(axle_spacing, axle_wt, space_to_trailing_load,
                      distributed_load, span1_begin, span2_end):
    """Approximate the distributed trailing load as closely spaced point loads.

    Mutates ``axle_spacing`` and ``axle_wt`` in place: one gap of
    ``space_to_trailing_load`` followed by equivalent point loads every
    0.5 length units across the total structure length. Each point load
    equals the distributed intensity times the spacing.
    """
    total_span_length = span2_end - span1_begin
    pt_load_spacing = 0.5
    num_loads = int(total_span_length / pt_load_spacing)
    equivalent_pt_load = distributed_load * pt_load_spacing

    axle_spacing.append(space_to_trailing_load)
    axle_spacing.extend([pt_load_spacing] * num_loads)
    axle_wt.extend([equivalent_pt_load] * (num_loads + 1))
def node_location(span1_begin, span1_end, span2_begin, span2_end, num_nodes):
    """Evenly spaced node coordinates over span 1 (and span 2 if present).

    ``num_nodes`` applies per span; span 2's first node is omitted because
    it coincides with span 1's last node.
    """
    span_length1 = span1_end - span1_begin
    span_length2 = span2_end - span2_begin

    node_loc = [0.0]
    dx1 = span_length1 / (num_nodes - 1)
    x1 = 0.0
    for _ in range(num_nodes - 1):
        x1 = x1 + dx1
        node_loc.append(x1)

    if span_length2 > 0:
        dx2 = span_length2 / (num_nodes - 1)
        x2 = span_length1
        for _ in range(num_nodes - 1):
            x2 = x2 + dx2
            node_loc.append(x2)

    return node_loc
def manager(axle_spacing, axle_wt, span_length1, span_length2, num_nodes,
            space_to_trailing_load, distributed_load):
    """Time the moving-load analysis and print the enveloped results."""
    start = timeit.default_timer()

    V_max1, V_min1, M_max1, V_max2, V_min2, M_max2, Rmax_pier = \
        analyze_vehicle(axle_spacing, axle_wt, span_length1, span_length2, num_nodes,
                        space_to_trailing_load, distributed_load)

    stop = timeit.default_timer()
    analysis_time = stop - start

    output(V_max1, V_min1, M_max1, V_max2, V_min2, M_max2, Rmax_pier,
           analysis_time)
if __name__ == "__main__":
    #input
    # Axle spacings and weights of the moving load train.
    axle_spacing = [8.00, 5.00, 5.00, 5.00, 9.00, 5.00, 6.00, 5.00, 8.00, 8.00, 5.00, 5.00, 5.00, 9.00, 5.00, 6.00, 5.00]
    axle_wt = [40.00, 80.00, 80.00, 80.00, 80.00, 52.00, 52.00, 52.00, 52.00, 40.00, 80.00, 80.00, 80.00, 80.00, 52.00, 52.00, 52.00, 52.00]
    # Gap to, and intensity of, the distributed trailing load.
    space_to_trailing_load = 5.00
    distributed_load = 8.00
    # Alternate single-unit-load input kept for debugging:
    #axle_spacing = []
    #axle_wt = [1.0]
    span_length1 = 20.0
    span_length2 = 0.0  # 0.0 -> single-span analysis
    """
    num_nodes should always be odd to place a node at midspan and at
    each support
    a minimum of 3 nodes should be used for analysis
    """
    num_nodes = 5
    manager(axle_spacing, axle_wt, span_length1, span_length2, num_nodes,
            space_to_trailing_load, distributed_load)
| [
"mwhit74@gmail.com"
] | mwhit74@gmail.com |
7ce6c7f62f83a916f4de22bfa3028340b1b55c50 | ccf1a743a0e8bff0cce9c4424118f675ae9fdac8 | /Mundo 2 - Estruturas de controle/ex040-aquele classico da média.py | e90254678ba6274fc783a2918201df6c18a469b5 | [] | no_license | herbetyp/Exercicios_Python | 18138ed83473f0238381fbb2b5412b672cffe62b | d976a279b58124e3666d1b1cd57aa537c4b1b2bf | refs/heads/master | 2022-04-04T19:37:46.989484 | 2020-02-14T03:46:09 | 2020-02-14T03:46:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | nome = str(input('Digite o nome do aluno: ')).strip().lower()
nota1 = float(input('Digite a primera nota do aluno: '))
nota2 = float(input('Digite a segunda nota do aluno: '))
media = (nota1 + nota2) / 2
if media <= 4.9:
print('A média do aluno {} é de {}, e está abaixo de 5.0 aluno REPROVADO!'.format(nome, media))
elif media >= 7.0:
print('A média do aluno {} é de {}, o aluno está APROVADO! '.format(nome, media))
else:
print('A média do aluno {} é de {}, portanto o aluno está em RECUPERAÇÃO! '.format(nome, media))
| [
"herbetyp@gmail.com"
] | herbetyp@gmail.com |
074bb08ea06e02502d88908fb0e99f03eb1c5c83 | 3502f235e50f389fd95ecc6a0ec40866b238aa43 | /search_engine/migrations/0005_auto_20160523_0814.py | 1bcf65a17443e1adfba3904d74dc229268f4fd4e | [] | no_license | 2ncs/myapp | 4136733435e740d2771b6e64ad5de914aaa76967 | ff59ccd8e92095cd600eef0f77aa7454e0e8f327 | refs/heads/master | 2020-09-25T23:31:26.492762 | 2016-08-24T18:19:26 | 2016-08-24T18:19:26 | 66,489,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-23 05:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('search_engine', '0004_afrikanikh_asiatikh_brunch_burger_burger_amerikanikh_ellhnikh_paradosiakh_falafes_fish_fusion_gallik'),
]
operations = [
migrations.AlterField(
model_name='rests',
name='rate',
field=models.FloatField(blank=True, null=True),
),
]
| [
"greekou@gmail.com"
] | greekou@gmail.com |
c31a6958795f46fcd902a7ae3611a35a54b6f85c | c93a9acddefa367d73ca9d8c3100d7ee4b9c1dfe | /Agri/Agri/urls.py | aa38120ada8be34eac0d8302b25fb2a97f27b140 | [] | no_license | carlos-nyaga/Redefined-Farming | 0a4c532b843f803750ec954f288acab7446b8f4d | 4bc5eccfdbb16dbbf13bafb829969c364d6d3563 | refs/heads/master | 2020-05-17T06:49:35.632373 | 2019-04-26T08:20:39 | 2019-04-26T08:20:39 | 183,566,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | """Agri URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.conf.urls import include
from rest_framework.documentation import include_docs_urls
from rest_framework.schemas import get_schema_view
from api import urls as api_urls
urlpatterns = [
url(r'^', include(api_urls, namespace="api")),
url(r'^admin/', admin.site.urls),
]
| [
"crlsnyaga@gmail.com"
] | crlsnyaga@gmail.com |
f324d68fe448c50900e3408e7eb40fe7646e5e82 | bed4131fbe6700af18f7e1a169854c5d4660e36f | /src/ppmf_reid/models.py | 5dd5c24a526912a2f6c8274b64d126f62f39ba70 | [
"MIT",
"BSD-3-Clause"
] | permissive | aflaxman/ppmf_12.2_reid | c0a29ce1b342becc6fddcfe92985ad75e2c15087 | ea4b29c1e87e2390f82abc8efdc159b8764cb6ba | refs/heads/main | 2023-06-19T19:46:58.436089 | 2021-07-19T20:32:21 | 2021-07-19T20:32:21 | 368,644,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,346 | py | import numpy as np
import pandas as pd
# Race/ethnicity indicator columns shared across this module.
race_eth_cols = ['hispanic', 'racwht', 'racblk', 'racaian', 'racasn',
                 'racnhpi', 'racsor', 'racmulti']


def match_summary(df):
    """Summarize the race and ethnicity information in the dataframe.

    Parameters
    ----------
    df : pd.DataFrame including a ``pweight`` column and (optionally)
        columns from race_eth_cols

    Returns
    -------
    pd.Series with the total person weight ('n_match') and the
    weighted share of each race/ethnicity column present in ``df``.
    """
    total_weight = df.pweight.sum()
    summary = pd.Series(dtype=float)
    summary['n_match'] = total_weight
    for col in race_eth_cols:
        if col in df.columns:
            summary[col] = (df.pweight * df[col]).sum() / total_weight
    return summary
def link_records(df_sim_commercial, df_ppmf):
    """Merge per-stratum PPMF race/ethnicity summaries into the simulated
    commercial data.

    Parameters
    ----------
    df_sim_commercial : pd.DataFrame, including geographic columns, and voting_age column
    df_ppmf : pd.DataFrame, including geographic columns, voting_age column, and race/ethnicity columns
    """
    strata_cols = ['state', 'county', 'tract', 'block', 'voting_age']

    # Summarize each stratum once up front so the merge below is fast.
    df_ppmf_strata = df_ppmf.groupby(strata_cols).apply(match_summary)

    df_linked = df_sim_commercial.merge(df_ppmf_strata,
                                        left_on=strata_cols,
                                        right_index=True, how='left')

    # Strata with no PPMF counterpart get a zero match count.
    df_linked['n_match'] = df_linked['n_match'].fillna(0).astype(int)
    return df_linked
def simple_impute_records(df_sim_commercial):
    """Impute everyone in the simulated commercial data as non-Hispanic
    white (baseline with no PPMF linkage).

    Parameters
    ----------
    df_sim_commercial : pd.DataFrame, including geographic columns, and voting_age column
    """
    df_imputed = df_sim_commercial.copy()

    # Zero every race/ethnicity indicator, then mark white.
    for column in race_eth_cols:
        df_imputed[column] = 0
    df_imputed['racwht'] = 1

    # No PPMF records were matched for this baseline.
    df_imputed['n_match'] = 0
    return df_imputed
def load_and_link(state, state_fips, county_fips, n_chunks=1, chunk_i=0):
    """ load and simulate data and link it

    Parameters
    ----------
    state : str, e.g. 'WA'
    state_fips : int, e.g. 53 (note: redundant, but convenient)
    county_fips : int, e.g. 53
    n_chunks : int, to break up large counties
    chunk_i : int, < n_chunks

    Returns
    -------
    dict : the function's local namespace (via ``locals()``), including
    df_test and the df_linked dict, for downstream summarization.
    """
    import ppmf_reid.data

    # Synthetic population, restricted to this chunk of tracts.
    df_synth = ppmf_reid.data.read_synth_data(state, county_fips)
    df_synth = df_synth[df_synth.tract % n_chunks == chunk_i]

    df_sim_commercial = ppmf_reid.data.simulate_commercial_data(df_synth)
    df_test = ppmf_reid.data.generate_test_data(df_synth, df_sim_commercial)
    assert np.all(df_sim_commercial.index == df_test.index)

    # Released PPMF (epsilon 12.2), restricted to the same chunk.
    df_ppmf_12 = ppmf_reid.data.read_ppmf_data(state_fips, county_fips)
    df_ppmf_12 = df_ppmf_12[df_ppmf_12.tract % n_chunks == chunk_i]
    #df_ppmf_4 = ppmf_reid.data.read_ppmf_data_4(state_fips, county_fips)
    df_ppmf_inf = ppmf_reid.data.simulate_ppmf_epsilon_infinity(df_synth)

    # Simulated PPMF variants across a range of privacy-loss budgets.
    df_sim_ppmf = {}
    for eps in [.01, 1, 10]:
        df_sim_ppmf[f'sim_{eps:.02f}'] = ppmf_reid.data.simulate_ppmf_epsilon(df_synth, eps)

    df_ppmf = df_sim_ppmf.copy()
    df_ppmf.update({'12.2':df_ppmf_12,
                    'inf':df_ppmf_inf})

    # Link commercial data against each PPMF variant, plus a naive
    # non-Hispanic-white baseline.
    df_linked = {}
    if len(df_sim_commercial) > 0:
        df_linked['baseline'] = simple_impute_records(df_sim_commercial)
        for key in df_ppmf.keys():
            df_linked[key] = link_records(df_sim_commercial, df_ppmf[key])

    return locals()
def summarize_results(results):
    """Collapse one county's linkage results into a flat summary Series.

    Parameters
    ----------
    results : dict
        The ``locals()`` dict returned by ``load_and_link``; must contain
        'state', 'state_fips', 'county_fips', 'df_test', 'df_sim_commercial',
        and the 'df_linked' dict of linked frames keyed by epsilon label.

    Returns
    -------
    pd.Series
        Counts of unique matches and (correct) imputations per epsilon and
        per race/ethnicity attribute, plus overall totals.
    """
    summary = pd.Series(dtype='object')
    summary['state'] = results['state']
    summary['state_fips'] = results['state_fips']
    summary['county_fips'] = results['county_fips']
    df_test = results['df_test']
    for eps, df_linked in results['df_linked'].items():
        # n_match == 1 is treated as a unique match (presumably one PPMF
        # person with unit weight -- confirm pweight is always 1).
        summary[f'n_unique_match_eps_{eps}'] = (df_linked.n_match == 1).sum()
        # Rows whose race/eth prevalences are all whole numbers, i.e.
        # presumably the linked stratum was unanimous on every attribute.
        df_unique_impute = df_linked[np.all(df_linked[race_eth_cols]%1 == 0, axis=1)]
        summary[f'n_unique_impute_all_eps_{eps}'] = len(df_unique_impute)
        summary[f'n_correct_impute_all_eps_{eps}'] = np.all(df_unique_impute[race_eth_cols] == df_test.loc[df_unique_impute.index, race_eth_cols], axis=1).sum()
        # NOTE(review): this literal list duplicates race_eth_cols -- keep in sync.
        for col in ['hispanic', 'racwht', 'racblk', 'racaian', 'racasn', 'racnhpi', 'racsor', 'racmulti']:
            summary[f'n_unique_impute_attribute_{col}_eps_{eps}'] = (df_linked[col] == 1).sum()
            # Records whose linked stratum is unanimous (prevalence exactly 1)
            # on this single attribute.
            df_unique_impute = df_linked[(df_linked[col] == 1)]
            s_correct_impute = (df_unique_impute[col] == df_test.loc[df_unique_impute.index, col])
            summary[f'n_correct_impute_attribute_{col}_eps_{eps}'] = s_correct_impute.sum()
            # Same comparison restricted to uniquely matched records.
            df_unique_match = df_linked[(df_linked.n_match == 1)]
            s_correct_match = (df_unique_match[col] == df_test.loc[df_unique_match.index, col])
            summary[f'n_unique_match_correct_impute_attribute_{col}_eps_{eps}'] = s_correct_match.sum()
        # tally counts for non-majority re-id
        # NOTE(review): n_nonmajority does not depend on eps, yet it is reset
        # and recomputed identically on every pass of the outer loop.
        summary[f'n_nonmajority'] = 0
        summary[f'n_unique_impute_nonmajority_eps_{eps}'] = 0
        summary[f'n_correct_impute_nonmajority_eps_{eps}'] = 0
        for i, df_sim_commercial_i in results['df_sim_commercial'].groupby(['state', 'county', 'tract']):
            df_test_i = df_test.loc[df_sim_commercial_i.index]
            # NOTE(review): .sum() runs over every column of df_test_i; this
            # assumes the race/eth flags dominate the argmax below -- confirm
            # df_test has no other numeric columns (weights, ids) that could win.
            s_rac_eth_cnts = df_test_i.sum()
            majority_race_eth = s_rac_eth_cnts.sort_values(ascending=False).index[0]
            # Rows in this tract that do NOT carry the tract's majority flag.
            non_majority_rows = df_test_i[majority_race_eth] == 0
            summary[f'n_nonmajority'] += np.sum(non_majority_rows)
            # NOTE(review): the np.all(...) mask spans all of df_linked while
            # non_majority_rows is indexed by this tract only; verify the
            # intended pandas alignment of this `&` before relying on it.
            df_unique_impute = df_linked[np.all(df_linked[race_eth_cols]%1 == 0, axis=1)&non_majority_rows]
            summary[f'n_unique_impute_nonmajority_eps_{eps}'] += len(df_unique_impute)
            summary[f'n_correct_impute_nonmajority_eps_{eps}'
                   ] += np.all(df_unique_impute[race_eth_cols] == df_test_i.loc[df_unique_impute.index, race_eth_cols], axis=1).sum()
    # add total and race/eth alone or in combination counts for convenience
    for col in ['hispanic', 'racwht', 'racblk', 'racaian', 'racasn', 'racnhpi', 'racsor', 'racmulti']:
        summary[f'n_{col}'] = df_test[col].sum()
    summary['n_total'] = len(df_test)
    return summary
| [
"abie@uw.edu"
] | abie@uw.edu |
9166a5025b83503317fc99cf5620f56acadc063c | 35fb652b0b20e7352cacdc078e23464fad40ccf3 | /web/controllers/food/food.py | ed79027f6fa2230bee2cb9150725d18254385a43 | [] | no_license | xiaoheng14/flask_wx_order | 52f8fe01a473855c22a43c2651b102c291dbde04 | be3314fdb0266eecf4ca7f5a55b2ea24078857c9 | refs/heads/master | 2020-08-23T03:59:19.006943 | 2018-11-19T12:21:25 | 2018-11-19T12:21:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # _*_ coding: utf-8 _*_
"""
__author__ = 'lawtech'
__date__ = '2018/10/27 3:14 PM'
"""
from flask import Blueprint
from common.libs.helper import ops_render
route_food = Blueprint('food_page', __name__)
@route_food.route("/index")
def index():
return ops_render("food/index.html")
@route_food.route("/info")
def info():
return ops_render("food/info.html")
@route_food.route("/set")
def set():
return ops_render("food/set.html")
@route_food.route("/cat")
def cat():
return ops_render("food/cat.html")
@route_food.route("/cat-set")
def catSet():
return ops_render("food/cat_set.html")
| [
"584563542@qq.com"
] | 584563542@qq.com |
f6f84dcc3656ac3c623fa8ecd4bfcedf2259c2ef | a22cc323b29f50da397d8363ac2521e3542a0fd7 | /tests/dpaycli/test_steem.py | d04f4ac3d9988e261ae2bc7c834ff2c3642c4d91 | [
"MIT"
] | permissive | dpays/dpay-cli | 1a58c7dae45218e3b05b7e17ff5ce03e918d27b9 | dfa80898e1faea2cee92ebec6fe04873381bd40f | refs/heads/master | 2020-04-01T09:26:43.200933 | 2018-10-15T08:03:06 | 2018-10-15T08:03:06 | 153,075,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,444 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
from builtins import super
import mock
import string
import unittest
from parameterized import parameterized
import random
import json
from pprint import pprint
from dpaycli import DPay, exceptions
from dpaycli.amount import Amount
from dpaycli.memo import Memo
from dpaycli.version import version as dpaycli_version
from dpaycli.wallet import Wallet
from dpaycli.witness import Witness
from dpaycli.account import Account
from dpaycligraphenebase.account import PrivateKey
from dpaycli.instance import set_shared_dpay_instance
from dpaycli.nodelist import NodeList
# Py3 compatibility
import sys
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
class Testcases(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.nodelist = NodeList()
cls.nodelist.update_nodes(dpay_instance=DPay(node=cls.nodelist.get_nodes(normal=True, appbase=True), num_retries=10))
cls.bts = DPay(
node=cls.nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
cls.testnet = DPay(
node="https://testnet.dpaydev.com",
nobroadcast=True,
unsigned=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
cls.account = Account("test", full=True, dpay_instance=cls.bts)
cls.account_testnet = Account("test", full=True, dpay_instance=cls.testnet)
@parameterized.expand([
("normal"),
("testnet"),
])
def test_transfer(self, node_param):
if node_param == "normal":
bts = self.bts
acc = self.account
elif node_param == "testnet":
bts = self.testnet
acc = self.account_testnet
acc.dpay.txbuffer.clear()
tx = acc.transfer(
"test", 1.33, "BBD", memo="Foobar", account="test1")
self.assertEqual(
tx["operations"][0][0],
"transfer"
)
self.assertEqual(len(tx["operations"]), 1)
op = tx["operations"][0][1]
self.assertIn("memo", op)
self.assertEqual(op["memo"], "Foobar")
self.assertEqual(op["from"], "test1")
self.assertEqual(op["to"], "test")
amount = Amount(op["amount"], dpay_instance=bts)
self.assertEqual(float(amount), 1.33)
def test_create_account(self):
bts = DPay(node=self.nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
core_unit = "DWB"
name = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))
key1 = PrivateKey()
key2 = PrivateKey()
key3 = PrivateKey()
key4 = PrivateKey()
key5 = PrivateKey()
bts.txbuffer.clear()
tx = bts.create_account(
name,
creator="test", # 1.2.7
owner_key=format(key1.pubkey, core_unit),
active_key=format(key2.pubkey, core_unit),
posting_key=format(key3.pubkey, core_unit),
memo_key=format(key4.pubkey, core_unit),
additional_owner_keys=[format(key5.pubkey, core_unit)],
additional_active_keys=[format(key5.pubkey, core_unit)],
additional_posting_keys=[format(key5.pubkey, core_unit)],
additional_owner_accounts=["test1"], # 1.2.0
additional_active_accounts=["test1"],
storekeys=False,
)
self.assertEqual(
tx["operations"][0][0],
"account_create"
)
op = tx["operations"][0][1]
role = "active"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
role = "posting"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
role = "owner"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
self.assertEqual(
op["creator"],
"test")
def test_create_account_password(self):
bts = DPay(node=self.nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
core_unit = "DWB"
name = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))
key5 = PrivateKey()
bts.txbuffer.clear()
tx = bts.create_account(
name,
creator="test", # 1.2.7
password="abcdefg",
additional_owner_keys=[format(key5.pubkey, core_unit)],
additional_active_keys=[format(key5.pubkey, core_unit)],
additional_posting_keys=[format(key5.pubkey, core_unit)],
additional_owner_accounts=["test1"], # 1.2.0
additional_active_accounts=["test1"],
storekeys=False,
)
self.assertEqual(
tx["operations"][0][0],
"account_create"
)
op = tx["operations"][0][1]
role = "active"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
role = "owner"
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
format(key5.pubkey, core_unit),
[x[0] for x in op[role]["key_auths"]])
self.assertIn(
"test1",
[x[0] for x in op[role]["account_auths"]])
self.assertEqual(
op["creator"],
"test")
@parameterized.expand([
("normal"),
("testnet"),
])
def test_connect(self, node_param):
if node_param == "normal":
bts = self.bts
elif node_param == "testnet":
bts = self.testnet
bts.connect()
@parameterized.expand([
("normal"),
("testnet"),
])
def test_info(self, node_param):
if node_param == "normal":
bts = self.bts
elif node_param == "testnet":
bts = self.testnet
info = bts.info()
for key in ['current_witness',
'head_block_id',
'head_block_number',
'id',
'last_irreversible_block_num',
'current_witness',
'total_pow',
'time']:
self.assertTrue(key in info)
def test_finalizeOps(self):
bts = self.bts
acc = self.account
tx1 = bts.new_tx()
tx2 = bts.new_tx()
acc.transfer("test1", 1, "BEX", append_to=tx1)
acc.transfer("test1", 2, "BEX", append_to=tx2)
acc.transfer("test1", 3, "BEX", append_to=tx1)
tx1 = tx1.json()
tx2 = tx2.json()
ops1 = tx1["operations"]
ops2 = tx2["operations"]
self.assertEqual(len(ops1), 2)
self.assertEqual(len(ops2), 1)
@parameterized.expand([
("normal"),
("testnet"),
])
def test_weight_threshold(self, node_param):
if node_param == "normal":
bts = self.bts
pkey1 = 'DWB55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n'
pkey2 = 'DWB7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv'
elif node_param == "testnet":
bts = self.testnet
pkey1 = 'TST55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n'
pkey2 = 'TST7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv'
auth = {'account_auths': [['test', 1]],
'extensions': [],
'key_auths': [
[pkey1, 1],
[pkey2, 1]],
'weight_threshold': 3} # threshold fine
bts._test_weights_treshold(auth)
auth = {'account_auths': [['test', 1]],
'extensions': [],
'key_auths': [
[pkey1, 1],
[pkey2, 1]],
'weight_threshold': 4} # too high
with self.assertRaises(ValueError):
bts._test_weights_treshold(auth)
@parameterized.expand([
("normal"),
("testnet"),
])
def test_allow(self, node_param):
if node_param == "normal":
bts = self.bts
acc = self.account
prefix = "DWB"
wif = "DWB55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n"
elif node_param == "testnet":
bts = self.testnet
acc = self.account_testnet
prefix = "TST"
wif = "TST55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n"
self.assertIn(bts.prefix, prefix)
tx = acc.allow(
wif,
account="test",
weight=1,
threshold=1,
permission="owner",
)
self.assertEqual(
(tx["operations"][0][0]),
"account_update"
)
op = tx["operations"][0][1]
self.assertIn("owner", op)
self.assertIn(
[wif, '1'],
op["owner"]["key_auths"])
self.assertEqual(op["owner"]["weight_threshold"], 1)
def test_disallow(self):
acc = self.account
pkey1 = "DWB55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n"
pkey2 = "DWB6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"
if sys.version > '3':
_assertRaisesRegex = self.assertRaisesRegex
else:
_assertRaisesRegex = self.assertRaisesRegexp
with _assertRaisesRegex(ValueError, ".*Changes nothing.*"):
acc.disallow(
pkey1,
weight=1,
threshold=1,
permission="owner"
)
with _assertRaisesRegex(ValueError, ".*Changes nothing!.*"):
acc.disallow(
pkey2,
weight=1,
threshold=1,
permission="owner"
)
def test_update_memo_key(self):
acc = self.account
prefix = "DWB"
pkey = 'DWB55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n'
self.assertEqual(acc.dpay.prefix, prefix)
acc.dpay.txbuffer.clear()
tx = acc.update_memo_key(pkey)
self.assertEqual(
(tx["operations"][0][0]),
"account_update"
)
op = tx["operations"][0][1]
self.assertEqual(
op["memo_key"],
pkey)
@parameterized.expand([
("normal"),
("testnet"),
])
def test_approvewitness(self, node_param):
if node_param == "normal":
w = self.account
elif node_param == "testnet":
w = self.account_testnet
w.dpay.txbuffer.clear()
tx = w.approvewitness("test1")
self.assertEqual(
(tx["operations"][0][0]),
"account_witness_vote"
)
op = tx["operations"][0][1]
self.assertIn(
"test1",
op["witness"])
def test_post(self):
bts = self.bts
bts.txbuffer.clear()
tx = bts.post("title", "body", author="test", permlink=None, reply_identifier=None,
json_metadata=None, comment_options=None, community="test", tags=["a", "b", "c", "d", "e"],
beneficiaries=[{'account': 'test1', 'weight': 5000}, {'account': 'test2', 'weight': 5000}], self_vote=True)
self.assertEqual(
(tx["operations"][0][0]),
"comment"
)
op = tx["operations"][0][1]
self.assertEqual(op["body"], "body")
self.assertEqual(op["title"], "title")
self.assertEqual(op["permlink"], "title")
self.assertEqual(op["parent_author"], "")
self.assertEqual(op["parent_permlink"], "a")
json_metadata = json.loads(op["json_metadata"])
self.assertEqual(json_metadata["tags"], ["a", "b", "c", "d", "e"])
self.assertEqual(json_metadata["app"], "dpaycli/%s" % (dpaycli_version))
self.assertEqual(
(tx["operations"][1][0]),
"comment_options"
)
op = tx["operations"][1][1]
self.assertEqual(len(op['extensions'][0][1]['beneficiaries']), 2)
def test_comment_option(self):
bts = self.bts
bts.txbuffer.clear()
tx = bts.comment_options({}, "@gtg/witness-gtg-log", account="test")
self.assertEqual(
(tx["operations"][0][0]),
"comment_options"
)
op = tx["operations"][0][1]
self.assertIn(
"gtg",
op["author"])
self.assertEqual('1000000.000 BBD', op["max_accepted_payout"])
self.assertEqual(10000, op["percent_dpay_dollars"])
self.assertEqual(True, op["allow_votes"])
self.assertEqual(True, op["allow_curation_rewards"])
self.assertEqual("witness-gtg-log", op["permlink"])
def test_online(self):
bts = self.bts
self.assertFalse(bts.get_blockchain_version() == '0.0.0')
def test_offline(self):
bts = DPay(node=self.nodelist.get_nodes(),
offline=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif})
bts.refresh_data()
self.assertTrue(bts.get_reserve_ratio(use_stored_data=False) is None)
self.assertTrue(bts.get_reserve_ratio(use_stored_data=True) is None)
self.assertTrue(bts.get_feed_history(use_stored_data=False) is None)
self.assertTrue(bts.get_feed_history(use_stored_data=True) is None)
self.assertTrue(bts.get_reward_funds(use_stored_data=False) is None)
self.assertTrue(bts.get_reward_funds(use_stored_data=True) is None)
self.assertTrue(bts.get_current_median_history(use_stored_data=False) is None)
self.assertTrue(bts.get_current_median_history(use_stored_data=True) is None)
self.assertTrue(bts.get_hardfork_properties(use_stored_data=False) is None)
self.assertTrue(bts.get_hardfork_properties(use_stored_data=True) is None)
self.assertTrue(bts.get_network(use_stored_data=False) is None)
self.assertTrue(bts.get_network(use_stored_data=True) is None)
self.assertTrue(bts.get_witness_schedule(use_stored_data=False) is None)
self.assertTrue(bts.get_witness_schedule(use_stored_data=True) is None)
self.assertTrue(bts.get_config(use_stored_data=False) is None)
self.assertTrue(bts.get_config(use_stored_data=True) is None)
self.assertEqual(bts.get_block_interval(), 3)
self.assertEqual(bts.get_blockchain_version(), '0.0.0')
@parameterized.expand([
("normal"),
("testnet"),
])
def test_properties(self, node_param):
if node_param == "normal":
bts = DPay(node=self.nodelist.get_nodes(),
nobroadcast=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
elif node_param == "testnet":
bts = DPay(node="https://testnet.dpaydev.com",
nobroadcast=True,
data_refresh_time_seconds=900,
keys={"active": wif, "owner": wif, "memo": wif},
num_retries=10)
self.assertTrue(bts.get_reserve_ratio(use_stored_data=False) is not None)
self.assertTrue(bts.get_feed_history(use_stored_data=False) is not None)
self.assertTrue(bts.get_reward_funds(use_stored_data=False) is not None)
self.assertTrue(bts.get_current_median_history(use_stored_data=False) is not None)
self.assertTrue(bts.get_hardfork_properties(use_stored_data=False) is not None)
self.assertTrue(bts.get_network(use_stored_data=False) is not None)
self.assertTrue(bts.get_witness_schedule(use_stored_data=False) is not None)
self.assertTrue(bts.get_config(use_stored_data=False) is not None)
self.assertTrue(bts.get_block_interval() is not None)
self.assertTrue(bts.get_blockchain_version() is not None)
def test_bp_to_rshares(self):
stm = self.bts
rshares = stm.bp_to_rshares(stm.vests_to_sp(1e6))
self.assertTrue(abs(rshares - 20000000000.0) < 2)
def test_rshares_to_vests(self):
stm = self.bts
rshares = stm.bp_to_rshares(stm.vests_to_sp(1e6))
rshares2 = stm.vests_to_rshares(1e6)
self.assertTrue(abs(rshares - rshares2) < 2)
def test_bp_to_bbd(self):
stm = self.bts
bp = 500
ret = stm.bp_to_bbd(bp)
self.assertTrue(ret is not None)
def test_bbd_to_rshares(self):
stm = self.bts
test_values = [1, 10, 100, 1e3, 1e4, 1e5, 1e6, 1e7]
for v in test_values:
try:
bbd = round(stm.rshares_to_bbd(stm.bbd_to_rshares(v)), 5)
except ValueError: # Reward pool smaller than 1e7 BBD (e.g. caused by a very low BEX price)
continue
self.assertEqual(bbd, v)
def test_rshares_to_vote_pct(self):
stm = self.bts
bp = 1000
voting_power = 9000
for vote_pct in range(500, 10000, 500):
rshares = stm.bp_to_rshares(bp, voting_power=voting_power, vote_pct=vote_pct)
vote_pct_ret = stm.rshares_to_vote_pct(rshares, dpay_power=bp, voting_power=voting_power)
self.assertEqual(vote_pct_ret, vote_pct)
def test_sign(self):
bts = self.bts
with self.assertRaises(
exceptions.MissingKeyError
):
bts.sign()
def test_broadcast(self):
bts = self.bts
bts.txbuffer.clear()
tx = bts.comment_options({}, "@gtg/witness-gtg-log", account="test")
# tx = bts.sign()
with self.assertRaises(
exceptions.MissingKeyError
):
bts.broadcast(tx=tx)
| [
"jaredricelegal@gmail.com"
] | jaredricelegal@gmail.com |
a80b08369aebe3a6b598b0881a03eee69adcdeb1 | f9478c3969996208783a26c3744092a43a53206e | /catkin_ws/build/test_mavros/catkin_generated/pkg.installspace.context.pc.py | eb0ddca05c17fcb2a0f7a2af11198d6cc7030959 | [] | no_license | cangozpinar/Drone_Project | 4485ac8b47f16d8f3d7d37ab39075e06dbba4498 | bdd61ac0aea69b1a6a67933a8e3352aee32a5ec0 | refs/heads/main | 2023-08-19T21:02:29.840882 | 2021-10-26T22:15:02 | 2021-10-26T22:22:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/usr/include".split(';') if "/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "control_toolbox;eigen_conversions;geometry_msgs;mavros;mavros_extras;roscpp;std_msgs;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "/usr/lib/x86_64-linux-gnu/libboost_system.so".split(';') if "/usr/lib/x86_64-linux-gnu/libboost_system.so" != "" else []
PROJECT_NAME = "test_mavros"
PROJECT_SPACE_DIR = "/home/can/catkin_ws/install"
PROJECT_VERSION = "1.9.0"
| [
"can@can-ubuntu-18.04"
] | can@can-ubuntu-18.04 |
3a163307f6e168f0d4592d769b3438132dc8a406 | 8b23e24dd0179f6b4d8920797f003d74169c6520 | /file1.py | fe93ccee8cce51cc3e82115ea67485541f21e13a | [] | no_license | alxthmsCO/lab4 | 62a2d82fd534e71f3be8fc2b9e11bfd36609c211 | 73ede6b0f4186a22e772bc903461f016509b821f | refs/heads/main | 2023-08-10T01:26:13.072683 | 2021-09-17T18:58:22 | 2021-09-17T18:58:22 | 407,381,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | import os
import sys
file = open(sys.argv[1], 'r')
Lines = file.readlines() | [
"90875291+alxthmsCO@users.noreply.github.com"
] | 90875291+alxthmsCO@users.noreply.github.com |
52fa7f6ab35d271fd30dbc1f96ddcee4a2df32b5 | e74c2e5b85b9af58a6f9b4b6eea160fb66f6bb08 | /aula11.py | a14bb8b989d099d4f7350a32cb0c4b75eb76c49b | [] | no_license | Nokutomi/AulaPython | 670cc27986aa3a12e528f5d1602929a524b632fc | 1e97e4821b12a0ad0a4438d682c1e4d61a10f61d | refs/heads/master | 2022-11-15T08:38:47.401055 | 2020-07-08T02:49:54 | 2020-07-08T02:49:54 | 275,640,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py |
lista = [1,10]
arquivo = open('teste.txt', 'r')
try:
texto = arquivo.read()
divisao = 10 / 0
# numero = lista[3]
# x = a
# print('Fechando arquivo')
# arquivo.close()
except ZeroDivisionError:
print('Nao e possivel realizar uma divisao por zero')
except ArithmeticError:
print('Houve um erro ao realizar uma operacao aritmetica')
except IndexError:
print('Erro ao acessar um indice invalido da lista')
except Exception as ex:
print('Erro desconhecido. Erro: {}'.format(ex))
else:
print('Executa quando nao ocorre excecao')
finally:
print('Sempre executa')
print('Fechando arquivo')
arquivo.close()
| [
"you@example.com"
] | you@example.com |
0cab5c5f3ee22758a17fae55680be1ded65e3ed6 | 00596c5f4e9547677046aea34dc4b55c4bb0b340 | /ref_model.py | e52696f61ccae311f55f836d1c04734b4b70d76d | [] | no_license | Xnsam/chabot_sequence_2_sequence | 6f6e41753e2341d8ed883064fec4675d3ede63cb | ab1d8f9d8aae03628458f4dec2540cb2c36908ba | refs/heads/master | 2020-05-17T00:26:55.983637 | 2019-04-25T09:18:09 | 2019-04-25T09:18:09 | 183,396,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,711 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn.translate import data_utils
class Seq2SeqModel(object):
"""Sequence-to-sequence model with attention and for multiple buckets.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. This is the same as the model described in
this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
or into the seq2seq library for complete model implementation.
This class also allows to use GRU cells in addition to LSTM cells, and
sampled softmax to handle large output vocabulary size. A single-layer
version of this model, but with bi-directional encoder, was presented in
http://arxiv.org/abs/1409.0473
and sampled softmax is described in Section 3 of the following paper.
http://arxiv.org/abs/1412.2007
"""
def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
num_layers, max_gradient_norm, batch_size, learning_rate,
learning_rate_decay_factor, use_lstm=False,
num_samples=512, forward_only=False):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary size.
if num_samples > 0 and num_samples < self.target_vocab_size:
w = tf.get_variable("proj_w", [size, self.target_vocab_size])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [self.target_vocab_size])
output_projection = (w, b)
def sampled_loss(inputs, labels):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
self.target_vocab_size)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = tf.nn.rnn_cell.GRUCell(size)
if use_lstm:
single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
cell = single_cell
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return tf.nn.seq2seq.embedding_attention_seq2seq(
encoder_inputs, decoder_inputs, cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode)
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(i)))
self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
name="weight{0}".format(i)))
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in xrange(len(self.decoder_inputs) - 1)]
# Training outputs and losses.
if forward_only:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
# If we use output projection, we need to project outputs for decoding.
if output_projection is not None:
for b in xrange(len(buckets)):
self.outputs[b] = [
tf.matmul(output, output_projection[0]) + output_projection[1]
for output in self.outputs[b]
]
else:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(gradients,
max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
zip(clipped_gradients, params), global_step=self.global_step))
self.saver = tf.train.Saver(tf.all_variables())
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do backward),
average perplexity, and the outputs.
Raises:
ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified bucket_id.
"""
# Check if the sizes match.
encoder_size, decoder_size = self.buckets[bucket_id]
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(target_weights) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(target_weights), decoder_size))
# Input feed: encoder inputs, decoder inputs, target_weights, as provided.
input_feed = {}
for l in xrange(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in xrange(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
# Since our targets are decoder inputs shifted by one, we need one more.
last_target = self.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [self.updates[bucket_id], # Update Op that does SGD.
self.gradient_norms[bucket_id], # Gradient norm.
self.losses[bucket_id]] # Loss for this batch.
else:
output_feed = [self.losses[bucket_id]] # Loss for this batch.
for l in xrange(decoder_size): # Output logits.
output_feed.append(self.outputs[bucket_id][l])
outputs = session.run(output_feed, input_feed)
if not forward_only:
return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.
else:
return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.
  def get_batch(self, data, bucket_id):
    """Get a random batch of data from the specified bucket, prepare for step.
    To feed data in step(..) it must be a list of batch-major vectors, while
    data here contains single length-major cases. So the main logic of this
    function is to re-index data cases to be in the proper format for feeding.
    Args:
      data: a tuple of size len(self.buckets) in which each element contains
        lists of pairs of input and output data that we use to create a batch.
      bucket_id: integer, which bucket to get the batch for.
    Returns:
      The triple (encoder_inputs, decoder_inputs, target_weights) for
      the constructed batch that has the proper format to call step(...) later.
    """
    encoder_size, decoder_size = self.buckets[bucket_id]
    encoder_inputs, decoder_inputs = [], []
    # Get a random batch of encoder and decoder inputs from data,
    # pad them if needed, reverse encoder inputs and add GO to decoder.
    for _ in xrange(self.batch_size):
      encoder_input, decoder_input = random.choice(data[bucket_id])
      # Encoder inputs are padded and then reversed (reversing the source
      # sequence is the seq2seq trick that shortens early dependencies).
      encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))
      encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
      # Decoder inputs get an extra "GO" symbol, and are padded then.
      decoder_pad_size = decoder_size - len(decoder_input) - 1
      decoder_inputs.append([data_utils.GO_ID] + decoder_input +
                            [data_utils.PAD_ID] * decoder_pad_size)
    # Now we create batch-major vectors from the data selected above:
    # one int32 vector of length batch_size per time step.
    batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
    # Batch encoder inputs are just re-indexed encoder_inputs.
    for length_idx in xrange(encoder_size):
      batch_encoder_inputs.append(
          np.array([encoder_inputs[batch_idx][length_idx]
                    for batch_idx in xrange(self.batch_size)], dtype=np.int32))
    # Batch decoder inputs are re-indexed decoder_inputs, we create weights.
    for length_idx in xrange(decoder_size):
      batch_decoder_inputs.append(
          np.array([decoder_inputs[batch_idx][length_idx]
                    for batch_idx in xrange(self.batch_size)], dtype=np.int32))
      # Create target_weights to be 0 for targets that are padding.
      batch_weight = np.ones(self.batch_size, dtype=np.float32)
      for batch_idx in xrange(self.batch_size):
        # We set weight to 0 if the corresponding target is a PAD symbol.
        # The corresponding target is decoder_input shifted by 1 forward.
        if length_idx < decoder_size - 1:
          target = decoder_inputs[batch_idx][length_idx + 1]
        # The `or` short-circuits at the final time step, so `target` is only
        # read when the branch above has assigned it.
        if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:
          batch_weight[batch_idx] = 0.0
      batch_weights.append(batch_weight)
    return batch_encoder_inputs, batch_decoder_inputs, batch_weights
"abcd@localhost.localdomain"
] | abcd@localhost.localdomain |
a02a2341ab021509e596e6ab801c9b00af24f937 | 988385035443e5d46d29d96b15179509fd1c782e | /addToArrayForm.py | ea09a01733d9a2d3d3b61c25a1837f7b7368545e | [] | no_license | mwoitek/leetcode-python3 | c120ee1b1eb8e17f3a301026f25c643be9852953 | eb9989d3768eba82275a57243c99796e74ccdd48 | refs/heads/master | 2022-12-28T21:19:51.215210 | 2020-10-18T06:17:27 | 2020-10-18T06:17:27 | 301,295,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | class Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
A_str = "".join([str(num) for num in A])
A_int = int(A_str)
ans = A_int + K
ans_list = [int(char) for char in str(ans)]
return ans_list
| [
"woitek@usp.br"
] | woitek@usp.br |
c14cca36fd70f17c4adf7cf1050a549b485a5112 | dd44e145ac547209f5f209bc9b1f09189bb8b5c7 | /Python-OOP-July2021/04.Classes_and_objects-E/05.To-do-list/project/section.py | 391f64c88e1e7f0db3acc9df9b8d20c2de06a156 | [] | no_license | todorovventsi/Software-Engineering | e3c1be8f0f72c85619518bb914d2a4dbaac270f8 | 64ffa6c80b190e7c6f340aaf219986f769f175ab | refs/heads/master | 2023-07-09T05:35:14.522958 | 2021-08-15T14:35:55 | 2021-08-15T14:35:55 | 336,056,643 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | class Section:
def __init__(self, name):
self.name = name
self.tasks = []
def add_task(self, new_task):
if new_task not in self.tasks:
self.tasks.append(new_task)
return f"Task {new_task.details()} is added to the section"
return f"Task is already in the section {self.name}"
def complete_task(self, task_name):
for task in self.tasks:
if task.name == task_name:
task.completed = True
return f"Completed task {task.name}"
return f"Could not find task with the name {task_name}"
def clean_section(self):
completed_tasks = 0
for task in self.tasks:
if task.completed:
completed_tasks += 1
self.tasks.remove(task)
return f"Cleared {completed_tasks} tasks."
def view_section(self):
first_row = f"Section {self.name}:\n"
next_rows = [f"{task.details()}\n" for task in self.tasks]
return f"{first_row}{''.join(next_rows)}"
| [
"todorov.ventsi@gmail.com"
] | todorov.ventsi@gmail.com |
592c999b313d564667eecf2bc25a16f9e9a44646 | 01c0426fdcd495fdb73fc708326de83a25ae9056 | /blog/views.py | c6628ff5d71f3c6583b3673a72ee43b3f6f82683 | [] | no_license | zlenyk/django_blog | 68a3db160d8ade5bffbd6e7f04b7e366b75ee3c8 | 1405b8d7857e71736b271e47fa323c1df8ad3f4f | refs/heads/master | 2021-01-21T12:53:34.325590 | 2016-04-25T11:59:33 | 2016-04-25T11:59:33 | 55,899,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | from django.shortcuts import render
from blog.models import Post
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
import users
def index(request):
    """Render the public feed: all non-classified posts, newest first."""
    all_posts = Post.objects.all().filter(classified=False).order_by('-publish_date')
    context = {'post_list': all_posts}
    return render(request,'blog/index.html',context)
@login_required
def classified(request):
    """Render the classified-only feed, newest first.

    @login_required redirects anonymous users to the login page, so this view
    never runs without an authenticated user.
    """
    all_posts = Post.objects.all().filter(classified=True).order_by('-publish_date')
    context = {'post_list': all_posts}
    return render(request,'blog/index.html',context)
def post(request,slug,id):
    """Render a single post plus a sidebar of posts with matching visibility.

    `slug` only makes the URL readable; the lookup is by primary key `id`.
    Classified posts are shown to authenticated users only; anonymous
    visitors get the login view instead.
    """
    post = get_object_or_404(Post,pk=id)
    all_posts = []
    if post.classified == True:
        if request.user.is_authenticated():
            # NOTE(review): debug print left in — consider removing or logging.
            print(request.user)
            all_posts = Post.objects.all().filter(classified=True).order_by('-publish_date')
        else:
            # Anonymous users may not view classified posts: show login instead.
            return users.views.login(request)
    else:
        all_posts = Post.objects.all().filter(classified=False).order_by('-publish_date')
    context = {'post': post,'post_list': all_posts}
    return render(request,'blog/post.html',context)
| [
"zygmuntlenyk@gmail.com"
] | zygmuntlenyk@gmail.com |
81237ecb5e03f5de33f8093fc6668ab42f73405b | bbc8dc59cd70af6f6a3116b1d58685e5fd553024 | /main_debug.py | ee94c2c0265867b79cd5a9d13ca0afed33b9cdec | [
"MIT"
] | permissive | m3h0w/transparent_blobs_detection | c58a9d168f677cc883c1a051c846ad7cf544557f | 5fe3ca6d9cba1fab3588e29751d423086dfc1ac0 | refs/heads/master | 2021-01-24T18:25:43.289178 | 2017-05-22T20:57:29 | 2017-05-22T20:57:29 | 84,433,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,870 | py | import cv2
import numpy as np
import trackbar as tb
import auxcv as aux
from matplotlib.pyplot import imshow, scatter, show, savefig
def shrink_rect(rect, scale = 0.8):
    """Return a cv2-style rotated rect with both sides multiplied by scale.

    `rect` is (center, (width, height), angle) as produced by
    cv2.minAreaRect; center and angle are preserved.
    """
    center, (width, height), angle = rect
    return center, (width * scale, height * scale), angle
def clahe(img, clip_limit = 2.0):
    """Apply CLAHE (contrast-limited adaptive histogram equalization) to img."""
    # Renamed the local so it no longer shadows this function's own name.
    equalizer = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(5, 5))
    return equalizer.apply(img)
def get_sobel(img, size=3):
    """Return the absolute second-order x Sobel derivative of img as uint8.

    Bug fix: the original passed `size` as the fifth *positional* argument of
    cv2.Sobel, which is `dst` (the output buffer), not the aperture size; the
    kernel size must be given via the `ksize` keyword. The default is changed
    from -1 to 3 because ksize=-1 selects the Scharr kernel, which only
    supports first derivatives (dx + dy == 1), while this call uses dx=2.
    """
    sobelx64f = cv2.Sobel(img, cv2.CV_64F, 2, 0, ksize=size)
    abs_sobel64f = np.absolute(sobelx64f)
    return np.uint8(abs_sobel64f)
# --- Load and downscale the input image --------------------------------------
img = cv2.imread("blobs4.jpg")
imgc = img.copy()  # full-resolution copy kept for drawing the final boxes
resize_times = 5
img = cv2.resize(img, None, img, fx = 1 / resize_times, fy = 1 / resize_times)
cv2.imshow("blobs", img)
cv2.waitKey(0)
# --- Contrast normalisation: CLAHE clip limit derived from edge density ------
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sobel = get_sobel(img)
print("sobel_sum: ", np.sum(sobel)/(img.shape[0] * img.shape[1]))
# Empirical linear fit: edge-dense images get a lower CLAHE clip limit.
clip_limit = (-2.556) * np.sum(sobel)/(img.shape[0] * img.shape[1]) + 26.557
print("clip_limit: ", clip_limit)
cv2.waitKey(0)
# Clamp the clip limit into a usable range.
if(clip_limit < 1.0):
    clip_limit = 0.1
if(clip_limit > 10.0):
    clip_limit = 10
img = clahe(img, clip_limit)
img = aux.unsharp_mask(img)
#canny = tb.CannyTrackbar(img, "Canny")
# --- First edge pass: blur then Canny ----------------------------------------
img_blurred = (cv2.GaussianBlur(img.copy(), (2*2+1,2*2+1), 0))
canny = cv2.Canny(img_blurred, 35, 95)
#img_blurred = cv2.GaussianBlur(img.copy(), (4*2+1,4*2+1), 0)
#canny = cv2.Canny(img_blurred, 11, 33)
# CONTOURS
# NOTE(review): the 3-value unpacking of findContours matches OpenCV 3.x;
# OpenCV 4.x returns only (contours, hierarchy).
_, cnts, _ = cv2.findContours(canny.copy(), cv2.RETR_LIST,
                              cv2.CHAIN_APPROX_SIMPLE)
canvas = np.ones(img.shape, np.uint8)
#cv2.drawContours(canvas, cnts, -1, (255, 255, 255), 1)
# Keep only contours of plausible length and aspect ratio; draw them on the
# canvas for the second pass.
for c in cnts:
    l = cv2.arcLength(c, False)
    x,y,w,h = cv2.boundingRect(c)
    aspect_ratio = float(w)/h
    #print(l)
    #print(aspect_ratio)
    # if l > 500:
    #     cv2.drawContours(canvas, [c], -1, (0, 0, 255), 2)
    #     print("here: " + str(l))
    # if l < 20:
    #     cv2.drawContours(canvas, [c], -1, (0, 0, 255), 2)
    #     print("here: " + str(l))
    # if aspect_ratio < 0.2:
    #     cv2.drawContours(canvas, [c], -1, (255, 0, 0), 2)
    # if aspect_ratio > 5:
    #     cv2.drawContours(canvas, [c], -1, (255, 0, 0), 2)
    # if l > 150 and (aspect_ratio > 10 or aspect_ratio < 0.1):
    #     cv2.drawContours(canvas, [c], -1, (255, 255, 255), 2)
    if l > 500:
        continue
    if l < 20:
        continue
    if aspect_ratio < 0.2:
        continue
    if aspect_ratio > 5:
        continue
    if l > 150 and (aspect_ratio > 10 or aspect_ratio < 0.1):
        continue
    cv2.drawContours(canvas, [c], -1, (255, 255, 255), 2)
cv2.imshow("cnts", canvas)
cv2.waitKey(0)
cv2.imshow("contours1", canvas)
cv2.waitKey(0)
# --- Morphological clean-up, smoothing and re-thresholding -------------------
canvas = aux.close_image(canvas, (7,7))
img_blurred = cv2.GaussianBlur(canvas, (8*2+1,8*2+1), 0)
img_blurred = aux.smoother_edges(img_blurred, (9,9))
kernel = np.ones((3,3), np.uint8)
dilated = cv2.erode(img_blurred, kernel)
_, im_th = cv2.threshold(dilated, 50, 255, cv2.THRESH_BINARY)
cv2.imshow("contours1", im_th)
cv2.waitKey(0)
# --- Second edge pass on the cleaned mask ------------------------------------
canny = cv2.Canny(im_th, 11, 33)
#canny = tb.CannyTrackbar(canvas, "canny")
cv2.imshow("canny", canny)
cv2.waitKey(0)
_, cnts, _ = cv2.findContours(canny.copy(), cv2.RETR_EXTERNAL,
                              cv2.CHAIN_APPROX_SIMPLE)
# Collect the min-area rect of every outer contour and its mean area.
sum_area = 0
rect_list = []
for i,c in enumerate(cnts):
    rect = cv2.minAreaRect(c)
    _, (width, height), _ = rect
    area = width*height
    sum_area += area
    rect_list.append(rect)
mean_area = sum_area / len(cnts)
#print(mean_area)
# Draw shrunken boxes for rects at least 60% of the mean area, scaled back up
# to the full-resolution copy.
for rect in rect_list:
    _, (width, height), _ = rect
    box = cv2.boxPoints(rect)
    box = np.int0(box * 5)
    area = width * height
    if(area > mean_area*0.6):
        rect = shrink_rect(rect, 0.8)
        box = cv2.boxPoints(rect)
        box = np.int0(box * resize_times)
        cv2.drawContours(imgc, [box], 0, (0,255,0),1)
imgc = cv2.resize(imgc, None, imgc, fx = 0.5, fy = 0.5)
cv2.imshow("imgc", imgc)
cv2.waitKey(0)
# counter = 0
# # loop over the contours
# for c in cnts:
#     # compute the center of the contour
#     print(counter)
#     M = cv2.moments(c)
#     if M["m00"] != 0:
#         cX = int(M["m10"] / M["m00"])
#         cY = int(M["m01"] / M["m00"])
#         #draw the contour and center of the shape on the image
#         area = cv2.arcLength(c,True)
#         if area > 50:
#             #cv2.drawContours(img, [c], -1, (255, 255, 255), 1)
#             x,y,w,h = cv2.boundingRect(c)
#             aspect_ratio = float(w)/h
#             if aspect_ratio < 1.4 and aspect_ratio > 0.6:
#                 cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
#             #rect = cv2.minAreaRect(c)
#             #box = cv2.boxPoints(rect)
#             #box = np.int0(box)
#             #cv2.drawContours(img,[box],0,(0,0,255),2)
#     counter = counter + 1
#     #cv2.waitKey(0)
# #cv2.circle(img, (cX, cY), 3, (255, 255, 255), -1)
cv2.imwrite("result1.png", imgc)
"mga@smart-in.eu"
] | mga@smart-in.eu |
7a9bc7fc3c9c4c62b7a9d9b9fa4065a832025ac5 | 53a83821b3631313732abddcce8eb3646facfeb9 | /code/part2/generate_stopwords.py | 8d7ba4483b42b2d510b891144c19baedeb6c412a | [] | no_license | jschear/cs1951a-final | 6add03b39f1057d04b7bbacf706dee0c864798c5 | ada6c21690826173d088b83ea948ac213a1a4a51 | refs/heads/master | 2020-05-16T11:26:31.205023 | 2014-05-10T02:58:54 | 2014-05-10T02:58:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | from __future__ import division
from tokenizer import Tokenizer
import sys
from collections import defaultdict
import json
import pdb
def generate_stopwords(review_file, business_file, outfile, stopwords_file):
tokenizer = Tokenizer(stopwords_file)
occurances = defaultdict(int)
with open(review_file) as review_file:
for i, review in enumerate(review_file):
data = json.loads(review)
line = tokenizer(data['text'])
for token in line:
occurances[token] += 1
if i %1000 == 0:
print i
sorted_tokens = sorted(occurances.items(), key = lambda item: item[1])
def shouldIgnore(item):
word, count = item
if count <= 3:
return True
if count > 50000:
return True
return False
output = set(word for word, count in filter(shouldIgnore, sorted_tokens))
with open(outfile, "w") as towrite:
towrite.write("\n".join(output))
if __name__ == "__main__":
    # CLI usage: generate_stopwords.py REVIEW_FILE BUSINESS_FILE OUTFILE STOPWORDS_FILE
    review_file, business_file, outfile,stopwords_file = sys.argv[1:]
    generate_stopwords(review_file, business_file, outfile,stopwords_file)
| [
"elijah@Elijahs-MacBook-Air.local"
] | elijah@Elijahs-MacBook-Air.local |
2d84c157b9846a8aec28e139fd2ca2fe4863d293 | ed2d4c6dbdec9bb501b515e25b620917c6d0704e | /controllers/joystick/catkin_ws/devel/lib/python2.7/dist-packages/nodes/srv/_emitter_get_channel.py | ef884bae667b3ff78ca0afc5d285dbb85f25e3b3 | [] | no_license | pvarthol/aristeia1 | c069db900c9ce7d0bbcddfd284a74c2b67447354 | 2da2796673d801c01e5ddeeaccd1f1dcbab2e535 | refs/heads/master | 2020-05-21T11:42:29.247546 | 2015-07-13T16:59:20 | 2015-07-13T16:59:20 | 39,025,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,633 | py | """autogenerated by genpy from nodes/emitter_get_channelRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class emitter_get_channelRequest(genpy.Message):
  # Autogenerated by genpy from the emitter_get_channel.srv request
  # definition. Do not hand-edit the logic; regenerate from the .srv file.
  _md5sum = "f9df5232b65af94f73f79fe6d84301bb"
  _type = "nodes/emitter_get_channelRequest"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """uint8 ask

"""
  __slots__ = ['ask']
  _slot_types = ['uint8']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       ask

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(emitter_get_channelRequest, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.ask is None:
        self.ask = 0
    else:
      self.ask = 0

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    # NOTE(review): genpy template quirk — `_x` is undefined in this scope,
    # so the error-reporting handlers below would themselves raise NameError.
    try:
      buff.write(_struct_B.pack(self.ask))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 1
      (self.ask,) = _struct_B.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_struct_B.pack(self.ask))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 1
      (self.ask,) = _struct_B.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
"""autogenerated by genpy from nodes/emitter_get_channelResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class emitter_get_channelResponse(genpy.Message):
  # Autogenerated by genpy from the emitter_get_channel.srv response
  # definition. Do not hand-edit the logic; regenerate from the .srv file.
  _md5sum = "eb7a2b2526f375d6adbe345262403220"
  _type = "nodes/emitter_get_channelResponse"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """int32 channel

"""
  __slots__ = ['channel']
  _slot_types = ['int32']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       channel

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(emitter_get_channelResponse, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.channel is None:
        self.channel = 0
    else:
      self.channel = 0

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    # NOTE(review): genpy template quirk — `_x` is undefined in this scope,
    # so the error-reporting handlers below would themselves raise NameError.
    try:
      buff.write(_struct_i.pack(self.channel))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 4
      (self.channel,) = _struct_i.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_struct_i.pack(self.channel))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 4
      (self.channel,) = _struct_i.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_i = struct.Struct("<i")
class emitter_get_channel(object):
  # Service metadata container: binds the request/response classes and the
  # service md5 for rospy's Service/ServiceProxy machinery.
  _type = 'nodes/emitter_get_channel'
  _md5sum = '4fe6244a49819e84f825eb5d3ed9cd3b'
  _request_class  = emitter_get_channelRequest
  _response_class = emitter_get_channelResponse
| [
"pvarthol@gmail.com"
] | pvarthol@gmail.com |
da3c6e83305d27014371f3d5c058202ceed0e107 | 68b7cb2dc29f7fe9d9b01a7b045d6e103cc7ae0b | /scripts/f024.py | bcd42ddd8dad04c23516945a0729c160ab96a4e8 | [
"MIT"
] | permissive | Eve-ning/aleph0 | 1507426e75c1c3ff61ae124e0b3b9809e0adf322 | 0a893f81328420ba4661cb920d3f9c39b3506d96 | refs/heads/master | 2022-12-04T02:18:36.907890 | 2020-07-29T06:26:04 | 2020-07-29T06:26:04 | 283,411,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | import numpy as np
from aleph.consts import *
from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent
from reamber.osu.OsuMap import OsuMap
def f024(m: OsuMap):
    """Append measure-line SV/BPM effects for the 8857–11017 ms section of map m.

    Three event groups are generated: a fade-in set of parallel diagonals
    (8857–9337 ms) and two mirrored 50-line sweeps (9337–11017 ms). The
    resulting SVs and BPMs are appended to the map in place.
    """
    events = [*[SvOsuMeasureLineEvent(
        firstOffset=i, lastOffset=9337,
        startX=1, endX=0,
        startY=-1, endY=0,
        funcs=[
            # j=j binds the current j; otherwise every lambda would see the
            # final value of the comprehension variable.
            lambda x, j=j: -x + j for j in np.linspace(0, 1, 15)
        ]) for i in np.linspace(8857, 9337, 8)],
        *[SvOsuMeasureLineEvent(
            firstOffset=9337, lastOffset=11017,
            startX=i, endX=i + 1,
            startY=-1, endY=1,
            funcs=[
                lambda x: -x + 2
            ]) for i in np.linspace(0, 1, 50)],
        *[SvOsuMeasureLineEvent(
            firstOffset=9337, lastOffset=11017,
            startX=i, endX=i + 1,
            startY=-1, endY=1,
            funcs=[
                lambda x: x - 2
            ]) for i in np.linspace(0, 1, 50)],
    ]
    f = svOsuMeasureLineMD(events,
                           scalingFactor=SCALE,
                           firstOffset=8857,
                           lastOffset=11017,
                           paddingSize=PADDING,
                           endBpm=250)
    # f is (svs, bpms); extend the map in place.
    m.svs.extend(f[0])
    m.bpms.extend(f[1])
| [
"johnchangqi@hotmail.com"
] | johnchangqi@hotmail.com |
28ce059d72621398c170362a8527bbe29d0309c0 | 14204526624b3132aa599bee2eaec425eed92e18 | /venv/model.py | 2139295663d667df209877fc7bbbe0550132fb2a | [] | no_license | RajRatanPote/House_prise | f9c4557f896deb8ca2aec508b28c6092b078b332 | e701b66bf478b146447276fcdd558a2315a47bea | refs/heads/master | 2023-04-02T23:00:58.300788 | 2021-04-11T09:59:50 | 2021-04-11T09:59:50 | 356,830,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | import numpy as np
import pandas as pd
from pandas import read_csv
from sklearn.datasets import load_boston
# NOTE(review): load_boston is deprecated in recent scikit-learn (removed in
# 1.2) for ethical reasons — confirm the pinned sklearn version supports it.
boston = load_boston()
data = pd.DataFrame(boston.data)
data.head()
df = pd.DataFrame(boston.data)
df.columns = boston.feature_names
df.head()
df.describe()
# Target column: median house value.
df['PRICE'] = boston.target
# Bucket TAX into coarse bands (labels are the band upper bounds).
bins = [0,200,400,600,800]
gr_name=[200,400,600,800]
df['NEW_TAX']=pd.cut(df['TAX'],bins,labels=gr_name)
# Bucket RM (rooms) the same way.
bins = [0,2,4,6,8,10]
gr_name=[2,4,6,8,10]
df['NEW_RM']=pd.cut(df['RM'],bins,labels=gr_name)
df = df[~(df['PRICE'] >= 40.0)] # removed the outliers
dr_x=['PRICE','RM','TAX']
dr_y=['RM','TAX']
x=df.drop(dr_x,axis=1)
# NOTE(review): y keeps every column except RM/TAX — including all features —
# not just PRICE; for price regression this should likely be df['PRICE'].
# Also x still contains the categorical NEW_TAX/NEW_RM columns, which
# LinearRegression cannot consume directly — verify intended preprocessing.
y=df.drop(dr_y,axis=1)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test= train_test_split(x,y,test_size=0.3)
from sklearn.linear_model import LinearRegression
reg=LinearRegression()
reg.fit(x_train,y_train)
#predict the price
y_pred=reg.predict(x_test)
# Persist the fitted model and reload it as a smoke test.
import pickle
pickle.dump(reg,open('boston.pkl','wb'))
model=pickle.load(open('boston.pkl','rb'))
"rajratan.pote@ksolves.com"
] | rajratan.pote@ksolves.com |
ef00af5d34e3dd65c260ca773cd1388636e3b07a | 83cd63f49e86c8945115c8a2ba31bb129e167983 | /hello.py | c87c820b5b7cd3ad5c54b9f3a83371bf41dd83b8 | [] | no_license | miniaishwarya/Python-stuff | 657d7c32463badfdf7818528e26ce65d465830ad | 291c015cb39821912a5c8856f83497fbf2415123 | refs/heads/master | 2020-12-13T22:54:13.523454 | 2020-01-17T13:37:26 | 2020-01-17T13:37:26 | 234,555,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py |
# Reading an excel file using Python
import xlrd
# Give the location of the file
# NOTE(review): this looks like a directory, but xlrd.open_workbook expects a
# workbook file path (e.g. r"C:\Python\file.xls") — confirm; also prefer a raw
# string so the backslash cannot be misread as an escape.
loc = ("C:\Python")
# To open Workbook
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
# For row 0 and column 0
# NOTE(review): the value is read but not stored or printed.
sheet.cell_value(0, 0)
| [
"noreply@github.com"
] | noreply@github.com |
3cfb100f580765a53f828f757e04126c0d5ba7ec | 5e5f5cd80de17b6f31682630c8b6826cbdea7b62 | /Lesson 05/requirements.py | 88b19d13c31da4d3813b50a5290e413a17320d14 | [] | no_license | maraja/ai-bootcamp | 3e813e8c5888f043357b0979a3eb62f70db6eda0 | f44f0da35814b83a4da38009eb1845c6700d8b46 | refs/heads/master | 2021-05-05T14:42:34.165044 | 2018-03-29T00:57:52 | 2018-03-29T00:57:52 | 118,500,856 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Required Python Machine learning Packages
import pandas as pd
import numpy as np
# For preprocessing the data
from sklearn.preprocessing import Imputer
from sklearn import preprocessing
# To split the dataset into train and test datasets
from sklearn.model_selection import train_test_split
# To model the Gaussian Navie Bayes classifier
from sklearn.naive_bayes import GaussianNB
# To calculate the accuracy score of the model
from sklearn.metrics import accuracy_score
# Loading the Dataset
# delimiter=' *, *' is a regex (hence engine='python') that also strips the
# spaces around commas in the UCI "adult" data file.
adult_df = pd.read_csv('adult.data.txt',
                       header = None, delimiter=' *, *', engine='python')
# Adding Headers
adult_df.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
                    'marital_status', 'occupation', 'relationship',
                    'race', 'sex', 'capital_gain', 'capital_loss',
                    'hours_per_week', 'native_country', 'income']
# Handling Missing Data
# Finding Null values
# NOTE(review): the adult dataset encodes missing values as '?', which
# isnull() will not detect; the sum below is informative only interactively.
adult_df.isnull().sum()
"amit.maraj@gmail.com"
] | amit.maraj@gmail.com |
9999bf5d93fa20451f61973a2e0ae14307aded8d | 4b1cf07275a8f2abf30943b975d443485ef897ff | /data_generator.py | 3805e30c71100e78de5cec92ba0c561a77bb426d | [
"MIT"
] | permissive | gipsyblues/edge_ml_emotion_recognition | a0e1e0acc98d11f710542218b2603f72a8a93a4b | 028e9a9264e7df5c48a047677b48f0c15e059e6c | refs/heads/master | 2023-06-27T02:53:18.707806 | 2021-07-28T06:48:30 | 2021-07-28T06:48:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,108 | py | import numpy as np
import cv2
import os
import imgaug as ia
import logging
from imgaug import augmenters as iaa
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
def _create_augment_pipeline():
    """Build the imgaug augmentation pipeline used during training.

    Horizontal flips and affine jitter are always candidates; the heavier
    photometric distortions are each applied with only 10% probability via
    `sometimes`. Augmenters run in random order.
    """
    # Wrap an augmenter so it fires on ~10% of images.
    sometimes = lambda aug: iaa.Sometimes(0.1, aug)
    aug_pipe = iaa.Sequential(
        [
            iaa.Fliplr(0.5),
            #iaa.Flipud(0.2),
            iaa.Affine(translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}),
            # Exactly one of scale / rotate / shear.
            iaa.OneOf([iaa.Affine(scale=(0.8, 1.2)),
                       iaa.Affine(rotate=(-10, 10)),
                       iaa.Affine(shear=(-10, 10))]),
            # Occasional blur of one flavour.
            sometimes(iaa.OneOf([
                iaa.GaussianBlur((0, 3.0)),
                iaa.AverageBlur(k=(2, 7)),
                iaa.MedianBlur(k=(3, 11)),
            ])),
            sometimes(iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5))),
            sometimes(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5)),
            # Occasional pixel or patch dropout.
            sometimes(iaa.OneOf([
                iaa.Dropout((0.01, 0.1), per_channel=0.5),
                iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
            ])),
            # Occasional brightness / contrast jitter.
            sometimes(iaa.Add((-10, 10), per_channel=0.5)),
            sometimes(iaa.Multiply((0.5, 1.5), per_channel=0.5)),
            sometimes(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5))
        ],
        random_order=True
    )
    return aug_pipe
def process_image_classification(image, desired_w = None, desired_h = None, aug_pipe = None):
    """Optionally resize and then augment one image; returns the result.

    When both desired dimensions are given the image is rescaled first; when
    an aug_pipe is given it is applied afterwards. With neither, the image is
    returned untouched.
    """
    # resize the image to standard size
    if (desired_w and desired_h) or aug_pipe:
        if (desired_w and desired_h):
            # Rescale image
            # NOTE(review): imresize_single_image takes sizes as (height,
            # width); passing (desired_w, desired_h) only works because the
            # caller uses a square img_size — confirm if non-square is needed.
            image = ia.imresize_single_image(image, (desired_w, desired_h))
        if aug_pipe:
            image = aug_pipe(image=image)
    return image
class DataGenerator():
    """Infinite batch generator for image-classification training.

    Loads images named in X_train from `prefix`, preprocesses them (resize,
    optional imgaug augmentation, scaling to [-1, 1], BGR->RGB) and yields
    (X, y) batches forever when called.
    """

    def __init__(self, X_train, y_train, batch_size=32, img_size = 48, prefix='appa-real/imgs/', shuffle=True, augment=None):
        # X_train: sequence of image file names relative to `prefix`.
        # y_train: 2-D label array of shape (n_samples, n_classes).
        self.X_train = X_train
        self.y_train = y_train
        self.batch_size = batch_size
        self.img_size = img_size
        self.prefix = prefix
        self.class_num = y_train.shape[1]
        self.shuffle = shuffle
        self.sample_num = len(X_train)
        self.augment = augment
        if self.augment:
            # Bug fix: the original string was missing its f-prefix and logged
            # the literal text "{self.prefix}". Lazy %-formatting also avoids
            # building the message when the log level filters it out.
            logging.info("Using augmentation for %s", self.prefix)
            self.aug_pipe = _create_augment_pipeline()

    def __call__(self):
        """Yield (X, y) batches forever, reshuffling each epoch if enabled."""
        while True:
            indexes = self.__get_exploration_order()
            # NOTE(review): batches are drawn in strides of batch_size * 2,
            # so only every other batch_size slice is used — confirm intended.
            itr_num = int(len(indexes) // (self.batch_size * 2))
            for i in range(itr_num):
                batch_ids = indexes[i * self.batch_size * 2:(i + 1) * self.batch_size * 2]
                X, y = self.__data_generation(batch_ids)
                yield X, y

    def __get_exploration_order(self):
        """Return sample indexes, shuffled in place when self.shuffle is set."""
        indexes = np.arange(self.sample_num)
        if self.shuffle:
            np.random.shuffle(indexes)
        return indexes

    def __data_generation(self, batch_ids):
        """Load and preprocess one batch; returns (images, labels)."""
        X = np.zeros(shape=(self.batch_size, self.img_size, self.img_size, 3))
        y = np.zeros(shape=(self.batch_size, self.class_num))
        for i in range(self.batch_size):
            # Read as 3-channel BGR.
            img = cv2.imread(self.prefix + self.X_train[batch_ids[i]], 1)
            try:
                if self.augment:
                    img = process_image_classification(img, self.img_size, self.img_size, self.aug_pipe)
            except Exception as e:
                print(self.prefix + self.X_train[batch_ids[i]], e)
            # Scale pixel values from [0, 255] to [-1, 1].
            img = img.astype(np.float32)
            img /= 255.
            img -= 0.5
            img *= 2.
            # BGR -> RGB.
            img = img[:, :, ::-1]
            X[i, ::] = img
            y[i, :] = self.y_train[batch_ids[i]]
        return np.array(X), y
| [
"dmitrywat@gmail.com"
] | dmitrywat@gmail.com |
8608e303a36bf3f85081086abacf05377c92e3f0 | 080029466f1885822b4f9ef3bb351bddd9bae826 | /src/Labyrinth.py | 47cf009350102d35da8752cff8759b9de046c15b | [] | no_license | kleeblatt007/Labyrinth | e9650a0eaf6fb34a12c718101ea895ecf32c7865 | 157c1edfa5ba320eff834d972677bca6aad53b8f | refs/heads/main | 2023-08-24T20:36:13.592199 | 2021-10-17T15:51:19 | 2021-10-17T15:51:19 | 371,376,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,378 | py | from Graph import Graph
from RandomPath import RandomPath
from DepthFirstPath import DepthFirstPath
from Coordinate import Coordinate
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
# from pprint import pprint
# import numpy as np
class Labyrinth(object):
def __init__(self, N, start):
self.N = N
self.graph = Graph(pow(N, 2))
self.start = start
self.build()
def grid(self):
'''
Baut ein Gitter, was für den RandomPath benötigt wird. Jeder Knoten (außer die Ränder) sind mit den Knoten
rechts, links, unten und über ihnen verbunden.
:return: Graph
'''
m = Graph(pow(self.N, 2))
s = 0
for y in range(self.N):
for x in range(self.N):
if y == 0:
if x == 0:
m.addEdge(s, s + 1)
m.addEdge(s, s + self.N)
elif x == self.N - 1:
m.addEdge(s, s - 1)
m.addEdge(s, s + self.N)
else:
m.addEdge(s, s + 1)
m.addEdge(s, s - 1)
m.addEdge(s, s + self.N)
elif y == self.N - 1:
if x == 0:
m.addEdge(s, s + 1)
m.addEdge(s, s - self.N)
elif x == self.N - 1:
m.addEdge(s, s - 1)
m.addEdge(s, s - self.N)
else:
m.addEdge(s, s + 1)
m.addEdge(s, s - 1)
m.addEdge(s, s - self.N)
else:
if x == 0:
m.addEdge(s, s + 1)
m.addEdge(s, s + self.N)
m.addEdge(s, s - self.N)
elif x == self.N - 1:
m.addEdge(s, s - 1)
m.addEdge(s, s + self.N)
m.addEdge(s, s - self.N)
else:
m.addEdge(s, s + 1)
m.addEdge(s, s - 1)
m.addEdge(s, s + self.N)
m.addEdge(s, s - self.N)
s += 1
#m.printGraph()
return m
def build(self):
'''
Bildet ein Graph als Labyrinth. Dabei wird über ein grid ein Pfad per zufälliger Tiefensuche gebildet.
:return:
'''
if self.graph.getNodes() <= 2:
return
G = RandomPath(self.start, self.graph)
G.path(self.grid(), self.start)
a = G.getEdgeTo()
for i in range(len(a)):
if i == self.start:
continue
self.graph.addEdge(i, a[i])
def hasEdge(self, v, w):
'''
Prüft, ob zwei Knoten durch eine Kante verbunden ist.
:param v: int
:param w: int
:return: boolean
'''
if self.graph.validNode(v) and self.graph.validNode(w):
for x in self.graph.adj(v):
if x == w:
return True
return False
def printPath(self, path,e):
'''
Stellt ein Graphen mit path, mit Hilfe von plt.plot, graphisch dar
:param path: vorher erstellter path
:param e: Endknoten
'''
edges = path.pathTo(e)
for n in range(self.graph.getNodes()):
for v in self.graph.getAdj(n):
c1 = self.nodeToCoordinate(n)
c2 = self.nodeToCoordinate(v)
plt.plot(c1.x,c1.y,'o')
x = [c1.X(), c2.X()]
y = [c1.Y(), c2.Y()]
plt.plot(x, y, "black", linewidth=3.0)
for x in range(len(edges)-1,1,-1):
c1 = self.nodeToCoordinate(edges[x])
c2 = self.nodeToCoordinate(edges[x-1])
# plt.plot(c1.x,c1.y,'o')
x = [c1.X(), c2.X()]
y = [c1.Y(), c2.Y()]
plt.plot(x, y, "red", linewidth=3.0)
plt.show()
def findWay(self, s, e):
'''
Ein Pfad wird durch Tiefensuche(DepthFirstPath) zwischen zwei Knoten gesucht
:param s: int
:param e: int
:return: list
'''
if not self.graph.validNode(s) or not self.graph.validNode(e):
return
path = DepthFirstPath(self.graph, s)
path.path(self.graph, s)
self.printPath(path,e)
return path.pathTo(e)
def nodeToCoordinate(self, n):
'''
Errechnet Koordinaten des Knoten
:param n: int
:return: Coordinate
'''
if n == 0:
c = Coordinate(0, self.N - 1)
return c
x = n % self.N
y = self.N - 1 - int(n / self.N)
c = Coordinate(x, y)
return c
def printLab(self):
'''
Stellt das Labyrint mit Hilfe von plt.plot graphisch dar
'''
# x = 0
# y = self.N-1
for n in range(self.graph.getNodes()):
# xArray = []
# yArray = []
# xArray.append(x)
# yArray.append(y)
for v in self.graph.getAdj(n):
c1 = self.nodeToCoordinate(n)
c2 = self.nodeToCoordinate(v)
plt.plot(c1.x,c1.y,'o')
x = [c1.X(), c2.X()]
y = [c1.Y(), c2.Y()]
plt.plot(x, y, "black", linewidth=3.0)
plt.show()
def printLabPlotly(self):
    """Draw every edge of the maze as line traces in a plotly figure.

    The intermediate pandas DataFrame previously built for every edge
    was never read anywhere and has been dropped (pure dead work).
    """
    fig = go.Figure()
    for n in range(self.graph.getNodes()):
        nX = self.nodeToCoordinate(n).x
        nY = self.nodeToCoordinate(n).y
        for v in self.graph.getAdj(n):
            xList = [nX, self.nodeToCoordinate(v).x]
            yList = [nY, self.nodeToCoordinate(v).y]
            fig.add_trace(go.Scatter(x=xList, y=yList, mode="lines"))
    fig.show()
def labToTxt(self, end):
    """Write the maze to ``Lab.txt`` as a 2D grid.

    Cell values: 1 = free passage, 0 = wall, 2 = goal.

    :param end: (row, col) index pair of the goal cell in the grid
    """
    size = self.N * 2 + 1
    a = [[0] * size for _ in range(size)]
    y = 1
    for n in range(self.graph.getNodes()):
        c = self.nodeToCoordinate(n)
        x = c.X() * 2
        if x == 0:
            x = 1
        a[y][x] = 1
        # Open the wall towards each adjacent node: +/-1 means a
        # horizontal neighbour, larger offsets a vertical one.
        for v in self.graph.getAdj(n):
            i = v - n
            if i == 1:
                a[y][x + 1] = 1
            elif i == -1:
                a[y][x - 1] = 1
            elif i > 0:
                a[y + 1][x] = 1
            else:
                a[y - 1][x] = 1
        if c.X() == self.N - 1:
            y += 2
    a[end[0]][end[1]] = 2
    # Commented-out numpy/pprint experiments removed; the plain text
    # writer below is the live implementation.  ``fh`` avoids shadowing
    # the builtin ``file``/the Coordinate ``c`` above.
    with open('Lab.txt', 'w') as fh:
        for row in a:
            fh.write(' '.join(str(cell) for cell in row) + '\n')
| [
"clemens.bandrock@campus.tu-berlin.de"
] | clemens.bandrock@campus.tu-berlin.de |
7091c8bb4d092cb28c4a6f0d1fe1a329abcb2805 | 40b20d7e5f4381a64bd264a562c4ae6d6721b01c | /14-it-generator/sentence_gen.py | a17c48f6811da8c5180ec412bacbf4618080cabf | [
"MIT"
] | permissive | KyrieCham/example-code | 7d2f0d5901bf80b49dd6b1e9ae1c37c9cb6df7f5 | 3dd11744d1c0b1f00860e985ee2a0761e73ef7e7 | refs/heads/master | 2020-04-18T00:56:06.384756 | 2019-01-22T19:27:43 | 2019-01-22T19:27:43 | 167,098,245 | 1 | 0 | MIT | 2019-01-23T01:52:48 | 2019-01-23T01:52:47 | null | UTF-8 | Python | false | false | 446 | py | """
Sentence: iterate over words using a generator function
"""
import re
import reprlib
RE_WORD = re.compile('\w+')
class Sentence:
    """A sentence whose words can be iterated one at a time."""

    def __init__(self, text):
        self.text = text
        self.words = RE_WORD.findall(text)

    def __repr__(self):
        return 'Sentence(%s)' % reprlib.repr(self.text)

    def __iter__(self):
        # Generator function: delegates straight to the word list, so
        # words are produced lazily on demand.
        yield from self.words
| [
"luciano@ramalho.org"
] | luciano@ramalho.org |
c64f6276a76c1f9c5a452595cbcd25de501fd7f6 | e65a448da4f82d6e7c95cfadc5e8dfd06ed05c62 | /cinder/cinder/api/middleware/auth.py | cf898c9b07d780e57e877272a930772dd33360d5 | [
"Apache-2.0"
] | permissive | bopopescu/devstack | 7a9d11bcc37884f3686e7178ebc25c178a6da283 | 6b73b164af7e5895501f1ca5dafebbba90510846 | refs/heads/master | 2022-11-19T19:58:43.536574 | 2015-01-29T09:00:59 | 2015-01-29T09:00:59 | 282,101,378 | 0 | 0 | null | 2020-07-24T02:17:48 | 2020-07-24T02:17:47 | null | UTF-8 | Python | false | false | 6,014 | py | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.
"""
import os
from oslo.config import cfg
from oslo.serialization import jsonutils
import webob.dec
import webob.exc
from cinder.api.openstack import wsgi
from cinder import context
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.openstack.common.middleware import request_id
from cinder import wsgi as base_wsgi
# Deployment option: trust X-Forwarded-For only when a sanitizing proxy
# sits in front of the API, otherwise clients could spoof their address.
use_forwarded_for_opt = cfg.BoolOpt(
    'use_forwarded_for',
    default=False,
    help='Treat X-Forwarded-For as the canonical remote address. '
         'Only enable this if you have a sanitizing proxy.')
CONF = cfg.CONF
CONF.register_opt(use_forwarded_for_opt)
# Module-level logger.
LOG = logging.getLogger(__name__)
def pipeline_factory(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy.

    Picks the pipeline named after CONF.auth_strategy (or its
    ``_nolimit`` variant when rate limiting is disabled), loads the
    terminal app, then wraps it in each filter from the outside in.
    """
    pipeline = local_conf[CONF.auth_strategy]
    if not CONF.api_rate_limit:
        limit_name = CONF.auth_strategy + '_nolimit'
        pipeline = local_conf.get(limit_name, pipeline)
    pipeline = pipeline.split()
    filters = [loader.get_filter(n) for n in pipeline[:-1]]
    app = loader.get_app(pipeline[-1])
    # Apply the filters right-to-left so the first listed filter ends up
    # outermost.  (The previous loop variable shadowed the builtin
    # ``filter``; renamed.)
    for filter_factory in reversed(filters):
        app = filter_factory(app)
    return app
class InjectContext(base_wsgi.Middleware):
    """Add a 'cinder.context' to WSGI environ."""

    def __init__(self, context, *args, **kwargs):
        # Fixed context injected into every request handled by this
        # middleware instance.
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=base_wsgi.Request)
    def __call__(self, req):
        req.environ['cinder.context'] = self.context
        return self.application
class CinderKeystoneContext(base_wsgi.Middleware):
    """Make a request context from keystone headers."""

    @webob.dec.wsgify(RequestClass=base_wsgi.Request)
    def __call__(self, req):
        # Prefer the newer X_USER_ID header, fall back to legacy X_USER.
        user_id = req.headers.get('X_USER')
        user_id = req.headers.get('X_USER_ID', user_id)
        if user_id is None:
            LOG.debug("Neither X_USER_ID nor X_USER found in request")
            return webob.exc.HTTPUnauthorized()
        # get the roles
        roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')]
        if 'X_TENANT_ID' in req.headers:
            # This is the new header since Keystone went to ID/Name
            project_id = req.headers['X_TENANT_ID']
        else:
            # This is for legacy compatibility
            project_id = req.headers['X_TENANT']
        project_name = req.headers.get('X_TENANT_NAME')
        req_id = req.environ.get(request_id.ENV_REQUEST_ID)
        # Get the auth token
        auth_token = req.headers.get('X_AUTH_TOKEN',
                                     req.headers.get('X_STORAGE_TOKEN'))
        # Build a context, including the auth_token...
        remote_address = req.remote_addr
        service_catalog = None
        if req.headers.get('X_SERVICE_CATALOG') is not None:
            try:
                catalog_header = req.headers.get('X_SERVICE_CATALOG')
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                # Malformed catalog JSON is a server-side fault, not a
                # client auth problem.
                raise webob.exc.HTTPInternalServerError(
                    explanation=_('Invalid service catalog json.'))
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     project_name=project_name,
                                     roles=roles,
                                     auth_token=auth_token,
                                     remote_address=remote_address,
                                     service_catalog=service_catalog,
                                     request_id=req_id)
        req.environ['cinder.context'] = ctx
        return self.application
class NoAuthMiddleware(base_wsgi.Middleware):
    """Return a fake token if one isn't specified."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if 'X-Auth-Token' not in req.headers:
            # First request: fabricate a "user:project" token and point
            # the client at the per-project management URL.
            user_id = req.headers.get('X-Auth-User', 'admin')
            project_id = req.headers.get('X-Auth-Project-Id', 'admin')
            os_url = os.path.join(req.url, project_id)
            res = webob.Response()
            # NOTE(vish): This is expecting and returning Auth(1.1), whereas
            #             keystone uses 2.0 auth. We should probably allow
            #             2.0 auth here as well.
            res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
            res.headers['X-Server-Management-Url'] = os_url
            res.content_type = 'text/plain'
            res.status = '204'
            return res
        # Subsequent requests: split the fake token back apart and build
        # an admin context from it.
        token = req.headers['X-Auth-Token']
        user_id, _sep, project_id = token.partition(':')
        project_id = project_id or user_id
        remote_address = getattr(req, 'remote_address', '127.0.0.1')
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=True,
                                     remote_address=remote_address)
        req.environ['cinder.context'] = ctx
        return self.application
| [
"swethapts@gmail.com"
] | swethapts@gmail.com |
35bcc77e349cc812c7bd54f2b0b870705d6cb17c | 2536c2235ccb701b11ff20054d13c1d988e81cd2 | /fuse.py | 9475020b7c732cee24262c077dfd12a4301ba7cb | [] | no_license | icco/clyde | f372f0685e12a6e2aeeb91b9d702ef54bfc6e597 | 4d011f45a7a00b8889e32821a2123fe784141d27 | refs/heads/master | 2020-06-08T06:03:11.215639 | 2009-05-31T00:56:43 | 2009-05-31T00:56:43 | 199,375 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,113 | py | # Copyright (c) 2008 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from ctypes import *
from ctypes.util import find_library
from errno import EFAULT
from functools import partial
from platform import machine, system
from traceback import print_exc
class c_timespec(Structure):
    # Mirrors C ``struct timespec`` (seconds + nanoseconds).
    _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
class c_utimbuf(Structure):
    # Access/modification timestamp pair passed to the utimens callback.
    _fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
class c_stat(Structure):
    # ``_fields_`` is assigned below once the host platform is known.
    pass # Platform dependent
# Platform-specific C type aliases, xattr callback prototypes and the
# ``struct stat`` layout.  Field order and widths must match the host
# ABI exactly; do not reorder.
_system = system()
if _system == 'Darwin':
    # libfuse on macOS depends on libiconv, which must be loaded
    # globally before libfuse itself.
    _libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency
    ENOTSUP = 45
    c_dev_t = c_int32
    c_fsblkcnt_t = c_ulong
    c_fsfilcnt_t = c_ulong
    c_gid_t = c_uint32
    c_mode_t = c_uint16
    c_off_t = c_int64
    c_pid_t = c_int32
    c_uid_t = c_uint32
    # macOS xattr calls take an extra position/options argument.
    setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
                           c_size_t, c_int, c_uint32)
    getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
                           c_size_t, c_uint32)
    c_stat._fields_ = [
        ('st_dev', c_dev_t),
        ('st_ino', c_uint32),
        ('st_mode', c_mode_t),
        ('st_nlink', c_uint16),
        ('st_uid', c_uid_t),
        ('st_gid', c_gid_t),
        ('st_rdev', c_dev_t),
        ('st_atimespec', c_timespec),
        ('st_mtimespec', c_timespec),
        ('st_ctimespec', c_timespec),
        ('st_size', c_off_t),
        ('st_blocks', c_int64),
        ('st_blksize', c_int32)]
elif _system == 'Linux':
    ENOTSUP = 95
    c_dev_t = c_ulonglong
    c_fsblkcnt_t = c_ulonglong
    c_fsfilcnt_t = c_ulonglong
    c_gid_t = c_uint
    c_mode_t = c_uint
    c_off_t = c_longlong
    c_pid_t = c_int
    c_uid_t = c_uint
    setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
    getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
    # The stat layout additionally differs per architecture.
    _machine = machine()
    if _machine == 'i686':
        c_stat._fields_ = [
            ('st_dev', c_dev_t),
            ('__pad1', c_ushort),
            ('__st_ino', c_ulong),
            ('st_mode', c_mode_t),
            ('st_nlink', c_uint),
            ('st_uid', c_uid_t),
            ('st_gid', c_gid_t),
            ('st_rdev', c_dev_t),
            ('__pad2', c_ushort),
            ('st_size', c_off_t),
            ('st_blksize', c_long),
            ('st_blocks', c_longlong),
            ('st_atimespec', c_timespec),
            ('st_mtimespec', c_timespec),
            ('st_ctimespec', c_timespec),
            ('st_ino', c_ulonglong)]
    elif machine() == 'x86_64':
        c_stat._fields_ = [
            ('st_dev', c_dev_t),
            ('st_ino', c_ulong),
            ('st_nlink', c_ulong),
            ('st_mode', c_mode_t),
            ('st_uid', c_uid_t),
            ('st_gid', c_gid_t),
            ('__pad0', c_int),
            ('st_rdev', c_dev_t),
            ('st_size', c_off_t),
            ('st_blksize', c_long),
            ('st_blocks', c_long),
            ('st_atimespec', c_timespec),
            ('st_mtimespec', c_timespec),
            ('st_ctimespec', c_timespec)]
    else:
        raise NotImplementedError('Linux %s is not supported.' % _machine)
else:
    raise NotImplementedError('%s is not supported.' % _system)
class c_statvfs(Structure):
    # Subset of C ``struct statvfs`` used by the statfs callback.
    _fields_ = [
        ('f_bsize', c_ulong),
        ('f_frsize', c_ulong),
        ('f_blocks', c_fsblkcnt_t),
        ('f_bfree', c_fsblkcnt_t),
        ('f_bavail', c_fsblkcnt_t),
        ('f_files', c_fsfilcnt_t),
        ('f_ffree', c_fsfilcnt_t),
        ('f_favail', c_fsfilcnt_t)]
class fuse_file_info(Structure):
    # Mirrors libfuse's ``struct fuse_file_info``; ``fh`` carries the
    # Python-side file handle across calls.  The c_uint fields with a
    # third tuple element are C bit-fields.
    _fields_ = [
        ('flags', c_int),
        ('fh_old', c_ulong),
        ('writepage', c_int),
        ('direct_io', c_uint, 1),
        ('keep_cache', c_uint, 1),
        ('flush', c_uint, 1),
        ('padding', c_uint, 29),
        ('fh', c_uint64),
        ('lock_owner', c_uint64)]
class fuse_context(Structure):
    # Mirrors ``struct fuse_context``: identity of the calling process.
    _fields_ = [
        ('fuse', c_voidp),
        ('uid', c_uid_t),
        ('gid', c_gid_t),
        ('pid', c_pid_t),
        ('private_data', c_voidp)]
class fuse_operations(Structure):
    # Table of C callback pointers handed to fuse_main_real.  Field
    # order must match libfuse's ``struct fuse_operations`` for API
    # version 2.6; c_voidp entries are slots this binding never fills.
    _fields_ = [
        ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
        ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
        ('getdir', c_voidp), # Deprecated, use readdir
        ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
        ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
        ('unlink', CFUNCTYPE(c_int, c_char_p)),
        ('rmdir', CFUNCTYPE(c_int, c_char_p)),
        ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
        ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
        ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
        ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
        ('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
        ('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
        ('utime', c_voidp), # Deprecated, use utimens
        ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
                           POINTER(fuse_file_info))),
        ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
                            POINTER(fuse_file_info))),
        ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
        ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
        ('setxattr', setxattr_t),
        ('getxattr', getxattr_t),
        ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
        ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
        ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp,
                    c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))),
        ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
        ('init', c_voidp), # Use __init__
        ('destroy', c_voidp), # Use __del__
        ('access', CFUNCTYPE(c_int, c_char_p, c_int)),
        ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))),
        ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))),
        ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
                               POINTER(fuse_file_info))),
        ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)),
        ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
        ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))]
_libfuse = CDLL(find_library("fuse"))
def fuse_get_context():
    """Returns a (uid, gid, pid) tuple for the calling process."""
    # Declare the return type explicitly: ctypes defaults foreign
    # results to c_int, which truncates the returned pointer on 64-bit
    # platforms before the cast can see it.
    _libfuse.fuse_get_context.restype = POINTER(fuse_context)
    ctx = _libfuse.fuse_get_context().contents
    return ctx.uid, ctx.gid, ctx.pid
def time_of_timespec(ts):
    """Convert a c_timespec into a float number of seconds."""
    fractional = ts.tv_nsec / 10.0 ** 9
    return ts.tv_sec + fractional
def _operation_wrapper(func, *args, **kwargs):
"""Decorator for the methods of class FUSE"""
try:
return func(*args, **kwargs) or 0
except OSError, e:
return -(e.errno or e.message or EFAULT)
except:
print_exc()
return -EFAULT
class FUSE(object):
    """Assumes API version 2.6 or later.
    Should not be subclassed under normal use."""

    def __init__(self, operations, mountpoint, **kwargs):
        # Translate keyword options into libfuse command-line arguments,
        # register a C callback for every operation the Python-side
        # ``operations`` object implements, then block in fuse_main_real
        # until the filesystem is unmounted.
        self.operations = operations
        args = ['fuse']
        if kwargs.pop('foreground', False):
            args.append('-f')
        if kwargs.pop('debug', False):
            args.append('-d')
        if kwargs.pop('nothreads', False):
            args.append('-s')
        kwargs.setdefault('fsname', operations.__class__.__name__)
        args.append('-o')
        args.append(','.join(key if val == True else '%s=%s' % (key, val)
                    for key, val in kwargs.items()))
        args.append(mountpoint)
        argv = (c_char_p * len(args))(*args)
        fuse_ops = fuse_operations()
        for name, prototype in fuse_operations._fields_:
            # c_voidp slots are placeholders that cannot be wrapped.
            if prototype != c_voidp and getattr(operations, name, None):
                op = partial(_operation_wrapper, getattr(self, name))
                setattr(fuse_ops, name, prototype(op))
        _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
                                sizeof(fuse_ops), None)
        del self.operations # Invoke the destructor

    def getattr(self, path, buf):
        return self.fgetattr(path, buf, None)

    def readlink(self, path, buf, bufsize):
        ret = self.operations('readlink', path)
        memmove(buf, create_string_buffer(ret), bufsize)
        return 0

    def mknod(self, path, mode, dev):
        return self.operations('mknod', path, mode, dev)

    def mkdir(self, path, mode):
        return self.operations('mkdir', path, mode)

    def unlink(self, path):
        return self.operations('unlink', path)

    def rmdir(self, path):
        return self.operations('rmdir', path)

    def symlink(self, source, target):
        # libfuse passes (source, target); Operations expects the
        # arguments swapped.
        return self.operations('symlink', target, source)

    def rename(self, old, new):
        return self.operations('rename', old, new)

    def link(self, source, target):
        return self.operations('link', target, source)

    def chmod(self, path, mode):
        return self.operations('chmod', path, mode)

    def chown(self, path, uid, gid):
        return self.operations('chown', path, uid, gid)

    def truncate(self, path, length):
        return self.operations('truncate', path, length)

    def open(self, path, fi):
        # Store the Python-level handle in fi.fh for later calls.
        fi.contents.fh = self.operations('open', path, fi.contents.flags)
        return 0

    def read(self, path, buf, size, offset, fi):
        ret = self.operations('read', path, size, offset, fi.contents.fh)
        if ret:
            # NOTE(review): copies ``size`` bytes even if ``ret`` is
            # shorter; assumes callbacks never return less than
            # requested except at EOF -- confirm short reads are safe.
            memmove(buf, create_string_buffer(ret), size)
        return len(ret)

    def write(self, path, buf, size, offset, fi):
        data = string_at(buf, size)
        return self.operations('write', path, data, offset, fi.contents.fh)

    def statfs(self, path, buf):
        stv = buf.contents
        attrs = self.operations('statfs', path)
        # Copy only the keys that exist on the C struct.
        for key, val in attrs.items():
            if hasattr(stv, key):
                setattr(stv, key, val)
        return 0

    def flush(self, path, fi):
        return self.operations('flush', path, fi.contents.fh)

    def release(self, path, fi):
        return self.operations('release', path, fi.contents.fh)

    def fsync(self, path, datasync, fi):
        return self.operations('fsync', path, datasync, fi.contents.fh)

    def setxattr(self, path, name, value, size, options, *args):
        s = string_at(value, size)
        return self.operations('setxattr', path, name, s, options, *args)

    def getxattr(self, path, name, value, size, *args):
        ret = self.operations('getxattr', path, name, *args)
        buf = create_string_buffer(ret)
        # A NULL value pointer means the caller only wants the length.
        if bool(value):
            memmove(value, buf, size)
        return len(ret)

    def listxattr(self, path, namebuf, size):
        ret = self.operations('listxattr', path)
        if not ret:
            return 0
        # Attribute names are returned as a NUL-separated list.
        buf = create_string_buffer('\x00'.join(ret))
        if bool(namebuf):
            memmove(namebuf, buf, size)
        return len(buf)

    def removexattr(self, path, name):
        return self.operations('removexattr', path, name)

    def opendir(self, path, fi):
        fi.contents.fh = self.operations('opendir', path)
        return 0

    def readdir(self, path, buf, filler, offset, fi):
        for name in self.operations('readdir', path, fi.contents.fh):
            filler(buf, name, None, 0)
        return 0

    def releasedir(self, path, fi):
        return self.operations('releasedir', path, fi.contents.fh)

    def fsyncdir(self, path, datasync, fi):
        return self.operations('fsyncdir', path, datasync, fi.contents.fh)

    def access(self, path, amode):
        return self.operations('access', path, amode)

    def create(self, path, mode, fi):
        fi.contents.fh = self.operations('create', path, mode)
        return 0

    def ftruncate(self, path, length, fi):
        return self.operations('truncate', path, length, fi.contents.fh)

    def fgetattr(self, path, buf, fi):
        memset(buf, 0, sizeof(c_stat))
        st = buf.contents
        fh = fi.contents.fh if fi else None
        attrs = self.operations('getattr', path, fh)
        for key, val in attrs.items():
            if key in ('st_atime', 'st_mtime', 'st_ctime'):
                # Float seconds are split into the sec/nsec pair of the
                # corresponding *spec field.
                timespec = getattr(st, key + 'spec')
                timespec.tv_sec = int(val)
                timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9)
            elif hasattr(st, key):
                setattr(st, key, val)
        return 0

    def lock(self, path, fi, cmd, lock):
        return self.operations('lock', path, fi.contents.fh, cmd, lock)

    def utimens(self, path, buf):
        if buf:
            atime = time_of_timespec(buf.contents.actime)
            mtime = time_of_timespec(buf.contents.modtime)
            times = (atime, mtime)
        else:
            # NULL buffer means "use the current time".
            times = None
        return self.operations('utimens', path, times)

    def bmap(self, path, blocksize, idx):
        return self.operations('bmap', path, blocksize, idx)
from errno import EACCES, ENOENT
from stat import S_IFDIR
class Operations:
    """This class should be subclassed and passed as an argument to FUSE on
    initialization. All operations should raise an OSError exception on
    error.
    When in doubt of what an operation should do, check the FUSE header
    file or the corresponding system call man page."""

    def __call__(self, op, *args):
        # Dispatch by operation name; unknown operations fault.
        if not hasattr(self, op):
            raise OSError(EFAULT)
        return getattr(self, op)(*args)

    def access(self, path, amode):
        return 0

    # None disables the corresponding libfuse callback entirely.
    bmap = None

    def chmod(self, path, mode):
        raise OSError(EACCES)

    def chown(self, path, uid, gid):
        raise OSError(EACCES)

    def create(self, path, mode):
        """Returns a numerical file handle."""
        raise OSError(EACCES)

    def flush(self, path, fh):
        return 0

    def fsync(self, path, datasync, fh):
        return 0

    def fsyncdir(self, path, datasync, fh):
        return 0

    def getattr(self, path, fh=None):
        """Returns a dictionary with keys identical to the stat C structure
        of stat(2).
        st_atime, st_mtime and st_ctime should be floats."""
        # Default: only the root directory exists.
        if path != '/':
            raise OSError(ENOENT)
        return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)

    def getxattr(self, path, name, position=0):
        raise OSError(ENOTSUP)

    def link(self, target, source):
        raise OSError(EACCES)

    def listxattr(self, path):
        return []

    # None disables the corresponding libfuse callback entirely.
    lock = None

    def mkdir(self, path, mode):
        raise OSError(EACCES)

    def mknod(self, path, mode, dev):
        raise OSError(EACCES)

    def open(self, path, flags):
        """Returns a numerical file handle."""
        return 0

    def opendir(self, path):
        """Returns a numerical file handle."""
        return 0

    def read(self, path, size, offset, fh):
        """Returns a string containing the data requested."""
        raise OSError(EACCES)

    def readdir(self, path, fh):
        return ['.', '..']

    def readlink(self, path):
        raise OSError(EACCES)

    def release(self, path, fh):
        return 0

    def releasedir(self, path, fh):
        return 0

    def removexattr(self, path, name):
        raise OSError(ENOTSUP)

    def rename(self, old, new):
        raise OSError(EACCES)

    def rmdir(self, path):
        raise OSError(EACCES)

    def setxattr(self, path, name, value, options, position=0):
        raise OSError(ENOTSUP)

    def statfs(self, path):
        """Returns a dictionary with keys identical to the statvfs C structure
        of statvfs(3). The f_frsize, f_favail, f_fsid and f_flag fields are
        ignored by FUSE though."""
        return {}

    def symlink(self, target, source):
        raise OSError(EACCES)

    def truncate(self, path, length, fh=None):
        raise OSError(EACCES)

    def unlink(self, path):
        raise OSError(EACCES)

    def utimens(self, path, times=None):
        """Times is a (atime, mtime) tuple. If None use current time."""
        return 0

    def write(self, path, data, offset, fh):
        raise OSError(EACCES)
class LoggingMixIn:
def __call__(self, op, path, *args):
print '->', op, path, repr(args)
ret = '[Unknown Error]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError, e:
ret = '[Errno %s]' % (e.errno or e.message)
raise
finally:
print '<-', op, repr(ret) | [
"nat@natwelch.com"
] | nat@natwelch.com |
249f92441ee8c9dd0bac700ca628226db862ef82 | e120f942d706b755382d0367e2f1c43bb81a3b40 | /crawling.py | 47909207af58fb56d4c42b37ef20ba407ba62a62 | [] | no_license | samiant/projectcraw | 740594d2b49e7d857a5dae28c1eba35217bbbdfe | 09a8ff45a2d1b98761f6a9579163d5f11e1b17f9 | refs/heads/main | 2023-01-12T19:57:56.267225 | 2020-11-12T02:21:52 | 2020-11-12T02:21:52 | 312,144,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import requests
from bs4 import BeautifulSoup
url = "https://scholar.google.com/citations?user=p4-7UvwAAAAJ&hl=en&oi=ao"
page = requests.get(url).text
soup_expatistan = BeautifulSoup(page)
expatistan_table = soup_expatistan.find("table", class_="comparison")
expatistan_titles = expatistan_table.find_all("tr", class_="expandable")
for expatistan_title in expatistan_titles:
published_date = expatistan_title.find("th", class_="percent")
print(published_date.span.string) | [
"noreply@github.com"
] | noreply@github.com |
3d4b9065d5b37d5328bf1b457f014f33e0b50b42 | 493c14e7747884015271c0050f1c0dccda4faea5 | /app/auth/email.py | f2dca25f4a6f14c2c50762ffb4ae5af0c7c740a6 | [] | no_license | antoniovmonge/datapark | 4636a7b491245e7dbedd4a01aa609df521dfceda | 2a347dda85235e2cf43342ae6841b686cd37c805 | refs/heads/master | 2023-09-01T04:39:06.379066 | 2021-10-28T17:49:04 | 2021-10-28T17:49:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | from flask import render_template, current_app
from app.email import send_email
def send_password_reset_email(user):
    """Email *user* a tokenised link for resetting their password."""
    token = user.get_reset_password_token()
    text_body = render_template('email/reset_password.txt',
                                user=user, token=token)
    html_body = render_template('email/reset_password.html',
                                user=user, token=token)
    send_email('[Megablog] Reset Your Password',
               sender=current_app.config['ADMINS'][0],
               recipients=[user.email],
               text_body=text_body,
               html_body=html_body)
"antoniovmonge@gmail.com"
] | antoniovmonge@gmail.com |
b7b033005c4d6c2fa2526203b4ff0fb9024fa215 | 45fe398064731ce4f3b9c9f6d1970b25c3bd63c2 | /Deps/Argh/conanfile.py | d6dd943dd8dde8725f01b7cd261aa208609361f8 | [
"BSD-3-Clause",
"MIT"
] | permissive | 15831944/AutoCADLI | 4977e7410fbd01bdc14b9ab97d8c81a02a37c35d | 6836c73bab89898ee789818e32fdef021664e09b | refs/heads/master | 2021-09-19T08:10:25.013455 | 2018-07-25T11:51:22 | 2018-07-25T11:51:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | from conans import ConanFile
import os
class ArghConan(ConanFile):
    # Conan recipe for the header-only "argh" argument parser.
    name = "argh"
    version = "1.2.1"
    url = "https://github.com/adishavit/argh"
    description = "Argh! A minimalist argument handler."
    license = "BSD 3-Clause"
    exports = ["LICENSE"]
    exports_sources = "argh.h"

    def package(self):
        # Header-only library: packaging just copies the license text
        # and the single public header.
        self.copy(pattern="LICENSE", dst="license")
        self.copy(pattern="argh.h", dst="include")
| [
"33334607+rena0157@users.noreply.github.com"
] | 33334607+rena0157@users.noreply.github.com |
e5132aeded7b1de370ce27c619521a91c10ac1ea | 75a6c56e4273d2c681898e77ee0c4e60d38b3032 | /scripts/run_yolo.py | 666a0d793a35fa6ffb416c73ed2c5ab32304d68b | [
"Apache-2.0"
] | permissive | tmralmeida/tensorrt-yolov4 | 23283568042949d78de325997bc9a3d4df83bbc2 | 320ffaf9cfe8fb3e787b16b03bb5de3a5e1ce5b9 | refs/heads/master | 2022-11-29T05:46:29.508389 | 2020-08-11T16:41:43 | 2020-08-11T16:41:43 | 286,789,494 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,725 | py |
import os
import argparse
import sys
import time
import cv2
import numpy as np
import pycuda.autoinit # This is needed for initializing CUDA driver
from utils.camera import add_camera_args, Camera
from utils.yolo_classes import get_cls_dict
from utils.yolo import TrtYOLO
from utils.display import open_window, set_display, show_fps
from utils.visualization import BBoxVisualization
sys.path.append(os.path.realpath('../'))
WINDOW_NAME = "TrtYOLO"
def parse_args():
    """Build the argument parser and return the parsed CLI arguments."""
    parser = argparse.ArgumentParser(
        description=('Capture and display live camera video, while doing '
                     'real-time object detection with TensorRT optimized '
                     'YOLO model on Jetson'))
    # Camera-related flags (device, resolution, ...) are contributed by
    # the shared helper.
    parser = add_camera_args(parser)
    parser.add_argument(
        '--model', type=str, required=True,
        help=('[yolov3|yolov3-tiny|yolov3-spp|yolov4|yolov4-tiny|yolov4-bdd]-'
              '[{dimension}], where dimension could be a single '
              'number (e.g. 288, 416, 608) or WxH (e.g. 416x256)'))
    parser.add_argument(
        '--category_num', type=int, default=80,
        help='number of object categories [80]')
    return parser.parse_args()
def loop_and_detect(cam, trt_yolo, conf_th, vis):
    """Continuously capture images from camera and do object detection.
    # Arguments
      cam: the camera instance (video source).
      trt_yolo: the TRT YOLO object detector instance.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    full_scrn = False
    fps = 0.0
    tic = time.time()
    while True:
        # Exit when the user closes the display window.
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            break
        img = cam.read()
        if img is not None:
            boxes, confs, clss = trt_yolo.detect(img, conf_th)
            img = vis.draw_bboxes(img, boxes, confs, clss)
            img = show_fps(img, fps)
            cv2.imshow(WINDOW_NAME, img)
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
        tic = toc
        key = cv2.waitKey(1)
        if key == 27: # ESC key: quit program
            break
        elif key == ord('F') or key == ord('f'): # Toggle fullscreen
            full_scrn = not full_scrn
            set_display(WINDOW_NAME, full_scrn)
def main():
    """Entry point: validate CLI args, build the detector and run the loop.

    The leftover debug assignment (``# trt_yolo = 1``) has been removed.
    """
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)
    if not os.path.isfile('../yolo/%s.trt' % args.model):
        raise SystemExit('ERROR: file (yolo/%s.trt) not found!' % args.model)
    cam = Camera(args)
    cam.open()
    if not cam.is_opened:
        raise SystemExit('ERROR: failed to open camera!')
    cls_dict = get_cls_dict(args.category_num)
    # The network input resolution is encoded in the model name suffix,
    # either "WxH" or a single square dimension; both must be multiples
    # of 32.
    yolo_dim = args.model.split('-')[-1]
    if 'x' in yolo_dim:
        dim_split = yolo_dim.split('x')
        if len(dim_split) != 2:
            raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
        w, h = int(dim_split[0]), int(dim_split[1])
    else:
        h = w = int(yolo_dim)
    if h % 32 != 0 or w % 32 != 0:
        raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
    trt_yolo = TrtYOLO(args.model, (h, w), args.category_num)
    cam.start()
    open_window(WINDOW_NAME, args.image_width, args.image_height,
                'Camera TensorRT YOLO Demo')
    vis = BBoxVisualization(cls_dict)
    loop_and_detect(cam, trt_yolo, conf_th=0.3, vis=vis)
    cam.stop()
    cam.release()
    cv2.destroyAllWindows()
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
"tm.almeida@ua.pt"
] | tm.almeida@ua.pt |
ca9547928ab7a957dabd169f16fc201dc6d06efe | b83ff584bfcd9fce7a337ba1253287fc9afd03c7 | /cmdline_fluency_countdown.py | c6564c0a2aa5dcf88e15805c147edba2570aebac | [] | no_license | houstonhunt/fluencycountdown | 6166eaf625f6e348213dcd5be8045ee218159900 | d555b83972e05d09e1caafca61ea465c4ca3770c | refs/heads/master | 2021-01-23T23:12:17.392090 | 2015-05-23T18:48:24 | 2015-05-23T18:48:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | #!/usr/bin/python
# cmdline_fluency_countdown.py
import pickle # used to save user progress (currently supporting 1 primary user)
import ConfigParser, os # used to parse language file
def init():
    """Load the saved state if present, otherwise run first-time setup."""
    state = 0
    try:
        pickle.load(open("save.p", "rb"))
        print "SUCCESS: loaded save file!"
        state = 1
    except:
        # No (or unreadable) save file: start the language-selection flow.
        config = ConfigParser.ConfigParser()
        config.read('lang.cfg')
        print "WELCOME: no save file found!"
        print "Type a [language] you want to learn (example: English),"
        print " or [list] then press [ENTER]"
        selected_lang = raw_input()
        # joke
        if selected_lang == "English":
            print "You already know English!"
            quit()
        elif selected_lang == "list":
            list(selected_lang, config)
        # NOTE(review): the line below is truncated in the source (no
        # right-hand side and no body) and is a syntax error -- it must
        # be completed or removed before this script can run.
        elif selected_language ==
def list(what, cp):
    # Interactive sub-menu printing language categories from lang.cfg.
    # NOTE(review): this function shadows the builtin ``list``; callers
    # in this file use the name, so it is kept for compatibility.
    # :param what: the command typed by the user (only "list" is handled)
    # :param cp: a ConfigParser loaded with lang.cfg
    if what == "list":
        print "list what? [all] [easy] [medium] [hard] [other] [about]"
        selected_lang = raw_input()
        if selected_lang == "all":
            list1(cp)
            list2(cp)
            list3(cp)
            listo(cp)
        elif selected_lang == "easy":
            list1(cp)
        elif selected_lang == "medium":
            list2(cp)
        elif selected_lang == "hard":
            list3(cp)
        elif selected_lang == "other":
            listo(cp)
        elif selected_lang == "about":
            print "Coded by Houston Hunt"
            print "Times to mastering a language for English speakers"
            print "is given by " + str(cp.get('Reference', 'reference'))
def list1(cp):
    # Print the "easy" (category 1) languages from the config.
    print cp.get('Languages', 'desc1')
    print str(cp.get('Languages', 'cat1'))
def list2(cp):
    # Print the "medium" (category 2) languages from the config.
    print str(cp.get('Languages', 'desc2'))
    print str(cp.get('Languages', 'cat2'))
def list3(cp):
    # Print the "hard" (category 3) languages from the config.
    print str(cp.get('Languages', 'desc3'))
    print str(cp.get('Languages', 'cat3'))
def listo(cp):
    # Print the "other" languages from the config.
    print str(cp.get('Languages', 'desco'))
    print str(cp.get('Languages', 'other'))
init()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
c8fa31a07d27a230d6c516d6c6bb9ea68dd49118 | 09b6791321b907d1142bccfbbd2ee37293e802be | /birthdb.py | 6999ab96a07bf7ec0e4690c6d28db08a20ef6174 | [] | no_license | motionfit/whale-sighting-gae-ready | f5c48821d0a8504b605c91a425a6bdee3fa7a93f | d5494116e8b41a7ad9e07aab7f358cf2aff31a94 | refs/heads/master | 2021-01-11T20:01:35.703274 | 2017-01-19T12:31:08 | 2017-01-19T12:31:08 | 79,450,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from google.appengine.ext import db
class BirthDetails(db.Model):
    """Datastore entity holding one person's birth record."""
    name = db.StringProperty()            # person's name
    date_of_birth = db.DateProperty()     # calendar date of birth
    time_of_birth = db.TimeProperty()     # time of day of birth
| [
"taparaidhamkih@gmail.com"
] | taparaidhamkih@gmail.com |
0fd1232b94b7356cada32efeaadd592028c4b8f4 | 649fa544cbf60c5f281fba693d3eb0aafcf691f3 | /apps/register/urls.py | be65eb74d72167573ecc170605e1d2260df63b8b | [] | no_license | Kallou0/smartlurning-registration | 55de4b5b043da368bc78020a2c2eeef51d97cc7c | 3c495e58c90312508e07780fde2225d07db006ca | refs/heads/master | 2022-12-15T19:07:44.359410 | 2020-09-10T18:17:23 | 2020-09-10T18:17:23 | 294,484,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | from django.conf.urls import url
from django.urls import path
from . import views
# URL routes for the register app: landing page, registration form,
# login form and the post-login success page.
urlpatterns = [
    path('', views.index, name='index'),
    path('register', views.register, name='register'),
    path('login', views.login, name='login'),
    path('success', views.success, name='success')
] | [
"nzenzamk@gmail.com"
] | nzenzamk@gmail.com |
09db4be45d5d63793dcd85353daabc9d84d3ac5d | 08ca7028e0488c420fff8c831e9d4fd3e32ee292 | /models/wideresnet.py | 59ba6496518eab9bc92f85bceb9a2459910e4762 | [] | no_license | yogeshbalaji/Adversarial-training | 0ee53fdbef2742788cbbc73ca592738347076fe2 | 3593c836f39c1313545fcc71e5ba8afa6f427326 | refs/heads/master | 2020-07-15T03:00:26.425582 | 2019-09-04T19:59:51 | 2019-09-04T19:59:51 | 205,464,494 | 12 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | from collections import OrderedDict
import torch
from torch import nn
import torch.nn.functional as F
from utils import data_normalize
def init_weight(*args):
    """Create a trainable conv weight of shape ``args``, He-initialized
    (kaiming normal, fan-out mode, ReLU nonlinearity)."""
    weight = torch.zeros(*args)
    nn.init.kaiming_normal_(weight, mode='fan_out', nonlinearity='relu')
    return nn.Parameter(weight)
class Block(nn.Module):
    """
    Pre-activated ResNet block: two (BN -> ReLU -> 3x3 conv) stages with an
    identity shortcut.  BatchNorm layers carry no affine parameters.
    """
    def __init__(self, width):
        super().__init__()
        self.bn0 = nn.BatchNorm2d(width, affine=False)
        self.register_parameter('conv0', init_weight(width, width, 3, 3))
        self.bn1 = nn.BatchNorm2d(width, affine=False)
        self.register_parameter('conv1', init_weight(width, width, 3, 3))
    def forward(self, x):
        # Run both pre-activated conv stages, then add the identity shortcut.
        h = x
        for bn, conv in ((self.bn0, self.conv0), (self.bn1, self.conv1)):
            h = F.conv2d(F.relu(bn(h)), conv, padding=1)
        return x + h
class DownsampleBlock(nn.Module):
    """
    Transition block: halves the spatial resolution and doubles the channel
    count.  The shortcut uses avg-pool plus zero-channel concatenation
    instead of a strided convolution, so it adds no parameters.
    """
    def __init__(self, width):
        super().__init__()
        self.bn0 = nn.BatchNorm2d(width // 2, affine=False)
        self.register_parameter('conv0', init_weight(width, width // 2, 3, 3))
        self.bn1 = nn.BatchNorm2d(width, affine=False)
        self.register_parameter('conv1', init_weight(width, width, 3, 3))
    def forward(self, x):
        # Residual branch: strided pre-activated conv followed by a second conv.
        h = F.conv2d(F.relu(self.bn0(x)), self.conv0, padding=1, stride=2)
        h = F.conv2d(F.relu(self.bn1(h)), self.conv1, padding=1)
        # Shortcut: average-pool to half resolution, then pad the new
        # channels with zeros to match the doubled width.
        shortcut = F.avg_pool2d(x, kernel_size=3, padding=1, stride=2)
        shortcut = torch.cat([shortcut, torch.zeros_like(shortcut)], dim=1)
        return shortcut + h
class WRN(nn.Module):
    """
    Implementation of modified Wide Residual Network.
    Differences with pre-activated ResNet and Wide ResNet:
    * BatchNorm has no affine weight and bias parameters
    * First layer has 16 * width channels
    * Last fc layer is removed in favor of 1x1 conv + F.avg_pool2d
    * Downsample is done by F.avg_pool2d + torch.cat instead of strided conv
    First and last convolutional layers are kept in float32.
    """
    def __init__(self, depth, width, num_classes):
        super().__init__()
        # Stage channel widths; per-stage block count n comes from depth = 6n + 2.
        widths = [int(v * width) for v in (16, 32, 64)]
        n = (depth - 2) // 6
        self.register_parameter('conv0', init_weight(widths[0], 3, 3, 3))
        self.group0 = self._make_block(widths[0], n)
        self.group1 = self._make_block(widths[1], n, downsample=True)
        self.group2 = self._make_block(widths[2], n, downsample=True)
        self.bn = nn.BatchNorm2d(widths[2], affine=False)
        # 1x1 conv acts as the classifier head in place of a fully-connected layer.
        self.register_parameter('conv_last', init_weight(num_classes, widths[2], 1, 1))
        self.bn_last = nn.BatchNorm2d(num_classes)
        # Per-channel input normalization constants in [0, 1] range
        # (presumably CIFAR statistics — TODO confirm against the dataset).
        self.mean = [125.3 / 255.0, 123.0 / 255.0, 113.9 / 255.0]
        self.std = [63.0 / 255.0, 62.1 / 255.0, 66.7 / 255.0]
    def _make_block(self, width, n, downsample=False):
        # Build one stage of n residual blocks; the first block of a
        # downsampling stage halves resolution and doubles channels.
        def select_block(j):
            if downsample and j == 0:
                return DownsampleBlock(width)
            return Block(width)
        return nn.Sequential(OrderedDict(('block%d' % i, select_block(i)) for i in range(n)))
    def forward(self, x):
        # Normalize, stem conv, three residual stages, final BN + ReLU,
        # 1x1 classifier conv, then global average pool to (N, num_classes).
        x = data_normalize(x, self.mean, self.std)
        h = F.conv2d(x, self.conv0, padding=1)
        h = self.group0(h)
        h = self.group1(h)
        h = self.group2(h)
        h = F.relu(self.bn(h))
        h = F.conv2d(h, self.conv_last)
        h = self.bn_last(h)
        return F.avg_pool2d(h, kernel_size=h.shape[-2:]).view(h.shape[0], -1)
| [
"yogesh22@ramawks95.umiacs.umd.edu"
] | yogesh22@ramawks95.umiacs.umd.edu |
990da46c82a76999fbddaa493ef4ca54ab52a049 | 982321c3b0cd09a47e54a63c922251731e1c07bd | /twoway.py | 92d2dc1c99dd413438b2d7b95b47d3d6d88608f2 | [] | no_license | Trung-IoT2020/IOT-Thi-t-B- | f49bb0ef551327724caf77732d689a8972566bc7 | f803aec9af2b2cb52593eacf2733997d2efb845e | refs/heads/master | 2023-02-05T14:01:22.480756 | 2020-12-31T06:46:27 | 2020-12-31T06:46:27 | 325,436,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,217 | py | import os
import time
import sys
import paho.mqtt.client as mqtt
import json
import serial
import requests
THINGSBOARD_HOST = '172.16.1.57'
ACCESS_TOKEN = '6pKSfXP7Xas580xnzkpx'
# Data capture and upload interval in seconds. Less interval will eventually hang the DHT22.
INTERVAL=2
# Serial link to the node hardware (actual writes are commented out elsewhere).
ser = serial.Serial(port='/dev/ttyS4',
                    baudrate=9600,
                    parity=serial.PARITY_NONE,
                    stopbits=serial.STOPBITS_ONE,
                    bytesize=serial.EIGHTBITS,
                    timeout=1)
# Gateway HTTP endpoints: url2 receives state updates, url3 returns all node states.
url2 = 'http://192.168.7.2:8081/GateWay/updatedata.php'
url3 = 'http://192.168.7.2:8081/GateWay/view.php'
next_reading = time.time()
client = mqtt.Client()
# Set access token
client.username_pw_set(ACCESS_TOKEN)
# Connect to ThingsBoard using default MQTT port and 60 seconds keepalive interval
client.connect(THINGSBOARD_HOST, 1883, 60)
client.loop_start()
def on_connect(client, userdata, flags, rc):
    """Paho-MQTT connect callback: log the connection result and subscribe
    to ThingsBoard RPC requests.

    BUG FIX: paho-mqtt (v1 API) invokes on_connect(client, userdata, flags, rc);
    the original signature named the 3rd argument `rc`, so the printed
    "result code" was actually the connection flags dict.
    """
    print('Connected with result code ' + str(rc))
    # Subscribing to receive RPC requests
    client.subscribe('v1/devices/me/rpc/request/+')
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Handle an RPC request from ThingsBoard: push ON (params == True) or
    OFF (params == False) state for the addressed node(s) into the gateway
    database over HTTP, then publish the resulting node states back as
    telemetry."""
    print ('Topic: ' + msg.topic + '\nMessage: ' + str(msg.payload))
    # Decode JSON request
    data = json.loads(msg.payload)
    if("params"in data):
        if data['params'] == True:
            # Command string for the serial link (serial write disabled below).
            a=(data['method']+'QC17ok')
            # ser.write(str.encode(a))
            # ser.flush()
            time.sleep(1)
            # Push the ON state for each element of `method` to the gateway DB.
            # NOTE(review): iterating a string yields single characters, so
            # i[0:1]/i[0:2] look odd here — presumably `method` carries node
            # ids; verify against the RPC publisher.
            for i in data['method']:
                data4 = {'GR': i[0:1],'NODE':i[0:2],'In1':"ON",'Out1': "ON"}
                c_6 = json.dumps(data4,indent=2)
                res1 = requests.put(url2, data=c_6, json=c_6)
            # Read back every node state and report it as telemetry.
            json_res=requests.get(url3)
            b_dict = json.loads(json_res.text)
            a_dict = json.dumps(b_dict, indent=2)
            c_dict = json_res.text
            #print(b_dict)
            for i in b_dict:
                a3={i['NODE']: i["Out1"]}
                a4 ={data['method']:i['Out1']}
                print(a3)
            # NOTE(review): publishes once with `a4` from the LAST loop
            # iteration; the OFF branch below publishes inside its loop.
            client.publish('v1/devices/me/telemetry', json.dumps(a4), 1)
        elif data['params'] == False:
            a=(data['method']+'QC12ok')
            # ser.write(str.encode(a))
            # ser.flush()
            time.sleep(1)
            for i in data['method']:
                data4 = {'GR': i[0:1],'NODE':i[0:2],'In1':"OFF",'Out1': "OFF"}
                c_6 = json.dumps(data4,indent=2)
                res1 = requests.put(url2, data=c_6, json=c_6)
            json_res=requests.get(url3)
            b_dict = json.loads(json_res.text)
            a_dict = json.dumps(b_dict, indent=2)
            c_dict = json_res.text
            #print(b_dict)
            for i in b_dict:
                a3={i['NODE']: i["Out1"]}
                a4 ={data['method']:i['Out1']}
                client.publish('v1/devices/me/telemetry', json.dumps(a4), 1)
        else:
            print("nothing")
client.on_connect = on_connect
client.on_message = on_message
try:
    # Poll the gateway database and publish any node-state change to
    # ThingsBoard until interrupted with Ctrl-C.
    while True:
        json_res = requests.get(url3)
        b_dict = json.loads(json_res.text)
        c_dict = json.dumps(b_dict)
        # Previous snapshot, used to detect state changes between polls.
        with open('compare.json', 'r') as op:
            js_ob = json.loads(op.read())
        # NOTE(review): only the first two nodes are compared (hard-coded).
        for i in range(2):
            if {js_ob[i]["NODE"], js_ob[i]["Out1"]} == {b_dict[i]["NODE"], b_dict[i]["Out1"]}:
                # print({js_ob[i]["NODE"], js_ob[i]["Out1"]})
                # print({b_dict[i]["NODE"], b_dict[i]["Out1"]})
                time.sleep(1)
                #print(" ")
            else:
                # State changed: publish this node's new output state.
                # print({b_dict[i]["NODE"], b_dict[i]["Out1"]})
                # print({js_ob[i]["NODE"], js_ob[i]["Out1"]})
                #print(" ")
                time.sleep(1)
                a2 = {b_dict[i]["NODE"]: b_dict[i]["Out1"]}
                client.publish('v1/devices/me/telemetry', json.dumps(a2), 1)
                time.sleep(1)
        # Persist the current snapshot for the next comparison round.
        with open("compare.json", "w") as of:
            of.write(c_dict)
        time.sleep(2)
        # NOTE(review): `i` here is the leftover loop index from above.
        a2 = {b_dict[i]["NODE"]: b_dict[i]["Out1"]}
        #client.publish('v1/devices/me/rpc/response/', json.dumps(a2), 1)
except KeyboardInterrupt:
    pass
client.loop_stop()
client.disconnect() | [
"vantrungD17.com"
] | vantrungD17.com |
45308f396a473523b83588deb000f8e14411debe | 5c97a3e8e83bb044b42edf32173ca4ac3f0b3afd | /scripts/run_oof_inference.py | 70a2bf377fa0fdce6c518ee88005f4f72e39a432 | [] | no_license | MahdiMohamadi/OAProgression | 6f61afccb6d12af99182bf3550207cecc83c2718 | 8240c306c33d5150696299fc58a8348f71bb0cbe | refs/heads/master | 2020-07-03T22:50:52.062696 | 2019-04-16T05:23:43 | 2019-04-16T05:23:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,198 | py | import argparse
import gc
import os
import pickle
import pprint
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from oaprogression.evaluation import tools
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
if __name__ == "__main__":
    # Out-of-fold (OOF) inference: each CV fold's model predicts on its own
    # validation split, then predictions are merged with the metadata and
    # scored.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_root', default='')
    parser.add_argument('--metadata_root', default='')
    parser.add_argument('--bs', type=int, default=32)
    parser.add_argument('--n_threads', type=int, default=12)
    parser.add_argument('--snapshots_root', default='')
    parser.add_argument('--snapshot', default='')
    parser.add_argument('--save_dir', default='')
    parser.add_argument('--from_cache', default=False)
    args = parser.parse_args()

    # The session snapshot stores the CV split, the metadata frame and the
    # training arguments needed to rebuild each fold's model.
    with open(os.path.join(args.snapshots_root, args.snapshot, 'session.pkl'), 'rb') as f:
        session_snapshot = pickle.load(f)

    preds_prog = []
    preds_kl = []
    ids = []
    if not args.from_cache:
        for fold_id in range(session_snapshot['args'][0].n_folds):
            features, fc_prog, fc_kl = tools.init_fold(fold_id, session_snapshot, args, return_fc_kl=True)

            _, val_index = session_snapshot['cv_split_train'][0][fold_id]
            x_val = session_snapshot['metadata'][0].iloc[val_index]
            loader = tools.init_loader(x_val, args)

            for batch_id, sample in enumerate(tqdm(loader, total=len(loader),
                                                   desc='Prediction from fold {}'.format(fold_id))):
                probs_prog_batch, probs_kl_batch = tools.eval_batch(sample, features, fc_prog, fc_kl)
                preds_prog.append(probs_prog_batch)
                preds_kl.append(probs_kl_batch)
                ids.extend(sample['ID_SIDE'])
            gc.collect()

        preds_prog = np.vstack(preds_prog)
        preds_kl = np.vstack(preds_kl)

        # ID_SIDE is "<patient id>_<knee side>"; split it back into columns.
        res = pd.DataFrame(data={'ID': [s.split('_')[0] for s in ids],
                                 'Side': [s.split('_')[1] for s in ids],
                                 'prog_pred_0': preds_prog[:, 0],
                                 'prog_pred_1': preds_prog[:, 1],
                                 'prog_pred_2': preds_prog[:, 2],
                                 'kl_pred_0': preds_kl[:, 0],
                                 'kl_pred_1': preds_kl[:, 1],
                                 'kl_pred_2': preds_kl[:, 2],
                                 'kl_pred_3': preds_kl[:, 3],
                                 })

        res.to_pickle(os.path.join(args.save_dir, 'oof_results.pkl'))
    else:
        res = pd.read_pickle(os.path.join(args.save_dir, 'oof_results.pkl'))

    # Normalize the join key to string dtype on both sides before merging.
    metadata = session_snapshot['metadata'][0]
    metadata.ID = metadata.ID.astype(str)
    # BUG FIX: was `res.ID = res.astype(str)`, which assigned a string-cast
    # copy of the whole frame instead of casting the ID column.
    res.ID = res.ID.astype(str)

    res = pd.merge(res, metadata, on=['ID', 'Side'])

    val_metrics = tools.calc_metrics(res.Progressor.values, res.KL.values,
                                     res[['prog_pred_0', 'prog_pred_1', 'prog_pred_2']].values,
                                     res[['kl_pred_0', 'kl_pred_1', 'kl_pred_2', 'kl_pred_3']].values, )

    pprint.pprint(val_metrics)
| [
"aleksei.tiulpin@protonmail.ch"
] | aleksei.tiulpin@protonmail.ch |
1459c64f2b0faa668eab98a381cf4631a4f046c5 | f51bf78ebea69a0b125a68b427aa7b30b3d4549c | /print_all_links.py | b51f5967b51d7a448894cb9ccdf40f4b4f74d094 | [] | no_license | ariakerstein/Python-Udacity | 9b8449599be2d6e0b232f73b08a34db7a2f614ac | b182072a2734b83e6562be0ff7188ba6e99a737e | refs/heads/master | 2020-06-30T04:13:36.514688 | 2015-01-11T18:35:03 | 2015-01-11T18:35:03 | 29,083,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | def get_next_target(page):
    # Find the next anchor tag in `page` and return (url, position of the
    # closing quote); (None, 0) when no more links exist.
    start_link = page.find('<a href=')
    if start_link == -1:
        # No further anchors on the page.
        return None, 0
    start_quote = page.find('"', start_link)
    end_quote = page.find('"', start_quote + 1)
    url = page[start_quote + 1:end_quote]
    return url, end_quote
def print_all_links(page):
while True:
url, endpos = get_next_target(page)
if url:
print url
page = page[endpos:]
else:
break
get_next_target('http://xkcd.com/353') | [
"ariakerstein@gmail.com"
] | ariakerstein@gmail.com |
6db1b3d1edda9e897114c64e57690d551d14479d | 442f8354bd79a3284826beddc68b8f018988ef0f | /app.py | d14a75221c02d686d625ab64973ad578095a3bdc | [] | no_license | arkusuma/ftsearch-server | d311c71c7d4449f03528ac662ce1af0e24c520a6 | 9741c8c8e871081e290ebef73d868b2930f2cb34 | refs/heads/master | 2016-09-06T01:42:42.412964 | 2013-09-19T07:41:13 | 2013-09-19T07:41:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,524 | py | # -*- coding: utf-8 -*-
"""
Copyright (c) 2012, Anugrah Redja Kusuma <anugrah.redja@gmail.com>
Utilization of the works is permitted provided that this
instrument is retained with the works, so that any entity
that utilizes the works is notified of this instrument.
DISCLAIMER: THE WORKS ARE WITHOUT WARRANTY.
"""
import bottle
from bottle import request, response
import sys
import json
import re
from urllib2 import urlopen
from bs4 import BeautifulSoup
app = bottle.Bottle()
DEFAULT_HOSTING = '23,1,15,13,22,27,25,34,12,8,10,9,28,2,31,36,35,37,40,42,11,47,50,51,55,54,59,60,61,62,63,64,65,67,68,69,70,71,72,73,74,75,77,79,80,81,82,83,84,85,86'
@app.route('/')
def home():
    # Landing page; the service only exposes the JSON API endpoints below.
    return '<h2>Hello World!</h2><p>Nothing to be viewed here.</p>'
@app.route('/api/search')
def search():
    """Proxy a filestube.com search and return {total, index, items} as JSON.

    The incoming query string is forwarded verbatim; when no `hosting`
    filter is supplied, DEFAULT_HOSTING is appended.  Any scraping failure
    yields an empty result instead of an HTTP error.
    """
    try:
        # load search result
        query = request.query_string
        if request.query.hosting == '':
            query = '%s&hosting=%s' % (query, DEFAULT_HOSTING)
        resp = urlopen('http://www.filestube.com/query.html?%s' % query)
        html = resp.read().decode('utf-8').replace(' ', ' ')
        # parse
        soup = BeautifulSoup(html, 'lxml')
        tags = soup.select('.book3 span')
        # First tag shows the "<start> - <end>" range, second the total hits.
        index = int(tags[0].string.split(' - ')[0])
        total = int(tags[1].string)
        items = []
        for tag in soup.select('.resultsLink'):
            if 'class' in tag.parent.attrs:
                continue
            text = tag.find_next_sibling('div').div.span.get_text().strip()
            # e.g. "host.example.com ext: .rar parts: 2 100 MB date: 2012-01-01"
            m = re.match(r'(\S+)\s+ext:\s+\.(\S+)(\s+parts:\s+(\d+))?\s+(\d+ [KMG]B)\s+date:\s+(\S+)', text)
            if m:
                site = m.group(1)
                # Keep only the last two domain labels (strip subdomains).
                m2 = re.search(r'\.([^.]+\.[^.]+)$', site)
                if m2:
                    site = m2.group(1)
                item = {}
                item['id'] = tag['href'].replace('http://www.filestube.com/', '')
                item['title'] = tag.get_text()
                item['site'] = site
                item['ext'] = m.group(2)
                item['parts'] = int(m.group(4)) if m.group(4) else 1
                item['size'] = m.group(5)
                item['date'] = m.group(6)
                items.append(item)
        result = {'total': total, 'index': index, 'items': items}
    except:
        # NOTE(review): bare except silently swallows every scraping error.
        result = {'total': 0, 'index': 0, 'items': []}
    response.content_type = 'application/json'
    return json.dumps(result)
@app.route('/api/link/<id:path>')
def link(id):
    """Scrape the filestube detail page for `id` and return the list of
    download links as JSON: [{name, size, link}, ...] (empty on failure)."""
    try:
        # load download link
        resp = urlopen('http://www.filestube.com/%s' % id)
        html = resp.read().decode('utf-8')
        # parse
        soup = BeautifulSoup(html, 'lxml')
        names = [tag['title'] for tag in soup.select('.mainltb2 a')]
        sizes = [tag.string for tag in soup.select('.tright')]
        links = re.split(r'\s+', soup.pre.string.strip())
        result = [{'name': name, 'size': size, 'link': link} \
                  for name, size, link in zip(names, sizes, links)]
    except:
        # NOTE(review): bare except hides all scraping errors.
        result = []
    response.content_type = 'application/json'
    return json.dumps(result)
if __name__ == '__main__':
    # Parse command line
    # Usage: app.py [debug] [reload] [port] [host] — arguments in any order:
    # a digit sets the port, 'debug'/'reload' toggle modes, anything else is the host.
    host = '0.0.0.0'
    port = 8080
    reload = False
    for arg in sys.argv[1:]:
        if arg == 'debug':
            bottle.debug(True)
        elif arg == 'reload':
            reload = True
        elif arg.isdigit():
            port = int(arg)
        else:
            host = arg
    # Run server
    bottle.run(app, host=host, port=port, reloader=reload)
| [
"anugrah.redja@gmail.com"
] | anugrah.redja@gmail.com |
f6e400373186312a9fcf3e60bc466491e7ced87f | 780b6cca690a213ac908b1cd5faef5366a18dc4e | /276_newbie_bite/save1_passed.py | 7568b69b77be52f1d12ae46c2c3d5cec4cd7fba1 | [] | no_license | katkaypettitt/pybites-all | 899180a588e460b343c00529c6a742527e4ea1bc | 391c07ecac0d92d5dc7c537bcf92eb6c1fdda896 | refs/heads/main | 2023-08-22T16:33:11.171732 | 2021-10-24T17:29:44 | 2021-10-24T17:29:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # Hint: Don't forget the 4 leading spaces to
# indicate your code is within the function.
a = 10
b = 5
def multiply_numbers(a, b):
    """Return the product of *a* and *b*."""
    product = b * a
    return product
def enter_name():
    # Prompt for and capture the user's name.
    username = input("What is your name?")
return username | [
"70788275+katrinaalaimo@users.noreply.github.com"
] | 70788275+katrinaalaimo@users.noreply.github.com |
c597b2434ade753840e6bf1086500f102a3f7799 | 5f18d03466732eeb90f0ce8d07b3272161936e2c | /venv/03_asking_questions_v3.py | 693cc36af686f5a1bf71643cf20e4a767f9858e6 | [] | no_license | FrancesMangos/03_quiz_assessment | ddd19064c749ae509881bebcd349a5477fcd795b | 29cdd029a8d441ee1cd1aefe20667980d3523211 | refs/heads/master | 2023-09-02T17:47:59.910894 | 2021-11-15T00:29:16 | 2021-11-15T00:29:16 | 405,781,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | play_game = True
test_list = [["What is the most used streaming service?", "netflix", "disney+", "hulu"],
["What is the smallest planet in our solar system?", "mercury", "venus", "earth"],
["In which country did Lego originate from?", "denmark", "germany", "england"],
["What chases the player's character in Pacman?", "ghosts", "goblins", "inner demons and self doubt"]]
correct_answers = ["netflix", "mercury", "denmark", "ghosts"]
question = 1
# Main quiz loop: present each remaining question, grade the answer,
# then drop the question from the queue.
while play_game and test_list:
    print("Question {}".format(question))
    print(test_list[0][0])
    print("a. {}".format(test_list[0][1]))
    print("b. {}".format(test_list[0][2]))
    print("c. {}".format(test_list[0][3]))
    guess = input("What is your answer?")
    # BUG FIX: compare against THIS question's answer rather than membership
    # in the whole answer list (previously typing e.g. "mercury" for
    # question 1 was counted as correct).
    if guess == correct_answers[question - 1]:
        print("CORRECT!")
    else:
        print("INCORRECT. :(")
    del test_list[0]
    question += 1
    print()
play_game = False
if play_game == False:
print("Game Over")
| [
"magno.franc@awatapu.school.nz"
] | magno.franc@awatapu.school.nz |
e240efd4b800cd678c6f60b6c25898416bff474f | ed84cd95a6aa589cfb995c81c8ccf4aa1f9a0f40 | /apps/main.py | caf814fee0a2d80e12e1ee2a38e64d9b8e7f4d82 | [] | no_license | mchevro/FLASK-JWT | 9f61cb15310b9ecc2b6c6f5c60f81b53d32c4596 | 9e26d81616d26ca4a8fa0277bc272666ed3a4eda | refs/heads/main | 2023-03-29T07:29:18.159956 | 2021-03-31T18:29:47 | 2021-03-31T18:29:47 | 353,452,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | #Source From : https://www.youtube.com/watch?v=e-_tsR0hVLQ
from flask import Flask, jsonify, request, session, render_template, flash, make_response
from functools import wraps
import jwt
import datetime
app = Flask(__name__)
app.config['SECRET_KEY'] = b'_\x00\xc0\x90\x08\x88C\xfb\xf8\x13\xe8\\\xb3\x1f4\x14'
def check_for_token(func): # Decorator: require a valid JWT `token` query parameter
    @wraps(func)
    def wrapped(*args, **kwargs):
        token = request.args.get('token')
        if not token: # No `token` parameter supplied -> reject
            return jsonify({'message': 'Missing Token'}), 403
        try: # Try to decode the token with the app's secret key
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms="HS256")
        except: # Invalid or expired token -> reject
            return jsonify({'message': 'Invalid Token'}), 403
        return func(*args, **kwargs)
    return wrapped
@app.route('/')
def index():
    # Show the login page until a session flag has been set by /login.
    if not session.get('logged_in'):
        return render_template('login.html')
    else:
        return 'Currently Logged In'
@app.route('/login', methods=['POST']) # Login handler
def login():
    # NOTE(review): any non-empty username with the hard-coded password
    # 'password' is accepted — presumably demo-only; confirm before real use.
    if request.form['username'] and request.form['password'] == 'password':
        session['logged_in'] = True
        token = jwt.encode({ # Build the JWT
            'user': request.form['username'],
            'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=60) # Token valid for 60 seconds
        },
            app.config['SECRET_KEY'], algorithm="HS256")
        return jsonify({'token': token})
    else:
        return make_response('Unable to verivy', 403, {'WWW-Authenticate': 'Basic realm: "Login"'})
@app.route('/logout')
def logout():
    # Clear the session flag ('Hapus Session' = "session deleted").
    session.pop('logged_in', None)
    return 'Hapus Session'
@app.route('/public')
def public():
    return 'For Public' # Viewable without a token
@app.route('/auth')
@check_for_token
def authorized():
    return 'This is only viewble with a token' # Requires the `token` query parameter
if __name__ == '__main__':
app.run(debug=True) | [
"noreply@github.com"
] | noreply@github.com |
a2c68ce90de87d5877c312dba618fe1424b344e3 | 00dbe07732fec1d24bc86d436d4f448e3c8494eb | /卫生监督1024.py | c842e6669ae621ebf844f16ee17944920a27ea2d | [] | no_license | fx19870419/WSJD1024 | 263e8614747caa8a160a2724601e436b3660511b | 500c76cb13c9289b0b93c249f003ab2c67293677 | refs/heads/master | 2020-09-06T22:54:04.086110 | 2019-12-05T08:54:20 | 2019-12-05T08:54:20 | 220,581,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,397 | py | import datetime
import time
import os
import shutil
from selenium import webdriver
from selenium.webdriver.support.select import Select
import sys
import openpyxl
# Append 'C:\\python' to the PATH environment variable (so bundled tools,
# e.g. the Selenium browser driver, can be located).
env_path = os.getenv('path') + ';C:\\python'
os.environ['Path'] = env_path
# Ask for the reporting month as yyyy-mm; empty input means the current month.
yyyymm = input('请输入数据年月(格式yyyy-mm),当月请直接按回车键:')
if yyyymm == '':
    today = datetime.date.today()
    yyyy = str(today.year)
    mm = str(today.month)
    # Months elapsed since January 2019 (used as an index elsewhere).
    mm_2019 = (int(yyyy) - 2019) * 12 + int(mm)
    yyyymm = yyyy + '-' + mm
else :
    yyyy = yyyymm[0:4]
    mm = yyyymm[5:]
    mm_2019 = (int(yyyy) - 2019) * 12 + int(mm)
# Read configuration from the info workbook: paths, credentials and shop IDs.
info_xlsx = openpyxl.load_workbook('信息表格.xlsx',data_only = True)
info_sht = info_xlsx['Sheet1']
PROSAS_path = info_sht.cell(1,4).value
path_read = info_sht.cell(2,4).value
path_schedul = info_sht.cell(3,4).value
path_sum = info_sht.cell(4,4).value
dir_read = os.path.join(path_read,yyyymm)
# '已填报' = "already filed"; create the archive folder for processed records.
dir_save = os.path.join(path_read,yyyymm,'已填报')
while os.path.exists(dir_save) == False:
    os.makedirs(dir_save)
username = info_sht['B1'].value
password = info_sht['B2'].value
# Map shop name -> registration number, from row 4 downwards.
shop_name_id = {}
for i in range(4,info_sht.max_row+1):
    shop_name_id[info_sht.cell(i,1).value] = info_sht.cell(i,2).value
info_xlsx.save('信息表格.xlsx')
# Open the progress workbook and grab its supervision tracking sheet.
schedul_xlsx = openpyxl.load_workbook(path_schedul,data_only = True)
schedul_sht = schedul_xlsx['卫生监督进度表']
# Collect every file under this month's folder.
files_xlsx = []
files_save = []
for root,dirs,files in os.walk(dir_read,topdown = False):
    for file in files:
        files_xlsx.append(os.path.join(root,file))
# Drop leading entries that live in the already-filed ('已填报') folder.
if files_xlsx != []:
    while ('已填报' in files_xlsx[0]) == True:
        del files_xlsx[0]
        if files_xlsx == []:
            break
# Map an inspection verdict to the score string recorded on the form;
# x is the item's (even) column index, y the verdict string.
def trueorfalse(x,y):
    """Return the score for item column *x* given verdict *y*.

    "符合" (pass) -> the item's full score from the module-level ``typ_list``;
    "合理缺项" (reasonable omission) -> '99'; "不符合" (fail) -> '0';
    any other verdict -> None.
    """
    if y == "不符合":
        return '0'
    if y == "合理缺项":
        return '99'
    if y == "符合":
        return typ_list[int(x/2-1)]
# Write one scored inspection item into the PROSAS web form.
def result(score,i):
    # Clicks the radio button selected by css selector name_score+value_score;
    # for a failing item (score == '0') it additionally types the inspector's
    # remark (list_score[i+1]) into the matching explanation input.
    # NOTE(review): relies on module-level globals set by the calling loop:
    # browser, name_score, value_score, input_score and list_score.
    if score!='0':
        el_score=browser.find_element_by_css_selector(name_score+value_score)
        browser.execute_script("arguments[0].scrollIntoView();",el_score)
        browser.execute_script("arguments[0].click();",el_score)
    else:
        el_score=browser.find_element_by_css_selector(name_score+value_score)
        browser.execute_script("arguments[0].scrollIntoView();",el_score)
        browser.execute_script("arguments[0].click();",el_score)
        el_explain=browser.find_element_by_css_selector(input_score)
        el_explain.send_keys(list_score[i+1])
if files_xlsx == []:
print('未发现卫生监督记录,即将进行卫生监督统计...')
else:
tian_or_not = input('是否启动PROSAS填报?(y/n)')
if tian_or_not == 'n':
print('即将进行卫生监督统计,但部分监督记录尚未填报,该记录将不会纳入统计,请注意!')
elif tian_or_not != 'n' and tian_or_not != 'y':
print('您的输入有误,程序终止,请重新启动程序并正确输入!')
sys.exit()
elif tian_or_not == 'y':
#开浏览器、打开网页
browser = webdriver.Firefox()
browser.maximize_window()
browser.get(PROSAS_path)
#登录账号密码
while 1:
try:
el_username=browser.find_element_by_id('username')
el_username.send_keys(username)#输入用户名
print('输入账号………………成功')
el_password=browser.find_element_by_id('password')
el_password.send_keys(password)#输入密码
print('输入密码………………成功')
submit=browser.find_element_by_name('submit')
submit.click()#登录按钮
print('登录……………………成功')
break
except:
print('登录失败,请检查网络')
#找到监督评分→点击
el_ywjg=browser.find_element_by_id('heTab105')
el_ywjg.click()#点击“卫生监督”按钮
el_rcwsjd=browser.find_element_by_partial_link_text('日常卫生监督')
el_rcwsjd.click()#点击日常卫生监督按钮
el_jdpf=browser.find_element_by_partial_link_text('监督评分')
el_jdpf.click()#点击监督评分按钮
for file_name in files_xlsx:
#加载文件
wb=openpyxl.load_workbook(file_name)
sheet=wb['Sheet1']
#判断卫生监督类型赋值给typ_jd和typ_list变量并确定各项的分值
if '餐饮' in file_name:
typ_jd='餐饮服务'
typ_list=['※','※','※','2','5','※','2','2','2','2','※','※','※','2','2','1','5','1','2','5','5','2','※','5','10','5','5','1','2','5','2','5','2','※','5','2','※','2','5','2','2','5','5','5','2','※','2','2','2','2','2','1','2','5','2','5','2','2','2','2','2','2','2','※','2','2','5','5','※','2','5','5','5','2']
elif '生产' in file_name:
typ_jd='食品生产'
typ_list=['※','※','※','2','5','※','5','5','2','2','※','※','※','※','5','5','5','2','※','※','5','5','2','2','5','2','2','5','10','5','※','※','5','10','10','5','※','5','10','※','5','5','5','2','2','※','2','2','2','5','10','5','※','※','5','5','5']
elif '饮用水' in file_name:
typ_jd='饮用水供应'
typ_list=['※','※','※','5','10','5','5','5','5','5','※','※','5','5','5','※','5','2','5','5','2','5','5','10','10','5','※','2','※','5','5','※','2','2','10','2']
elif '销售' in file_name:
typ_jd='食品销售'
typ_list=['※','※','※','5','5','5','5','5','5','※','※','10','10','5','5','5','※','5','5','2','5','2','2','※','5','5','10','5','5','5','5','10','5']
elif '住宿' in file_name:
typ_jd='住宿业'
typ_list=['※','※','※','5','10','5','5','2','※','5','10','3','5','3','※','2','10','※','10','10','5','5','※','5','5','5','2','3','3','3','3','3','2','3','5']
elif '候车(机、船)室' in file_name:
typ_jd='候车(机、船)室'
typ_list=['※','※','※','5','10','5','5','2','※','5','10','3','5','3','※','2','10','※','10','10','5','5','※','5','5','5','5','※','10']
elif '文化娱乐场所' in file_name:
typ_jd='文化娱乐场所'
typ_list=['※','※','※','5','10','5','5','2','※','5','10','3','5','3','※','2','10','※','10','10','5','5','※','5','5','5','3','10']
elif '美容美发场所' in file_name:
typ_jd='美容美发场所'
typ_list=['※','※','※','5','10','5','5','2','※','5','10','3','5','3','※','2','10','※','10','10','5','5','※','5','5','5','5','6','※']
elif '沐浴场所' in file_name:
typ_jd='沐浴场所'
typ_list=['※','※','※','5','10','5','5','2','※','5','10','3','5','3','※','2','10','※','10','10','5','5','※','5','5','5','5','2','5','5','3']
elif '游泳场所' in file_name:
typ_jd='游泳场所'
typ_list=['※','※','※','5','10','5','5','2','※','5','10','3','5','3','※','2','10','※','10','10','5','5','※','5','5','5','2','3','3']
#将一次卫生监督结果存入list_score变量,然后将变量写入浏览器表单
for r in range(2,sheet.max_row+1):
list_score=[]
for c in range(1,sheet.max_column+1):
list_score.append(sheet.cell(r,c).value)
for i in range(len(list_score)):
if list_score[i] == '不符合' and list_score[i+1] == None:
list_score[i+1]=' '
time.sleep(8)
browser.switch_to.default_content()
el_frame=browser.find_element_by_class_name('iframeClass')
browser.switch_to.frame(el_frame)
el_No=browser.find_element_by_name('cardNo')
el_No.clear()
el_No.send_keys(shop_name_id[list_score[1]])
el_startDate=browser.find_element_by_name('startDate')
sDate=datetime.datetime.now()-datetime.timedelta(days=365)#起始日期(当前时间往前推365天)
browser.execute_script('arguments[0].removeAttribute(\"readonly\")',el_startDate)
el_startDate.clear()
el_startDate.send_keys(str(sDate.year)+'-'+str(sDate.month)+'-'+str(sDate.day))#输入起始日期(当前时间往前推365天)
el_submit=browser.find_element_by_xpath("//input[@value='查询']")
el_submit.click()
time.sleep(5)
el_add=browser.find_element_by_xpath("//i[@title='监督打分']")
el_add.click()
time.sleep(3)
el_type=browser.find_element_by_id('itemCode')
Select(el_type).select_by_visible_text(typ_jd)
el_typechange=browser.find_element_by_xpath("//a[contains(text(),'修改监督评分表类型')]")
el_typechange.click()
time.sleep(0.3)
browser.switch_to.default_content()
el_frame_type=browser.find_element_by_css_selector("[src='/prosas/dailySup/listNoQuery.html?menuId=8B4C90F4861945B59DD330DA2378B103']")
browser.switch_to.frame(el_frame_type)
#el_typesubmit=browser.find_element_by_css_selector("button")
el_typesubmit=browser.find_element_by_css_selector("button[class='aui_state_highlight'][type='button']")
#el_typesubmit.click()
browser.execute_script("$(arguments[0]).click()",el_typesubmit)
for i in range(2,len(list_score),2):
i=int(i)
time.sleep(0.5)
score=trueorfalse(i,list_score[i])
if typ_jd=='餐饮服务':
if 0<i<9:
name_score="[name='score01"+(str(int(i/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input01"+(str(int(i/2)).rjust(2,'0'))+"']"
result(score,i)
elif 9<i<23:
name_score="[name='score02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 23<i<31:
name_score="[name='score03"+(str(int((i-22)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input03"+(str(int((i-22)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 31<i<35:
name_score="[name='score04"+(str(int((i-30)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input04"+(str(int((i-30)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 35<i<45:
name_score="[name='score05"+(str(int((i-34)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input05"+(str(int((i-34)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 45<i<53:
name_score="[name='score06"+(str(int((i-44)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input06"+(str(int((i-44)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 53<i<63:
name_score="[name='score07"+(str(int((i-52)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input07"+(str(int((i-52)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 63<i<67:
name_score="[name='score08"+(str(int((i-62)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input08"+(str(int((i-62)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 67<i<73:
name_score="[name='score09"+(str(int((i-66)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input09"+(str(int((i-66)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 73<i<91:
name_score="[name='score10"+(str(int((i-72)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input10"+(str(int((i-72)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 91<i<111:
name_score="[name='score11"+(str(int((i-90)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input11"+(str(int((i-90)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 111<i<123:
name_score="[name='score12"+(str(int((i-110)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input12"+(str(int((i-110)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 123<i<137:
name_score="[name='score13"+(str(int((i-122)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input13"+(str(int((i-122)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 137<i<143:
name_score="[name='score14"+(str(int((i-136)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input14"+(str(int((i-136)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 143<i<147:
name_score="[name='score15"+(str(int((i-142)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input15"+(str(int((i-142)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 147<i<149:
name_score="[name='score16"+(str(int((i-146)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input16"+(str(int((i-146)/2)).rjust(2,'0'))+"']"
result(score,i)
elif typ_jd=='食品生产':
if 0<i<9:
name_score="[name='score01"+(str(int(i/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input01"+(str(int(i/2)).rjust(2,'0'))+"']"
result(score,i)
elif 9<i<21:
name_score="[name='score02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 21<i<23:
name_score="[name='score03"+(str(int((i-20)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input03"+(str(int((i-20)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 23<i<47:
name_score="[name='score04"+(str(int((i-22)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input04"+(str(int((i-22)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 47<i<57:
name_score="[name='score05"+(str(int((i-46)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input05"+(str(int((i-46)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 57<i<79:
name_score="[name='score06"+(str(int((i-56)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input06"+(str(int((i-56)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 79<i<85:
name_score="[name='score07"+(str(int((i-78)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input07"+(str(int((i-78)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 85<i<95:
name_score="[name='score08"+(str(int((i-84)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input08"+(str(int((i-84)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 95<i<101:
name_score="[name='score09"+(str(int((i-94)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input09"+(str(int((i-94)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 101<i<105:
name_score="[name='score10"+(str(int((i-100)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input10"+(str(int((i-100)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 105<i<115:
name_score="[name='score11"+(str(int((i-104)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input11"+(str(int((i-104)/2)).rjust(2,'0'))+"']"
result(score,i)
elif typ_jd=='饮用水供应':
if 0<i<9:
name_score="[name='score01"+(str(int(i/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input01"+(str(int(i/2)).rjust(2,'0'))+"']"
result(score,i)
elif 9<i<21:
name_score="[name='score02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 21<i<23:
name_score="[name='score03"+(str(int((i-20)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input03"+(str(int((i-20)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 23<i<25:
name_score="[name='score04"+(str(int((i-22)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input04"+(str(int((i-22)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 25<i<33:
name_score="[name='score05"+(str(int((i-24)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input05"+(str(int((i-24)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 33<i<37:
name_score="[name='score06"+(str(int((i-32)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input06"+(str(int((i-32)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 37<i<43:
name_score="[name='score07"+(str(int((i-36)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input07"+(str(int((i-36)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 43<i<63:
name_score="[name='score08"+(str(int((i-42)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input08"+(str(int((i-42)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 63<i<65:
name_score="[name='score09"+(str(int((i-62)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input09"+(str(int((i-62)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 65<i<75:
name_score="[name='score10"+(str(int((i-64)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input10"+(str(int((i-64)/2)).rjust(2,'0'))+"']"
result(score,i)
elif typ_jd=='食品销售':
if 0<i<9:
name_score="[name='score01"+(str(int(i/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input01"+(str(int(i/2)).rjust(2,'0'))+"']"
result(score,i)
elif 9<i<19:
name_score="[name='score02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 19<i<21:
name_score="[name='score03"+(str(int((i-18)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input03"+(str(int((i-18)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 21<i<39:
name_score="[name='score04"+(str(int((i-20)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input04"+(str(int((i-20)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 39<i<47:
name_score="[name='score05"+(str(int((i-38)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input05"+(str(int((i-38)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 47<i<67:
name_score="[name='score06"+(str(int((i-46)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input06"+(str(int((i-46)/2)).rjust(2,'0'))+"']"
result(score,i)
elif typ_jd=='住宿业':
if 0<i<9:
name_score="[name='score01"+(str(int(i/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input02"+(str(int(i/2)).rjust(2,'0'))+"']"
result(score,i)
elif 9<i<19:
name_score="[name='score02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 19<i<27:
name_score="[name='score03"+(str(int((i-18)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input03"+(str(int((i-18)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 27<i<33:
name_score="[name='score04"+(str(int((i-26)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input04"+(str(int((i-26)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 33<i<37:
name_score="[name='score05"+(str(int((i-32)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input05"+(str(int((i-32)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 37<i<41:
name_score="[name='score06"+(str(int((i-36)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input06"+(str(int((i-36)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 41<i<45:
name_score="[name='score07"+(str(int((i-40)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input07"+(str(int((i-40)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 45<i<47:
name_score="[name='score08"+(str(int((i-44)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input08"+(str(int((i-44)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 47<i<51:
name_score="[name='score09"+(str(int((i-46)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input09"+(str(int((i-46)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 51<i<53:
name_score="[name='score10"+(str(int((i-50)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input10"+(str(int((i-50)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 53<i<71:
name_score="[name='score11"+(str(int((i-52)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input11"+(str(int((i-52)/2)).rjust(2,'0'))+"']"
result(score,i)
elif typ_jd=='候车(机、船)室':
if 0<i<9:
name_score="[name='score01"+(str(int(i/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input01"+(str(int(i/2)).rjust(2,'0'))+"']"
result(score,i)
elif 9<i<19:
name_score="[name='score02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input02"+(str(int((i-8)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 19<i<27:
name_score="[name='score03"+(str(int((i-18)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input03"+(str(int((i-18)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 27<i<33:
name_score="[name='score04"+(str(int((i-26)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input04"+(str(int((i-26)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 33<i<37:
name_score="[name='score04"+(str(int((i-32)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input04"+(str(int((i-32)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 37<i<41:
name_score="[name='score05"+(str(int((i-36)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input05"+(str(int((i-36)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 41<i<45:
name_score="[name='score06"+(str(int((i-40)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input06"+(str(int((i-40)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 45<i<51:
name_score="[name='score07"+(str(int((i-44)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input07"+(str(int((i-44)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 51<i<53:
name_score="[name='score08"+(str(int((i-50)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input08"+(str(int((i-50)/2)).rjust(2,'0'))+"']"
result(score,i)
elif 53<i<77:
name_score="[name='score09"+(str(int((i-52)/2)).rjust(2,'0'))+"']"
value_score="[value='"+trueorfalse(i,list_score[i])+"']"
input_score="[name='input09"+(str(int((i-52)/2)).rjust(2,'0'))+"']"
result(score,i)
el_sum=browser.find_element_by_xpath("//a[contains(text(),'计算结果')]")
el_sum.click()
#填入监督日期
el_supdate=browser.find_element_by_name('supScores.supDate')
browser.execute_script('arguments[0].removeAttribute(\"readonly\")',el_supdate)
el_supdate.clear()
#el_supdate.send_keys(list_score[152][0:4]+'-'+list_score[152][5:7]+'-'+list_score[152][8:10]+' '+) #输入监督日期(当前时间往前推200天)
if typ_jd=='餐饮服务':
el_supdate.send_keys(list_score[153])
if typ_jd=='食品生产':
el_supdate.send_keys(list_score[119])
elif typ_jd=='饮用水供应':
el_supdate.send_keys(list_score[79])
elif typ_jd=='食品销售':
el_supdate.send_keys(list_score[71])
elif typ_jd=='住宿业':
el_supdate.send_keys(list_score[75])
elif typ_jd=='候车(机、船)室':
el_supdate.send_keys(list_score[81])
el_pfjgclick=browser.find_element_by_xpath("//label[contains(text(),'评分结果')]")
el_pfjgclick.click()
el_save=browser.find_element_by_xpath("//button[contains(text(),'保存')]")
el_save.click()
el_sumbit_2=browser.find_element_by_xpath("//a[contains(text(),'确定')]")
el_sumbit_2.click()
print(list_score[1].ljust(20,'…') + '已完成录入')
#录完一家做记录并向后填写‘-’
shop_row = 3
while schedul_sht.cell(shop_row,1).value != list_score[1]:
shop_row += 1
schedul_sht.cell(shop_row,(mm_2019 * 3)).value = '√'
if schedul_sht.cell(shop_row,2).value == 'A级':
for i in range(1,6):
schedul_sht.cell(shop_row,((mm_2019 + i) * 3)).value = '-'
elif schedul_sht.cell(shop_row,2).value == 'B级':
for i in range(1,3):
schedul_sht.cell(shop_row,((mm_2019 + i) * 3)).value = '-'
elif schedul_sht.cell(shop_row,2).value == '未定级':
for i in range(1,2):
schedul_sht.cell(shop_row,((mm_2019 + i) * 3)).value = '-'
schedul_xlsx.save(path_schedul)
#把该文件放入下一层文件夹中
shutil.move(file_name,dir_save)
print('已将文件' + file_name + '移入' + dir_save)
print('======================================================')
# Walk the schedule sheet: for any unit whose record for the current month is
# empty, search backwards for the most recent real inspection mark, then fill
# "-" into the following months covered by the unit's grade; warn when no
# inspection record exists at all.
for mm_row in range(3,schedul_sht.max_row+1):
    mm_col = mm_2019
    # Step back one month at a time past empty cells and "-" placeholders.
    # NOTE: `== None` works but `is None` would be idiomatic.
    while (schedul_sht.cell(mm_row,(mm_col * 3)).value == None) or (schedul_sht.cell(mm_row,(mm_col * 3)).value == '-'):
        mm_col -= 1
        if mm_col == 0:
            print(path_schedul + '中未发现' + schedul_sht.cell(mm_row,1).value + '的卫生监督记录,请填写至少一次,否则无法纳入统计!')
            # NOTE(review): incrementing the for-loop variable has no effect (the
            # for statement rebinds mm_row next iteration), and this break only
            # exits the while loop, so the grade branches below still run with
            # mm_col == 0 — confirm this is intended.
            mm_row += 1
            break
    # Grade determines how many of the following months are exempt ("-").
    if schedul_sht.cell(mm_row,2).value == 'A级':
        for i in range(1,6):
            schedul_sht.cell(mm_row,((mm_col + i) * 3)).value = '-'
    elif schedul_sht.cell(mm_row,2).value == 'B级':
        for i in range(1,3):
            schedul_sht.cell(mm_row,((mm_col + i) * 3)).value = '-'
    elif schedul_sht.cell(mm_row,2).value == '未定级':
        for i in range(1,2):
            schedul_sht.cell(mm_row,((mm_col + i) * 3)).value = '-'
schedul_xlsx.save(path_schedul)
print('======================================================')
# Read the schedule sheet back and print a per-unit status report for this month.
shop_todo = []     # units still to be inspected this month (cell empty)
shop_nottodo = []  # units exempt this month (cell holds "-")
shop_finish = []   # units already inspected this month (cell holds "√")
for mm_row in range(3,schedul_sht.max_row + 1):
    if schedul_sht.cell(mm_row,(mm_2019 * 3)).value == '√':
        shop_finish.append(schedul_sht.cell(mm_row,1).value)
    elif schedul_sht.cell(mm_row,(mm_2019 * 3)).value == '-':
        shop_nottodo.append(schedul_sht.cell(mm_row,1).value)
    elif schedul_sht.cell(mm_row,(mm_2019 * 3)).value == None:
        shop_todo.append(schedul_sht.cell(mm_row,1).value)
print(yyyy + '年' + mm + '月' + '卫生监督情况如下:')
print('不必监管:')
for i in shop_nottodo:
    print(i)
print('===================')
print('本月已完成:')
for i in shop_finish:
    print(i)
print('===================')
print('本月需监管:')
for i in shop_todo:
    print(i)
print('======================================================')
# Build the monthly summary from every workbook archived under dir_save.
for root,dirs,files in os.walk(dir_save):
    for file in files:
        files_save.append(os.path.join(root,file))
shop_name = []       # unit names seen so far, used for duplicate detection
shop_coun = 0        # number of inspections performed
wenti_coun = 0       # count of "不符合" (non-conforming) findings
employee_coun = 0    # total employees covered
for file in files_save:
    wb_save = openpyxl.load_workbook(file)
    wb_save_sht = wb_save['Sheet1']
    # Locate the "员工数" (employee count) column from the header row.
    for c in range(1,wb_save_sht.max_column + 1):
        if wb_save_sht.cell(1,c).value == '员工数':
            yg_c = c
    for r in range(2,wb_save_sht.max_row + 1):
        # A duplicate unit name means the totals below may double-count.
        if wb_save_sht.cell(r,2).value in shop_name:
            print('监测到重复的单位名称,统计结果可能不准确,请确认!')
            print('重复单位名称为:' + wb_save_sht.cell(r,2).value)
        shop_name.append(wb_save_sht.cell(r,2).value)
        shop_coun += 1
        employee_coun += wb_save_sht.cell(r,yg_c).value
        for c in range (1,wb_save_sht.max_column + 1):
            if wb_save_sht.cell(r,c).value == '不符合':
                wenti_coun += 1
    wb_save.save(file)
# Convert the counters to strings for concatenation into the template below.
shop_coun = str(shop_coun)
wenti_coun = str(wenti_coun)
employee_coun = str(employee_coun)
txt = '本月总结:\n\
企业共X家,\n\
开展卫生监督'+ shop_coun + '次,\n\
监管' + employee_coun + '人,\n\
快速检测X次X个项目,\n\
发现阳性问题' + wenti_coun +'个。\n\
采样送检X批次。\n\n\
开展开展鼠类夹夜法、鼠笼法及蚤类、寄生蜱、螨类监测一次、\n\
蚊类二氧化碳诱蚊灯监测两次、\n\
蚊类诱卵器监测一次、\n\
蠓类紫外灯监测一次,\n\
捕获均为0。\n\n\
对西宁机场T1、T2航站楼及贵宾厅开展公共场所空气质量监测,\n\
共对X个点位进行X项监测,\n\
发现不合格X项\n\n\n'
# Append (mode "a") the summary to this month's 汇总 text file.
txt_write = open(os.path.join(path_sum,(yyyymm + '汇总.txt')),'a',encoding = 'utf-8')
txt_write.write(txt)
txt_write.close()
print(txt)
print('已生成文件' + os.path.join(path_sum,(yyyymm + '汇总.txt')))
input('按回车键退出')
| [
"fx19870419@163.com"
] | fx19870419@163.com |
e2e10c2c8149bfa47b105a6177f72db01b0f512a | 7961640734faa157c4e55e5a5a68142a09dc5b24 | /12_return_statement.py | a838d889734fcbaa572938d4f69805034e3ab75c | [] | no_license | SergoMartynov/python-course-mikedane | b0ed8e0a61342da8972714c59585e00c9beb0361 | 8dcd9aba18c05f99dbae637631d4e9372ee3d163 | refs/heads/master | 2022-12-09T02:34:10.294095 | 2020-09-11T12:44:52 | 2020-09-11T12:44:52 | 294,692,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | def cube(num):
return pow(num, 3)
result = cube(3)
print(result)
| [
"sergo.martynov@gmail.com"
] | sergo.martynov@gmail.com |
6c2d3a9bb2d2d44039b292add4f1df3997783e1e | 554c17471f288f92d9ff98d0ee60b6d283832c77 | /WarGexe 0.0.4.py | bfe2d2d7b109875e844a3148c6b67d03d5a65776 | [] | no_license | EdouardVincent/WarGame | bf911a580c1b88bb131021c9857fdba1f08e9b1f | 5501f7bbbf45de43f28eb4dab615a0538cccfebe | refs/heads/main | 2023-08-29T04:34:14.485509 | 2021-11-01T12:23:17 | 2021-11-01T12:23:17 | 423,443,977 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,897 | py | #WarGame
import pygame, sys, time, random
from pygame.locals import *
pygame.init()
pygame.mixer.init()
nb_joysticks = pygame.joystick.get_count()
# Create a joystick object if at least one controller is connected
if nb_joysticks > 0:
    joystick = pygame.joystick.Joystick(0)
    joystick.init() # initialise the controller
fenetre= pygame.display.set_mode ((1000,400))
# Three font sizes: 18 for HUD text, 50 for banners, 30 for sub-titles.
fond = pygame.font.Font('freesansbold.ttf', 18)
fond2 = pygame.font.Font('freesansbold.ttf', 50)
fond3 = pygame.font.Font('freesansbold.ttf', 30)
# Sprite and UI images (absolute Windows paths into the assets folder).
image_droite_rambo = pygame.image.load('C:\Jeu personnages\chasseur.png')
bullet = pygame.image.load('C:\Jeu personnages\Z.gif')
bg=pygame.image.load('C:\Jeu personnages\OIP.png')
cs = pygame.image.load('C:\Jeu personnages\OIP (2).png')
papi = pygame.image.load('C:\Jeu personnages\OIP (1).png')
sanglier_image = pygame.image.load('C:\Jeu personnages\OIP (3).png')
Icone = pygame.image.load('C:\Jeu personnages\OIP.ico')
coeur1 = pygame.image.load('C:\Jeu personnages\OIP (6).png')
coeur2 = pygame.image.load('C:\Jeu personnages\OIP (6).png')
soldier_image = pygame.image.load('C:\Jeu personnages\OIP (4).png')
space_soldier_image = pygame.image.load('C:\Jeu personnages\OIP (5).png')
james_image = pygame.image.load('C:\Jeu personnages\OIP (7).png')
icone_balle = pygame.image.load('C:\Jeu personnages\OIP (8).png')
menu_img = pygame.image.load('C:\Jeu personnages\OIP (13).png')
bouton_jouer = pygame.image.load('C:\Jeu personnages\OIP (12).png')
bouton_quitter = pygame.image.load('C:\Jeu personnages\OIP (14).png')
play = pygame.image.load('C:\Jeu personnages\OIP (15).png')
terroriste_image = pygame.image.load('C:\Jeu personnages\OIP (17).png')
kamikaze = pygame.image.load('C:\Jeu personnages\OIP (18).png')
colonel = pygame.image.load('C:\Jeu personnages\OIP (19).png')
dialogue_colonel_1 = pygame.image.load('C:\Jeu personnages\OIP (20).png')
dialogue_colonel_2 = pygame.image.load('C:\Jeu personnages\OIP (21).png')
bouton_commencer = pygame.image.load('C:\Jeu personnages\OIP (22).png')
stop = pygame.image.load('C:\Jeu personnages\OIP (16).png')
Kaboul = pygame.image.load('C:\Jeu personnages\OIP (23).png')
nouvelle_partie = pygame.image.load('C:\Jeu personnages\OIP (24).png')
continuer_partie = pygame.image.load('C:\Jeu personnages\OIP (25).png')
capitaine_Mitchell_image = pygame.image.load('C:\Jeu personnages\OIP (26).png')
foret = pygame.image.load('C:\Jeu personnages\OIP (27).png')
balle_gauche_image = pygame.image.load('C:\Jeu personnages\OIP (28).png')
ground = pygame.image.load('C:\Jeu personnages\OIP (29).png')
échelle_image = pygame.image.load('C:\Jeu personnages\OIP (31).png')
passerelle = pygame.image.load('C:\Jeu personnages\OIP (32).png')
passerelle_spéciale = pygame.image.load('C:\Jeu personnages\OIP (33).png')
zone_vide = pygame.image.load('C:\Jeu personnages\OIP (34).png')
detection_ennemi = pygame.image.load('C:\Jeu personnages\OIP (35).png')
dialogue_colonel_3 = pygame.image.load('C:\Jeu personnages\OIP (36).png')
arrow_image = pygame.image.load('C:\Jeu personnages\OIP (37).png')
black_animation = pygame.image.load('C:\Jeu personnages\OIP (38).png')
bouton_oui = pygame.image.load('C:\Jeu personnages\OIP (39).png')
bouton_non = pygame.image.load('C:\Jeu personnages\OIP (40).png')
Kaboul_level = pygame.image.load('C:\Jeu personnages\OIP (41).png')
terroriste_parachute = pygame.image.load('C:\Jeu personnages\OIP (56).png')
# Sound effects and background music.
menu_musique = pygame.mixer.Sound('C:\Jeu personnages\OIP (4).mp3')
Kaboul_musique = pygame.mixer.Sound ('C:\Jeu personnages\oip (5).mp3')
singe = pygame.mixer.Sound('C:\Jeu personnages\OIP (7).mp3')
pygame.mixer.music.load('C:\Jeu personnages\OIP (6).mp3')
pygame.display.set_icon(Icone)
RED = 255, 0, 0 # colour constants (RGB tuples)
YELLOW = 220, 220, 0
BLACK = 0, 0, 0
# Game-state flags and variables used throughout the program:
menu = 1
son_joué = 1
choose_soldier = 0
image_son = play
menu_Kaboul=0
tir = 0
close = 0
Won = 0
training = 0
lvl1 = 0
lvl2 = 0
die_terrorist = 0
loop = 0
tombee = 0
joueur_echelle = 0
detection_soldat = 0
tir_tero = 0
congratulations = 0
grade = ''
animation_victoire = 0
nouvelle_fenetre = 0
son_joué = 1 # NOTE(review): duplicate assignment, already set above
# Clickable rectangles for the main-menu buttons (mouse hit-testing).
bouton_continuer = continuer_partie.get_rect(topleft = (300, 240))
quitter = bouton_quitter.get_rect(topleft = (362, 300))
bouton_son_play = play.get_rect(topleft = (0, 0))
bouton_son_quitter = stop.get_rect(topleft = (0,0))
bouton_commencer_rect = bouton_jouer.get_rect(topleft = (370, 200))
bouton_nouvelle_partie = nouvelle_partie.get_rect(topleft = (310, 182))
class Soldat():
    """Player-controlled soldier sprite.

    Holds a screen position, the sprite surface, and a fixed step size
    used by the four movement methods.
    """

    def __init__(self):
        # Spawn in the top-left corner with the default hunter sprite.
        self.x = 0
        self.y = 0
        self.image = image_droite_rambo
        self.increment = 15  # pixels moved per movement call

    def affichage(self):
        """Blit the soldier onto the global window at its current position."""
        fenetre.blit(self.image, (self.x, self.y))

    def deplacement_haut(self):
        """Move one step up (screen y grows downward)."""
        self.y -= self.increment

    def deplacement_bas(self):
        """Move one step down."""
        self.y += self.increment

    def deplacement_droite(self):
        """Move one step to the right."""
        self.x += self.increment

    def deplacement_gauche(self):
        """Move one step to the left."""
        self.x -= self.increment
class Balle():
    """Projectile sprite fired by a soldier; travels rightwards only."""

    def __init__(self):
        # Start at the origin; repositioned to the shooter when fired.
        self.x = 0
        self.y = 0
        self.image = bullet
        self.increment = 2  # horizontal speed in pixels per frame

    def affichage(self):
        """Blit the bullet onto the global window at its current position."""
        fenetre.blit(self.image, (self.x, self.y))

    def deplacement(self):
        """Advance the bullet one step to the right."""
        self.x += self.increment
class animal():
    """Boar (sanglier) enemy that enters from the right edge and walks left.

    NOTE(review): the lowercase class name breaks PEP 8 but is kept because
    callers elsewhere in the file instantiate it as ``animal()``.
    """

    def __init__(self):
        # Spawn just off the right edge of the 1000px-wide window.
        self.x = 1000
        self.y = 0
        self.image = sanglier_image
        self.increment = 1.25  # leftward speed in pixels per frame

    def affichage(self):
        """Blit the boar onto the global window at its current position."""
        fenetre.blit(self.image, (self.x, self.y))

    def deplacement_gauche(self):
        """Advance the boar one step towards the left edge."""
        self.x -= self.increment
class Terroriste():
    """Terrorist enemy sprite: can walk left/right and drop downwards."""

    def __init__(self):
        # Spawns well past the right edge so it scrolls into view.
        self.x = 1500
        self.y = 105
        self.image = terroriste_image
        self.increment = 5  # pixels moved per movement call

    def affichage(self):
        """Blit the terrorist onto the global window at its current position."""
        fenetre.blit(self.image, (self.x, self.y))

    def deplacement_gauche(self):
        """Move one step to the left."""
        self.x -= self.increment

    def deplacement_droite(self):
        """Move one step to the right."""
        self.x += self.increment

    def deplacement_bas(self):
        """Move one step down (e.g. falling / descending)."""
        self.y += self.increment
class Sol():
    """Static ground strip drawn near the bottom of the 400px-high window."""

    def __init__(self):
        self.x = 0
        self.y = 360  # fixed vertical position of the ground
        self.image = ground

    def affichage(self):
        """Blit the ground onto the global window at its fixed position."""
        fenetre.blit(self.image, (self.x, self.y))
class Echelle():
    """Static ladder sprite the player can climb."""

    def __init__(self):
        # Default placement; instances are repositioned by level code.
        self.x = 100
        self.y = 300
        self.image = échelle_image

    def affichage(self):
        """Blit the ladder onto the global window at its current position."""
        fenetre.blit(self.image, (self.x, self.y))
class Plateforme():
    """Static walkway/platform sprite."""

    def __init__(self):
        # Default placement; instances are repositioned by level code.
        self.x = 0
        self.y = 200
        self.image = passerelle

    def affichage(self):
        """Blit the platform onto the global window at its current position."""
        fenetre.blit(self.image, (self.x, self.y))
def draw (texte, x, y) :
    """Blit any pygame surface *texte* onto the global window at (x, y)."""
    fenetre.blit(texte,(x, y))
# Clickable rectangles for the character-selection screen (one per sprite).
barbu_rect = papi.get_rect(topleft = (0,0))
chasseur_rect = image_droite_rambo.get_rect(topleft = (200,0))
cs_rect = cs.get_rect(topleft = (400,0))
soldier_rect = soldier_image.get_rect(topleft = (600,0))
space_soldier_rect = space_soldier_image.get_rect(topleft = (800,0))
james_rect = james_image.get_rect(topleft = (0, 200))
# Held keys repeat: first repeat after 1 ms, then every 20 ms.
pygame.key.set_repeat(1,20)
while menu: # main menu loop
    if son_joué == 1 :
        menu_musique.play(999,0,0)
    pygame.display.set_caption('WAR GAME')
    for event in pygame.event.get():
        if event.type == QUIT :
            pygame.quit()
            sys.exit()
        if event.type == KEYDOWN :
            if event.key == K_ESCAPE :
                pygame.quit()
                sys.exit()
        if event.type == MOUSEBUTTONDOWN and event.button == 1 :
            # "Continue": read the saved progress from Niveau.txt and set the
            # level flags accordingly before leaving the menu.
            if bouton_continuer.collidepoint(event.pos) :
                with open("Niveau.txt","r") as level :
                    texte = level.readlines()
                menu = 0
                if texte == ['niveau 0'] :
                    training = 1
                    Won = 0
                    lvl1 = 0
                elif texte == ['congratulations'] :
                    congratulations = 1
                    Won = 0
                    lvl1 = 0
                    training = 0
                elif texte == ['level 1'] :
                    choose_soldier = 1
                    Won = 1
                    training = 0
                    lvl1 = 1
                elif texte == ['level 2'] :
                    choose_soldier = 1
                    Won = 0
                    training = 0
                    menu_Kaboul = 0
                    lvl1 = 0
                    lvl2 = 1
            if quitter.collidepoint(event.pos) :
                pygame.quit()
                sys.exit()
            # Sound toggle: swap the icon and mute further music.
            if bouton_son_play.collidepoint(event.pos):
                image_son = stop
                son_joué = 0
            # "New game": reset saved progress to the training level.
            if bouton_nouvelle_partie.collidepoint(event.pos) :
                menu = 0
                choose_soldier = 1
                training = 1
                Won = 0
                with open("Niveau.txt","w") as level :
                    level.write("niveau 0")
    # Redraw the menu background, buttons and the player's current rank.
    draw(bg,0,0)
    draw(menu_img,0,0)
    draw(image_son,0,0)
    draw(continuer_partie,300,240)
    draw(bouton_quitter,362, 300)
    draw(nouvelle_partie,310, 182)
    with open("Niveau.txt","r") as RankTexte :
        rank = RankTexte.read()
    if rank == 'niveau 0' :
        grade = 'soldat 2nde classe'
    if rank == 'level 1' :
        grade = 'soldat 2nde classe'
    if rank == 'level 2' :
        grade = 'soldat 2nde classe'
    if rank == 'congratulations' :
        grade = 'soldat 1ère classe'
    RankSurf = fond3.render('Grade : %s' % (grade), True, RED)
    draw(RankSurf, 260,0)
    pygame.display.flip()
# Character selection: create the game objects, then let the player click one
# of the six displayed sprites to pick their soldier skin.
if choose_soldier == 1:
    menu_musique.stop()
    joueur = Soldat()
    balle = Balle()
    sanglier = animal()
    vie_sanglier = [1, 1, 1, 1, 1]  # boar hit points (one entry per hit)
    vie_chasseur = [1]              # player hit points
    nb_balles = 10                  # ammunition for the training level
    while choose_soldier :
        for event in pygame.event.get() :
            if event.type == QUIT :
                pygame.quit()
                sys.exit()
            # Each sprite rectangle assigns its image and exits selection.
            if event.type == MOUSEBUTTONDOWN and event.button == 1:
                if barbu_rect.collidepoint(event.pos) :
                    joueur.image = papi
                    choose_soldier = 0
                    training = 1
                if chasseur_rect.collidepoint(event.pos) :
                    joueur.image = image_droite_rambo
                    choose_soldier = 0
                    training = 1
                if cs_rect.collidepoint(event.pos) :
                    joueur.image = cs
                    choose_soldier = 0
                    training = 1
                if soldier_rect.collidepoint(event.pos) :
                    joueur.image = soldier_image
                    choose_soldier = 0
                    training = 1
                if space_soldier_rect.collidepoint(event.pos) :
                    joueur.image = space_soldier_image
                    choose_soldier = 0
                    training = 1
                if james_rect.collidepoint(event.pos) :
                    joueur.image = james_image
                    choose_soldier = 0
                    training = 1
        # Show all six selectable characters on the background.
        draw(bg,0,0)
        draw(papi,0,0)
        draw(image_droite_rambo,200,0)
        draw(cs,400,0)
        draw(soldier_image,600,0)
        draw(space_soldier_image,800,0)
        draw(james_image,0,200)
        pygame.display.flip()
    # After selection, decide from the save file whether to run training
    # or skip straight to the Kaboul menu.
    if choose_soldier == 0 :
        with open("Niveau.txt","r") as level :
            texte = level.readlines()
        if texte == ['niveau 0'] :
            training = 1
            Won = 0
        if training == 1 :
            if texte != ['niveau 0'] :
                training = 0
                Won = 1
                menu_Kaboul = 1
# Training-level intro: reset the save, the lives and the boar position, then
# display the banner texts before entering the game loop.
if training == 1 :
    with open("Niveau.txt","w") as level :
        level.write("niveau 0")
    menu_musique.stop()
    pygame.display.set_caption('training')
    vie_chasseur =[1]
    vie_sanglier = [1, 1, 1, 1, 1]
    sanglier.x = 1000
    sanglier.y = random.randint(0,300)
    fenetre.blit(bg, (0,0))
    Level1Surf = fond2.render('ENTRAINEMENT', True, RED)
    draw(Level1Surf, 280, 100)
    pygame.display.flip()
    time.sleep(2)
    KillSurf = fond2.render('TUEZ LE SANGLIER !', True, RED)
    draw(KillSurf, 220, 200)
    pygame.display.flip()
    time.sleep(3)
# Training game loop: shoot the boar five times before it touches you.
while training :
    if Won == 1 :
        training = 0
        menu = 1
    for event in pygame.event.get() :
        if event.type == QUIT :
            pygame.quit()
            sys.exit()
        if event.type == KEYDOWN :
            # Fire (Return): only when no bullet is in flight and ammo remains.
            if event.key == K_RETURN and tir == 0 and nb_balles > 0:
                if son_joué == 1 :
                    pygame.mixer.music.play(1, 0.9, 0)
                nb_balles-=1
                tir=1
            if event.key == K_UP :
                joueur.deplacement_haut()
            if event.key == K_DOWN :
                joueur.deplacement_bas()
        # Gamepad: vertical axis moves, button 2 fires.
        elif event.type == JOYAXISMOTION:
            if event.axis == 1 and event.value < -0.5:
                joueur.deplacement_haut()
            elif event.axis == 1 and event.value > 0.5:
                joueur.deplacement_bas()
        elif event.type == JOYBUTTONDOWN :
            if event.button == 2 and tir == 0 and nb_balles > 0:
                if son_joué == 1 :
                    pygame.mixer.music.play(1, 0.9, 0)
                nb_balles-=1
                tir=1
    sanglier.deplacement_gauche()
    sanglier_rect = sanglier_image.get_rect(topleft = (sanglier.x, sanglier.y))
    # Bullet left the screen on the right: reset it to the shooter.
    if balle.x > 1000 :
        balle.x = joueur.x
        balle.y = joueur.y
        tir = 0
    if tir == 1 :
        balle.deplacement()
    # While not firing, the bullet is parked on the player's muzzle.
    if tir == 0 :
        balle.x = joueur.x +50
        balle.y = joueur.y +50
    # Keep the player inside the vertical bounds of the window.
    if joueur.y <= -10:
        joueur.deplacement_bas()
    if joueur.y >= 315:
        joueur.deplacement_haut()
    # Boar escaped on the left: respawn it at a random height on the right.
    if sanglier.x <= -50 :
        sanglier.x = 1000
        sanglier.y = random.randint(0,300)
    # Bullet hit: remove one boar life and reload.
    if sanglier_rect.collidepoint(balle.x, balle.y) and tir == 1 :
        vie_sanglier.pop()
        tir = 0
    # Boar touched the player: lose a life.
    if sanglier_rect.collidepoint(joueur.x, joueur.y) :
        vie_chasseur.pop()
    if len(vie_chasseur) == 0 :
        YouLose = fond2.render('YOU LOSE !', True, RED)
        draw(YouLose, 350, 100)
        pygame.display.flip()
        time.sleep(2)
        training = 0
        menu = 1
    if len(vie_sanglier) == 0 :
        YouWon = fond2.render('YOU WON !', True, RED)
        draw(YouWon, 350, 100)
        pygame.display.flip()
        time.sleep(2)
        Won = 1
        training =0
        with open("Niveau.txt","w") as level :
            level.write("level 1")
        ColonelSurf = fond.render('COLONEL GEORGES', True, BLACK) # colonel's briefing
        draw(bg,0,0)
        draw(colonel,0,100)
        draw(dialogue_colonel_1,80,10)
        draw(ColonelSurf,0,220)
        pygame.display.flip()
        time.sleep(10)
        draw(bg,0,0)
        draw(colonel,0,100)
        draw(dialogue_colonel_2,80,10)
        draw(ColonelSurf,0,220)
        pygame.display.flip()
        time.sleep(10)
    # HUD: lives, hearts, remaining ammunition; then the sprites themselves.
    draw(bg, 0,0)
    scoreSurfChasseur = fond.render('Vie : %s' % (len(vie_chasseur)), True, RED)
    scoreSurfSanglier = fond.render('Vies : %s' % (len(vie_sanglier)), True, RED)
    nb_balles_surf = fond.render('balles : %s' % nb_balles,True,YELLOW)
    draw(scoreSurfChasseur ,0 ,3)
    draw(scoreSurfSanglier ,895 ,3)
    draw(coeur1 ,60 ,0)
    draw(coeur2 ,965 ,0)
    draw(icone_balle ,100 ,25)
    draw(nb_balles_surf,0 ,30)
    if tir == 1 :
        balle.affichage()
    joueur.affichage()
    sanglier.affichage()
    pygame.display.flip()
if Won == 1 :
menu_musique.stop()
if son_joué == 1 :
Kaboul_musique.play(999,0,0)
Level1Surf = fond3.render('LEVEL 1 : Position : Kaboul, Local hour : 7:00 AM', True, YELLOW)
draw(Kaboul,0,0)
draw(Level1Surf, 120, 100)
pygame.display.flip()
time.sleep(5)
menu_Kaboul = 1
while menu_Kaboul :
for event in pygame.event.get() :
if event.type == QUIT :
pygame.quit()
sys.exit()
if event.type == MOUSEBUTTONDOWN and event.button == 1 :
if bouton_commencer_rect.collidepoint(event.pos) :
Kaboul_musique.stop()
with open("Niveau.txt","r") as RankTexte :
rank = RankTexte.read()
if rank == 'congratulations' :
lvl1 = 0
lvl2 = 0
congratulations = 1
if rank == 'level 2' :
lvl1 = 0
lvl2 = 1
if rank == 'level 1' :
lvl1 = 1
lvl2 = 0
if rank == 'niveau 0' :
lvl1 = 1
lvl2 = 0
menu_Kaboul=0
if quitter.collidepoint(event.pos) :
Kaboul_musique.stop()
menu_Kaboul = 0
menu = 1
son_joué = 1
choose_soldier = 0
Won = 0
draw(Kaboul,0,0)
draw(Level1Surf, 120, 100)
draw(bouton_commencer, 370,200)
draw(bouton_quitter, 362, 300)
pygame.display.flip()
if lvl1 == 1 :
terroriste = Terroriste()
sol1 = Sol()
sol2 = Sol()
echelle1 = Echelle()
echelle2 = Echelle()
echelle3 = Echelle()
echelle4 = Echelle()
balle_tero = Balle()
Mitchell = Soldat()
passerelle1 = Plateforme()
passerelle2 = Plateforme()
passerelle3 = Plateforme()
passerelle4 = Plateforme()
passerelle5 = Plateforme()
passerelle6 = Plateforme()
passerelle7 = Plateforme()
passerelle8 = Plateforme()
passerelle9 = Plateforme()
passerelle10 = Plateforme()
passerelle11 = Plateforme()
passerelle12 = Plateforme()
passerelle13 = Plateforme()
passerelle14 = Plateforme()
passerelle15 = Plateforme()
terroristes_restants = 15
tir = 0
nb_balles = 30
balle_tero.image = balle_gauche_image
balle_tero.increment = -15
Mitchell.image = capitaine_Mitchell_image
Mitchell.x = 80
Mitchell.y = 300
sol2.x = 716
sol2.y = 360
joueur.x = 0
joueur.y = 300
echelle2.x = 100
echelle2.y = 195
echelle3.x = 143
echelle3.y = 300
echelle4.x = 143
echelle4.y = 195
balle_tero.x = terroriste.x + 50
balle_tero.y = terroriste.y + 50
terroriste.y = 105
passerelle2.x = 71
passerelle3.x = 142
passerelle4.x = 213
passerelle5.x = 284
passerelle6.x = 355
passerelle7.x = 426
passerelle8.x = 497
passerelle9.x = 568
passerelle10.x = 639
passerelle11.x = 710
passerelle12.x = 781
passerelle13.x = 852
passerelle14.x = 923
passerelle14.image = passerelle_spéciale
black_x = -1000
black_y = 0
obj1 = fond3.render('TUEZ TOUS LES TERRORISTES !', True, RED)
draw(obj1,300,0)
pygame.display.flip()
balle.increment = 15
if son_joué == 1 :
singe.play(999,0,0)
Kaboul_musique.stop()
while lvl1:
for event in pygame.event.get() :
if event.type == QUIT :
pygame.quit()
sys.exit()
if event.type == KEYDOWN :
if event.key == K_RIGHT and joueur.x < 915 and not joueur_echelle == 1:
joueur.deplacement_droite()
if event.key == K_LEFT and joueur.x > 0 and not joueur_echelle == 1:
joueur.deplacement_gauche()
if event.key == K_UP :
if (echelle1_rect.collidepoint(joueur.x, joueur.y) or echelle2_rect.collidepoint(joueur.x, joueur.y))or (echelle3_rect.collidepoint(joueur.x, joueur.y) or echelle4_rect.collidepoint(joueur.x, joueur.y)):
joueur.deplacement_haut()
joueur_echelle = 1
elif joueur.y <= 180 and joueur.y > 117:
joueur.deplacement_haut()
joueur_echelle = 0
if event.key == K_DOWN and joueur.y < 300 :
if zone_vide_rect.collidepoint(joueur.x, joueur.y) :
joueur.deplacement_bas()
joueur_echelle = 1
elif (echelle1_rect.collidepoint(joueur.x, joueur.y) or echelle2_rect.collidepoint(joueur.x, joueur.y)) :
joueur.deplacement_bas()
joueur_echelle = 1
if event.key == K_RETURN and tir == 0 and nb_balles > 0 :
if son_joué == 1 :
pygame.mixer.music.play(1, 0.9, 0)
nb_balles-=1
tir=1
elif event.type == JOYAXISMOTION:
if event.axis == 0 and event.value > 0.5 and joueur.x < 915 and not joueur_echelle == 1:
joueur.deplacement_droite()
elif event.axis == 0 and event.value < -0.5 and joueur.x > 0 and not joueur_echelle == 1:
joueur.deplacement_gauche()
elif event.axis == 1 and event.value > 0.5 and joueur.y < 300 :
if zone_vide_rect.collidepoint(joueur.x, joueur.y) :
joueur.deplacement_bas()
joueur_echelle = 1
elif (echelle1_rect.collidepoint(joueur.x, joueur.y) or echelle2_rect.collidepoint(joueur.x, joueur.y)) :
joueur.deplacement_bas()
joueur_echelle = 1
elif event.axis == 1 and event.value < -0.5:
if (echelle1_rect.collidepoint(joueur.x, joueur.y) or echelle2_rect.collidepoint(joueur.x, joueur.y))or (echelle3_rect.collidepoint(joueur.x, joueur.y) or echelle4_rect.collidepoint(joueur.x, joueur.y)):
joueur.deplacement_haut()
joueur_echelle = 1
elif joueur.y <= 180 and joueur.y > 117:
joueur.deplacement_haut()
joueur_echelle = 0
elif event.type == JOYBUTTONDOWN :
if event.button == 2 and tir == 0 and nb_balles > 0:
if son_joué == 1 :
pygame.mixer.music.play(1, 0.9, 0)
nb_balles-=1
tir=1
tero_rect = terroriste_image.get_rect(topleft = (terroriste.x, terroriste.y))
echelle1_rect = échelle_image.get_rect(topleft = (echelle1.x, echelle1.y))
echelle2_rect = échelle_image.get_rect(topleft = (echelle2.x, echelle2.y))
echelle3_rect = échelle_image.get_rect(topleft = (echelle3.x, echelle3.y))
echelle4_rect = échelle_image.get_rect(topleft = (echelle4.x, echelle4.y))
zone_vide_rect = zone_vide.get_rect(topleft = (100,90))
joueur_rect = joueur.image.get_rect(topleft = (joueur.x, joueur.y))
if detection_soldat == 1 and die_terrorist == 0:
tir_tero = 1
detection_soldat = 0
if terroriste.y == joueur.y and terroristes_restants > 0 :
detection_soldat = 1
if joueur.y == 300 :
joueur_echelle = 0
if terroriste.x > 915 :
terroriste.deplacement_gauche()
if tir == 1 :
balle.deplacement()
if tir_tero == 1 :
balle_tero.deplacement()
if balle.x > 1000 :
balle.x = joueur.x
balle.y = joueur.y
tir = 0
if balle_tero.x < 0 :
balle_tero.x = terroriste.x
balle_tero.y = terroriste.y
tir_tero = 0
if tir == 0 :
balle.x = joueur.x +50
balle.y = joueur.y +50
if tir_tero == 0 :
balle_tero.x = terroriste.x + 50
balle_tero.y = terroriste.y + 50
if tero_rect.collidepoint(balle.x, balle.y) and tir == 1 :
die_terrorist = 1
if terroristes_restants > 0 :
terroristes_restants -= 1
tir = 0
if joueur_rect.collidepoint(balle_tero.x, balle_tero.y) and tir_tero == 1 and len(vie_chasseur) > -1 :
vie_chasseur = []
tir_tero = 0
if die_terrorist == 1 :
if random.randint(0,1) :
loop += 1
if loop == 20 :
terroriste.x = 1500
terroriste.y = 105
die_terrorist= 0
loop = 0
if random.randint(0,1) :
loop += 1
if loop == 20 :
terroriste.x = 1500
terroriste.y = 300
die_terrorist= 0
loop = 0
scoreSurfChasseur = fond.render('Vie : %s' % (len(vie_chasseur)), True, RED)
terroristes_restantsSurf = fond.render('Terroristes restants: %s' % terroristes_restants, True, RED)
nb_balles_surf = fond.render('balles : %s' % nb_balles,True,YELLOW)
draw(foret,0,0)
draw(obj1,300,0)
draw(scoreSurfChasseur ,0 ,3)
draw(coeur1 ,60 ,0)
draw(icone_balle ,100 ,25)
draw(nb_balles_surf,0 ,30)
draw(terroristes_restantsSurf,790,0)
if detection_soldat == 1 :
draw(detection_ennemi, terroriste.x-30, terroriste.y)
if tir == 1 :
balle.affichage()
if tir_tero == 1 :
balle_tero.affichage()
if vie_chasseur == [] :
singe.stop()
YouLose = fond2.render('YOU LOSE !', True, RED)
draw(YouLose, 350, 100)
pygame.display.flip()
time.sleep(2)
lvl1 = 0
menu = 1
Won = 0
if terroristes_restants > 0 :
terroriste.affichage()
elif terroristes_restants == 0 :
draw(arrow_image, 835,40)
if joueur.y == 105 and joueur.x == 915 :
animation_victoire = 1
joueur.affichage()
echelle1.affichage()
echelle2.affichage()
echelle3.affichage()
echelle4.affichage()
sol1.affichage()
sol2.affichage()
passerelle1.affichage()
passerelle2.affichage()
passerelle3.affichage()
passerelle4.affichage()
passerelle5.affichage()
passerelle6.affichage()
passerelle7.affichage()
passerelle8.affichage()
passerelle9.affichage()
passerelle10.affichage()
passerelle11.affichage()
passerelle12.affichage()
passerelle13.affichage()
passerelle14.affichage()
if animation_victoire == 1 :
draw(black_animation,black_x,black_y)
black_x += 10
if black_x > -50 :
black_x = -1000
black_y = 0
time.sleep(1.5)
animation_victoire = 0
lvl1 = 0
lvl2 = 1
Won = 0
pygame.display.flip()
if lvl2 == 1 :
singe.stop()
Kaboul_musique.stop()
with open("Niveau.txt","w") as level :
level.write("level 2")
terroriste = Terroriste()
balle.increment = 15
balle.x = joueur.x+350
balle.y = joueur.y+350
balle_tero = Balle()
balle_tero.x = terroriste.x + 350
balle_tero.y = terroriste.y + 350
balle_tero.image = balle_gauche_image
balle_tero.increment = -15
joueur.x = 0
joueur.y = 300
nb_balles = 20
tir = 0
terroristes_restants = 10
black_x = -1000
black_y = 0
vie_chasseur = [1]
die_terrorist = 1
animation_victoire = 1
while lvl2 :
for event in pygame.event.get() :
if event.type == QUIT :
pygame.quit()
sys.exit()
if event.type == KEYDOWN :
if event.key == K_ESCAPE :
pygame.quit()
sys.exit()
if event.key == K_RIGHT and joueur.x < 915 :
joueur.deplacement_droite()
if event.key == K_LEFT and joueur.x > 0 :
joueur.deplacement_gauche()
if event.key == K_RETURN and tir == 0 and nb_balles > 0 :
if son_joué == 1 :
pygame.mixer.music.play(1, 0.9, 0)
nb_balles-=1
tir=1
if event.type == JOYAXISMOTION :
if event.axis == 0 and event.value > 0.5 and joueur.x < 915 and not joueur_echelle == 1:
joueur.deplacement_droite()
elif event.axis == 0 and event.value < -0.5 and joueur.x > 0 and not joueur_echelle == 1:
joueur.deplacement_gauche()
elif event.type == JOYBUTTONDOWN :
if event.button == 2 and tir == 0 and nb_balles > 0:
if son_joué == 1 :
pygame.mixer.music.play(1, 0.9, 0)
nb_balles-=1
tir=1
tero_rect = terroriste_image.get_rect(topleft = (terroriste.x, terroriste.y))
joueur_rect = joueur.image.get_rect(topleft = (joueur.x, joueur.y))
if detection_soldat == 1 and die_terrorist == 0:
tir_tero = 1
detection_soldat = 0
if terroriste.y == joueur.y and terroristes_restants > 0 :
detection_soldat = 1
if terroriste.x > 915 :
terroriste.deplacement_gauche()
if tir == 1 :
balle.deplacement()
if tir_tero == 1 :
balle_tero.deplacement()
if balle.x > 1000 :
balle.x = joueur.x
balle.y = joueur.y
tir = 0
if balle_tero.x < 0 :
balle_tero.x = terroriste.x
balle_tero.y = terroriste.y
tir_tero = 0
if tir == 0 :
balle.x = joueur.x +50
balle.y = joueur.y +50
if tir_tero == 0 :
balle_tero.x = terroriste.x + 50
balle_tero.y = terroriste.y + 50
if tero_rect.collidepoint(balle.x, balle.y) and tir == 1 :
die_terrorist = 1
if terroristes_restants > 0 :
terroristes_restants -= 1
tir = 0
if joueur_rect.collidepoint(balle_tero.x, balle_tero.y) and tir_tero == 1 and len(vie_chasseur) > -1 :
vie_chasseur = []
tir_tero = 0
if die_terrorist == 1 :
terroriste.x = random.randint(100, 800)
terroriste.y = -105
die_terrorist = 0
if terroriste.y >= -105 and terroriste.y != 300 :
terroriste.deplacement_bas()
terroriste.image = terroriste_parachute
if terroriste.y >= 280 :
terroriste.image = terroriste_image
if vie_chasseur == [] :
YouLose = fond2.render('YOU LOSE !', True, RED)
draw(YouLose, 350, 100)
pygame.display.flip()
time.sleep(2)
lvl2 = 0
menu = 1
Won = 0
#affichage :
draw(Kaboul_level,0,0)
joueur.affichage()
if terroristes_restants > 0 :
terroriste.affichage()
if tir == 1 :
balle.affichage()
if tir_tero == 1 :
balle_tero.affichage()
if terroristes_restants > 0 :
terroriste.affichage()
scoreSurfChasseur = fond.render('Vie : %s' % (len(vie_chasseur)), True, RED)
terroristes_restantsSurf = fond.render('Terroristes restants: %s' % terroristes_restants, True, RED)
nb_balles_surf = fond.render('balles : %s' % nb_balles,True,YELLOW)
draw(scoreSurfChasseur ,0 ,3)
draw(coeur1 ,60 ,0)
draw(icone_balle ,100 ,25)
draw(nb_balles_surf,0 ,30)
draw(terroristes_restantsSurf,790,0)
obj1 = fond3.render('ATTENTION AUX PARACHUTISTES !', True, RED)
draw(obj1,300,0)
if terroristes_restants == 0 and animation_victoire == 1 :
draw(black_animation,black_x,black_y)
black_x += 10
if black_x > -50 :
animation_victoire = 0
time.sleep(1.5)
lvl1 = 0
lvl2 = 1
congratulations = 1
Won = 0
pygame.display.flip()
if congratulations == 1 :
with open("Niveau.txt","w") as level :
level.write("congratulations")
ColonelSurf = fond.render('COLONEL GEORGES', True, BLACK)
draw(bg,0,0)
draw(colonel,0,100)
draw(dialogue_colonel_3,80,10)
draw(ColonelSurf,0,220)
pygame.display.flip()
time.sleep(10)
congratulations = 0
lvl2 = 0
menu = 1
rank = 'soldat 1ère classe'
| [
"noreply@github.com"
] | noreply@github.com |
485ad8e6baaed3e0c1597b50422b67fef5348a91 | 6c224dd1fe33d69bbbf9241ebc0b8aecc238f654 | /eval_kinetic_models/exceptions.py | 1f3a313487c4f4b1a5b253d1911b4086aee4b6d5 | [
"MIT"
] | permissive | Lyle-zhang/eval_kinetic_models | 4ca39c06ab408355ad5e26fcc6e108f91200ba28 | 186980a78c618c8887c7188915da156adf50235e | refs/heads/master | 2021-01-18T17:12:29.761079 | 2016-03-10T01:17:58 | 2016-03-10T01:17:58 | 53,903,934 | 0 | 1 | null | 2016-03-15T01:13:47 | 2016-03-15T01:13:47 | null | UTF-8 | Python | false | false | 1,204 | py | """Exceptions for ReSpecTh Parser.
.. moduleauthor:: Kyle Niemeyer <kyle.niemeyer@gmail.com>
"""
class ParseError(Exception):
    """Root of the ReSpecTh parser's exception hierarchy."""

    pass
class KeywordError(ParseError):
    """Raised for errors in keyword parsing; keeps the offending keywords."""

    def __init__(self, *keywords):
        # Store the whole tuple so __str__ (here and in subclasses) can report it.
        self.keywords = keywords

    def __str__(self):
        message = 'Error: {}.'.format(self.keywords)
        return repr(message)
class UndefinedElementError(KeywordError):
    """Raised when an element is referenced but was never defined."""

    def __str__(self):
        details = 'Error: Element not defined.\n{}'.format(self.keywords)
        return repr(details)
class MissingElementError(KeywordError):
    """Raised when a required element is absent from the input."""

    def __str__(self):
        message = 'Error: Required element {} is missing.'.format(self.keywords)
        return repr(message)
class MissingAttributeError(KeywordError):
    """Raised when a required attribute is absent from an element."""

    def __str__(self):
        message = 'Error: Required attribute {} is missing.'.format(self.keywords)
        return repr(message)
class UndefinedKeywordError(KeywordError):
    """Raised when an unknown keyword is encountered."""

    def __str__(self):
        details = 'Error: Keyword not defined.\n{}'.format(self.keywords)
        return repr(details)
| [
"kyle.niemeyer@gmail.com"
] | kyle.niemeyer@gmail.com |
e34145873aede1b65f5e55265e1505cc6bde3391 | 387cf5f72ed6679a4d9e04bddd16998a190c4caf | /problems/programmers/lv3/pgs-67258-sweep-slow.py | 6a3ef6ae150570c9680bfdc5e53635a2e6635517 | [] | no_license | CodyBuilder-dev/Algorithm-Coding-Test | db4ee1e7565fbcef3140192225167eff42ad5c02 | cca5c4ba8bc31679ab00aceccfd8d9d39c232f72 | refs/heads/master | 2021-07-24T00:34:41.888289 | 2021-07-21T14:29:00 | 2021-07-21T14:29:00 | 219,123,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | """
제목 (Title): 보석 쇼핑 — Gem Shopping (programmers.co.kr problem 67258)
아이디어 (Idea): 각 보석 종류의 마지막 등장 인덱스를 기록하고, 모든 종류가 한 번씩 나타날 때마다 [min, max] 인덱스 구간을 후보로 삼는다. (Track the last index of every gem kind; whenever all kinds have been seen, the window spanning those last indices is a candidate answer.)
"""
def solution(gems):
    """Return the 1-based [start, end] of the shortest window of *gems*
    containing every distinct gem kind at least once.

    Ties on window length are broken toward the smaller start index.  If the
    scan never sees every kind (impossible for valid inputs), the initial
    placeholder interval is returned unchanged, matching the original code.
    """
    kinds = set(gems)
    # gem kind -> index of its most recent occurrence.
    # (Renamed from `hash`, which shadowed the builtin; the dead
    # `hash[gem] = 0` pre-assignment was removed — it was always overwritten.)
    last_index = {}
    # Placeholder wider than any real answer; kept for backward compatibility.
    best_answer = [123456, 456789]
    for i, gem in enumerate(gems):
        last_index[gem] = i
        if len(last_index) == len(kinds):
            # Window covering the most recent occurrence of every kind.
            temp_answer = [min(last_index.values()) + 1, max(last_index.values()) + 1]
            if temp_answer[1] - temp_answer[0] < best_answer[1] - best_answer[0]:
                best_answer = temp_answer
            elif (temp_answer[1] - temp_answer[0] == best_answer[1] - best_answer[0]
                  and temp_answer[0] < best_answer[0]):
                best_answer = temp_answer
    return best_answer
print(solution(["DIA", "RUBY", "RUBY", "DIA", "DIA", "EMERALD", "SAPPHIRE", "DIA"]))
print(solution(["AA", "AB", "AC", "AA", "AC"]))
print(solution(["XYZ", "XYZ", "XYZ"]))
print(solution(["ZZZ", "YYY", "NNNN", "YYY", "BBB"]))
print(solution(["DIA", "EM", "EM", "RUB", "DIA"]))
print(solution(["A", "A", "B"])) #5 #10
print(solution(["AD","AA", "AB", "AC", "AA", "AC", "AD", "AB"]))
print(solution(["AD","AA", "AB", "AC", "AA", "AC", "AD", "AB", "AZ","AB","AC","AA"]))
print(solution(["AD","AA", "AB", "AC", "AA", "AC", "AC", "AD", "AB","AZ","AB","AD","AC","AA","AB","AZ","AA"]))
| [
"imspecial1@u.sogang.ac.kr"
] | imspecial1@u.sogang.ac.kr |
2652a081fa0c39d0a95dd3dcfb026c1471796d73 | c6c17fc7c7de104017ebada385e1d26861239ca7 | /treetraversal.py | ecb517564170c553c6fec804d2c8ef67be0c39c5 | [] | no_license | shrikantnarvekar/Algorithims-and-Data-Structures | d7f5c1ba41616eb5d16ea1ccf34332020e031411 | 9525958d4b7f6434e72ffa4316c96039c012ab5c | refs/heads/master | 2020-03-07T22:13:23.035833 | 2018-04-02T11:43:57 | 2018-04-02T11:43:57 | 127,748,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | class Mytree:
def __init__(self, data):
    """Create a detached node holding *data*; children start as None."""
    self.data = data
    self.left = None
    self.right = None
def inOrder(root):
    """Print node values in in-order sequence: left subtree, node, right subtree."""
    if not root:
        return
    inOrder(root.left)
    print(root.data)
    inOrder(root.right)
def preOrder(root):
    """Print node values in pre-order sequence: node, left subtree, right subtree."""
    if not root:
        return
    print(root.data)
    preOrder(root.left)
    preOrder(root.right)
def postOrder(root):
    """Print node values in post-order sequence: left subtree, right subtree, node."""
    if not root:
        return
    postOrder(root.left)
    postOrder(root.right)
    print(root.data)
def conversepre(root):
    """Print node values in converse pre-order: node, right subtree, left subtree."""
    if not root:
        return
    print(root.data)
    conversepre(root.right)
    conversepre(root.left)
def conversepost(root):
    """Print node values in converse post-order: right subtree, left subtree, node.

    Bug fix: the original delegated recursion to conversepre(), which prints
    each subtree's root before its children (a converse PRE-order visit), so
    the output was not a post-order variant at all.  The traversal must
    recurse into itself so every node is printed after both of its subtrees.
    """
    if root:
        conversepost(root.right)
        conversepost(root.left)
        print(root.data)
def conversein(root):
    """Print node values in converse in-order: right subtree, node, left subtree.

    Bug fix: the original recursed via conversepre() for both subtrees and
    printed the node last, producing neither an in-order nor any converse
    traversal.  The function must recurse into itself and print the node
    between the right and left subtrees.
    """
    if root:
        conversein(root.right)
        print(root.data)
        conversein(root.left)
#making the tree
# Sample tree: A has children B (left) and C (right); B has D (left), E (right).
root = Mytree("A")
root.left = Mytree("B")
root.right = Mytree("C")
root.left.left = Mytree("D")
root.left.right = Mytree("E")
# The traversal functions print values themselves and return None, so each
# "print(<traversal>(root))" below also emits a trailing "None".  In the
# labelled calls, arguments are evaluated before print runs, so the
# traversal output appears BEFORE its label line.
print("inorder")
print (inOrder(root))
print("preorder")
print (preOrder(root))
print("postorder")
print (postOrder(root))
print("conversepreorder:\n",conversepre(root))
print("conversepostorder:\n",conversepost(root))
print("converseinorder:\n",conversein(root))
| [
"noreply@github.com"
] | noreply@github.com |
790f7806b7f537150ccb4a127bd799627afad0e4 | 1f8344813458f669bdf77059220290a3b2a3cdd0 | /tutorials-docs/thinking-in-coroutines/8_run_in_default_executor.py | 81a53d28f3690104d9512aac1b837e073a2f0b81 | [] | no_license | gridl/asyncio-study-group | 7c03e8640070ebe8d1103f27bc3c3da37a5a661f | 1ba9cf90e21b5174518032d467e89526da219576 | refs/heads/master | 2020-07-02T07:03:12.364097 | 2017-12-26T20:55:09 | 2017-12-26T20:55:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import time
import datetime
import asyncio
def blocking_call(seconds):
    """Synchronously block the calling thread for *seconds*, printing the
    delay and the start timestamp first."""
    started = datetime.datetime.now()
    print(seconds, started)
    time.sleep(seconds)
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    # Stop the loop after five seconds so the demo terminates on its own.
    loop.call_later(5, loop.stop)
    for delay in range(1, 4):
        # Passing executor=None selects the loop's default
        # concurrent.futures.ThreadPoolExecutor; its default worker count is
        # the number of processors on the machine multiplied by 5.
        loop.run_in_executor(None, blocking_call, delay)
    try:
        loop.run_forever()
    finally:
        loop.close()
| [
"nataliya.samoylenko@gmail.com"
] | nataliya.samoylenko@gmail.com |
f648c8693ae1b2a639ae03c0646405a290ec3a8d | e697bb1dd2c96049225b1615df8c613b8c366489 | /LPI/Unidade 03/Aula08.py | 0883f63fe29c073a3be63b6c83c9c4e410d882a8 | [] | no_license | zTaverna/Cursos-Python | 9a4f67091c26e59ed0cb1534e72bc855688beb9b | 28c089229c9b63b2f6739fe52646c6d154a2adc8 | refs/heads/main | 2023-05-08T15:51:15.331995 | 2021-06-04T20:27:08 | 2021-06-04T20:27:08 | 365,034,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | def media(n1,n2,n3):
media=(n1+n2+n3)/3
print(media)
# Read three integers and report their mean; media() prints the result
# itself and returns None.
num1=int(input('Digite um número: '))
num2=int(input('Digite um número: '))
num3=int(input('Digite um número: '))
media(num1,num2,num3)
| [
"noreply@github.com"
] | noreply@github.com |
223935c2c586bfc9888c7a7f122c4d47be11ce5d | d2d879e7ec46665e01932bbbd2ff113f381089b8 | /tipe_source_geogeo/main2.py | 69366ac85a70fee1158fc567317e567a37876db7 | [] | no_license | theolavigne/TIPE | 320f2e641143b2373e033485d99890ab51ccea6f | 86c52a266c4a556475acd56d8c0175f6e6b56860 | refs/heads/master | 2023-01-24T00:33:24.041888 | 2020-12-04T09:29:35 | 2020-12-04T09:29:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,198 | py | from random import *
import matplotlib.pyplot as plt
N = 10 #taille de la ville
n = 3 #nombre de caserne
Ville = [[[i,j] for i in range(N)] for j in range(N)] #ville
class caserne():
    """A fire station sitting on an integer cell of the N x N city grid."""

    def __init__(self):
        # Start from a uniformly random cell of the city.
        self.x = randint(0, N - 1)
        self.y = randint(0, N - 1)

    def nexplace(self, x, y):
        """Move the station to the cell (x, y)."""
        self.x = x
        self.y = y

    def __repr__(self):
        return f"caserne , x : {self.x} , y : {self.y}"
def distance(caserne, maison):
    """Manhattan (taxicab) distance from a station to the house cell [x, y]."""
    dx = abs(caserne.x - maison[0])
    dy = abs(caserne.y - maison[1])
    return dx + dy
def Di(caserne, Ville):
    """Squared distances from *caserne* to every house cell of the city grid,
    in row-major order."""
    return [distance(caserne, maison) ** 2 for ligne in Ville for maison in ligne]
def moyenne(liste):
    """Arithmetic mean of *liste*; raises ZeroDivisionError on an empty list."""
    return sum(liste) / len(liste)
def moyenne_distance_regiment(regiment, Ville):
    """Mean, over the regiment, of each station's mean squared house distance."""
    par_caserne = [moyenne(Di(caserne, Ville)) for caserne in regiment]
    return moyenne(par_caserne)
def distance_moyenne_entre_regiment(regiment):
    """Sum of squared Manhattan distances over ORDERED station pairs, divided
    by the number of stations (each unordered pair is counted twice)."""
    total = 0
    for a in regiment:
        for b in regiment:
            if a != b:
                total += (abs(a.x - b.x) + abs(a.y - b.y)) ** 2
    return total / len(regiment)
"""def moyenne_distance_entre_caserne(regiment):
M = []
if len(regiment) == 1:
return 1 #A REDEFIIR
else:
for caserne in regiment:
for caserne2 in regiment:
if caserne != caserne2 :
M += [(abs(caserne.x-caserne2.x) + abs(caserne.y-caserne2.y))**2]
return moyenne(M)"""
"""def aire_superpose(regiment,R):
air_inutile = 0
for caserne in regiment:
for caserne2 in regiment:
if caserne != caserne2:
dx = abs(caserne.x - caserne2.x)
dy = abs(caserne.y - caserne2.y)
if dx < R and dy < R:
air_inutile += R**2 - dx*R - dy*(R-dx)
return air_inutile/len(regiment)"""
def best_regiment():
    """Randomised search for a good placement of the n fire stations.

    Draws 1000 random regiments and keeps the one minimising the ratio
    (mean squared distance to the houses) / (mean squared spread between
    stations): stations should sit close to the city while staying apart.
    Reads the module globals N, n and Ville.
    """
    meilleur_regiment_de_caserne = [caserne() for _ in range(n)]
    meilleur_moyenne_distance_regiment = moyenne_distance_regiment(meilleur_regiment_de_caserne, Ville)
    meilleur_distance_moyenne_entre_regiment = distance_moyenne_entre_regiment(meilleur_regiment_de_caserne)
    # Bug fix: the seed ratio used to divide the coverage score by ITSELF
    # (always 1.0); it must be coverage / spread, like inside the loop.
    # A degenerate seed with zero spread gets the worst possible score so
    # that any valid draw replaces it (the old code never crashed here).
    if meilleur_distance_moyenne_entre_regiment != 0:
        meilleur_rapport = meilleur_moyenne_distance_regiment / meilleur_distance_moyenne_entre_regiment
    else:
        meilleur_rapport = float('inf')
    gen = 1000
    for _ in range(gen):
        regiment = [caserne() for _ in range(n)]
        try:
            moyenne_distance_regiment_ = moyenne_distance_regiment(regiment, Ville)
            distance_moyenne_entre_regiment_ = distance_moyenne_entre_regiment(regiment)
            rapport = moyenne_distance_regiment_ / distance_moyenne_entre_regiment_
            if rapport < meilleur_rapport:
                meilleur_rapport = rapport
                meilleur_regiment_de_caserne = regiment
                meilleur_moyenne_distance_regiment = moyenne_distance_regiment_
                meilleur_distance_moyenne_entre_regiment = distance_moyenne_entre_regiment_
        except ZeroDivisionError:
            # Degenerate draw: all stations on the same cell; skip it.
            pass
    return meilleur_regiment_de_caserne
############################################################################
# Run the randomised search 100 times, accumulate where the winning stations
# land, and display the placement density as a heat map.
fig, ax = plt.subplots()
densite = [[0 for _ in range(N)] for _ in range(N)]
for essai in range(100):
    regiment = best_regiment()
    print(essai)
    for caserne_ in regiment:
        densite[caserne_.x][caserne_.y] += 1
plt.imshow(densite, extent=[0, N, 0, N], aspect="auto")
plt.title("taille " + str(N) + " et " + str(n) + " casernes")
plt.colorbar()
plt.show()
"""for i in range(10):
print(i)
for caserne in :
i = caserne.y
j = caserne.x
#Densite[i][j] += 1
fig, ax = plt.subplots()
ax.scatter(i, j, edgecolors='none')
plt.show()""" | [
"noreply@github.com"
] | noreply@github.com |
ac560db83517e5922bce87d453c58192843352b6 | 0e822323071d972c4a2a8b3c4e778a9144b2b5b4 | /tests/unit/extractor/restapi/test_rest_api_extractor.py | f1fd97f5a3e6752c6f85a56e4adba19bbfa509b4 | [
"Apache-2.0"
] | permissive | duyet/amundsendatabuilder | f3bed53c93d19bfda5ae7df1bd456214e442012f | 21a763add3c00c34b4f4c2d9809f59e50fb264c8 | refs/heads/master | 2023-04-09T05:08:07.816951 | 2020-07-01T16:34:56 | 2020-07-01T16:34:56 | 277,763,943 | 0 | 0 | Apache-2.0 | 2023-04-03T23:05:59 | 2020-07-07T08:43:44 | null | UTF-8 | Python | false | false | 1,746 | py | import unittest
from pyhocon import ConfigFactory # noqa: F401
from databuilder.extractor.restapi.rest_api_extractor import RestAPIExtractor, REST_API_QUERY, MODEL_CLASS, \
STATIC_RECORD_DICT
from databuilder.models.dashboard.dashboard_metadata import DashboardMetadata
from databuilder.rest_api.base_rest_api_query import RestApiQuerySeed
class TestRestAPIExtractor(unittest.TestCase):
def test_static_data(self):
# type: (...) -> None
conf = ConfigFactory.from_dict(
{
REST_API_QUERY: RestApiQuerySeed(seed_record=[{'foo': 'bar'}]),
STATIC_RECORD_DICT: {'john': 'doe'}
}
)
extractor = RestAPIExtractor()
extractor.init(conf=conf)
record = extractor.extract()
expected = {'foo': 'bar', 'john': 'doe'}
self.assertDictEqual(expected, record)
def test_model_construction(self):
conf = ConfigFactory.from_dict(
{
REST_API_QUERY: RestApiQuerySeed(
seed_record=[{'dashboard_group': 'foo',
'dashboard_name': 'bar',
'description': 'john',
'dashboard_group_description': 'doe'}]),
MODEL_CLASS: 'databuilder.models.dashboard.dashboard_metadata.DashboardMetadata',
}
)
extractor = RestAPIExtractor()
extractor.init(conf=conf)
record = extractor.extract()
expected = DashboardMetadata(dashboard_group='foo', dashboard_name='bar', description='john',
dashboard_group_description='doe')
self.assertEqual(expected.__repr__(), record.__repr__())
| [
"noreply@github.com"
] | noreply@github.com |
ac52931b05105e9c8f60e324e6789180322f2fba | 54f14725bd2d8192583f20136293b6604e781641 | /store_io/migrations/0013_auto_20190508_2241.py | 86055527086063123e5b55de8366e0bf547f6be7 | [] | no_license | ViktorPetreski/StoreIO | 92e093514b8d05f7093c9dddc4e10b60b8e46150 | 3fd59c223efe5e094af7df27ac8b2d554cfeb2b6 | refs/heads/master | 2022-11-22T19:47:02.734528 | 2019-12-24T17:31:10 | 2019-12-24T17:31:10 | 229,988,248 | 0 | 0 | null | 2022-11-22T03:59:43 | 2019-12-24T18:29:07 | Python | UTF-8 | Python | false | false | 530 | py | # Generated by Django 2.2 on 2019-05-08 20:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace StoreProductQuantity.product with a many-to-many relation.

    The existing ``product`` field is removed and a ManyToManyField to
    ``store_io.Product`` is added under the same name.
    """

    dependencies = [
        ('store_io', '0012_auto_20190507_1529'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='storeproductquantity',
            name='product',
        ),
        migrations.AddField(
            model_name='storeproductquantity',
            name='product',
            field=models.ManyToManyField(to='store_io.Product'),
        ),
    ]
| [
"vpetreski96@gmail.com"
] | vpetreski96@gmail.com |
e24667856f38a278bffc70cfe8227a4e542ff4b2 | 059c5275226104883e2665fde2a40303f864203c | /utils/metrics.py | 2f626f66127c0a7752cac7f22565eae0f4e340c2 | [
"MIT"
] | permissive | marcelo-santos-12/lbp_paper | e54f873720c48100c5685f6a794b553e2af29ecd | 56d2457dce2c97a16de9e034b1a87ef0ceb9446a | refs/heads/main | 2023-03-05T05:06:48.498206 | 2021-02-21T17:36:34 | 2021-02-21T17:36:34 | 340,678,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,419 | py | '''
Modulo que implementa o GridSearch
'''
import numpy as np
import os
from itertools import product
from sklearn.metrics import (roc_curve, f1_score, auc,
accuracy_score, matthews_corrcoef,
confusion_matrix)
from sklearn.metrics._plot.base import _get_response
from sklearn.base import clone
from sklearn.neural_network import MLPClassifier
from sklearn.utils import shuffle
class MyGridSearch():
    """Exhaustive grid search keeping the classifier with the best Matthews
    correlation coefficient on the supplied test split."""

    def __init__(self, classifier, grid_parameters):
        self.classifier = classifier
        self.grid_parameters = grid_parameters
        self.all_parameter_comb = self._get_all_parameter_comb()

    def fit(self, x_train, y_train, x_test, y_test):
        """Fit one clone per parameter combination and return the metrics of
        the best one (selection criterion: Matthews correlation)."""
        self.results = {}
        self.results['best_matthews'] = -1
        is_mlp = isinstance(self.classifier, MLPClassifier)
        if is_mlp:
            # Fixed seed keeps the MLP's training sample order reproducible.
            x_train, y_train = shuffle(x_train, y_train, random_state=100)
        for i, parameters_i in enumerate(self.all_parameter_comb):
            if is_mlp and len(parameters_i) == 5:
                # Fold the first two values back into a hidden-layer tuple.
                parameters_i[0] = (parameters_i[0], parameters_i[1])
                parameters_i.pop(1)
            parameters_to_clf = dict(zip(self.grid_parameters.keys(), parameters_i))
            clf_i = clone(self.classifier)
            clf_i.set_params(**parameters_to_clf)
            clf_i = clf_i.fit(X=x_train, y=y_train)
            y_pred = clf_i.predict(x_test)
            matthews = matthews_corrcoef(y_test, y_pred)
            if self.results['best_matthews'] < matthews:
                # Only compute the ROC data for a new best model.
                y_pred_roc, _ = _get_response(x_test, clf_i, 'auto', pos_label=None)
                fpr, tpr, _ = roc_curve(y_test, y_pred_roc, pos_label=None, sample_weight=None, drop_intermediate=True)
                self.results['best_matthews'] = matthews
                self.results['f1score'] = f1_score(y_test, y_pred)
                self.results['auc'] = auc(fpr, tpr)
                self.results['accuracy'] = accuracy_score(y_test, y_pred)
                self.results['confusion_matrix'] = confusion_matrix(y_test, y_pred)
                self.results['best_parameter'] = dict(zip(self.grid_parameters.keys(), parameters_i))
                self.results['best_clf'] = clf_i
        return self.results

    def _get_all_parameter_comb(self):
        """Cartesian product of the grid values, flattened to plain lists (one
        list per combination, values ordered like the grid's keys)."""
        list_comb = []
        for i, k in enumerate(self.grid_parameters.keys()):
            if i == 0:
                list_comb = self.grid_parameters[k]
                continue
            list_comb = [(x, y) for x, y in product(list_comb, self.grid_parameters[k])]

        def formated(tuple_arr):
            def just_list(_list):
                if isinstance(_list, (list, tuple)):
                    return [sub for elem in _list for sub in just_list(elem)]
                return [_list]

            flattened = []
            for elem in tuple_arr:
                flattened.extend(just_list(elem))
            return flattened

        return list(map(formated, list_comb))
if __name__ == '__main__':
    # Example hyper-parameter grid for an MLPClassifier.
    mlp_parameters = {
        'hidden_layer_sizes': [(5, 5), (10, 10), (20, 20)],
        'solver': ['adam', 'sgd'],
        'activation': ['relu', 'identity', 'logistic', 'tanh'],
        'max_iter': [50, 100, 200]
    }
    gs = MyGridSearch(classifier=MLPClassifier(), grid_parameters=mlp_parameters)
| [
"marcelo@debian-marcelo.debian"
] | marcelo@debian-marcelo.debian |
5911a202cf08140b09bab95aaf2d07223a3a2e73 | 07889fa5be5aab8fbdb7eacb35bcaf51b550c9f9 | /refactor_scripts/manufacturer_prefix.py | 8287382fcfd6d9fcea2fec1620ea5bfd722a5a7f | [
"Apache-2.0"
] | permissive | rcbuild-info/scrape | 5615d8ffca9b03a7998c708203940a2b2aca7e00 | 88376e26b6465546996e71a78843c91cd05961d3 | refs/heads/master | 2020-12-29T01:30:26.537906 | 2015-11-23T06:08:02 | 2015-11-23T06:08:02 | 35,204,077 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | #!/usr/bin/python
import os
import os.path
import sys
import json
def stripManufacturerPrefix(manufacturer, destinations, test=False):
    """Strip a manufacturer prefix from each part file's name and re-file it.

    *manufacturer* may be "prefix:Canonical Name": the prefix is what gets
    stripped from part names while the canonical name is recorded as the
    part's manufacturer.  Each matching JSON part file is rewritten to
    "<Manufacturer>/<Name>.json" (spaces/slashes replaced by dashes) and the
    old file removed; with test=True the result is only printed.
    """
    correct = manufacturer
    if ":" in manufacturer:
        parts = manufacturer.split(":")
        manufacturer = parts[0]
        correct = parts[1]
    for destination in destinations:
        # Only plain files are processed; symlinks are left alone.
        if not os.path.isfile(destination) or os.path.islink(destination):
            continue
        with open(destination, "r") as handle:
            part = json.load(handle)
        if not part["name"].startswith(manufacturer):
            continue
        part["name"] = part["name"][len(manufacturer):].strip()
        part["manufacturer"] = correct
        new_destination = part["manufacturer"].replace(" ", "-") + "/" + part["name"].replace(" ", "-").replace("/", "-") + ".json"
        if not test:
            with open(new_destination, "w") as handle:
                handle.write(json.dumps(part, indent=1, sort_keys=True, separators=(',', ': ')))
            os.remove(destination)
        else:
            print(new_destination)
            print(part)
            print
if __name__ == "__main__":
manufacturer = sys.argv[1].decode("utf-8")
destinations = sys.argv[2:]
stripManufacturerPrefix(manufacturer, destinations)
| [
"scott.shawcroft@gmail.com"
] | scott.shawcroft@gmail.com |
80fd86660bc0bee2c434abb54b4cbfa9ef7f9be7 | dbfc8ca4dbdef6002b0738dd4c30d569eb9e36c3 | /imetadata/database/base/dml/__init__.py | a6be46c1871dc8b7d257b2735654be33a8be0ce4 | [] | no_license | GISdeveloper2017/imetadata | da32e35215cc024a2e5d244ee8afc375c296550d | 58516401a054ff0d25bfb244810a37838c4c8cf6 | refs/heads/master | 2023-03-26T06:38:28.721553 | 2021-03-06T09:32:06 | 2021-03-06T09:32:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | # -*- coding: utf-8 -*-
# @Time : 2020/11/23 21:11
# @Author : 王西亚
# @File : __init__.py.py
| [
"wangxiya@me.com"
] | wangxiya@me.com |
9de698aabcd24e0d8e7b125ea53adbb5167b3d8b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02394/s366374910.py | 34d6b2cc6782e5002623f9419f9f8a358a2dd94e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | w, h, x, y, r = map(int, input().split())
if 0 <= (x-r) and (x+r) <= w and 0 <= (y-r) and (y+r) <= h:
print("Yes")
else:
print("No")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
95f93a3b553112932f685d0062512d329d20bfb9 | 4b40a5126bc5728282542bf7b2fd70d5425433c7 | /rovorweb/rovorweb/settings_shared.py | d83da6afeebfe641084704acfa444ecadbcb74ee | [] | no_license | DerHabicht/RedROVOR | 9d33bc1ac568169bea5c8b3a8ee4209b65b94413 | 17a19dea7c3033b9adaec614983e703f5bc9e8ba | refs/heads/master | 2020-03-09T09:38:11.873128 | 2013-08-14T04:20:18 | 2013-08-14T04:20:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,918 | py | # Django settings for Rovor project.
import os
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__)) #get the directory of the project, up two levels
# NOTE(review): the admin email address below is empty, so error reports sent
# to ADMINS (e.g. by the 'mail_admins' logging handler) have no recipient.
ADMINS = (
    ('ROVOR-admin', ''),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Denver'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT,'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT,'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
#ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.gzip.GZipMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'rovorweb.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'rovorweb.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# Project-local apps (icons_mimetypes, dirmanage, root, accounts, reduction,
# obs_database, targets) are listed after the stock django.contrib apps.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'icons_mimetypes',
    'dirmanage',
    'root',
    'accounts',
    'reduction',
    'obs_database',
    'targets',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            # BUG FIX: the line-number placeholder was written "$(lineno)d",
            # which %-style logging emits literally; it must be "%(lineno)d"
            # for the LogRecord attribute to be interpolated.
            'format': '[%(levelname)s %(asctime)s %(pathname)s %(lineno)d] %(message)s',
            'datefmt':'%d/%b/%Y %H:%M:%S',
        },
        'standard': {
            'format': '[%(levelname)s %(asctime)s %(module)s] %(message)s',
            'datefmt':'%d/%b/%Y %H:%M:%S',
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters' : {
        # Suppresses the mail_admins handler while DEBUG=True.
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console':{
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter':'standard',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins','console'],
            'level': 'ERROR',
            'propagate': True,
        },
        # Project logger: everything the Rovor apps emit goes to the console.
        'Rovor': {
            'handlers':['console'],
            'level': 'DEBUG',
        }
    }
}
# Login settings: unauthenticated users are redirected to LOGIN_URL;
# after a successful login they land on LOGIN_REDIRECT_URL.
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
| [
"astrothayne@gmail.com"
] | astrothayne@gmail.com |
c52ffa502f70e56cd01b3beb435cf4271d9274d0 | 3fe3da509f1899d7ae5116c6ee1cafb22699e21d | /naivebayes.py | 24caa0dfc5007700464fc7fa3c8c57a8cb762f63 | [] | no_license | tinawu-23/binary-classification-models | 64757cc03024bf617598ecd22e362aa33198f2e8 | e4f64b61baa6332aec328f73b233ce4d079cea8e | refs/heads/master | 2020-03-30T00:15:29.534837 | 2018-09-27T18:17:28 | 2018-09-27T18:17:28 | 150,513,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,843 | py | #!/usr/bin/env python3
import pandas as pd
# Naive Bayes classifier over three categorical features of football games,
# predicting the 'Label' column (Win/Lose). Trains on counts, then scores a
# held-out test file and reports accuracy/precision/recall/F1.
# read training data
df = pd.read_csv('Dataset-football-train.txt', sep='\t', lineterminator='\n')
df_features = df[['Is_Home_or_Away','Is_Opponent_in_AP25_Preseason','Media']]
# P(Lose) P(lose)
# groupby('Label').size() yields counts in sorted label order: ('Lose', 'Win').
l,w = df.groupby('Label').size()
pl = l / (l+w)
pw = w / (l+w)
# P(feature)
# Per feature, a (feature value, label) -> count table used as the likelihood source.
featuredict = {}
for feature in df_features:
    # Iterating a DataFrame yields its column names.
    featuredict[feature] = df.groupby([feature,'Label']).size()
# read testing data
df_test = pd.read_csv('Dataset-football-test.txt', sep='\t', lineterminator='\n')
perdictiondict = {}
print('Perdiction Results: ')
for index,row in df_test.iterrows():
    lose = win = 0
    i = 0
    for feature in df_features:
        if i==0:
            # First feature seeds the running product with raw counts.
            # NOTE(review): unlike the branch below, a feature value unseen in
            # training raises an uncaught KeyError here.
            lose = featuredict[feature][row[feature]]['Lose']
            win = featuredict[feature][row[feature]]['Win']
        else:
            try:
                lose *= featuredict[feature][row[feature]]['Lose']
            except:
                lose = 0
            try:
                win *= featuredict[feature][row[feature]]['Win']
            except:
                win = 0
        i += 1
    # Counts (not probabilities) were multiplied, so divide by the class count
    # once per feature (3 features => l**3 / w**3) to recover the product of
    # conditionals before weighting by the class prior.
    perdiction = 'Win' if (lose*pl/(l**3)) < (win*pw/(w**3)) else 'Lose'
    print("ID {}: {}".format(row['ID'],perdiction))
    perdictiondict[row['ID']] = perdiction
# Accuracy, Precision, Recall, F1 score calculation
print('\nModel Evaluation: ')
TP = FP = TN = FN = 0
for index,row in df_test.iterrows():
    # Confusion-matrix tally with 'Win' as the positive class.
    if perdictiondict[row['ID']] == 'Win' and row['Label'] == 'Win':
        TP += 1
    elif perdictiondict[row['ID']] == 'Win' and row['Label'] == 'Lose':
        FP += 1
    elif perdictiondict[row['ID']] == 'Lose' and row['Label'] == 'Lose':
        TN += 1
    elif perdictiondict[row['ID']] == 'Lose' and row['Label'] == 'Win':
        FN += 1
accuracy = (TP+TN)/(TP+FP+TN+FN)
precision = TP/(TP+FP)
recall = TP/(TP+FN)
F1 = 2*precision*recall / (precision+recall)
print("Accuracy: {}\nPrecision: {}\nRecall: {}\nF1 Score: {}".format(round(accuracy,3),round(precision,3),round(recall,3),round(F1,3)))
| [
"tinawu.nd23@gmail.com"
] | tinawu.nd23@gmail.com |
4821c274eca3dedfa79f027f183fccf8e8699689 | a7a0dc5d5e6f8e61cd9729dcb79ee191edf4ee4b | /Week-1/AplusB.py | edd08978f5416aac11d4e295965ac481138d49af | [] | no_license | bvchand/algorithmic-toolbox | 857904bf6c14b5f3f5f3d33a739ed33b9a6985e2 | 935211ea22b1d429190a26fec95516691a358760 | refs/heads/main | 2023-01-06T15:35:24.635505 | 2020-11-04T21:13:58 | 2020-11-04T21:13:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | #python3
# A+B problem: read two whitespace-separated integers from stdin and print
# their sum. (The previous version split the line but printed the raw token
# list instead of the sum sketched in its commented-out lines.)
a, b = input().split()
print(int(a) + int(b))
# print(s) | [
"65008811+bvchand@users.noreply.github.com"
] | 65008811+bvchand@users.noreply.github.com |
8ba3ca416a5d385c1158274f46e71ad3750148eb | e7af30370e277b459e1c49edcc0562d5b5c32abc | /Learning_ScikitLearn/Model/Linear_Classification/LogisticRegression_Classification.py | 68bb53cef0d25d1f7959af186211991c7beda251 | [] | no_license | justgolikeme/My_MachineLearning | 208ab766478662cf36ffa7f9202fed0ad6f0ad28 | 948a84684a2a6f1c9e613948ed246062468016bd | refs/heads/master | 2022-05-13T05:02:48.488269 | 2020-01-03T07:27:50 | 2020-01-03T07:27:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,023 | py | # -*- coding: utf-8 -*-
# @Time : 2019/12/16 15:55
# @Author : Mr.Lin
'''
用于分类的线性模型
线性模型也广泛应用于分类问题。我们首先来看二分类。这时可以利用下面的公式进行
预测:
ŷ = w[0] * x[0] + w[1] * x[1] + …+ w[p] * x[p] + b > 0
这个公式看起来与线性回归的公式非常相似,但我们没有返回特征的加权求和,而是为预
测设置了阈值(0)。如果函数值小于 0,我们就预测类别 -1;如果函数值大于 0,我们就
预测类别 +1。对于所有用于分类的线性模型,这个预测规则都是通用的。同样,有很多种
不同的方法来找出系数(w)和截距(b)。
对于用于回归的线性模型,输出 ŷ 是特征的线性函数,是直线、平面或超平面(对于更高
维的数据集)。对于用于分类的线性模型,决策边界是输入的线性函数。换句话说,(二
元)线性分类器是利用直线、平面或超平面来分开两个类别的分类器。本节我们将看到这
方面的例子。
学习线性模型有很多种算法。这些算法的区别在于以下两点:
• 系数和截距的特定组合对训练数据拟合好坏的度量方法;
• 是否使用正则化,以及使用哪种正则化方法。
不同的算法使用不同的方法来度量“对训练集拟合好坏”。由于数学上的技术原因,不可
能调节 w 和 b 使得算法产生的误分类数量最少。对于我们的目的,以及对于许多应用而
言,上面第一点(称为损失函数)的选择并不重要。
最常见的两种线性分类算法是 Logistic 回归(logistic regression)和线性支持向量机(linear
support vector machine,线性 SVM),前者在 linear_model.LogisticRegression 中实现,
后者在 svm.LinearSVC (SVC 代表支持向量分类器)中实现。虽然 LogisticRegression
的名字中含有回归(regression),但它是一种分类算法,并不是回归算法,不应与
LinearRegression 混淆。
'''
from sklearn.cross_validation import cross_val_predict, cross_val_score
from sklearn.linear_model import LogisticRegression
from Learning_ScikitLearn.Model.Linear_Classification.Data_Source import X_test,X_train,y_train,y_test,data_y,data_X
# logreg = LogisticRegression().fit(X_train, y_train)
# print("Training set score: {:.3f}".format(logreg.score(X_train, y_train)))
# print("Test set score: {:.3f}".format(logreg.score(X_test, y_test)))
# Training set score: 0.955
# Test set score: 0.958
#
# [0.94827586 0.9137931 0.92982456 0.94736842 0.96491228 0.98245614
# 0.94736842 0.94642857 0.96428571 0.96428571]
# print("")
# print(cross_val_score(logreg, data_X, data_y, cv=10))
def test_C_Parameter():
    """Sweep the inverse regularization strength C of LogisticRegression.

    For each C in {0.1, 1, 10}, fit on the train split and print the 10-fold
    cross-validation scores on the full data set (the message is in Chinese:
    "scores under C=...").
    """
    # NOTE(review): sklearn.cross_validation (imported above) was removed in
    # scikit-learn 0.20; modern code imports cross_val_score from
    # sklearn.model_selection.
    C = [0.1,1,10]
    for c in C:
        logreg = LogisticRegression(C=c)
        logreg.fit(X_train,y_train)
        print("C为:{}下的分数:{}\n".format(c,cross_val_score(logreg, data_X, data_y, cv=10)))
test_C_Parameter()
| [
"2669093302@qq.com"
] | 2669093302@qq.com |
98e5d278e93ebbab8df9de68cc59178ebac7ddb8 | e04d81ee1b4b3f7b768677fa3c75d96e0188dd7a | /Delaunay_Triangulation/Old_Stuff/PointSet.py | f1d2197a634990a54c85b320c850d9e12d0e9344 | [] | no_license | tanneryilmaz/Numerical_Simulations | e82ce5b357c2701dc0c790a60ad04fb0e50332d1 | 5d79b3c6397e0d1877948144af5affc9c19cf95e | refs/heads/master | 2020-05-15T10:47:03.679806 | 2019-04-28T20:50:49 | 2019-04-28T20:50:49 | 168,756,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import numpy as np
def generate_rand_points(dimension_amplitudes, num_points):
    """Return uniform-random points scaled per dimension.

    Args:
        dimension_amplitudes: sequence of per-dimension upper bounds; the
            i-th coordinate is drawn uniformly from [0, dimension_amplitudes[i]).
        num_points: number of points to generate.

    Returns:
        ndarray of shape (num_points, len(dimension_amplitudes)).
    """
    amplitudes = np.asarray(dimension_amplitudes, dtype=float)
    # Broadcasting scales every column at once, replacing the per-dimension
    # Python loop of the original implementation.
    return np.random.rand(num_points, amplitudes.size) * amplitudes
| [
"tannerisyilmaz@gmail.com"
] | tannerisyilmaz@gmail.com |
7ad47d35b8b6d618120876ea81cee10cd4498f0f | 329b48089c64ebefe78d52f1c71c73bdadadd4b4 | /ML/m02_3_xor.py | f054586220c6b7e9e2f5ec6da088dfde56b25a5d | [] | no_license | variablejun/keras__R | 7f854570952ed97c48715047015786d873e512cb | 9faf4814b46cda1ac0ddbf2a2f8236fa0394f144 | refs/heads/main | 2023-07-13T19:32:25.950500 | 2021-08-22T18:26:52 | 2021-08-22T18:26:52 | 398,870,548 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from sklearn.svm import LinearSVC
import numpy as np
from sklearn.metrics import accuracy_score
#1 data: the four XOR input/label pairs
x_data = [[0,0],[0,1],[1,0],[1,1]]
y_data = [0,1,1,0]
#2 model
model = LinearSVC()
#3 fit
model.fit(x_data,y_data)
#4 evaluate (predict on the training points themselves)
y_predict = model.predict(x_data)
print(x_data,' 의 예측값 : ',y_predict)
results= model.score(x_data, y_data)
print('score : ',results)
acc = accuracy_score(y_data,y_predict)
print('acc : ',acc)
# XOR is not linearly separable, so a linear SVM cannot do better than 0.5
# accuracy here -- exactly what the recorded runs below show.
'''
[[0, 0], [0, 1], [1, 0], [1, 1]] 의 예측값 :  [0 0 0 0]
score :  0.5
acc :  0.5
[[0, 0], [0, 1], [1, 0], [1, 1]] 의 예측값 :  [1 1 1 1]
score :  0.5
acc :  0.5
''' | [
"crescendo0217@gmail.com"
] | crescendo0217@gmail.com |
8d17d63a7dd29b178320358d7e33a6579f81462f | 747008e6f3cc4c3a84f5324dcbe3bb633e2ad429 | /ticket_management/doctype/schedular_events/test_schedular_events.py | cb172b90641e14269427fa980fa740df6cc66621 | [] | no_license | anulakshmiv/ticket_management | f711301251bcdf5644dd3da3d33d2888cd671277 | 6f9f415151c3024aeb9b04819101911291c1fd0a | refs/heads/master | 2023-01-03T12:22:44.087365 | 2020-10-27T04:47:57 | 2020-10-27T04:47:57 | 306,605,397 | 0 | 0 | null | 2020-10-27T06:11:39 | 2020-10-23T10:39:20 | Python | UTF-8 | Python | false | false | 237 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Momscode Technologies Pvt.Ltd and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestSchedularEvents(unittest.TestCase):
	# Placeholder suite for the "Schedular Events" doctype; Frappe's test
	# runner discovers it by name, but no behavior is asserted yet.
	pass
| [
"anulakshmiv123@gmail.com"
] | anulakshmiv123@gmail.com |
2660b0414dcfc91fedbe970131fbcd2163477d60 | 637baf7ba30ea3ee8f318163a784c46797d424e6 | /third_party/FasterRCNN/FasterRCNN/utils/box_ops.py | ecaa19e0ba82ee8f2eada2318a6783a351982165 | [
"Apache-2.0"
] | permissive | strongwolf/ssl_detection | 8ad9e32d389727a7e3d8e488fad2f01ad874a61b | c60809ab0bbfcf3c3695b0be44dd4c7ac36b2dc8 | refs/heads/master | 2022-11-03T17:30:52.495216 | 2020-06-17T09:41:38 | 2020-06-17T09:41:38 | 272,935,568 | 0 | 0 | Apache-2.0 | 2020-06-17T09:41:54 | 2020-06-17T09:41:54 | null | UTF-8 | Python | false | false | 1,952 | py | # -*- coding: utf-8 -*-
# File: box_ops.py
import tensorflow as tf
from tensorpack.tfutils.scope_utils import under_name_scope
"""This file is modified from https://github.com/tensorflow/models/blob/master/object_detection/core/box_list_ops.py"""
@under_name_scope()
def area(boxes):
  """Compute the area of each box.

  Boxes are rows of (x_min, y_min, x_max, y_max).
  Args:
    boxes: nx4 floatbox
  Returns:
    n
  """
  # split yields four nx1 columns; squeeze drops the trailing axis -> shape [n].
  x_min, y_min, x_max, y_max = tf.split(boxes, 4, axis=1)
  return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
@under_name_scope()
def pairwise_intersection(boxlist1, boxlist2):
  """Compute pairwise intersection areas between boxes.
  Args:
    boxlist1: Nx4 floatbox
    boxlist2: Mx4
  Returns:
    a tensor with shape [N, M] representing pairwise intersections
  """
  x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1)
  x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1)
  # (N,1) columns broadcast against transposed (1,M) rows to give every
  # pairwise overlap extent; max(0, .) zeroes pairs disjoint on that axis.
  all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
  all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
  intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
  all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
  all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
  intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
  return intersect_heights * intersect_widths
@under_name_scope()
def pairwise_iou(boxlist1, boxlist2):
  """Computes pairwise intersection-over-union between box collections.
  Args:
    boxlist1: Nx4 floatbox
    boxlist2: Mx4
  Returns:
    a tensor with shape [N, M] representing pairwise iou scores.
  """
  intersections = pairwise_intersection(boxlist1, boxlist2)
  areas1 = area(boxlist1)
  areas2 = area(boxlist2)
  # union = areaA + areaB - intersection, broadcast to the full [N, M] grid.
  unions = (
      tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
  # tf.where guards the 0/0 case: pairs with zero intersection get IoU 0
  # instead of a potential divide-by-zero NaN.
  return tf.where(
      tf.equal(intersections, 0.0), tf.zeros_like(intersections),
      tf.truediv(intersections, unions))
| [
"zizhaoz@google.com"
] | zizhaoz@google.com |
706738bb2921c057b3622f10b82c723cd8f877d7 | b69b69edcf3c2ef4ffed200d6982cfe0ce9678c8 | /1_elonvsjeff/resize.py | 6eef8a783a19a28709147fdf5366ee30946eaea3 | [] | no_license | Roboramv2/Python-games | a21525d10bdf0d7ac75fdc93b0dec5461114ca67 | c5c60ca2d331d4801f668a49a029092eaa0d14e9 | refs/heads/main | 2023-07-14T18:11:03.945349 | 2021-08-20T04:33:46 | 2021-08-20T04:33:46 | 381,820,187 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import cv2
# One-shot asset fix-up: load the boss sprite, scale it to 64x64, and (on the
# following line) write it back over the original file.
name = './assets/desboss.png'
new = cv2.imread(name)
new = cv2.resize(new, (64, 64))
cv2.imwrite('./assets/desboss.png', new) | [
"sriramkraja@gmail.com"
] | sriramkraja@gmail.com |
2dbd988ce6fdc9cdd5bc1c7f8d948b0ac97b8516 | 469fad5f90e8d11c018fde9a7a6c44014a16f37e | /etl_part | 437d4060131923a1060058895e6280c5a894d828 | [] | no_license | andrewlin0/ETL_Pipeline_Part_DS3002 | bb59fc08a827f6b56801754375ca1e4e02f75d29 | 7dd7570a4f26afb30e842b0849c7a40b78cdb7fe | refs/heads/main | 2023-04-15T01:05:07.593146 | 2021-04-29T15:08:18 | 2021-04-29T15:08:18 | 356,676,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,031 | #!/usr/bin/python3
import sys
import getopt
import csv
import json
import requests
import pandas as pd
import kaggle
import os
# Operation 1: Fetching from remote
def fetch_from_kaggle(user, title, folder_name):
    """Download and unzip the Kaggle dataset ``user/title`` into *folder_name*.

    Requires Kaggle API credentials to be configured locally.
    """
    try:
        # Authenticate to use Kaggle API
        kaggle.api.authenticate()
        # Download the dataset from Kaggle and put it into folder_name
        kaggle.api.dataset_download_files(user + "/" + title, path=folder_name, unzip=True)
    except:
        # NOTE(review): the bare except also swallows auth/network failures,
        # so this message can be misleading about the actual cause.
        print("Invalid username and/or dataset title")
# Operation 2a: CSV -> TSV
def csv_to_tsv(csv_path, out_tsv):
    """Convert the CSV file at *csv_path* into a tab-separated file at *out_tsv*.

    Fixes over the original: both files are opened with context managers so
    the handles are always closed (they previously leaked), and with
    ``newline=''`` as the csv module requires to avoid blank/mangled rows on
    Windows. Keeps the original best-effort contract of printing a message
    instead of raising.
    """
    try:
        with open(csv_path, newline="", encoding="utf-8") as src, \
             open(out_tsv, "w", newline="", encoding="utf-8") as dst:
            csv.writer(dst, delimiter="\t").writerows(csv.reader(src))
    except Exception:
        print("Could not make into TSV")
# Operation 2b: CSV -> JSON
def csv_to_json(csv_path, json_path):
    """Read *csv_path* (UTF-8) and write its rows to *json_path* as a JSON array.

    Each CSV row becomes one JSON object keyed by the header row.
    """
    with open(csv_path, encoding='utf-8') as csv_file:
        records = [dict(entry) for entry in csv.DictReader(csv_file)]
    with open(json_path, "w", encoding='utf-8') as json_file:
        # indent=4 matches the pretty-printed output of the original version.
        json_file.write(json.dumps(records, indent=4))
# Operation 3: Brief Summary
def numrows_cols(csv_path):
    """Print how many data rows and columns the CSV at *csv_path* contains."""
    frame = pd.read_csv(csv_path)
    # DataFrame.shape is (rows, columns); the header line is not counted.
    n_rows, n_cols = frame.shape
    print(f"This file has {n_rows} rows and {n_cols} columns.")
def main(argv):
    """CLI driver: fetch a Kaggle dataset and emit TSV/JSON/summary per CSV.

    Expects ``-i`` plus three positional values: username, dataset title,
    destination folder.
    NOTE(review): although getopt parses ``-i``, its option value is ignored;
    the inputs are actually read positionally from argv[1..3], and a short
    argv raises an uncaught IndexError before the "Enter a ..." messages
    below could ever fire (printing an already-bound local cannot raise).
    """
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile="])
    except getopt.GetoptError:
        print("etl_part -i")
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print("convert -i <inputid>")
            sys.exit()
        elif opt in ("-i", "--ifile"):
            user = argv[1]
            title = argv[2]
            folder_name = argv[3]
            check1 = 0
            check2 = 0
            check3 = 0
            try:
                print("User is:", user)
                check1 = 1
            except:
                print("Enter a Username")
            try:
                print("Title is:", title)
                check2 = 1
            except:
                print("Enter the title of the data located in the URL")
            try:
                print("Folder name is:", folder_name)
                check3 = 1
            except:
                print("Enter a name for the folder that will be created for the data to be stored in")
            if check1 == 1 and check2 == 1 and check3 == 1:
                # Fetch the data
                fetch_from_kaggle(user, title, folder_name)
                # Change the directory to the folder the user inputted
                os.chdir(folder_name)
                # Get the list of files read in
                in_csv_files = os.listdir()
                # If there are multiple csv files in the download, we can take care of them by iterating through each one
                for i in range(len(in_csv_files)):
                    if in_csv_files[i][-4:] == ".csv":
                        in_csv_files[i] = in_csv_files[i].replace('.csv', "")
                        out_tsv = in_csv_files[i] + ".tsv"
                        out_json = in_csv_files[i] + ".json"
                        in_csv = in_csv_files[i] + ".csv"
                        # CSV-> TSV
                        csv_to_tsv(in_csv, out_tsv)
                        # CSV -> JSON
                        try:
                            json_path = out_json
                            csv_to_json(in_csv, json_path)
                        except:
                            print("Possible UnicodeDecodeError, this data cannot be made into JSON format. Try another dataset for JSON.")
                        numrows_cols(in_csv)
                    else:
                        print(in_csv_files[i], "is not a .csv file. These transformations are only applicable to .csv files.")
            else:
                print("You did not enter values for all the fields (username, title, folder_name)")
if __name__ == "__main__":
main(sys.argv[1:]) | [
"noreply@github.com"
] | noreply@github.com | |
e62d3fc22e691fd90a2e493aa90f107c9561d19d | 0d7d08cd2cfa8f3330c12d87e00eaba0563bbffc | /demo/yuanliang/facebook/fetchCampaign.py | f380fc31401509e17a9eb434dd1a98b423bf2240 | [] | no_license | tanmillet/py-ws | b1997f15a2f202dfcad04c8f6506c1754586bc19 | 177d4007abe26e63d1d58bbc624164fd86d382f9 | refs/heads/master | 2020-03-27T17:07:08.209291 | 2019-01-01T06:45:54 | 2019-01-01T06:45:54 | 146,829,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,634 | py | from facebook_business.adobjects.campaign import Campaign
from facebook_business.adobjects.adsinsights import AdsInsights
from datetime import datetime
from publicConfig import BasicConfig as cfg
class GetCampaigns():
    """Pulls Facebook ad-campaign metadata and hourly insight stats for one
    ad account and persists them through *model* (the DB access layer)."""
    def __init__(self, adaccount, model):
        self.adaccount = adaccount
        # self.dbconfig = cfg.dbconfig
        self.model = model
        self.yesterday, self.today, self.time_range = cfg.get_time_range()
    def fetch_campaigns(self):
        """Return campaigns updated yesterday/today in the hours of interest."""
        try:
            campaigns = self.adaccount.get_campaigns(fields=[
                Campaign.Field.name,
                Campaign.Field.account_id,
                Campaign.Field.id,
                Campaign.Field.status,
                Campaign.Field.objective,
                Campaign.Field.effective_status,
                Campaign.Field.created_time,
                Campaign.Field.updated_time])
                # params={Campaign.Field.effective_status:
                #         [AdSet.Status.archived, AdSet.Status.active,
                #          AdSet.Status.paused]})
        except Exception as e:
            # NOTE(review): if get_campaigns raised, 'campaigns' is unbound and
            # the loop below raises NameError.
            print(e)
        recent_campaigns = list()
        #current_hour = str(datetime.now().hour)
        # NOTE(review): hard-coded '01' looks like a debug leftover; the
        # commented line above computed the actual current hour.
        current_hour = '01'
        last_hour = str(datetime.now().hour - 1)
        for campaign in campaigns:
            # updated_time is an ISO-like string: chars [:10] are the date,
            # [11:13] the hour.
            updated_dt = campaign['updated_time'][:10]
            updated_hour = campaign['updated_time'][11:13]
            if (updated_dt == self.yesterday or updated_dt == self.today)\
                and updated_hour in (current_hour, last_hour, '23'):
                recent_campaigns.append(campaign)
            else:
                # Early break assumes the API returns campaigns ordered by
                # updated_time -- TODO confirm, otherwise valid entries are skipped.
                break
        return recent_campaigns
    def handle_campaign(self, campaign):
        """Flatten one campaign record and upsert it into the basics table."""
        result = dict()
        # Single quotes are escaped so the name is safe in the SQL layer below.
        result['name'] = campaign.get('name', '').replace("'", '\"')
        result['facebook_account_id'] = campaign.get('account_id', '')
        result['facebook_ad_campaign_id'] = campaign.get('id', '')
        result['status'] = campaign.get('status', '')
        result['objective'] = campaign.get('objective', '')
        result['effective_status'] = campaign.get('effective_status', '')
        result['campaign_created_time'] = campaign.get('created_time', '')
        result['updated_time'] = campaign.get('updated_time', '')
        result['created_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(result)
        self.model.replace_into_campaign_basic(result)
    def fetch_campaign_data(self, campaign_id):
        """Fetch hourly insight rows for one campaign over self.time_range."""
        campaign = Campaign(campaign_id)
        campaign_datas = campaign.get_insights(fields=[
            AdsInsights.Field.campaign_id,
            AdsInsights.Field.campaign_name,
            AdsInsights.Field.spend,
            AdsInsights.Field.impressions,
            AdsInsights.Field.unique_actions,
            AdsInsights.Field.reach],
            params={
                'breakdowns':
                    ['hourly_stats_aggregated_by_audience_time_zone'],
                'time_range': self.time_range})
        return campaign_datas
    def handle_campaign_data(self, campaign_id):
        """Insert one stat row per hourly insight record of the campaign."""
        # try:
        campaign_datas = self.fetch_campaign_data(campaign_id)
        # The triple-quoted block below is disabled exception handling kept
        # as a string literal; left untouched.
        '''
        except Exception as e:
            error_info = '获取广告系列:{} 统计数据发生错误'.format(campaign_id)
            print(error_info)
        '''
        # self.model.delete_from_campaign_stat(campaign_id)
        for campaign_data in campaign_datas:
            result = dict()
            result['spend'] = campaign_data.get('spend', '0')
            result['impression_count'] = campaign_data.get('impressions', '0')
            result['reach_count'] = campaign_data.get('reach', '0')
            result['ad_compaign_id'] = campaign_data.get('campaign_id', '0')
            result['stat_dt'] = campaign_data.get('date_start', '0')
            # Breakdown value starts with the two-digit hour, e.g. "13:00:00 - 13:59:59".
            result['stat_hour'] = campaign_data['hourly_stats_aggregated_by_audience_time_zone'][0:2]
            result['create_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            actions = campaign_data.get('unique_actions', '')
            if actions:
                for action in actions:
                    if action['action_type'] == 'link_click':
                        result['link_click_count'] = action['value']
                    if action['action_type'] == 'offsite_conversion.fb_pixel_purchase':
                        result['effective_count'] = action['value']
                    if action['action_type'] == '':
                        result['add_to_cart'] = action['value']
            print(result)
            self.model.insert_into_campaignstat(result)
| [
"tanchongtao@stosz.com"
] | tanchongtao@stosz.com |
e2f80ae63c842ab915e70054164ea7ef16f417b2 | 15fb62305a2fa0146cc84b289642cc01a8407aab | /Python/119-pascalTriangle2.py | ca82b9b5ce299755fd88d42d79285542b566e463 | [] | no_license | geniousisme/leetCode | ec9bc91864cbe7520b085bdab0db67539d3627bd | 6e12d67e4ab2d197d588b65c1ddb1f9c52a7e047 | refs/heads/master | 2016-09-09T23:34:03.522079 | 2015-09-23T16:15:05 | 2015-09-23T16:15:05 | 32,052,408 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from math import factorial
class Solution:
    # @param {integer} rowIndex
    # @return {integer[]}
    def getRow(self, rowIndex):
        """Return row ``rowIndex`` (0-based) of Pascal's triangle.

        Each entry is the binomial coefficient C(rowIndex, k) computed from
        factorials. Fixes two Python-3 portability bugs in the original:
        ``xrange`` does not exist on Python 3 (``range`` works on both), and
        ``/`` is float division on Python 3 (``//`` keeps the entries ints).
        """
        f = factorial
        n = rowIndex
        return [f(n) // (f(k) * f(n - k)) for k in range(n + 1)]
# NOTE: Python-2-only demo driver (bare print statement and xrange);
# prints the first ten rows of Pascal's triangle.
if __name__ == '__main__':
    s = Solution()
    for i in xrange(10):
        print s.getRow(i)
| [
"chia-hao.hsu@aiesec.net"
] | chia-hao.hsu@aiesec.net |
9d79f133ae46df0a2a814949bc56bb9b67709332 | 92754bb891a128687f3fbc48a312aded752b6bcd | /Algorithms/Python3.x/836-Rectangle_Overlap.py | 109710852b3db1879f46f641e56714e64efbeca6 | [] | no_license | daidai21/Leetcode | ddecaf0ffbc66604a464c3c9751f35f3abe5e7e5 | eb726b3411ed11e2bd00fee02dc41b77f35f2632 | refs/heads/master | 2023-03-24T21:13:31.128127 | 2023-03-08T16:11:43 | 2023-03-08T16:11:43 | 167,968,602 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | # Runtime: 32 ms, faster than 89.53% of Python3 online submissions for Rectangle Overlap.
# Memory Usage: 13.9 MB, less than 8.33% of Python3 online submissions for Rectangle Overlap.
class Solution:
    def isRectangleOverlap(self, rec1: List[int], rec2: List[int]) -> bool:
        """Return True iff the two axis-aligned rectangles share positive area.

        Rectangles are [x1, y1, x2, y2]; boxes that merely touch along an
        edge or at a corner do NOT count as overlapping.
        """
        a_left, a_bottom, a_right, a_top = rec1[0], rec1[1], rec1[2], rec1[3]
        b_left, b_bottom, b_right, b_top = rec2[0], rec2[1], rec2[2], rec2[3]
        overlaps_x = a_left < b_right and b_left < a_right
        overlaps_y = a_bottom < b_top and b_bottom < a_top
        return overlaps_x and overlaps_y
    """
    (left1, right1), (left2, right2)
    Meet the requirements of the topic Equivalent to :
    left1 < x < right1 && left2 < x < right2
    left1 < x < right2 && left2 < x < right1
    left1 < right2 && left2 < right1
    """
| [
"daidai4269@aliyun.com"
] | daidai4269@aliyun.com |
797df30fb9b58c8d6901190b1bb85519f459c80e | 7b85069a6088991ab15f558696bd008acb20364d | /plotting/ttbarCR_DataVsMC.py | 7e5d09da0efc2294e85c13e1124f95f6af0047c7 | [] | no_license | patrickbryant/XhhCommon | e6235f5ff09d3aa95137f22d556323b42061711b | 791b9964a20bfc05253810793c08b76d908e0046 | refs/heads/master | 2020-04-20T22:30:45.516683 | 2019-02-04T20:15:38 | 2019-02-04T20:15:38 | 169,142,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,886 | py | # Calculate e from different inputs
import ROOT
import array
# Variable-width rebinning edges, keyed by a substring of the histogram name
# (see getHist below, which matches keys with str.find).
binDict = {}
binDict["Pt"]  = [125,150,175,200,225,250,300,400,700]
binDict["Eta"] = [-2.5,-1.5,-1.0,-0.5,0,0.5,1.0,1.5,2.5]
binDict["leadJet_Pt_m"] = [0,100,125,150,175,200,300,400]
binDict["sublJet_Pt_m"] = [0,40,60,80,100,150,400]
def parseOptions():
    """Parse the command line: --data/--mc input ROOT files and --out output.

    Returns the optparse ``(options, args)`` pair; every option defaults to "".
    """
    from optparse import OptionParser
    # Removed an unused ``from collections import OrderedDict`` import.
    p = OptionParser()
    p.add_option('--data', type = 'string', default = "",  dest = 'inputDataFile',  help = 'input Data File' )
    p.add_option('--mc',   type = 'string', default = "",  dest = 'inputMCFile',  help = 'input MC File' )
    p.add_option('--out',  type = 'string', default = "",  dest = 'outFileName',  help = 'output File' )
    (o,a) = p.parse_args()
    return (o,a)
def getNum(infile, histName):
    """Return the entry count of histogram *histName* from the open ROOT file *infile*."""
    return infile.Get(histName).GetEntries()
def getHist(infile,histName):
    """Fetch a histogram and rebin it.

    If any binDict key is a substring of *histName*, rebin to those variable
    edges; otherwise merge every 4 bins.
    NOTE(review): "Pt" is also a substring of "leadJet_Pt_m"/"sublJet_Pt_m",
    so which edges win depends on binDict's iteration order -- TODO confirm
    the intended precedence.
    """
    hist = infile.Get(histName)
    for v in binDict.keys():
        if histName.find(v) != -1:
            bins = binDict[v]
            # ROOT's variable-width Rebin needs a C-style double array.
            xBins = array.array("d", bins)
            histNew = hist.Rebin(len(bins) - 1, hist.GetName()+"_rebinned", xBins)
            return histNew
    hist.Rebin(4)
    return hist
def printLine(oFile, line):
    """Echo *line* to stdout and append it, newline-terminated, to *oFile*.

    Uses the parenthesized form of print so the function works on both
    Python 2 and Python 3 (the bare ``print line`` statement is a
    SyntaxError on Python 3).
    """
    print(line)
    oFile.write(line + "\n")
def pNum(num):
    """Render *num* as a whole-number string (fraction truncated toward zero)."""
    whole = int(num)
    return str(whole)
def pFloat(num, rNum):
    """Render *num* as a string rounded to *rNum* decimal places."""
    rounded = round(float(num), rNum)
    return str(rounded)
def makeTables(bNum, aNum):
    """Write a LaTeX tabular comparing data/MC efficiencies before/after the m_h cut.

    *bNum*/*aNum* are the dicts returned by doInclusive. The output file name
    is derived from the global option ``o.outFileName``.
    NOTE(review): the file handle is never explicitly closed here.
    """
    outTxtFileC3 = open(o.outFileName.replace(".root","_DataMC_c3.tex"),"w")
    printLine(outTxtFileC3, "\\begin{tabular}{ c | c | c }")
    printLine(outTxtFileC3, "Observable & Before $m_h$ & After $m_h$ (100-140 GeV) \\\\")
    printLine(outTxtFileC3, "\hline")
    printLine(outTxtFileC3, "$N_{P}$ & "+pNum(bNum["Np"])+" & "+pNum(aNum["Np"])+" \\\\")
    printLine(outTxtFileC3, "$N_{F}$ & "+pNum(bNum["Nf"])+" & "+pNum(aNum["Nf"])+" \\\\")
    printLine(outTxtFileC3, "$\epsilon$ from Data & "+pFloat(bNum["eff_c3_Data" ],3)+" $\pm$ "+pFloat(bNum["eff_c3_Data_Err" ],4)+" & "+pFloat(aNum["eff_c3_Data"],3)+" $\pm$ "+pFloat(aNum["eff_c3_Data_Err"],3)+" \\\\")
    printLine(outTxtFileC3, "\hline")
    printLine(outTxtFileC3, "$\epsilon$ from MC & "+pFloat(bNum["eff_c3_MC" ],3)+" $\pm$ "+pFloat(bNum["eff_c3_MC_Err" ],4)+" & "+pFloat(aNum["eff_c3_MC"],3)+" $\pm$ "+pFloat(aNum["eff_c3_MC_Err"],3)+" \\\\")
    printLine(outTxtFileC3, "\end{tabular}")
def doInclusive(regName,hMod,lMod, dataFile, mcFile):
    """Compute pass-fraction c3 = Np/(Np+Nf) for data and MC in one region.

    Counts come from the nbjets histograms of the pass/fail top-tag veto
    selections (*lMod* selects the post-m_h variant). Returns the counts,
    efficiencies and their binomial errors as a dict. Python-2 print
    statements are used throughout this file.
    """
    print
    print
    print "=========================="
    print regName
    print "=========================="
    Np_MC = getNum(mcFile,"PtLepTop_mub100PassTTVeto"+lMod+"/nbjets")
    Nf_MC = getNum(mcFile,"PtLepTop_mub100FailTTVeto"+lMod+"/nbjets")
    NlepTot_MC = Np_MC+Nf_MC
    print "\tNp_MC:",Np_MC
    print "\tNf_MC:",Nf_MC
    c3_MC = Np_MC/(NlepTot_MC)
    # Binomial error: sqrt(eff * (1 - eff) / N).
    eff_c3_MC_Err = pow( (c3_MC * (1-c3_MC)) /NlepTot_MC ,0.5)
    print "\tc3_MC:",c3_MC
    print "\t\teff_MC:",c3_MC,"+/-",eff_c3_MC_Err
    Np_Data = getNum(dataFile,"PtLepTop_mub100PassTTVeto"+lMod+"/nbjets")
    Nf_Data = getNum(dataFile,"PtLepTop_mub100FailTTVeto"+lMod+"/nbjets")
    NlepTot_Data = Np_Data+Nf_Data
    print "\tNp_Data:",Np_Data
    print "\tNf_Data:",Nf_Data
    c3_Data = Np_Data/(NlepTot_Data)
    eff_c3_Data_Err = pow( (c3_Data * (1-c3_Data)) /NlepTot_Data ,0.5)
    print "\tc3_Data:",c3_Data
    print "\t\teff_Data:",c3_Data,"+/-",eff_c3_Data_Err
    return {"Np":Np_Data, "Nf":Nf_Data,
            "eff_c3_Data":c3_Data, "eff_c3_Data_Err":eff_c3_Data_Err,
            "eff_c3_MC":c3_MC, "eff_c3_MC_Err":eff_c3_MC_Err,
            }
def doVar(name,hCandName,varName,hMod,lMod):
    """Build and write pass/fail/total/ratio histograms of *varName* for MC and data.

    Writes into a new directory ``<name>_<varName>`` of the global *outFile*,
    reading from the global *inFileMC*/*inFileData* ROOT files.
    NOTE(review): *hCandName* and *hMod* are accepted but never used.
    """
    print
    print
    print "=========================="
    print varName
    print "=========================="
    outFile.cd()
    thisDir = outFile.mkdir(name+"_"+varName)
    thisDir.cd()
    Np_MC = getHist(inFileMC,"PtLepTop_mub100PassTTVeto"+lMod+"/hCand_"+varName)
    Np_MC.SetName("Np_MC")
    Nf_MC = getHist(inFileMC,"PtLepTop_mub100FailTTVeto"+lMod+"/hCand_"+varName)
    # NOTE(review): named "Nf" (not "Nf_MC"), inconsistent with Np_MC above.
    Nf_MC.SetName("Nf")
    NlepTot_MC = ROOT.TH1F(Np_MC)
    NlepTot_MC.Add(Nf_MC)
    NlepTot_MC.SetName("NlepTot_MC")
    Np_MC.Write()
    Nf_MC.Write()
    NlepTot_MC.Write()
    # Ratio histogram c3 = Np / (Np + Nf); Sumw2 enables error propagation.
    c3_MC = ROOT.TH1F(Np_MC)
    c3_MC.Sumw2()
    c3_MC.Divide(NlepTot_MC)
    c3_MC.SetName("c3_MC")
    c3_MC.Write()
    eff_c3_MC = ROOT.TH1F(c3_MC)
    eff_c3_MC.SetName("eff_c3_MC")
    eff_c3_MC.Write()
    #
    # Data
    #
    Np_Data = getHist(inFileData,"PtLepTop_mub100PassTTVeto"+lMod+"/hCand_"+varName)
    Np_Data.SetName("Np")
    Nf_Data = getHist(inFileData,"PtLepTop_mub100FailTTVeto"+lMod+"/hCand_"+varName)
    Nf_Data.SetName("Nf")
    NlepTot_Data = ROOT.TH1F(Np_Data)
    NlepTot_Data.Add(Nf_Data)
    NlepTot_Data.SetName("NlepTot_Data")
    Np_Data.Write()
    Nf_Data.Write()
    NlepTot_Data.Write()
    c3_Data = ROOT.TH1F(Np_Data)
    c3_Data.Sumw2()
    c3_Data.Divide(NlepTot_Data)
    c3_Data.SetName("c3_Data")
    c3_Data.Write()
    eff_c3_Data = ROOT.TH1F(c3_Data)
    eff_c3_Data.SetName("eff_c3_Data")
    eff_c3_Data.Write()
    return
def main(inFileData, inFileMC, outFile):
    """Produce per-variable histograms before/after the Mh cut, then the
    inclusive counts and the summary tables."""
    kinematic_vars = ["Pt","Eta","mW","mTop","Xtt","dRjj","Mass",
                      "leadJet_Pt_m","sublJet_Pt_m"]
    selections = [("BeforeMh", "Inclusive", ""),
                  ("AfterMh",  "Signal",    "_PassMh")]
    for var_name in kinematic_vars:
        for dir_name, h_mod, l_mod in selections:
            doVar(dir_name, "hCand_", var_name, h_mod, l_mod)
    BeforeNums = doInclusive("BeforeMh", "Inclusive", "", inFileData, inFileMC)
    AfterNums = doInclusive("AfterMh", "Signal", "_PassMh", inFileData, inFileMC)
    makeTables(BeforeNums, AfterNums)
if __name__ == "__main__":
    # Parse command-line options, open the input/output ROOT files, and run.
    o,a = parseOptions()
    inFileData = ROOT.TFile(o.inputDataFile,"READ")
    inFileMC = ROOT.TFile(o.inputMCFile, "READ")
    outFile = ROOT.TFile(o.outFileName,"RECREATE")  # overwrites any existing file
    main(inFileData, inFileMC, outFile)
| [
"patrickbryant@uchicago.edu"
] | patrickbryant@uchicago.edu |
# Dataset-metadata residue was fused onto this source line in the dump; kept
# below as a comment so the module parses:
# 51a4ec40ea713aa652fab9745f8e256f3a957ce0 | b2e342b8161a8ff02ab26b53ce42e3ca01118fad | mathematics/combinatorics/ncr_table.py | 74eed4404a9b3663f17ab6bdf68a58f23e576d69 | [] | no_license | Eppie/hackerrank | b62541184ed919c80f647afbc89d6f428fdaed82 | d6e44170d1ec6668e5c00df5dd613295d8154d2e | refs/heads/master | 2020-06-12T10:42:53.672658 | 2016-12-12T04:28:34 | 2016-12-12T04:28:34 | 75,587,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py |
def choose(n, k):
    """Return the binomial coefficient C(n, k); 0 when k is outside [0, n]."""
    if 0 <= k <= n:
        ntok = 1
        ktok = 1
        # Multiply numerator/denominator using min(k, n-k) factors to keep
        # the loop (and intermediate values) as small as possible.
        for t in range(1, min(k, n - k) + 1):
            ntok *= n
            ktok *= t
            n -= 1
        return ntok // ktok  # always divides exactly
    else:
        return 0
# Read the number of test cases, then for each n print row n of Pascal's
# triangle with every entry reduced modulo 10^9.
num_cases = int(input())
for _case in range(num_cases):
    n = int(input())
    row = (str(choose(n, k) % 10**9) for k in range(n + 1))
    print(' '.join(row))
| [
"aepstein@udel.edu"
] | aepstein@udel.edu |
bfb211f64cb26ced576000456975b8ac4e62ba43 | dab869acd10a3dc76e2a924e24b6a4dffe0a875f | /Laban/build/bdist.win32/winexe/temp/numpy.core.operand_flag_tests.py | abe53bfc427cda30a4fdef6d870c6ffe58b6c013 | [] | no_license | ranBernstein/Laban | d82aff9b0483dd007e03a06e51f7d635f62ed05d | 54c88afa9493deacbdd182904cc5d180ecb208b4 | refs/heads/master | 2021-01-23T13:17:51.777880 | 2017-02-14T09:02:54 | 2017-02-14T09:02:54 | 25,508,010 | 3 | 1 | null | 2017-02-14T09:02:55 | 2014-10-21T07:16:01 | Tcl | UTF-8 | Python | false | false | 379 | py |
def __load():
    # py2exe bootstrap stub: locate the compiled numpy extension shipped
    # next to the frozen executable and load it in place of this module.
    import imp, os, sys
    try:
        # Running from a py2exe archive: the .pyd sits beside the archive.
        dirname = os.path.dirname(__loader__.archive)
    except NameError:
        # Not frozen: fall back to the interpreter prefix.
        dirname = sys.prefix
    path = os.path.join(dirname, 'numpy.core.operand_flag_tests.pyd')
    #print "py2exe extension module", __name__, "->", path
    mod = imp.load_dynamic(__name__, path)  # replaces this module's namespace
## mod.frozen = 1
__load()
del __load
| [
"bernstein.ran@gmail.com"
] | bernstein.ran@gmail.com |
# Dataset-metadata residue was fused onto this source line in the dump; kept
# below as a comment so the module parses:
# 187bd878dceba2676d7199c3b1f337090f34e18f | 883977455c283fad7786f828fa7267da0a03bedd | src/helpers/errorHandler.py | 7b9d21d22a5997bf8bac2b15aa6179b581c29c38 | [] | no_license | DavEst91/IH-W6-Chat-Sentiment-Analysis-service | 3c8f143a2a2f0aa876bec2e3ae16cdf5b5f42205 | 77c14c3acb2d8be865550730da38a640c869f06c | refs/heads/master | 2022-05-26T12:42:53.088457 | 2020-05-04T09:21:24 | 2020-05-04T09:21:24 | 260,218,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py |
class APIError(Exception):
    """Generic service failure; mapped to an HTTP 500 by errorHandler."""
    statusCode = 500  # HTTP status returned to the client

class Error404(Exception):
    """Resource not found; mapped to an HTTP 404 by errorHandler."""
    statusCode = 404
def errorHandler(fn):
    """Decorator for route handlers: on any exception, return a JSON-style
    error payload plus an HTTP status code instead of propagating.

    The status comes from the exception's ``statusCode`` attribute
    (APIError -> 500, Error404 -> 404); exceptions without one map to 500.
    """
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            print(e)
            # Fix: plain exceptions (ValueError, KeyError, ...) have no
            # .statusCode attribute; the old `e.statusCode` access raised
            # AttributeError inside the handler itself.
            status = getattr(e, "statusCode", 500)
            return {
                "status": "error",
                "message": str(e)
            }, status
    # Preserve the wrapped function's name so Flask endpoint names stay unique.
    wrapper.__name__ = fn.__name__
    return wrapper
"davidestebanmendoza@gmail.com"
] | davidestebanmendoza@gmail.com |
6c1293f5580f3019be940ec4e5b0a47a4f2148e7 | 3581255131ff134fe77a11703b417623d6de1086 | /Undo_Redo-Stack.py | 065cfc0d192ebd8d14ad9fdb039fd1401dd8e37e | [] | no_license | hemant110800/Python-Data-structure-Applications-implementation- | 3452acb474ae80e35b8b7e89849978fb2f1f74fb | 59715ee599bcaffbd4adff2e3eff8e9eb3fabc23 | refs/heads/master | 2022-04-15T11:45:04.138165 | 2020-04-11T10:40:38 | 2020-04-11T10:40:38 | 254,845,052 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,478 | py | class Stack:
    def __init__(self,max_size):
        # Fixed-capacity stack backed by a pre-allocated list.
        self.__max_size=max_size  # capacity
        self.__elements=[None]*self.__max_size  # backing storage
        self.__top=-1  # index of the top element; -1 means empty
def is_full(self):
if(self.__top==self.__max_size-1):
return True
return False
def is_empty(self):
if(self.__top==-1):
return True
return False
def push(self,data):
if(self.is_full()):
print("The stack is full!!")
else:
self.__top+=1
self.__elements[self.__top]=data
def pop(self):
if(self.is_empty()):
print("The stack is empty!!")
else:
data= self.__elements[self.__top]
self.__top-=1
return data
def display(self):
if(self.is_empty()):
print("The stack is empty")
else:
index=self.__top
while(index>=0):
print(self.__elements[index])
index-=1
    def get_max_size(self):
        # Accessor for the (name-mangled) capacity attribute.
        return self.__max_size
#You can use the below __str__() to print the elements of the DS object while debugging
def __str__(self):
msg=[]
index=self.__top
while(index>=0):
msg.append((str)(self.__elements[index]))
index-=1
msg=" ".join(msg)
msg="Stack data(Top to Bottom): "+msg
return msg
def remove():
    """Cut the last clipboard entry and remember it on the undo stack."""
    global clipboard, undo_stack
    data = clipboard[-1]
    clipboard.remove(data)
    undo_stack.push(data)
    print("Remove:", clipboard)
def undo():
    """Restore the most recently removed entry and stage it for redo."""
    global clipboard, undo_stack, redo_stack
    if undo_stack.is_empty():
        print("There is no data to undo")
        return
    data = undo_stack.pop()
    clipboard.append(data)
    redo_stack.push(data)
    print("Undo:", clipboard)
def redo():
    # Re-remove the entry that the most recent undo restored.
    global clipboard, undo_stack,redo_stack
    if(redo_stack.is_empty()):
        print("There is no data to redo")
    else:
        data=redo_stack.pop()
        if(data not in clipboard):
            # The staged entry is no longer on the clipboard (it was never
            # restored), so there is nothing to redo; push it back so the
            # redo history is preserved.
            print("There is no data to redo")
            redo_stack.push(data)
        else:
            clipboard.remove(data)
            undo_stack.push(data)  # a redo can itself be undone again
            print("Redo:",clipboard)
# Demo: seed the clipboard, then exercise remove -> undo -> redo once each.
clipboard=["A","B","C","D","E","F"]
undo_stack=Stack(len(clipboard))  # holds removed entries
redo_stack=Stack(len(clipboard))  # holds undone entries
remove()
undo()
redo()
"noreply@github.com"
] | noreply@github.com |
b417569a701914b13050a450dfb4e9d8d98231f5 | 59cdb8b3995ee5938dc4710e32f29ac273410265 | /firing_analyses/unit_response_characterization/gather_psths.py | a170cd2c1a9f45a1da9d668f911611f166dfb215 | [] | no_license | abuzarmahmood/firing_space_plot | 15ff667fada8f4e985a6a6c6f31261b72b0f4b60 | 9fe925d9b443fda96d8e23d6d2d2d2aa60b08f15 | refs/heads/master | 2023-07-25T01:39:31.942434 | 2023-07-15T14:24:38 | 2023-07-15T14:24:38 | 139,602,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,390 | py | """
Go through all specified files and generate PSTHs
for GC and BLA neurons to save in a consolidated location
For each neuron, also calculate discriminability and palatability correlation
"""
########################################
# ___ _
#|_ _|_ __ ___ _ __ ___ _ __| |_
# | || '_ ` _ \| '_ \ / _ \| '__| __|
# | || | | | | | |_) | (_) | | | |_
#|___|_| |_| |_| .__/ \___/|_| \__|
# |_|
########################################
import os
import sys
import pylab as plt
import numpy as np
import argparse
from glob import glob
import json
import pandas as pd
import pingouin as pg
from joblib import Parallel, delayed, cpu_count
from tqdm import tqdm,trange
from scipy.stats import spearmanr, pearsonr
sys.path.append('/media/bigdata/firing_space_plot/ephys_data')
from ephys_data import ephys_data
import visualize
def parallelize(func, iterator):
    """Apply func to every item of iterator on all-but-two CPU cores,
    showing a tqdm progress bar; returns the list of results."""
    jobs = (delayed(func)(item) for item in tqdm(iterator))
    return Parallel(n_jobs=cpu_count() - 2)(jobs)
############################################################
# _ _ ____ _
#| | ___ __ _ __| | | _ \ __ _| |_ __ _
#| | / _ \ / _` |/ _` | | | | |/ _` | __/ _` |
#| |__| (_) | (_| | (_| | | |_| | (_| | || (_| |
#|_____\___/ \__,_|\__,_| |____/ \__,_|\__\__,_|
############################################################
# Input list of HDF5 files and the output directory for the overlay PSTHs;
# one sub-directory per brain region is created up front.
file_list_path = '/media/bigdata/Abuzar_Data/hdf5_file_list.txt'
plot_save_dir = '/media/bigdata/Abuzar_Data/all_overlay_psths'
if not os.path.exists(plot_save_dir):
    os.makedirs(plot_save_dir)
region_name_list = ['gc','bla']
region_plot_dirs = [os.path.join(plot_save_dir,this_name) \
        for this_name in region_name_list]
for this_plot_dir in region_plot_dirs:
    if not os.path.exists(this_plot_dir):
        os.makedirs(this_plot_dir)
def get_plot_dir(region_name):
    """Return the plot directory registered for region_name ('gc' or 'bla')."""
    matches = [plot_dir
               for name, plot_dir in zip(region_name_list, region_plot_dirs)
               if name == region_name]
    return matches[0]
counter_list = [0, 0]  # running unit index per region, parallel to region_name_list

def add_to_counter(region_name):
    """Return the current unit index for region_name, then advance it by one."""
    matches = [num for num, this_name in enumerate(region_name_list)
               if region_name == this_name]
    ind = matches[0]
    current_count = counter_list[ind]
    counter_list[ind] += 1
    return current_count
#parser = argparse.ArgumentParser(description = 'Script to fit changepoint model')
#parser.add_argument('dir_name', help = 'Directory containing data files')
#parser.add_argument('states', type = int, help = 'Number of States to fit')
#args = parser.parse_args()
#data_dir = args.dir_name
# Taste names mapped to palatability ranks (higher = more palatable).
taste_names = ['nacl', 'suc', 'ca', 'qhcl']
pal_map = dict(zip(taste_names, [3,4,2,1]))
with open(file_list_path,'r') as this_file:
    file_list = this_file.read().splitlines()
dir_list = [os.path.dirname(x) for x in file_list]
#dir_list = [x for x in dir_list if 'bla_gc'in x]
# Restrict processing to these two recording sessions.
wanted_sessions = ['AM34_4Tastes_201215', 'AM37_4Tastes_210112']
dir_list = [[x for x in dir_list if y in x] for y in wanted_sessions]
dir_list = [x for y in dir_list for x in y]  # flatten the per-session lists
#For each file, calculate baks firing, split by region
# and save PSTH in a folder with file name and
# unit details
alpha = 0.05  # significance threshold for ANOVA / correlation p-values
#black_list = [
#    '/media/storage/gc_only/AS18/AS18_4Tastes_200228_151511_copy/AS18_4Tastes_200228_151511'
#    ]
#dir_list = [x for x in dir_list if x not in black_list]
#dir_list = [x for x in dir_list if 'AM34' in x]
#for data_dir in dir_list:
# Per-session processing: load spikes/firing rates, compute per-bin taste
# discriminability (ANOVA) and palatability correlation per neuron, dump both
# as JSON, then save an overlay PSTH figure per unit.
for ind in trange(len(dir_list)):
#for ind in trange(53, len(dir_list)):
    data_dir = dir_list[ind]
    #data_dir = os.path.dirname(file_list[0])
    #data_dir = '/media/bigdata/Abuzar_Data/AM28/AM28_2Tastes_201005_134840'
    data_basename = os.path.basename(data_dir)
    # Look for info file
    # If absent, skip this file because we won't know tastant names
    info_file_path = glob(os.path.join(data_dir,"*.info"))
    if len(info_file_path) == 0:
        continue
    with open(info_file_path[0], 'r') as params_file:
        info_dict = json.load(params_file)
    taste_names = info_dict['taste_params']['tastes']
    taste_pals = np.array([pal_map[x] for x in taste_names])
    dat = ephys_data(data_dir)
    # Try to get spikes, if can't, skip file
    # NOTE(review): bare except hides every error type (even KeyboardInterrupt);
    # consider `except Exception`.
    try:
        dat.get_spikes()
    except:
        continue
    # Only process sessions with the expected 7000-sample spike trains.
    if not dat.spikes[0].shape[-1]==7000:
        continue
    dat.firing_rate_params = dat.default_firing_params
    dat.firing_rate_params['type'] = 'conv'
    dat.get_unit_descriptors()
    dat.get_region_units()
    dat.get_firing_rates()
    # Map unit index -> region name.
    unit_region_map = [{x:region_name for x in this_region} \
            for this_region,region_name \
            in zip(dat.region_units, dat.region_names)]
    fin_unit_map = {}
    for x in unit_region_map:
        fin_unit_map.update(x)
    # For each neuron, calculate disciriminability per bin
    inds = np.array(list(np.ndindex(dat.firing_array.shape)))
    firing_frame = pd.DataFrame(
            dict(
                taste = inds[:,0],
                neurons = inds[:,1],
                trials = inds[:,2],
                bins = inds[:,3],
                firing = dat.firing_array.flatten()
                )
            )
    # One-way ANOVA across tastes for every (neuron, bin) group.
    group_keys = ['neurons','bins']
    grouped_frame = list(firing_frame.groupby(group_keys))
    group_tuples = [x[0] for x in grouped_frame]
    group_tuple_dicts = [dict(zip(group_keys, x)) for x in group_tuples]
    group_dat = [x[1] for x in grouped_frame]
    anova_lambda = lambda x : \
            pg.anova(data=x, dv = 'firing', between = 'taste')['p-unc'].values[0]
    p_vals = parallelize(anova_lambda, group_dat)
    # It seems like sometimes the anova conks out
    # Replace any strings with int(1)
    # NOTE(review): np.float is deprecated/removed in newer numpy versions --
    # confirm the pinned numpy version.
    p_vals = [x if isinstance(x, np.float) else 1 for x in p_vals]
    discrim_frame = pd.DataFrame(group_tuple_dicts)
    discrim_frame['discrim_p_vals'] = p_vals
    discrim_frame['discrim_bool'] = (discrim_frame['discrim_p_vals'] < alpha )*1
    # Conservative criterion, significance has to persist for 75ms otherwise toss
    # This is from 3 consecutive windows of firing rate with 25ms steps
    kern_len = 4
    box_kern = np.ones(kern_len)/kern_len
    discrim_frame['discrim_bool_cons'] = \
            np.convolve(discrim_frame['discrim_bool'], box_kern, mode = 'same') == 1
    discrim_frame['discrim_bool_cons'] *= 1
    discrim_frame['p_vals_conv'] = \
            np.convolve(discrim_frame['discrim_p_vals'], box_kern, mode = 'same')
    # Also calculate palatability correlation for sinle neurons
    taste_pal_broad = np.expand_dims(taste_pals, (1,2,3))
    taste_pal_broad = np.broadcast_to(taste_pal_broad,
            dat.firing_array.shape)
    firing_array = dat.firing_array.copy()
    #firing_array = np.moveaxis(firing_array, 1,2)
    #firing_array = np.reshape(firing_array, (-1, *firing_array.shape[2:]))
    #taste_pal_broad = np.moveaxis(taste_pal_broad, 1,2)
    #taste_pal_broad = np.reshape(taste_pal_broad, (-1, *taste_pal_broad.shape[2:]))
    #firing_array = firing_array.T
    #taste_pal_broad = taste_pal_broad.T
    # Pearson correlation between firing and palatability rank for every
    # (neuron, bin), pooling tastes x trials.
    iter_inds = list(np.ndindex((
        firing_array.shape[1],
        firing_array.shape[-1])))
    corr_lambda = lambda inds: \
            pearsonr( firing_array[:,inds[0],:,inds[1]].flatten(),
                    taste_pal_broad[:,inds[0],:,inds[1]].flatten()
                    )
    corr_outs = parallelize(corr_lambda, iter_inds)
    corr_pvals = [x[1] for x in corr_outs]
    corr_rhos = [np.abs(x[0]) for x in corr_outs]
    iter_array = np.array(iter_inds)
    corr_frame = pd.DataFrame(
            dict(
                neurons = iter_array[:,0],
                bins = iter_array[:,1],
                corr_pvals = corr_pvals,
                corr_rhos = corr_rhos
                )
            )
    corr_frame['pvals_cons'] = \
            np.convolve(corr_frame['corr_pvals'], box_kern, mode = 'same')
    corr_frame['sig_bool'] = corr_frame['pvals_cons'] <= alpha
    #corr_array = corr_frame.pivot(
    #        index = 'neurons',
    #        columns = 'bins',
    #        values = 'corr_pvals').to_numpy()
    #fig,ax = plt.subplots()
    #ax.imshow(corr_array < alpha,
    #        interpolation = 'nearest', aspect='auto')
    #fig.savefig(
    #        os.path.join(plot_save_dir, f'{data_basename}_corr.png'),
    #        dpi = 300)
    #plt.close(fig)
    ############################################################
    #fin_pval_frame = discrim_frame.join(corr_frame,
    #        lsuffix = 'x', rsuffix = 'y')
    #fin_pval_frame.drop(columns = ['binsy','neuronsy'], inplace=True)
    #fin_pval_frame.rename(columns = dict(neuronsx = 'neurons',
    #        binsx = 'bins'), inplace=True)
    #fin_pval_frame['region'] = [fin_unit_map[x] for x in \
    #        fin_pval_frame.neurons.values]
    #fin_pval_frame.to_json(
    #        os.path.join(plot_save_dir, f'{data_basename}_unit_pvals.json')
    #        )
    #fin_pval_frame['time'] = (fin_pval_frame.bins * bin_width)-stim_t
    ############################################################
    ############################################################
    # Convert bin indices to time relative to stimulus delivery (ms).
    stim_t = 2000
    time_lims = [1000,5000]
    time_vec = np.arange(dat.spikes[0].shape[-1])-stim_t
    time_vec = time_vec[time_lims[0]:time_lims[1]]
    if dat.firing_rate_params['type'] == 'baks':
        bin_width = int(dat.firing_rate_params['baks_resolution']/\
                dat.firing_rate_params['baks_dt'] )
    else:
        bin_width = int(dat.firing_rate_params['step_size'])
    baks_time_vec = time_vec[::bin_width]
    #fin_pval_frame = fin_pval_frame[fin_pval_frame.time.isin(baks_time_vec)]
    corr_frame['time'] = (corr_frame.bins * bin_width)-stim_t
    discrim_frame['time'] = (discrim_frame.bins * bin_width)-stim_t
    discrim_frame = discrim_frame[discrim_frame.time.isin(baks_time_vec)]
    corr_frame = corr_frame[corr_frame.time.isin(baks_time_vec)]
    # Add region name
    corr_frame['region'] = [fin_unit_map[x] for x in \
            corr_frame.neurons.values]
    discrim_frame['region'] = [fin_unit_map[x] for x in \
            discrim_frame.neurons.values]
    discrim_frame.to_json(
            os.path.join(plot_save_dir, f'{data_basename}_discrim_frame.json')
            )
    corr_frame.to_json(
            os.path.join(plot_save_dir, f'{data_basename}_corr_frame.json')
            )
    # Trial-averaged firing within the analysis window, for the PSTH overlay.
    mean_firing = np.mean(dat.firing_array,axis=2)
    mean_firing = mean_firing[...,time_lims[0]//bin_width:time_lims[1]//bin_width]
    # One 3-panel figure per unit: PSTH overlay, discriminability, correlation.
    for this_region_name, this_unit_list in zip(dat.region_names,dat.region_units):
        for unit_num in this_unit_list:
            #unit_frame = fin_pval_frame[fin_pval_frame.neurons.isin([unit_num])]
            unit_corr_frame = \
                    corr_frame[corr_frame.neurons.isin([unit_num])]
            unit_sig_corr = unit_corr_frame[unit_corr_frame.corr_pvals < alpha]
            unit_discrim_frame = \
                    discrim_frame[discrim_frame.neurons.isin([unit_num])]
            unit_discrim_frame['bool'] = \
                    1*(unit_discrim_frame.discrim_p_vals < alpha)
            #fig,ax = plt.subplots(3,1, sharex=True)
            fig = plt.figure()
            ax = []
            ax.append(fig.add_subplot(2,1,1))
            ax.append(fig.add_subplot(4,1,3))#, sharex = ax[0]))
            ax.append(fig.add_subplot(4,1,4))#, sharex = ax[0]))
            # Restrict all panels to -500..1500 ms around stimulus delivery.
            xlims = [-500, 1500]
            xinds = np.logical_and(baks_time_vec >= xlims[0],
                    baks_time_vec <= xlims[1])
            fin_time_vec = baks_time_vec[xinds]
            unit_discrim_frame = unit_discrim_frame[unit_discrim_frame.time.isin(fin_time_vec)]
            unit_corr_frame = unit_corr_frame[unit_corr_frame.time.isin(fin_time_vec)]
            unit_sig_corr = unit_sig_corr[unit_sig_corr.time.isin(fin_time_vec)]
            for taste_num,this_taste in enumerate(mean_firing[:,unit_num]):
                ax[0].plot(fin_time_vec,
                        this_taste[xinds], label = taste_names[taste_num],
                        linewidth = 2)
            #ax[0].legend()
            fig.suptitle(os.path.basename(dat.data_dir) + \
                    f'\nUnit {unit_num}, '\
                    f'Electrode {dat.unit_descriptors[unit_num][0]}')#, '\
                    #f'Region : {this_region_name}')
            ax[-1].set_xlabel('Time post-stimulus delivery (ms)')
            ax[0].set_ylabel('Firing Rate (Hz)')
            #ax[0].set_xlim([-500, 1500])
            #ax[1].set_xlim([-500, 1500])
            #ax[2].set_xlim([-500, 1500])
            cmap = plt.get_cmap('binary')
            #ax[1].plot(unit_discrim_frame.time, unit_discrim_frame['bool'])
            ax[1].plot(unit_discrim_frame.time,
                    unit_discrim_frame['discrim_bool_cons'],
                    color = cmap(0.5))
            ax[1].fill_between(
                    x = unit_discrim_frame.time,
                    y1 = unit_discrim_frame['discrim_bool_cons'],
                    y2 = 0,
                    alpha = 0.7,
                    color = cmap(0.5))
            #ax[1].plot(unit_discrim_frame.time,
            #        np.log10(unit_discrim_frame['p_vals_conv']))
            #ax[1].axhline(np.log10(0.05))
            ax[1].set_ylabel('Discrim sig')
            ax[2].plot(unit_corr_frame.time, unit_corr_frame.corr_rhos,
                    color = cmap(0.7))
            ax[2].fill_between(
                    x = unit_corr_frame.time,
                    y1 = unit_corr_frame['corr_rhos'],
                    #where = unit_corr_frame['corr_pvals'] <= 0.05,
                    where = unit_corr_frame['sig_bool'],
                    y2 = 0,
                    alpha = 0.7,
                    color = cmap(0.7))
            #ax[2].plot(unit_sig_corr.time, unit_sig_corr.corr_rhos, 'x')
            ax[2].set_ylabel('Pal Corr sig')
            #ax[0].tick_params(axis='x', which = 'both', bottom = False)
            ax[0].set_xticklabels([])
            ax[1].set_xticklabels([])
            #plt.show()
            fig.savefig(os.path.join(get_plot_dir(this_region_name),
                f'{data_basename}_unit{add_to_counter(this_region_name)}' + '.svg'))
            plt.close(fig)
| [
"abuzarmahmood@gmail.com"
] | abuzarmahmood@gmail.com |
a7556063e49aff2dda7e2b3cc964e43037048d34 | 6cb1d8f1416af7b7c5c83ab35cb6928ea9955aff | /ch07/rnnlm_gen.py | a30f1107227f403e0e15f919e3f9b09e39193409 | [] | no_license | lee-saint/practice-nlp | f68ccc3140f725f3edcd7048c324b847583b7f20 | 19003fcd5f55f4f110417a3950a32bb5fba1850c | refs/heads/master | 2020-12-01T20:05:15.014495 | 2020-01-21T09:22:18 | 2020-01-21T09:22:18 | 230,750,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | import numpy as np
from common.functions import softmax
from ch06.RNNLM import Rnnlm
from ch06.better_rnnlm import BetterRnnlm
from dataset import ptb
class RnnlmGen(Rnnlm):
    """Rnnlm extended with autoregressive text sampling."""

    def generate(self, start_id, skip_ids=None, sample_size=100):
        """Sample word ids from the language model until sample_size ids
        have been collected, starting from start_id.

        Ids listed in skip_ids (e.g. <unk>, N) are rejected and resampled.
        """
        word_ids = [start_id]
        x = start_id
        while len(word_ids) < sample_size:
            x = np.array(x).reshape(1, 1)  # model expects a (1, 1) batch
            score = self.predict(x)
            p = softmax(score.flatten())
            sampled = np.random.choice(len(p), size=1, p=p)
            accept = skip_ids is None or sampled not in skip_ids
            if accept:
                x = sampled
                word_ids.append(int(x))
        return word_ids
class BetterRnnlmGen(BetterRnnlm):
    """BetterRnnlm extended with autoregressive text sampling."""

    def generate(self, start_id, skip_ids=None, sample_size=100):
        """Sample word ids from the language model until sample_size ids
        have been collected, starting from start_id.

        Ids listed in skip_ids (e.g. <unk>, N) are rejected and resampled.
        """
        word_ids = [start_id]
        x = start_id
        while len(word_ids) < sample_size:
            x = np.array(x).reshape(1, 1)  # model expects a (1, 1) batch
            score = self.predict(x).flatten()
            p = softmax(score).flatten()
            sampled = np.random.choice(len(p), size=1, p=p)
            accept = skip_ids is None or sampled not in skip_ids
            if accept:
                x = sampled
                word_ids.append(int(x))
        return word_ids
if __name__ == '__main__':
    # Load the PTB training corpus and its word <-> id mappings.
    corpus, word_to_id, id_to_word = ptb.load_data('train')
    vocab_size = len(word_to_id)
    corpus_size = len(corpus)
    model = RnnlmGen()
    model.load_params('../ch06/Rnnlm.pkl')
    # Set the start word and the words to skip during sampling.
    start_word = 'you'
    start_id = word_to_id[start_word]
    skip_words = ['N', '<unk>', '$']
    skip_ids = [word_to_id[w] for w in skip_words]
    # Generate a sentence.
    word_ids = model.generate(start_id, skip_ids)
    txt = ' '.join([id_to_word[i] for i in word_ids])
    txt = txt.replace(' <eos>', '.\n')
    print(txt)
    better_model = BetterRnnlmGen()
    better_model.load_params('../ch06/BetterRnnlm.pkl')
    # Set the start word and the words to skip during sampling.
    start_word = 'you'
    start_id = word_to_id[start_word]
    skip_words = ['N', '<unk>', '$']
    skip_ids = [word_to_id[w] for w in skip_words]
    # Generate a sentence.
    word_ids = better_model.generate(start_id, skip_ids)
    txt = ' '.join([id_to_word[i] for i in word_ids])
    txt = txt.replace(' <eos>', '.\n')
    print(txt)
    better_model.reset_state()
    model.reset_state()
    # Condition the model on a prompt, then let it continue the sentence.
    start_words = 'the meaning of life is'
    start_ids = [word_to_id[w] for w in start_words.split(' ')]
    for x in start_ids[:-1]:
        x = np.array(x).reshape(1, 1)
        model.predict(x)  # feed the prompt through to build up hidden state
    word_ids = model.generate(start_ids[-1], skip_ids)
    word_ids = start_ids[:-1] + word_ids
    txt = ' '.join([id_to_word[i] for i in word_ids])
    txt = txt.replace(' <eos>', '.\n')
    print('-' * 50)
    print(txt)
| [
"plutorian131@gmail.com"
] | plutorian131@gmail.com |
e2a87d9fe95c8083628a16c93827ac0004484eeb | 1aa6d82ed86b45ed6d890df806ba2d4856b5799c | /create_lambda_function.py | 69d3dc1ae37d31f052ab91d0b5803cebc2188c08 | [] | no_license | SCEDC/pds-lambda-example | e7dd959af27948003e2c3ca7da07b6581601df03 | e6b83e7fbb7194aa4de37919676c658d1e91d641 | refs/heads/master | 2020-07-21T13:08:32.420625 | 2019-12-20T23:50:57 | 2019-12-20T23:50:57 | 206,875,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | # In this example, we have a file named `credentials` in a `.aws` directory.
# e.g.:
# $ cat ~/.aws/credentials
# [lambda-invoker]
# aws_access_key_id = EXAMPLEACCESSKEY
# aws_secret_access_key = EXAMPLESECRETACCESSKEY
import boto3
# Auth to create a Lambda function (credentials are picked up from above .aws/credentials)
# NOTE(review): the comment above describes a 'lambda-invoker' profile but the
# code uses 'schen-gps' -- confirm which profile is intended.
session = boto3.Session(profile_name='schen-gps')
# Make sure Lambda is running in the same region as the HST public dataset
# NOTE(review): AWS_REGION, LAMBDA_FUNCTION, IAM_ROLE, LAMBDA_BUCKET and
# LAMBDA_DESCRIPTION are never defined in this script; running it as-is
# raises NameError. Presumably they are meant to be filled in or imported
# from a config module -- verify before use.
client = session.client('lambda', region_name=AWS_REGION)
# Use boto to create a Lambda function.
# Role is created here: https://console.aws.amazon.com/iam/home?region=us-east-1#/home
# The Role needs to have the AWSLambdaFullAccess permission policies attached
# 'your-s3-bucket' is the S3 bucket you've uploaded the `venv.zip` file to
response = client.create_function(
    FunctionName=LAMBDA_FUNCTION,
    Runtime='python3.7',
    Role=IAM_ROLE, # <- Update this with your IAM role name
    Handler='process.handler',
    Code={
        'S3Bucket': LAMBDA_BUCKET, # <- this is the bucket which holds your venv.zip file
        'S3Key': 'venv.zip'
    },
    Description=LAMBDA_DESCRIPTION,
    Timeout=300,
    MemorySize=1024,
    Publish=True
)
| [
"schen@gps.caltech.edu"
] | schen@gps.caltech.edu |
cde96ba8bed0f8a27d9a27fc09c79f90b37b0093 | 4781d9293b59a5072647bb179195b143c60621bd | /백준/3190_뱀/3190_뱀.py | 466985fd6c7408c5d7d548c56da8c4c1f93da5da | [] | no_license | chriskwon96/Algorithm_codes | bf98131f66ca9c091fe63db68b220527800069c9 | edb7b803370e87493dad4a38ee858bb7bb3fd31d | refs/heads/master | 2023-08-15T18:48:26.809864 | 2021-10-12T13:43:21 | 2021-10-12T13:43:21 | 387,803,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | di = [0, -1, 0, +1]
dj = [+1, 0, -1, 0]
N = int(input())
matrix = [[0]*N for _ in range(N)]
K = int(input())
for _ in range(K): #사과위치 1로 지정
i, j = map(int, input().split())
matrix[i-1][j-1] = 1
L = int(input())
q = [(0,0)] #뱀 몸
X1, k, cnt = 0, 0, 0
flag = 1
for _ in range(L):
X, C = input().split()
for _ in range(int(X)-X1):
head = q[0]
cnt += 1
n_x, n_y = head[0]+di[k], head[1]+dj[k]
if 0<=n_x<N and 0<=n_y<N and ((n_x, n_y) not in q): #다음칸이 판 안에 있고 내 몸이 아니라면
q.insert(0, (n_x, n_y)) #머리좌표 q에 삽입
if matrix[n_x][n_y]: #사과라면
matrix[n_x][n_y] = 0 #사과 지워주기
else:
q.pop() #사과가 아니면 꼬리 줄여주기
else: # 게임이 끝나면
print(cnt)
flag = 0
break
if not flag:
break
X1 = int(X)
if C == 'L':
k = (k+1)%4
else:
k = (k-1)%4
if flag: #인풋을 다 받아도 끝나지 않았다면
head = q[0]
n_x, n_y = head[0]+di[k], head[1]+dj[k]
while 0<=n_x<N and 0<=n_y<N and ((n_x, n_y) not in q):
cnt += 1
q.insert(0, (n_x, n_y))
if matrix[n_x][n_y]: #사과라면
matrix[n_x][n_y] = 0 #사과 지워주기
else:
q.pop() #사과가 아니면 꼬리 줄여주기
n_x, n_y = n_x + di[k], n_y + dj[k]
print(cnt+1)
| [
"chriskwon96@naver.com"
] | chriskwon96@naver.com |
2229b47fc55272836e95e04921e5b28ecd842fee | 793bf54af217556ec77a706fd018e74f8d642f71 | /Recurrence_relation.py | 66aedef715ad021f9df95280438399ff073b64bc | [] | no_license | EmmaTrann/Recurrence-Relations | bf28814f62aee8c3c88a2d1c6dbd39954703c915 | 456e42d748834cb73d6870e29581d4ac4d5d1b2f | refs/heads/main | 2023-05-31T19:41:16.971340 | 2021-06-10T09:14:49 | 2021-06-10T09:14:49 | 373,629,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,128 | py | import numpy as np
import math
import matplotlib.pyplot as plt
#evaluateRecurrence
def evaluateRecurrence(myfunction, nvec):
    """Evaluate the recurrence callable myfunction at nvec and return the result."""
    return myfunction(nvec)
#Merge sort
def mergecost(n):
    """Merge-sort cost recurrence: T(n) = 2*T(n/2) + n + 1, with T(n) = 0
    for n < 2 (base case)."""
    if n < 2:
        return 0
    return 1 + 2 * mergecost(n / 2) + n
#Tower of Hanoi
def Hanoicost(n):
    """Tower-of-Hanoi move count: T(n) = 2*T(n-1) + 1, T(n) = 0 for n < 1
    (closed form: 2**n - 1)."""
    return 0 if n < 1 else 2 * Hanoicost(n - 1) + 1
#T3
def T3cost(n):
    """Recurrence T(n) = 5*T(n/3) + n with T(n) = 0 for n < 3
    (grows as Theta(n**log3(5)) ~ n**1.465)."""
    if n < 3:
        return 0
    return 5 * T3cost(n / 3) + n
#factorial
def factorialcost(n):
    """Recursive factorial: n! with the convention that n! = 1 for n < 1."""
    return 1 if n < 1 else n * factorialcost(n - 1)
def main():
    # Evaluate each recurrence numerically for n = 1..arr_length-1, compare
    # against a scaled closed-form (analytical) estimate, and plot each pair.
    arr_length = int(input("Enter size n for the problem: "))
    nvec = np.zeros((arr_length,1))
    for n in range(1,arr_length):
        nvec[n] = n
    #mergesort - numerical
    merge_num = np.zeros((arr_length,1))
    for n in range(1, arr_length):
        merge_num[n] = evaluateRecurrence(mergecost, nvec[n])
    #mergesort - analytical
    merge_analytic = np.zeros((arr_length,1))
    for n in range(1, arr_length):
        merge_analytic[n] = n*math.log(n)*1.5  # ~ c * n log n
    #mergesort plot
    plt.figure()
    plt.title("Merge Sort Plot")
    plt.plot(nvec, merge_num, label = "Merge Sort Numerical")
    plt.plot(nvec,merge_analytic, label = "Merge Sort Analytical")
    plt.legend()
    #Tower of Hanoi - numerical
    Hanoi_num = np.zeros((arr_length,1))
    for n in range(1, arr_length):
        Hanoi_num[n] = evaluateRecurrence(Hanoicost, nvec[n])
    #Tower of Hanoi - analytical
    Hanoi_analytic = np.zeros((arr_length,1))
    for n in range(1, arr_length):
        Hanoi_analytic[n] = (2**n) * 1.2  # ~ c * 2^n
    #Tower of Hanoi plot
    plt.figure()
    plt.title("Tower of Hanoi")
    plt.plot(nvec, Hanoi_num, label = "Tower of Hanoi Numerical")
    plt.plot(nvec,Hanoi_analytic, label = "Tower of Hanoi Analytical")
    plt.legend()
    #T3 - numerical
    T3_num = np.zeros((arr_length,1))
    for n in range(1, arr_length):
        T3_num[n] = evaluateRecurrence(T3cost, nvec[n])
    #T3 - analytical
    T3_analytic = np.zeros((arr_length,1))
    for n in range(1, arr_length):
        T3_analytic[n] = (n**1.46497)*1.2  # ~ c * n^(log3 5)
    #T3 plot
    plt.figure()
    plt.title("Problem T3(n)")
    plt.plot(nvec, T3_num, label = "T3(n) Numerical")
    plt.plot(nvec,T3_analytic, label = "T3(n) Analytical")
    plt.legend()
    #factorial - numerical
    fac_num = np.zeros((arr_length,1))
    for n in range(1, arr_length):
        fac_num[n] = evaluateRecurrence(factorialcost, nvec[n])
    #factorial - analytical
    fac_analytic = np.zeros((arr_length,1))
    for n in range(1, arr_length):
        fac_analytic[n] = math.factorial(n) * 1.2  # ~ c * n!
    #factorial plot
    plt.figure()
    plt.title("Factorial")
    plt.plot(nvec, fac_num, label = "Factorial Numerical")
    plt.plot(nvec,fac_analytic, label = "Factorial Analytical")
    plt.legend()
    plt.show()
main()
| [
"noreply@github.com"
] | noreply@github.com |
a25c1018a88e0e7ecb8fa25ef9b010dfaddd4a57 | 5baac75b696f83144df384d405871184b5c29ddb | /Python/MyTestApp/Display_des.py | 0937c6907ac54d67a834623078016c1c3a9ba052 | [] | no_license | xz2275/Chad2 | bc424a85a7b0da9e023d2d03687b7e4fe1758e4c | 42b7b620ba06e7238a2fceddef7c54220145b0bd | refs/heads/master | 2020-05-17T08:27:26.073105 | 2013-05-07T15:50:13 | 2013-05-07T15:50:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | # ===============================
seriesID = 'GDPC1'
des_file = seriesID + '_des.txt'
# ================================
with open(des_file, 'r') as fr_des:
for line in fr_des:
print line
fr_des.closed
| [
"carolz1207@gmail.com"
] | carolz1207@gmail.com |
f01a840f95bb39e07a686de1c1808c42acd6b7d6 | b277ca06cb0c33635e31928a3643c85f67623af4 | /buildenv/lib/python3.5/site-packages/sphinx/ext/graphviz.py | 54659484351c1a12cbe419b1d165e5f7cb9d8724 | [
"LicenseRef-scancode-public-domain",
"CC-BY-4.0"
] | permissive | angrycaptain19/container-camp | a3e5c9b9f130776c842032148fcdba094bc0da8f | b0b14fe30aee310cb3775c1491d5b6304173936b | refs/heads/master | 2023-03-12T18:04:13.700249 | 2021-03-01T23:02:30 | 2021-03-01T23:02:30 | 343,728,529 | 0 | 0 | NOASSERTION | 2021-03-02T10:30:35 | 2021-03-02T10:07:11 | null | UTF-8 | Python | false | false | 13,081 | py | # -*- coding: utf-8 -*-
"""
sphinx.ext.graphviz
~~~~~~~~~~~~~~~~~~~
Allow graphviz-formatted graphs to be included in Sphinx-generated
documents inline.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import codecs
import posixpath
from os import path
from subprocess import Popen, PIPE
from hashlib import sha1
from six import text_type
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
import sphinx
from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.i18n import search_image_for_language
from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
logger = logging.getLogger(__name__)
mapname_re = re.compile(r'<map id="(.*?)"')
class GraphvizError(SphinxError):
    # Raised when invoking the dot executable fails; Sphinx shows errors of
    # this type under the category below.
    category = 'Graphviz error'
class graphviz(nodes.General, nodes.Inline, nodes.Element):
    # Docutils node carrying the dot source in node['code'] plus render
    # options in node['options']; translated by the builder-specific visitors.
    pass
def figure_wrapper(directive, node, caption):
    # type: (Directive, nodes.Node, unicode) -> nodes.figure
    """Wrap *node* in a figure node whose caption is *caption* parsed as reST."""
    figure_node = nodes.figure('', node)
    if 'align' in node:
        # Move the alignment from the graph node to the enclosing figure.
        figure_node['align'] = node.attributes.pop('align')
    # Parse the caption string so inline markup inside it is honored.
    parsed = nodes.Element()
    directive.state.nested_parse(ViewList([caption], source=''),
                                 directive.content_offset, parsed)
    caption_node = nodes.caption(parsed[0].rawsource, '',
                                 *parsed[0].children)
    caption_node.source = parsed[0].source
    caption_node.line = parsed[0].line
    figure_node += caption_node
    return figure_node
def align_spec(argument):
    # type: (Any) -> bool
    # Option validator for :align:; restricts the value to left/center/right.
    return directives.choice(argument, ('left', 'center', 'right'))
class Graphviz(Directive):
    """
    Directive to insert arbitrary dot markup.

    The dot source may be given either inline as the directive content or via
    a filename argument (mutually exclusive).
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = False
    option_spec = {
        'alt': directives.unchanged,
        'align': align_spec,
        'caption': directives.unchanged,
        'graphviz_dot': directives.unchanged,
        'name': directives.unchanged,
    }

    def run(self):
        # type: () -> List[nodes.Node]
        if self.arguments:
            # External file form: content and filename are mutually exclusive.
            document = self.state.document
            if self.content:
                return [document.reporter.warning(
                    __('Graphviz directive cannot have both content and '
                       'a filename argument'), line=self.lineno)]
            env = self.state.document.settings.env
            argument = search_image_for_language(self.arguments[0], env)
            rel_filename, filename = env.relfn2path(argument)
            # Re-read the document when the referenced dot file changes.
            env.note_dependency(rel_filename)
            try:
                with codecs.open(filename, 'r', 'utf-8') as fp:
                    dotcode = fp.read()
            except (IOError, OSError):
                return [document.reporter.warning(
                    __('External Graphviz file %r not found or reading '
                       'it failed') % filename, line=self.lineno)]
        else:
            # Inline form: the directive body is the dot source.
            dotcode = '\n'.join(self.content)
            if not dotcode.strip():
                return [self.state_machine.reporter.warning(
                    __('Ignoring "graphviz" directive without content.'),
                    line=self.lineno)]
        node = graphviz()
        node['code'] = dotcode
        node['options'] = {}
        if 'graphviz_dot' in self.options:
            node['options']['graphviz_dot'] = self.options['graphviz_dot']
        if 'alt' in self.options:
            node['alt'] = self.options['alt']
        if 'align' in self.options:
            node['align'] = self.options['align']
        caption = self.options.get('caption')
        if caption:
            node = figure_wrapper(self, node, caption)
        self.add_name(node)
        return [node]
class GraphvizSimple(Directive):
    """
    Directive to insert arbitrary dot markup.

    Used for the ``graph``/``digraph`` shorthand forms: the directive name and
    its single argument become the graph header, the content the graph body.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'alt': directives.unchanged,
        'align': align_spec,
        'caption': directives.unchanged,
        'graphviz_dot': directives.unchanged,
        'name': directives.unchanged,
    }

    def run(self):
        # type: () -> List[nodes.Node]
        node = graphviz()
        # e.g. "digraph name {\n<content>\n}\n"
        node['code'] = '%s %s {\n%s\n}\n' % \
                       (self.name, self.arguments[0], '\n'.join(self.content))
        node['options'] = {}
        if 'graphviz_dot' in self.options:
            node['options']['graphviz_dot'] = self.options['graphviz_dot']
        if 'alt' in self.options:
            node['alt'] = self.options['alt']
        if 'align' in self.options:
            node['align'] = self.options['align']
        caption = self.options.get('caption')
        if caption:
            node = figure_wrapper(self, node, caption)
        self.add_name(node)
        return [node]
def render_dot(self, code, options, format, prefix='graphviz'):
    # type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
    """Render graphviz code into a PNG or PDF output file.

    Returns ``(relative_filename, output_filename)``; ``(None, None)`` when
    the ``dot`` binary is unavailable (warned once per binary per build).
    Raises GraphvizError when dot fails or produces no output file.
    Output filenames are content-addressed (sha1 of code + options), so an
    existing file means the rendering is already up to date.
    """
    graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
    hashkey = (code + str(options) + str(graphviz_dot) +
               str(self.builder.config.graphviz_dot_args)).encode('utf-8')
    fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), format)
    relfn = posixpath.join(self.builder.imgpath, fname)
    outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)
    # Cache hit: identical code/options were already rendered.
    if path.isfile(outfn):
        return relfn, outfn
    # Already warned that this dot binary cannot be run: fail silently.
    if (hasattr(self.builder, '_graphviz_warned_dot') and
       self.builder._graphviz_warned_dot.get(graphviz_dot)):
        return None, None
    ensuredir(path.dirname(outfn))
    # graphviz expects UTF-8 by default
    if isinstance(code, text_type):
        code = code.encode('utf-8')
    dot_args = [graphviz_dot]
    dot_args.extend(self.builder.config.graphviz_dot_args)
    dot_args.extend(['-T' + format, '-o' + outfn])
    # For PNG also produce a client-side image map for clickable nodes.
    if format == 'png':
        dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
    try:
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
    except OSError as err:
        if err.errno != ENOENT:   # No such file or directory
            raise
        logger.warning(__('dot command %r cannot be run (needed for graphviz '
                          'output), check the graphviz_dot setting'), graphviz_dot)
        # Remember the failure so the warning is emitted only once per build.
        if not hasattr(self.builder, '_graphviz_warned_dot'):
            self.builder._graphviz_warned_dot = {}
        self.builder._graphviz_warned_dot[graphviz_dot] = True
        return None, None
    try:
        # Graphviz may close standard input when an error occurs,
        # resulting in a broken pipe on communicate()
        stdout, stderr = p.communicate(code)
    except (OSError, IOError) as err:
        if err.errno not in (EPIPE, EINVAL):
            raise
        # in this case, read the standard output and standard error streams
        # directly, to get the error message(s)
        stdout, stderr = p.stdout.read(), p.stderr.read()
        p.wait()
    if p.returncode != 0:
        raise GraphvizError(__('dot exited with error:\n[stderr]\n%s\n'
                               '[stdout]\n%s') % (stderr, stdout))
    if not path.isfile(outfn):
        raise GraphvizError(__('dot did not produce an output file:\n[stderr]\n%s\n'
                               '[stdout]\n%s') % (stderr, stdout))
    return relfn, outfn
def render_dot_html(self, node, code, options, prefix='graphviz',
                    imgcls=None, alt=None):
    # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode]  # NOQA
    """Render *code* with dot and append the HTML for it to ``self.body``.

    Output is either an ``<object>`` tag (SVG) or an ``<img>`` tag with an
    optional client-side image map (PNG). On rendering failure the dot
    source is emitted as escaped text instead. Always raises SkipNode so
    the generic visitor machinery does not descend into the node.
    """
    format = self.builder.config.graphviz_output_format
    try:
        if format not in ('png', 'svg'):
            raise GraphvizError(__("graphviz_output_format must be one of 'png', "
                                   "'svg', but is %r") % format)
        fname, outfn = render_dot(self, code, options, format, prefix)
    except GraphvizError as exc:
        logger.warning('dot code %r: ' % code + str(exc))
        raise nodes.SkipNode
    if fname is None:
        # dot unavailable: fall back to showing the escaped source.
        self.body.append(self.encode(code))
    else:
        if alt is None:
            alt = node.get('alt', self.encode(code).strip())
        imgcss = imgcls and 'class="%s"' % imgcls or ''
        if 'align' in node:
            self.body.append('<div align="%s" class="align-%s">' %
                             (node['align'], node['align']))
        if format == 'svg':
            # The <p> inside <object> is the fallback shown when the SVG
            # cannot be displayed.
            svgtag = '''<object data="%s" type="image/svg+xml">
            <p class="warning">%s</p></object>\n''' % (fname, alt)
            self.body.append(svgtag)
        else:
            # PNG path: render_dot also produced a "<outfn>.map" image map.
            with open(outfn + '.map', 'rb') as mapfile:
                imgmap = mapfile.readlines()
            if len(imgmap) == 2:
                # nothing in image map (the lines are <map> and </map>)
                self.body.append('<img src="%s" alt="%s" %s/>\n' %
                                 (fname, alt, imgcss))
            else:
                # has a map: get the name of the map and connect the parts
                mapname = mapname_re.match(imgmap[0].decode('utf-8')).group(1)  # type: ignore
                self.body.append('<img src="%s" alt="%s" usemap="#%s" %s/>\n' %
                                 (fname, alt, mapname, imgcss))
                self.body.extend([item.decode('utf-8') for item in imgmap])
        if 'align' in node:
            self.body.append('</div>\n')
    raise nodes.SkipNode
def html_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """HTML visitor: delegate rendering of the graphviz node."""
    code, options = node['code'], node['options']
    render_dot_html(self, node, code, options)
def render_dot_latex(self, node, code, options, prefix='graphviz'):
    # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
    """Render *code* to PDF and append an ``\\includegraphics`` to the body.

    Block-level (non-inline) graphs get alignment wrappers built from TeX
    fills. Always raises SkipNode.
    """
    try:
        fname, outfn = render_dot(self, code, options, 'pdf', prefix)
    except GraphvizError as exc:
        logger.warning('dot code %r: ' % code + str(exc))
        raise nodes.SkipNode
    is_inline = self.is_inline(node)
    if not is_inline:
        # pre/post are the TeX alignment wrappers around the image.
        pre = ''
        post = ''
        if 'align' in node:
            if node['align'] == 'left':
                pre = '{'
                post = r'\hspace*{\fill}}'
            elif node['align'] == 'right':
                pre = r'{\hspace*{\fill}'
                post = '}'
            elif node['align'] == 'center':
                pre = r'{\hfill'
                post = r'\hspace*{\fill}}'
        self.body.append('\n%s' % pre)
    self.body.append(r'\includegraphics{%s}' % fname)
    if not is_inline:
        self.body.append('%s\n' % post)
    raise nodes.SkipNode
def latex_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """LaTeX visitor: delegate rendering of the graphviz node."""
    code, options = node['code'], node['options']
    render_dot_latex(self, node, code, options)
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
    # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
    """Render *code* to PNG and emit a texinfo ``@image`` reference."""
    try:
        fname, _ = render_dot(self, code, options, 'png', prefix)
    except GraphvizError as exc:
        logger.warning('dot code %r: ' % code + str(exc))
    else:
        if fname is not None:
            # @image wants the basename without the ".png" extension.
            self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4])
    raise nodes.SkipNode
def texinfo_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """Texinfo visitor: delegate rendering of the graphviz node."""
    code, options = node['code'], node['options']
    render_dot_texinfo(self, node, code, options)
def text_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """Text builder cannot show graphs: emit a textual placeholder."""
    if 'alt' in node.attributes:
        placeholder = _('[graph: %s]') % node['alt']
    else:
        placeholder = _('[graph]')
    self.add_text(placeholder)
    raise nodes.SkipNode
def man_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """Man-page builder cannot show graphs: emit a textual placeholder."""
    placeholder = (_('[graph: %s]') % node['alt']
                   if 'alt' in node.attributes else _('[graph]'))
    self.body.append(placeholder)
    raise nodes.SkipNode
def setup(app):
    # type: (Sphinx) -> Dict[unicode, Any]
    """Register the graphviz node, the directives and the config values."""
    app.add_node(graphviz,
                 html=(html_visit_graphviz, None),
                 latex=(latex_visit_graphviz, None),
                 texinfo=(texinfo_visit_graphviz, None),
                 text=(text_visit_graphviz, None),
                 man=(man_visit_graphviz, None))
    app.add_directive('graphviz', Graphviz)
    # "graph" and "digraph" share the simple directive implementation.
    for directive_name in ('graph', 'digraph'):
        app.add_directive(directive_name, GraphvizSimple)
    app.add_config_value('graphviz_dot', 'dot', 'html')
    app.add_config_value('graphviz_dot_args', [], 'html')
    app.add_config_value('graphviz_output_format', 'png', 'html')
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
| [
"mmsprinkle@gmail.com"
] | mmsprinkle@gmail.com |
faa563cb2ff4a1d5e681f1747302a859b54b1a32 | 61a4d618f8b6b50863171fd52776ff6583ee5665 | /house lease/logic/temptest.py | 4c39c578687e988fd627c59e546cff5f7cd39e86 | [] | no_license | Dark-0-forest/house_lease_system | 231ce42678d3fd3620783c798301d5f79ec7f95a | b10fcd89f31deee84014990315d9db36b0aa3c94 | refs/heads/master | 2023-01-11T00:23:30.451356 | 2020-10-24T05:31:06 | 2020-10-24T05:31:06 | 306,812,969 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | # 需求:生成电话号码
# 流程:中国电信号段
# 中国移动号段
# 中国联通号段
# 11位
# 第一位 1
# 第二位 3,4,5,7,8
# 第三位 根据第二位确定
# 后八位随机数字
# 分析需求,先找已知的条件,确定出不变规律和变化规律
import random
# 生成电话号码
def creat_phone():
    """Return a random, well-formed 11-digit Chinese mobile number string.

    Layout: '1' + second digit (one of 3/4/5/7/8) + third digit
    (constrained by the second digit, following carrier number-segment
    rules) + eight unconstrained random digits.
    """
    # Second digit: all mobile segments start with '1' then one of these.
    second = random.choice([3, 4, 5, 7, 8])
    # Third digit depends on the second digit (carrier segment rules):
    # 13x/18x allow any digit, 14x only 5/7/9, 15x anything but 4,
    # 17x anything but 4 and 9.
    third = random.choice({
        3: list(range(10)),
        4: [5, 7, 9],
        5: [i for i in range(10) if i != 4],
        7: [i for i in range(10) if i not in (4, 9)],
        8: list(range(10)),
    }[second])
    # Remaining eight digits are fully random; join avoids repeated
    # string concatenation in a loop.
    suffix = ''.join(str(random.randint(0, 9)) for _ in range(8))
    return "1{}{}{}".format(second, third, suffix)
# 调用
num = input("请输入生成的数量")
for index in range(0, int(num)):
print(creat_phone()) | [
"928774025@qq.com"
] | 928774025@qq.com |
0265fd3051bfb0f15f6c863117bc19a849af6ffb | 174b9087041b42f01098e9526ef813c4a4486dc7 | /time_stopwatch.py | 89094107d69c2947e722bc8212907d9b948faf68 | [] | no_license | Love-Kush-Tak/Automating-boring-stuff-with-python | ffe0512e92df3e225852a8f5a6ce412c4ddc2628 | 08d05888cdbe8f4c66f4ccd1d1f30ef23944861c | refs/heads/master | 2022-12-05T22:30:31.920297 | 2020-08-25T05:15:19 | 2020-08-25T05:15:19 | 290,118,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import time
# Display the program's instructions.
# Fix: the original prompt said 'press wnter' (typo for 'Enter').
print('Press Enter to begin. Afterwards, press Enter to "click" the stopwatch. Press CTRL-C to quit.')
input()
print('Started.')
startTime = time.time()  # wall-clock time when the stopwatch was started
lastTime = startTime     # time of the previous "click"
lapNum = 1
try:
    while True:
        input()  # each Enter press records one lap
        # Round to two decimals purely for display.
        lapTime = round(time.time() - lastTime, 2)
        totalTime = round(time.time() - startTime, 2)
        print('Lap #%s: %s (%s)' % (lapNum, totalTime, lapTime), end='')
        lapNum += 1
        lastTime = time.time()  # reset the last lap time
except KeyboardInterrupt:
    # CTRL-C is the intended way to stop the stopwatch.
    print('\nDone')
| [
"noreply@github.com"
] | noreply@github.com |
88cb5d3304c964483d4bd13dd17272dd10a0dfaf | 036079adc57dd3ae6aea3a4b9f5e495d0f6b58d8 | /assign2.py | 2163325f780a2272d0fa65485b5a603da071c4c1 | [] | no_license | Spongebob19/MVS-repo | 5951c8ab8a9dbfc4b12d49ebe67825563c185ca9 | 7ba7c459ac38a579c2d40aee4cf3ff4060e69a3e | refs/heads/master | 2020-03-19T05:06:16.167390 | 2018-06-03T12:01:42 | 2018-06-03T12:01:42 | 135,900,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | import random
guessesTaken = 0
print('Hello! What is your name?')
myName = input()
# Pick the secret number once, before the guessing loop.
number = random.randint(1, 10)
print('Well ' + myName + ', I am thinking of a number between 1 and 10.')
# The player gets at most 6 guesses.
while guessesTaken < 6:
    print('Take a guess.')
    guess = input()
    guess = int(guess)  # NOTE: non-numeric input raises ValueError
    guessesTaken = guessesTaken + 1
    if guess < number:
        print('Your guess is too low.')
    if guess > number:
        print('Your guess is too high.')
    if guess == number:
        break
if guess == number:
    guessesTaken = str(guessesTaken)
    # Fix: 'You guess' -> 'You guessed' in the winning message.
    print('Nice ! ' + myName + ' ! You guessed my number in ' + guessesTaken + ' guesses!')
if guess != number:
    number = str(number)
    # Fix: 'thonking' -> 'thinking' in the losing message.
    print('Sorry Dude, the number i was thinking of was ' + number + '')
| [
"noreply@github.com"
] | noreply@github.com |
a27f0b2a1f277cfd24ad5fcbc93d846b911ddeab | fd97e31c33ce13a9f643378eed4a6f79858fddf4 | /classisapi/migrations/env.py | 27fedf22b8a5917bffa49994fe467ebe47fbb636 | [] | no_license | LearningData/classisapi | 493a653454883407bc872d94117f1af8f8a13bcc | 07cf6ccf7143dd488ea0c4080f9834ac49c0c6b2 | refs/heads/master | 2021-03-19T11:51:44.357876 | 2016-02-23T15:59:14 | 2016-02-23T15:59:14 | 50,593,452 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
from classisapi.admin import Base
# Point Alembic at the same database the Flask app uses, and hand it the
# app's declarative metadata so autogenerate can diff models vs. schema.
config.set_main_option('sqlalchemy.url',
                       current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    The Alembic context is configured with only a database URL: no Engine
    (and therefore no DBAPI) is needed. Calls to context.execute() emit
    the generated SQL to the script output instead of a live connection.
    """
    context.configure(url=config.get_main_option("sqlalchemy.url"))
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.
    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                # Emptying the directives list suppresses the revision file.
                directives[:] = []
                logger.info('No changes in schema detected.')
    # NullPool: the single connection below is truly closed on .close(),
    # nothing lingers in a pool after the migration run.
    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)
    connection = engine.connect()
    # Extra configure args come from Flask-Migrate's extension registry.
    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      process_revision_directives=process_revision_directives,
                      **current_app.extensions['migrate'].configure_args)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even when a migration fails.
        connection.close()
# Entry point: Alembic decides offline vs. online mode from the command line.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| [
"marius@learningdata.ie"
] | marius@learningdata.ie |
e5c94008a3438e1f9390edaf8fb7f885ef853107 | ffaea12d7162fc24e5f4c944d0f41f8abaaa6968 | /COPD/train.py | e7dafcc31719dce1cc3be0b27ee10375bbe49d4b | [] | no_license | liiiiiiiiil/DL | c5fedb62bb89917f4b16d7edec7c3e95e7bcba95 | 2dcbb0774680301c9fcb99d29ac6da2317cba528 | refs/heads/master | 2020-03-11T03:01:23.415533 | 2018-04-29T12:10:42 | 2018-04-29T12:10:42 | 129,734,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | import torch
import time
from utils.run_utils import *
import torch.nn as nn
def train(opt,train_loader,model,criterion,optimizer,epoch,losslist):
    """Run one training epoch.

    Iterates ``train_loader`` (each sample is a dict with 'image' and
    'label'), performs forward/backward/step, and tracks timing, loss and
    accuracy with AverageMeter helpers. Running loss values are appended
    to ``losslist`` (mutated in place). Progress is printed every
    ``opt.print_freq`` batches.

    NOTE(review): ``target.cuda(async=True)`` is a syntax error on
    Python 3.7+ (``async`` became a keyword) and ``loss.data[0]`` only
    works on PyTorch < 0.4 -- modern code would use
    ``non_blocking=True`` and ``loss.item()``. Confirm the pinned
    torch/Python versions before changing.
    """
    batch_time=AverageMeter()
    data_time=AverageMeter()
    losses=AverageMeter()
    top1=AverageMeter()
    # Switch to training mode (enables dropout/batch-norm updates).
    model.train()
    end=time.time()
    for i,sample in enumerate(train_loader):
        input=sample['image']
        target=sample['label']
        # Time spent waiting on the data loader for this batch.
        data_time.update(time.time()-end)
        target=target.cuda(async=True)
        input_var=torch.autograd.Variable(input).cuda()
        target_var=torch.autograd.Variable(target)
        if opt.half:
            # Optional fp16 inputs to cut memory/compute.
            input_var=input_var.half()
        output=model(input_var)
        loss=criterion(output,target_var)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Cast back to fp32 before computing metrics (matters for half mode).
        output=output.float()
        result=accuracy(output.data,target)
        losses.update(loss.data[0],input.size(0))
        losslist.append(losses.val)
        top1.update(result,input.size(0))
        # Total wall time for this batch (data loading + compute).
        batch_time.update(time.time()-end)
        end=time.time()
        if i % opt.print_freq==0:
            print('Epoch:[{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f}({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f}({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f}({loss.avg:.4f})\t'
                  'Accuracy{top1.val:.3f}({top1.avg:.3f})'
                  .format(epoch,i,len(train_loader),batch_time=batch_time,
                          data_time=data_time,
                          loss=losses,top1=top1))
| [
"944642079@qq.com"
] | 944642079@qq.com |
118f0a5fdddc6b68612616b058763a3a0a82276c | 5bedffe3af7a200678ecd8adb9c89aec3a5a9fe2 | /rango/migrations/0009_category_slug.py | bf82d4e20ea98467978704a944724a91a0cf3a10 | [] | no_license | wongj5/tangowithdjango | 95b2309e112766281f9b1e58578a986d79481079 | 416adc90d52be669f5c5412222053d2db99d48f6 | refs/heads/master | 2016-09-05T13:42:21.344511 | 2015-05-20T05:31:16 | 2015-05-20T05:31:16 | 35,659,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-adds the 'slug' field to Category
    # (it was removed in 0008).
    dependencies = [
        ('rango', '0008_remove_category_slug'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='slug',
            # NOTE(review): default=0 for a SlugField is unusual (slugs are
            # strings) -- presumably just a one-off default for existing
            # rows; confirm against the model definition.
            field=models.SlugField(default=0),
            preserve_default=True,
        ),
    ]
| [
"wong.justin23@gmail.com"
] | wong.justin23@gmail.com |
dd7dd34d3c33d8d237f4413a2d98aab566ff7489 | 2b6a63dc91944c27d2e5a905516f8d6ab89bb487 | /pageobjects/productscreen.py | c5f43b4675881bc3d0c14b0d05df9bd53e4ac3a4 | [] | no_license | folajimia/Photonaijatestframework | 6f4474a7ee0f873c70b07e2eab02937f4f601138 | 95cce981f82b13a67d443bea89eba22213d30e9c | refs/heads/master | 2023-02-18T03:12:23.786997 | 2018-12-23T22:04:44 | 2018-12-23T22:04:44 | 150,261,253 | 0 | 0 | null | 2023-02-15T17:23:05 | 2018-09-25T12:25:31 | Python | UTF-8 | Python | false | false | 1,126 | py |
class ProductScreen:
    """Page object for the products/services screen.

    Holds the WebDriver (or project driver wrapper) used for all page
    interactions.
    """
    def __init__(self, driver):
        # Driver instance shared with the other page objects.
        self.driver = driver
    def test_services_screen_components(self):
        """Navigate from the home screen and validate the products screen.

        NOTE(review): ``HomeScreen`` is not imported in this module, and
        the ``validate_*`` methods called below are not defined on this
        class (they exist only as commented-out drafts) -- as written this
        method would raise NameError/AttributeError at runtime; confirm.
        """
        home_screen = HomeScreen(self.driver)
        home_screen.click_service_product_link()
        product_screen = ProductScreen(self.driver)
        product_screen.validate_title_is_present()
        product_screen.validate_icon_is_present()
        product_screen.validate_top_menu_is_present()
        product_screen.validate_instagram_button_is_displayed()
    # Draft locators/validations kept for reference (not yet active):
    # self.photo_book_design_image = WebDriverWait(self.driver.instance, 10).until(EC.visibility_of_element_located((By.XPATH,'//*[@id="block-system-main"]/div/div/div/div[1]/div[1]/div/div/a/img')))
    # self.referral_service_image = WebDriverWait(self.driver.instance,10).until(EC.visibility_of_element_located((By.XPATH, '//*[@id="block-system-main"]/div/div/div/div[2]/div[1]/div/div/a/img')))
    #def validate_photo_book_design_image(self):
    #    assert self.photo_book_design_image.is_displayed()
    #def validate_referral_service_image(self):
# assert self.referral_service_image.is_displayed() | [
"jimi.adekoya@mheducation.com"
] | jimi.adekoya@mheducation.com |
4f656683d5847a47be56020aafa35e0d7f97d5a3 | bab6527375935fd306f6fe984dc663e6eb67cc77 | /venv/bin/easy_install-3.7 | 6c9edf7e170260972e35f1095f2118f6226cfb95 | [] | no_license | HenryM975/Euler | 3660248312f2cb82e48e947a4a4bdf867fe9c666 | d241d139ce44a9407bd4afc0379f81a868297a70 | refs/heads/master | 2022-12-19T02:27:30.137950 | 2020-09-27T10:48:31 | 2020-09-27T10:48:31 | 271,604,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | 7 | #!/root/PycharmProjects/Euler/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Auto-generated setuptools console-script wrapper: strip the
# "-script.py"/".exe" suffix from argv[0] so usage messages show the real
# program name, then hand control to easy_install's main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"henry975@yandex.ru"
] | henry975@yandex.ru |
61b973875df000a76b6eefe5ee018440f143c300 | 95f99e20583cf636023a1d442cfaca2f9cab98a9 | /org/migrations/0008_auto_20200415_1449.py | 6b0a2c283c787a547dceae5ac407666dbe71b1d1 | [] | no_license | ashersuman/SECTEN | 7765b8bd3329242d8d7ae3af686842de2173fc79 | 847200987fee01ccde421c53246102eb423d2392 | refs/heads/master | 2023-07-30T11:32:25.509988 | 2020-08-29T15:54:04 | 2020-08-29T15:54:04 | 271,321,041 | 0 | 1 | null | 2021-09-22T19:13:42 | 2020-06-10T15:56:43 | Python | UTF-8 | Python | false | false | 1,076 | py | # Generated by Django 3.0.4 on 2020-04-15 09:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: links Org to an OrgOwner and relaxes
    # nullability on 'created_by' and 'location'.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('org', '0007_auto_20200415_0044'),
    ]
    operations = [
        # One-to-one "Combiner" link; SET_NULL keeps the Org if the owner
        # record is deleted.
        migrations.AddField(
            model_name='org',
            name='OrgOwner',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='org.OrgOwner', verbose_name='Combiner'),
        ),
        migrations.AlterField(
            model_name='org',
            name='created_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
        ),
        migrations.AlterField(
            model_name='org',
            name='location',
            field=models.CharField(default=None, max_length=300, null=True, verbose_name='Address'),
        ),
    ]
| [
"user11root@gmail.com"
] | user11root@gmail.com |
25a2dec7b926e52c47264c1c1fa15ae2af41e8e3 | 63718d9acba10c00f51d9f10b8f49cfd758da1af | /outputFiles/statistics/archives/ourIA/improved_closest_v2.py/0.7/3/player1.py | 00ceb1c294ff5ec03d105cd75a4d25e8bcc4e902 | [
"MIT"
] | permissive | dimtion/jml | 2d6760b8b7e12565989ddd5356b181b71a37df68 | dba4db760280cc5ed8c384e36e41d6c7a310fb4f | refs/heads/master | 2021-01-13T00:15:25.114150 | 2015-11-16T09:02:19 | 2015-11-16T09:02:19 | 43,818,465 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,253 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################################################################################################################################################################################################################################
######################################################################################################## PRE-DEFINED IMPORTS #######################################################################################################
####################################################################################################################################################################################################################################
# Imports that are necessary for the program architecture to work properly
# Do not edit this code
import ast
import sys
import os
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED CONSTANTS ######################################################################################################
####################################################################################################################################################################################################################################
# Possible characters to send to the maze application
# Any other will be ignored
# Do not edit this code
UP = 'U'
DOWN = 'D'
LEFT = 'L'
RIGHT = 'R'
####################################################################################################################################################################################################################################
# Name of your team
# It will be displayed in the maze
# You have to edit this code
TEAM_NAME = "Improved closest v2"
####################################################################################################################################################################################################################################
########################################################################################################## YOUR VARIABLES ##########################################################################################################
####################################################################################################################################################################################################################################
# Stores all the moves in a list to restitute them one by one
allMoves = [UP, RIGHT, UP, UP, RIGHT, UP, RIGHT, UP, RIGHT, RIGHT, RIGHT, UP, UP, RIGHT, UP, RIGHT]
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED FUNCTIONS ######################################################################################################
####################################################################################################################################################################################################################################
# Writes a message to the shell
# Use for debugging your program
# Channels stdout and stdin are captured to enable communication with the maze
# Do not edit this code
def debug(text):
    """Write *text* (plus a newline) to stderr and flush.

    stdout/stdin are reserved for the maze protocol, so debugging output
    must go through the stderr channel.
    """
    print(str(text), file=sys.stderr, flush=True)
####################################################################################################################################################################################################################################
# Reads one line of information sent by the maze application
# This function is blocking, and will wait for a line to terminate
# The received information is automatically converted to the correct type
# Do not edit this code
def readFromPipe () :
    # Reads from the stdin channel and returns the structure associated to the string
    # Blocks until a full line arrives; the line is expected to be a Python
    # literal (dict/tuple/...) and is parsed safely with ast.literal_eval.
    # NOTE(review): the bare except also catches KeyboardInterrupt/SystemExit
    # and kills the process via os._exit(-1) without cleanup -- presumably
    # intentional (any protocol error is fatal); confirm before narrowing.
    try :
        text = sys.stdin.readline()
        return ast.literal_eval(text.strip())
    except :
        os._exit(-1)
####################################################################################################################################################################################################################################
# Sends the text to the maze application
# Do not edit this code
def writeToPipe(text):
    """Send *text* to the maze application over the captured stdout channel.

    No newline is appended; the protocol's framing is the caller's job.
    """
    print(text, end='', file=sys.stdout, flush=True)
####################################################################################################################################################################################################################################
# Reads the initial maze information
# The function processes the text and returns the associated variables
# The dimensions of the maze are positive integers
# Maze map is a dictionary associating to a location its adjacent locations and the associated weights
# The preparation time gives the time during which 'initializationCode' can make computations before the game starts
# The turn time gives the time during which 'determineNextMove' can make computations before returning a decision
# Player locations are tuples (line, column)
# Coins are given as a list of locations where they appear
# A boolean indicates if the game is over
# Do not edit this code
def processInitialInformation():
    """Read and unpack the initial game description sent by the maze.

    Returns a 9-tuple: (mazeWidth, mazeHeight, mazeMap, preparationTime,
    turnTime, playerLocation, opponentLocation, coins, gameIsOver).
    """
    data = readFromPipe()
    keys = ('mazeWidth', 'mazeHeight', 'mazeMap', 'preparationTime',
            'turnTime', 'playerLocation', 'opponentLocation', 'coins',
            'gameIsOver')
    return tuple(data[key] for key in keys)
####################################################################################################################################################################################################################################
# Reads the information after each player moved
# The maze map and allowed times are no longer provided since they do not change
# Do not edit this code
def processNextInformation():
    """Read and unpack the per-turn update sent by the maze.

    Returns (playerLocation, opponentLocation, coins, gameIsOver); the
    static maze data is not re-sent after the first message.
    """
    data = readFromPipe()
    keys = ('playerLocation', 'opponentLocation', 'coins', 'gameIsOver')
    return tuple(data[key] for key in keys)
####################################################################################################################################################################################################################################
########################################################################################################## YOUR FUNCTIONS ##########################################################################################################
####################################################################################################################################################################################################################################
# This is where you should write your code to do things during the initialization delay
# This function should not return anything, but should be used for a short preprocessing
# This function takes as parameters the dimensions and map of the maze, the time it is allowed for computing, the players locations in the maze and the remaining coins locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def initializationCode(mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins):
    """Preparation-phase hook; this strategy needs no precomputation."""
####################################################################################################################################################################################################################################
# This is where you should write your code to determine the next direction
# This function should return one of the directions defined in the CONSTANTS section
# This function takes as parameters the dimensions and map of the maze, the time it is allowed for computing, the players locations in the maze and the remaining coins locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def determineNextMove(mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins):
    """Return the next scripted move for this turn.

    The strategy is fully pre-computed: each call consumes the head of the
    module-level ``allMoves`` list (mutated in place); the game-state
    parameters are ignored.
    """
    return allMoves.pop(0)
####################################################################################################################################################################################################################################
############################################################################################################# MAIN LOOP ############################################################################################################
####################################################################################################################################################################################################################################
# This is the entry point when executing this file
# We first send the name of the team to the maze
# The first message we receive from the maze includes its dimensions and map, the times allowed to the various steps, and the players and coins locations
# Then, at every loop iteration, we get the maze status and determine a move
# Do not edit this code
if __name__ == "__main__" :
    # We send the team name
    writeToPipe(TEAM_NAME + "\n")
    # We process the initial information and have a delay to compute things using it
    (mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = processInitialInformation()
    initializationCode(mazeWidth, mazeHeight, mazeMap, preparationTime, playerLocation, opponentLocation, coins)
    # We decide how to move and wait for the next step
    # (blocking loop: each iteration waits for the maze's next status line)
    while not gameIsOver :
        (playerLocation, opponentLocation, coins, gameIsOver) = processNextInformation()
        if gameIsOver :
            break
        nextMove = determineNextMove(mazeWidth, mazeHeight, mazeMap, turnTime, playerLocation, opponentLocation, coins)
        writeToPipe(nextMove)
####################################################################################################################################################################################################################################
#################################################################################################################################################################################################################################### | [
"lcarr@pc-df-303.priv.enst-bretagne.fr"
] | lcarr@pc-df-303.priv.enst-bretagne.fr |
4eb0afa5e2ecc14a86426d9c0720079e9b9536e2 | 2a06a81e9cb23b95375bb42343688db094d18bb3 | /PI/bin/pip3 | c1738126f883ba12ca04716decfb1a628244274e | [] | no_license | leonido1/RaspberryPiOCR | 5daee02ba7ccfb303bde2596004de9be3345dcbe | bdf502981580b64eab4681dd09cfdc7214bde502 | refs/heads/master | 2020-03-31T22:33:04.876954 | 2018-10-11T17:50:39 | 2018-10-11T17:50:39 | 152,622,394 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 226 | #!/home/pi/.virtualenvs/cv/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated pip console-script wrapper for a virtualenv: normalizes
# argv[0] and delegates to pip's entry point.
import re
import sys
# NOTE(review): `from pip import main` presumably only works with the pip
# version this virtualenv was created with; newer pip releases moved the
# entry point — confirm before upgrading pip in this environment.
from pip import main
if __name__ == '__main__':
    # Strip a trailing "-script.pyw"/".exe" suffix so pip reports a clean
    # program name in its usage/error messages.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"leonidvishniakov@gmail.com"
] | leonidvishniakov@gmail.com | |
976316936ff45669e030bb2675bd7ccf4f664609 | 86a3f9c934759bb796d80ee28d9d7d71233ccc50 | /exercises/chapter9/exercise_9_6.py | 9a5a5f7faa831f0e7862cd6e330ccecd20291812 | [
"MIT"
] | permissive | JapoDeveloper/think-python | f3c35b6eb5804f0665111f9ad40021d4fbe9900d | 1e6fc4fd635a5bdf1ea38eb93a1e67db9ad99587 | refs/heads/master | 2021-05-22T16:30:28.015119 | 2020-04-11T23:53:52 | 2020-04-11T23:53:52 | 253,004,165 | 0 | 0 | MIT | 2020-04-11T23:53:53 | 2020-04-04T13:29:19 | Python | UTF-8 | Python | false | false | 905 | py | """
Think Python, 2nd Edition
Chapter 9
Exercise 9.6
Description:
Write a function called is_abecedarian that returns True if the letters in a
word appear in alphabetical order (double letters are okay). How many
abecedarian words are there?
"""
from words import list_words
def is_abecedarian(word):
    """Return True if the letters in word appear in alphabetical order.

    Double letters are okay. For None or an empty word, prints a message
    and returns None.
    """
    if word is None or len(word) == 0:
        print('Invalid word input')
        return
    # Abecedarian == every adjacent pair of letters is non-decreasing.
    return all(left <= right for left, right in zip(word, word[1:]))
# Quick sanity checks against words with a known answer
print(is_abecedarian('is')) # True
print(is_abecedarian('passion')) # False
print(is_abecedarian('low')) # True
# Count how many words in the word list are abecedarian
words = list_words()
abecedarian_count = 0
for word in words:
    if is_abecedarian(word):
        abecedarian_count += 1
print('{} words are abecedarian'.format(abecedarian_count))
| [
"julio_master_55@hotmail.com"
] | julio_master_55@hotmail.com |
d5d82d46758a40e1f5c1ca060e47303741b8a4da | 83e6bbff0fc96d819ebd15d52aa168760f3bbf1b | /contrib/seeds/generate-seeds.py | 0653ef45f8e6fe1f40d780da0b85cf6783ee36f8 | [
"MIT"
] | permissive | smart-pubgc/PUBGC-COIN | f2bed888e25d96ff419bc220f3c7deeebc39c811 | aad16372c070d790e181814bdf8c955365d23628 | refs/heads/main | 2023-08-17T15:39:45.465056 | 2021-09-20T22:26:20 | 2021-09-20T22:26:20 | 399,748,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,342 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# 12-byte prefix that maps a 4-byte IPv4 address into IPv6 space
# (the standard ::ffff:0:0/96 "IPv4-mapped IPv6" range)
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# 6-byte OnionCat prefix (fd87:d87e:eb43::/48) used to embed a 10-byte
# Tor onion identifier inside an IPv6 address
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string into a 16-byte IPv6-style bytearray.

    Accepted forms: "<name>.onion" (base32 Tor identifier placed behind the
    OnionCat prefix), dotted IPv4 (placed behind the IPv4-mapped prefix),
    plain IPv6, and "0xDDBBCCAA" little-endian IPv4 (old pnSeeds format).

    Raises ValueError for an onion payload of the wrong length or for input
    matching none of the forms.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # BUG FIX: the original formatted this message with the undefined
            # name `s`, raising NameError instead of the intended ValueError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Parse "host", "host:port", "[ipv6]" or "[ipv6]:port" into a tuple of
    (16-byte address bytearray, port int), using defaultport when the spec
    carries no port.
    """
    # FIX: use a raw string — in a plain literal '\[' is an invalid escape
    # sequence, which is a SyntaxWarning (future error) on modern Python.
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # ipv6
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1: # ipv6, no port
        host = s
        port = ''
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Write a C array of SeedSpec6 entries named structname to g.

    Reads one address spec per line from f; '#' starts a comment, blank
    lines are skipped, and defaultport is used for lines without a port.
    """
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    entries = []
    for raw in f:
        # Drop any trailing comment and surrounding whitespace.
        spec = raw.split('#', 1)[0].strip()
        if not spec:
            continue
        (host, port) = parse_spec(spec, defaultport)
        octets = ','.join(('0x%02x' % b) for b in host)
        entries.append(' {{%s}, %i}' % (octets, port))
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    """Generate chainparamsseeds.h on stdout from the nodes_main.txt and
    nodes_test.txt files found in the directory given as argv[1]."""
    if len(sys.argv) < 2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    out = sys.stdout
    indir = sys.argv[1]
    # Fixed file header.
    for text in ('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n',
                 '#define BITCOIN_CHAINPARAMSSEEDS_H\n',
                 '/**\n',
                 ' * List of fixed seed nodes for the pubgcoin network\n',
                 ' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n',
                 ' *\n',
                 ' * Each line contains a 16-byte IPv6 address and a port.\n',
                 ' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n',
                 ' */\n'):
        out.write(text)
    # One seed table per network, separated by a blank line.
    jobs = (('nodes_main.txt', 'pnSeed6_main', 2242),
            ('nodes_test.txt', 'pnSeed6_test', 12243))
    for index, (filename, structname, port) in enumerate(jobs):
        if index:
            out.write('\n')
        with open(os.path.join(indir, filename), 'r') as nodefile:
            process_nodes(out, nodefile, structname, port)
    out.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
    main()
| [
"89513168+smart-pubgc@users.noreply.github.com"
] | 89513168+smart-pubgc@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.