blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d2f9849dbff73217d82d1da421a42a80a9dbdb61 | dcdd2deec12016a985ed1474626c2648ec176505 | /recipes/doctest/2.3.5/conanfile.py | 85969496f093cbb975b07a5535856194196fff15 | [
"MIT"
] | permissive | casabre/conan-center-index | cc7bdb6763b867f3d30c1da05975135012ed592b | 65b4c671426765a14c3e42cdafbb657aa03e0d4d | refs/heads/master | 2020-12-13T10:12:55.682713 | 2020-03-10T13:56:35 | 2020-03-10T13:56:35 | 234,386,496 | 0 | 0 | MIT | 2020-03-10T13:56:36 | 2020-01-16T18:37:26 | Python | UTF-8 | Python | false | false | 1,197 | py | from conans import ConanFile, tools, CMake
import os
class DoctestConan(ConanFile):
    """Conan recipe packaging the header-only doctest testing framework."""

    name = "doctest"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/onqtam/doctest"
    description = "C++11/14/17/20 single header testing framework"
    settings = "os", "compiler"
    license = "MIT"
    _source_subfolder = "source_subfolder"

    @property
    def _is_mingw(self):
        # MinGW is identified as the gcc compiler running on a Windows host.
        on_windows = self.settings.os == "Windows"
        return on_windows and self.settings.compiler == "gcc"

    def source(self):
        # Download the release archive and normalize its directory name.
        tools.get(**self.conan_data["sources"][self.version])
        unpacked = "%s-%s" % (self.name, self.version)
        os.rename(unpacked, self._source_subfolder)

    def package(self):
        # Header-only: ship the license file and the single doctest header.
        self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder)
        self.copy(pattern="*doctest.h", dst="include", src=self._source_subfolder)

    def package_info(self):
        if self._is_mingw:
            # See https://sourceforge.net/p/mingw-w64/bugs/727/
            # can't use destructors in thread_local with mingw
            self.cpp_info.defines.append("DOCTEST_THREAD_LOCAL=")

    def package_id(self):
        # Same package for every configuration (header-only library).
        self.info.header_only()
| [
"theo.delrieu@tanker.io"
] | theo.delrieu@tanker.io |
ff79cbce30774e383c5e08faa9eff306b1c035f0 | 9754b49675af62e104bd4fa2326b14dd48741ce2 | /controllers/tcpip_simple_street_controller/client.py | a8e57ef5f32022141112b446ad51777f5ea64dbd | [] | no_license | mcitir/Webots_TCPIP | 6b72025611e27d7d2bf148c042e7823cacf11a11 | 829be22b23bde2e87fa8fc783d800f8444d6384c | refs/heads/main | 2023-07-29T09:14:58.179317 | 2021-09-07T20:08:38 | 2021-09-07T20:08:38 | 404,098,738 | 0 | 0 | null | 2021-09-07T19:45:22 | 2021-09-07T19:26:12 | Python | UTF-8 | Python | false | false | 176 | py | import socket
# Minimal TCP client: connect to a server on this host at port 1235 and
# keep printing whatever it sends, decoded as UTF-8.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((socket.gethostname(), 1235))
while True:
    data = conn.recv(1024)
    print(data.decode('utf-8'))
| [
"muzaffer.citir@hotmail.com"
] | muzaffer.citir@hotmail.com |
d2a9cb9e98f386701503627e098a2b8957381254 | f2befaae3840bafd181cc712108e3b64caf2696f | /app/portal/horizon/openstack_dashboard/contrib/developer/theme_preview/panel.py | ebe06fc1d16d07e88e901c59aee0806dedf6353f | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | F5Networks/f5-adcaas-openstack | 17d5c408d421dcfe542002e1f850b2d9f29f1663 | 02bd8a606215c0fa08b926bac1b092b5e8b278df | refs/heads/master | 2023-08-28T12:09:54.972191 | 2022-08-12T02:03:43 | 2022-08-12T02:03:43 | 164,592,273 | 4 | 23 | Apache-2.0 | 2022-08-12T02:03:44 | 2019-01-08T07:40:35 | Python | UTF-8 | Python | false | false | 779 | py | # Copyright 2015 Cisco Systems, Inc.
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class Preview(horizon.Panel):
    """Registers the 'Theme Preview' developer panel with Horizon."""
    name = _("Theme Preview")  # translatable display name shown in the dashboard
    slug = 'theme_preview'  # URL slug / unique identifier for this panel
| [
"a.zong@f5.com"
] | a.zong@f5.com |
fdae1c620f4b33f1f0196a1975c9b4edcf84799e | 4f7beba61739cf1aba7def3033fbf9f7528aba37 | /look/captcha.py | 281894974abda4eecb09d69258e1b74b7a81ffc2 | [
"MIT"
] | permissive | tonglei100/look | 8568d32a578088712596291dd962470975e2d0e0 | e78411eddae52d332b2b012541e63322ce286f46 | refs/heads/master | 2021-06-19T01:17:26.636102 | 2019-10-11T08:40:25 | 2019-10-11T08:40:25 | 154,478,961 | 46 | 18 | null | null | null | null | UTF-8 | Python | false | false | 3,068 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
from look import dataset
from look.cnn_model import CNN
from look import setting
import numpy as np
from look import one_hot
from PIL import Image
def train(model_name='model.pkl'):
    """Train the captcha CNN and save its weights under setting.MODEL_PATH.

    :param model_name: file name for the saved state dict (default model.pkl)
    """
    net = CNN()
    net.train()  # training mode (enables dropout / batch-norm updates)
    print('init net')
    loss_fn = nn.MultiLabelSoftMarginLoss()
    optim = torch.optim.Adam(net.parameters(), lr=setting.TRAIN_LEARNING_RATE)
    loader = dataset.get_train_data_loader()
    for epoch in range(setting.TRAIN_NUM_EPOCHS):
        for batch_images, batch_labels in loader:
            inputs = Variable(batch_images)
            targets = Variable(batch_labels.float())
            loss = loss_fn(net(inputs), targets)
            optim.zero_grad()
            loss.backward()
            optim.step()
            # Progress is reported for every batch.
            print('epoch: % -3s loss: %s' % (epoch, loss.item()))
    torch.save(net.state_dict(), setting.MODEL_PATH / model_name)
    print('save last model')
def test(model_name='model.pkl'):
    """Evaluate a trained captcha CNN on the test set and print its accuracy.

    :param model_name: file name of the saved state dict under setting.MODEL_PATH
    """
    net = CNN()
    net.eval()  # inference mode
    net.load_state_dict(torch.load(setting.MODEL_PATH / model_name))
    print('load cnn net.')
    loader = dataset.get_test_data_loader()
    correct = 0
    total = 0
    for images, labels in loader:
        output = net(Variable(images))
        # Decode one character per MAX_CAPTCHA slot by arg-maxing its scores.
        decoded = ''
        for pos in range(setting.MAX_CAPTCHA):
            span = output[0, pos * setting.ALL_CHAR_SET_LEN: (pos + 1) * setting.ALL_CHAR_SET_LEN]
            decoded += setting.ALL_CHAR_SET[np.argmax(span.data.numpy())]
        expected = one_hot.decode(labels.numpy()[0])
        total += labels.size(0)
        if decoded == expected:
            correct += 1
        else:
            print('Predict:' + decoded)
            print('Real :' + expected)
        if total % 200 == 0:
            print('Test Accuracy of the model on the %d test images: %f %%' %
                  (total, 100 * correct / total))
    print('Test Accuracy of the model on the %d test images: %f %%' %
          (total, 100 * correct / total))
def recognize(model_name='model.pkl'):
    """Run the trained CNN over the captcha image set and return the decoded string.

    Bug fix: the default weights file was ``'model.pk'``, which does not match
    the ``'model.pkl'`` default used by both ``train()`` and ``test()``; a bare
    ``recognize()`` call therefore tried to load a file that is never written.
    The default is now aligned with what ``train()`` actually saves.

    :param model_name: file name of the saved state dict under setting.MODEL_PATH
    :return: concatenated predicted characters, processed in sorted label order
    """
    cnn = CNN()
    cnn.eval()  # inference mode
    cnn.load_state_dict(torch.load(setting.MODEL_PATH / model_name))
    captcha_dataloader = dataset.get_captcha_data_loader()
    code = ''
    images = {}
    # Collect images keyed by label so they can be decoded in label order.
    for image, label in captcha_dataloader:
        images[label] = image
    images = [images[key] for key in sorted(images)]
    for image in images:
        vimage = Variable(image)
        predict_label = cnn(vimage)
        # Each MAX_CAPTCHA-sized slot holds one character's class scores.
        for i in range(setting.MAX_CAPTCHA):
            code += setting.ALL_CHAR_SET[np.argmax(predict_label[0, i * setting.ALL_CHAR_SET_LEN: (
                i + 1) * setting.ALL_CHAR_SET_LEN].data.numpy())]
    return code
if __name__ == '__main__':
    # Ad-hoc entry point: decode the captcha image set with the saved model
    # and print the recognized string.
    code = recognize()
    print(code)
| [
"tonglei@qq.com"
] | tonglei@qq.com |
38c6618e463de13bb008fbbd2be4929039df76fc | d20bc81f7a3251e11263337420521c6df59d6cdb | /evaluation_3.py | 8cfe4f9a8d51aea10ab6c166a7b204bd6ba4a3ef | [] | no_license | indranilpradhan/BlockChain | 2836d2924a16f3b54a8e0d9c3f167a1ec4aba452 | 82af23b0dff3e724d42f5e86b9ff18191b28a94e | refs/heads/master | 2022-10-08T08:15:38.001897 | 2020-06-13T07:00:48 | 2020-06-13T07:00:48 | 271,957,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,982 | py | from random import randint
from random import seed
from random import randint
import hashlib
import Crypto.Util.number
import sys
from Crypto import Random
import random
class Node:
    """Singly linked list element: a payload plus a forward link."""

    def __init__(self, data):
        # A freshly created node is detached from any list.
        self.data, self.next = data, None
class HashPtNode:
    """Linked-list element that also carries a hash pointer to its successor."""

    def __init__(self, data):
        # hashNext caches a hash of the successor; next is the plain link.
        self.data, self.hashNext, self.next = data, None, None
class HashSignPtNode:
    """Linked-list element carrying a hash pointer plus a signature pointer."""

    def __init__(self, data):
        self.data = data
        # Both pointers start with the placeholder value 2 (unset sentinel).
        self.hashNext = self.signNext = 2
        self.next = None
class LinkedList:
    """Plain singly linked list tracked through explicit (head, last) pointers.

    ``push``/``pop`` take and return the ``(head, last)`` pair so callers keep
    both references in sync:  ``head, last = ll.push(head, last, data)``.
    """

    def __init__(self):
        self.head = None
        self.last = None

    def push(self, head, last, data):
        """Append *data* at the tail and return the updated (head, last) pair."""
        temp = Node(data)
        if head is None and last is None:
            head = temp
            last = head
        else:
            last.next = temp
            last = temp
        return head, last

    def pop(self, head, last):
        """Remove the tail node and return the updated (head, last) pair.

        Bug fix: the original returned a bare ``None`` for the empty and
        single-node cases, which broke the ``head, last = ll.pop(...)``
        unpacking idiom used by ``push``; both cases now return
        ``(None, None)``.
        """
        if head is None:
            return None, None
        if head is last:
            # Removing the only node empties the list.
            return None, None
        # Walk to the node just before the tail.
        temp = head
        while temp.next is not last:
            temp = temp.next
        last = temp
        last.next = None
        return head, last

    def traverselist(self, head):
        """Print each node's payload from *head* to the tail."""
        temp = head
        while temp:
            print(temp.data)
            temp = temp.next
class HashPtLinkedList:
    """Linked list whose nodes are chained with hash pointers.

    Every node's ``next`` link ends at a single shared sentinel
    (``check_node``); each node's ``hashNext`` stores a hash of its
    predecessor's (hashNext, next-address, data), and the sentinel holds
    the hash of the current tail, so ``verify`` can detect tampering.
    Arithmetic is done in the multiplicative group mod a small prime ``q``.
    """
    def __init__(self):
        # Empty list plus the public parameters of the hash scheme.
        self.head = None
        self.last = None
        self.q = self.choosing_p(5)          # small (5-bit) prime modulus
        self.g = self.generator(self.q)      # primitive root mod q
        self.z = self.calculate_z(self.g, self.q)  # second base for the hash
        self.x = self.generating_x(self.g)   # random secret (unused by hash_)
        self.k = 13                          # fixed nonce used by hash_
        self.check_node = HashPtNode(0)      # shared tail sentinel
    def convert_string_asciisum(self,m):
        # Map a string to an integer: the sum of its character codes.
        asc = [ord(c) for c in m]
        return sum(asc)
    def calculate_z(self,g,q):
        # z = g^t mod q for a random t in [1, q-1].
        temp = randint(1,q-1)
        z = (g**temp)%q
        return z
    def hash_function(self,x1,x2):
        # Two-input hash: (g^x1 * z^x2) mod q.
        hash_val = ((self.g**x1)%self.q * (self.z**x2)%self.q)%self.q
        return hash_val
    def loop_exponent(self,exponent, nr, r, p):
        # Count multiplications of r mod p until 1 is reached
        # (i.e. compute the multiplicative order of r).
        while(nr != 1):
            nr = (nr*r)%p
            exponent= exponent+1
        return exponent
    def generating_x(self,g):
        # Random value in [1, g-1].
        x = randint(1,g-1)
        return x
    def loop_gen(self,nr, exponent, r, p, g):
        # Keep r if its order is p-1, i.e. r is a primitive root mod p.
        # NOTE(review): "exponent != None" is always true here.
        exponent = self.loop_exponent(exponent, nr, r, p)
        if(exponent == p-1 and exponent != None):
            g.append(r)
    def generator(self,p):
        # Collect all primitive roots mod p and return one at random.
        g = []
        for i in range(1,p):
            r = i
            exponent = 1
            nr = r%p
            self.loop_gen(nr, exponent, r, p, g)
        return random.choice(g)
    def choosing_p(self,n):
        # Random n-bit prime via pycryptodome.
        q = Crypto.Util.number.getPrime(n, randfunc=Random.get_random_bytes)
        return q
    def hash_(self,m):
        # Hash a string m: r = g^k mod q, then hash_function(r, asciisum(m)).
        M = self.convert_string_asciisum(m)
        r = (self.g**self.k)%self.q
        h = (self.hash_function(r,M))
        return h
    def push(self, head, last, data):
        """Append *data*; rehash the old tail and refresh the sentinel hash."""
        temp = HashPtNode(data)
        if(head == None and last == None):
            # First node: point it at the sentinel and store its hash there.
            temp.next = self.check_node
            temphashNext = str(temp.hashNext)
            tempAddr = str(hex(id(temp.next)))
            message = temphashNext+tempAddr+str(temp.data)
            hash_message = self.hash_(message)
            self.check_node.hashNext = hash_message
            head = temp
            last = head
        else:
            # Link the new node in, hash the former tail into the new node,
            # then hash the new tail into the sentinel.
            temp.next = self.check_node
            last.next = temp
            lasthashNext= str(last.hashNext)
            lastAddr = str(hex(id(last.next)))
            lastmessage = lasthashNext+lastAddr+str(last.data)
            last_hash_message = self.hash_(lastmessage)
            temp.hashNext = last_hash_message
            last = temp
            lasthashNext = str(last.hashNext)
            lastAddr = str(hex(id(last.next)))
            lastmessage = lasthashNext+lastAddr+str(last.data)
            last_hash_message = self.hash_(lastmessage)
            self.check_node.hashNext = last_hash_message
        return head,last
    def pop(self,head,last):
        """Drop the tail node and re-anchor the sentinel hash to the new tail.

        NOTE(review): returns a bare ``None`` for empty/single-node lists but
        a ``(head, last)`` tuple otherwise — callers that unpack the result
        would fail on the former; confirm intended contract.
        """
        if(head == None):
            return None
        else:
            if(head == last):
                head = None
                last = None
                return None
            temp = head
            while(temp.next != last):
                temp = temp.next
            last = temp
            last.next = self.check_node
            lasthashNext = str(last.hashNext)
            lastAddr = str(hex(id(last.next)))
            lastmessage = lasthashNext+lastAddr+str(last.data)
            last_hash_message = self.hash_(lastmessage)
            self.check_node.hashNext = last_hash_message
            return head,last
    def traverselist(self, head, last):
        # Print payloads until the sentinel (last.next) is reached.
        temp = head
        while (temp != last.next):
            print(temp.data)
            temp = temp.next
    def verify(self, head, last):
        """Return the index of the first node whose recomputed hash fails,
        or -1 if the whole chain is intact."""
        nodes = -1
        temp = head
        i =0
        while (temp != last.next):
            thashNext = str(temp.hashNext)
            tAddr = str(hex(id(temp.next)))
            tmessage = thashNext+tAddr+str(temp.data)
            t_hash_message = self.hash_(tmessage)
            if(t_hash_message != temp.next.hashNext):
                nodes = i
                return nodes
            i = i+1
            temp = temp.next
        return nodes
class HashSignPtLinkedList:
    """Linked list chained with hash pointers plus digital signatures.

    Like HashPtLinkedList, but each hash is accompanied by a signature
    (``signNext``) produced by ``digital_signature`` with secret ``x`` and
    public ``y = g^x mod q``; ``verify`` recomputes via ``verifier``.
    All nodes' ``next`` links terminate at the shared ``check_node`` sentinel.
    """
    def __init__(self):
        # Empty list plus the public parameters of the signature scheme.
        self.head = None
        self.last = None
        self.q = self.choosing_p(5)          # small (5-bit) prime modulus
        self.g = self.generator(self.q)      # primitive root mod q
        self.z = self.calculate_z(self.g, self.q)  # second base for the hash
        self.y,self.x = self.generating_x(self.g)  # public key y, secret x
        self.k = 13                          # fixed signing nonce
        self.check_node = HashSignPtNode(0)  # shared tail sentinel
    def convert_string_asciisum(self,m):
        # Map a string to an integer: the sum of its character codes.
        asc = [ord(c) for c in m]
        return sum(asc)
    def calculate_z(self,g,q):
        # z = g^t mod q for a random t in [1, q-1].
        temp = randint(1,q-1)
        z = (g**temp)%q
        return z
    def hash_function(self,x1,x2):
        # Two-input hash: (g^x1 * z^x2) mod q.
        hash_val = ((self.g**x1)%self.q * (self.z**x2)%self.q)%self.q
        return hash_val
    def loop_exponent(self,exponent, nr, r, p):
        # Count multiplications of r mod p until 1 is reached
        # (i.e. compute the multiplicative order of r).
        while(nr != 1):
            nr = (nr*r)%p
            exponent= exponent+1
        return exponent
    def generating_x(self,g):
        # Key pair: secret x in [1, g-1], public y = g^x mod q.
        x = randint(1,g-1)
        y = (self.g**x)%self.q
        return y,x
    def loop_gen(self,nr, exponent, r, p, g):
        # Keep r if its order is p-1, i.e. r is a primitive root mod p.
        exponent = self.loop_exponent(exponent, nr, r, p)
        if(exponent == p-1 and exponent != None):
            g.append(r)
    def generator(self,p):
        # Collect all primitive roots mod p and return one at random.
        g = []
        for i in range(1,p):
            r = i
            exponent = 1
            nr = r%p
            self.loop_gen(nr, exponent, r, p, g)
        return random.choice(g)
    def choosing_p(self,n):
        # Random n-bit prime via pycryptodome.
        q = Crypto.Util.number.getPrime(n, randfunc=Random.get_random_bytes)
        return q
    def digital_signature(self,m):
        # Schnorr-style signature on m: r = g^k, h = H(r, M),
        # s = (k - x*h) mod (q-1); returns (s, h).
        M = self.convert_string_asciisum(m)
        r = (self.g**self.k)%self.q
        h = (self.hash_function(r,M))
        s = (self.k-(self.x*h))%(self.q-1)
        return s,h
    def verifier(self,m,s,e):
        # Recompute rv = g^s * y^e mod q and return H(rv, M); a valid
        # signature reproduces the original hash e.
        M = self.convert_string_asciisum(m)
        h_s = (self.g**s)%self.q
        h_y = (self.y**e)%self.q
        rv = (h_s*h_y)%self.q
        ev = (self.hash_function(rv,M))
        return ev
    def hash_(self,m):
        # Unsigned hash of m (same construction as digital_signature's h).
        M = self.convert_string_asciisum(m)
        r = (self.g**self.k)%self.q
        h = (self.hash_function(r,M))
        return h
    def push(self, head, last, data):
        """Append *data*; sign-and-hash the old tail into the new node and
        the new tail into the sentinel."""
        temp = HashSignPtNode(data)
        if(head == None and last == None):
            # First node: point it at the sentinel and store its signed hash there.
            temp.next = self.check_node
            temphashNext = str(temp.hashNext)
            tempsignNext = str(temp.signNext)
            tempAddr = str(hex(id(temp.next)))
            message = temphashNext+tempsignNext+tempAddr+str(temp.data)
            hash_sign,hash_message = self.digital_signature(message)
            self.check_node.hashNext = hash_message
            self.check_node.signNext = hash_sign
            head = temp
            last = head
        else:
            temp.next = self.check_node
            last.next = temp
            lasthashNext= str(last.hashNext)
            lastsignNext = str(last.signNext)
            lastAddr = str(hex(id(last.next)))
            lastmessage = lasthashNext+lastsignNext+lastAddr+str(last.data)
            last_hash_sign,last_hash_message = self.digital_signature(lastmessage)
            temp.hashNext = last_hash_message
            temp.signNext = last_hash_sign
            last = temp
            lasthashNext = str(last.hashNext)
            lastsignNext = str(last.signNext)
            # NOTE(review): the next line duplicates the assignment above;
            # harmless but presumably unintended.
            lastsignNext = str(last.signNext)
            lastAddr = str(hex(id(last.next)))
            lastmessage = lasthashNext+lastsignNext+lastAddr+str(last.data)
            last_hash_sign,last_hash_message = self.digital_signature(lastmessage)
            self.check_node.hashNext = last_hash_message
            self.check_node.signNext = last_hash_sign
        return head,last
    def pop(self,head,last):
        """Drop the tail and re-anchor the sentinel's signed hash.

        NOTE(review): returns a bare ``None`` for empty/single-node lists but
        a ``(head, last)`` tuple otherwise — confirm intended contract.
        """
        if(head == None):
            return None
        else:
            if(head == last):
                head = None
                last = None
                return None
            temp = head
            while(temp.next != last):
                temp = temp.next
            last = temp
            last.next = self.check_node
            lasthashNext = str(last.hashNext)
            lastsignNext = str(last.signNext)
            lastAddr = str(hex(id(last.next)))
            lastmessage = lasthashNext+lastsignNext+lastAddr+str(last.data)
            last_hash_sign,last_hash_message = self.digital_signature(lastmessage)
            self.check_node.hashNext = last_hash_message
            self.check_node.signNext = last_hash_sign
            return head,last
    def traverselist(self, head, last):
        # Print payloads until the sentinel (last.next) is reached.
        temp = head
        while (temp != last.next):
            print(temp.data)
            temp = temp.next
    def verify(self, head, last):
        """Return the index of the first node whose signature check fails,
        or -1 if the whole chain is intact."""
        nodes = -1
        temp = head
        i =0
        while (temp != last.next):
            thashNext = str(temp.next.hashNext)
            thash = str(temp.hashNext)
            tsignNext = str(temp.next.signNext)
            tsign = str(temp.signNext)
            tAddr = str(hex(id(temp.next)))
            tmessage = thash+tsign+tAddr+str(temp.data)
            t_hash_message = self.verifier(tmessage,int(tsignNext),int(thashNext))
            if(t_hash_message != temp.next.hashNext):
                nodes = i
                return nodes
            i = i+1
            temp = temp.next
        return nodes
if __name__=='__main__':
    # Demo 1: plain pointer-linked list — push three values and print them.
    print("++++++Pointer++++++++")
    ll = LinkedList()
    ll.head,ll.last = ll.push(ll.head,ll.last,1)
    ll.head,ll.last = ll.push(ll.head,ll.last,2)
    ll.head,ll.last = ll.push(ll.head,ll.last,3)
    ll.traverselist(ll.head)
    # Demo 2: hash-pointer list — verify intact, tamper, verify again.
    print("+++++++Hash pointer++++++++")
    llhspt = HashPtLinkedList()
    llhspt.head,llhspt.last = llhspt.push(llhspt.head,llhspt.last,1)
    llhspt.head,llhspt.last = llhspt.push(llhspt.head,llhspt.last,2)
    llhspt.head,llhspt.last = llhspt.push(llhspt.head,llhspt.last,3)
    llhspt.traverselist(llhspt.head,llhspt.last)
    node = llhspt.verify(llhspt.head,llhspt.last)
    if(node == -1):
        print("Verified")
    else:
        print("The modfied node ",node)
    print("=======modifying node======")
    # Tamper with the second node's payload; verify should report index 1.
    llhspt.head.next.data = 5
    node = llhspt.verify(llhspt.head,llhspt.last)
    if(node == -1):
        print("Verified")
    else:
        print("The modfied node ",node)
    # Demo 3: signed hash-pointer list — same verify/tamper/verify cycle.
    print("++++++Hash sign pointer++++++++")
    llhsSgnpt = HashSignPtLinkedList()
    llhsSgnpt.head,llhsSgnpt.last = llhsSgnpt.push(llhsSgnpt.head,llhsSgnpt.last,1)
    llhsSgnpt.head,llhsSgnpt.last = llhsSgnpt.push(llhsSgnpt.head,llhsSgnpt.last,2)
    llhsSgnpt.head,llhsSgnpt.last = llhsSgnpt.push(llhsSgnpt.head,llhsSgnpt.last,3)
    llhsSgnpt.traverselist(llhsSgnpt.head,llhsSgnpt.last)
    node = llhsSgnpt.verify(llhsSgnpt.head,llhsSgnpt.last)
    if(node == -1):
        print("Verified")
    else:
        print("The modfied node ",node)
    print("======modifying node=======")
    # Tamper with the third node's payload this time.
    llhsSgnpt.head.next.next.data = 5
    node = llhsSgnpt.verify(llhsSgnpt.head,llhsSgnpt.last)
    if(node == -1):
        print("Verified")
    else:
        print("The modfied node ",node)
| [
"noreply@github.com"
] | indranilpradhan.noreply@github.com |
f98206a5f823d8106d69712bbbda48934f3bb4dd | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_policy_rule.py | f12064beb3988ec10b30bd46e8aa3e14b27fa81a | [
"MIT",
"GPL-3.0-only"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 45,525 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_policy_rule
short_description: Manage LTM policy rules on a BIG-IP
description:
- This module will manage LTM policy rules on a BIG-IP.
version_added: "1.0.0"
options:
description:
description:
- Description of the policy rule.
type: str
actions:
description:
- The actions you want the policy rule to perform.
- The available attributes vary by the action, however, each action requires
you specify a C(type).
- These conditions can be specified in any order. Despite the fact they are in a list,
the order in the list does not matter to the BIG-IP.
type: list
elements: dict
suboptions:
type:
description:
- The action type. This value controls which of the following options are required.
- When C(type) is C(forward), the system associates a given C(pool), or C(virtual),
or C(node) with this rule.
- When C(type) is C(enable), the system associates a given C(asm_policy) with
this rule.
- When C(type) is C(ignore), the system removes all existing actions from this
rule.
- When C(type) is C(redirect), the system redirects an HTTP request to a different URL.
- When C(type) is C(reset), the system resets the connection upon C(event).
- When C(type) is C(persist), the system associates C(cookie_insert) and C(cookie_expiry) with this rule.
- When C(type) is C(set_variable), the system sets a variable based on the evaluated Tcl C(expression) based on C(event).
type: str
required: true
choices:
- forward
- enable
- ignore
- redirect
- reset
- persist
- set_variable
pool:
description:
- Pool to which you want to forward traffic.
- This parameter is only valid with the C(forward) type.
type: str
virtual:
description:
- Virtual Server to which you want to forward traffic.
- This parameter is only valid with the C(forward) type.
type: str
node:
description:
- Node to which you want to forward traffic.
- This parameter is only valid with the C(forward) type.
type: str
version_added: "1.2.0"
asm_policy:
description:
- ASM policy to enable.
- This parameter is only valid with the C(enable) type.
type: str
location:
description:
- The new URL for which a redirect response will be sent.
- A Tcl command substitution can be used for this field.
type: str
event:
description:
- Events on which actions, such as reset, can be triggered.
- With the C(set_variable) action, it is used for specifying
an action event, such as request or response.
type: str
expression:
description:
- A tcl expression used with the C(set_variable) action.
type: str
variable_name:
description:
- Variable name used with the C(set_variable) action.
type: str
cookie_insert:
description:
- Cookie name on which you want to persist.
- This parameter is only valid with the C(persist) type.
type: str
version_added: "1.1.0"
cookie_expiry:
description:
- Optional argument, specifying the time for which the session will be persisted.
- This parameter is only valid with the C(persist) type.
type: int
version_added: "1.1.0"
policy:
description:
- The name of the policy you want to associate this rule with.
type: str
required: True
name:
description:
- The name of the rule.
type: str
required: True
conditions:
description:
- A list of attributes that describe the condition.
- See suboptions for details on how to construct each list entry.
- The ordering of this list is important, the module will ensure the order is
kept when modifying the task.
- The suboption options listed below are not required for all condition types,
read the description for more details.
- These conditions can be specified in any order. Despite the fact they are in a list,
the order in the list does not matter to the BIG-IP.
type: list
elements: dict
suboptions:
type:
description:
- The condition type. This value controls which of the following options are required.
- When C(type) is C(http_uri), the system associates a given C(path_begins_with_any)
list of strings with which the HTTP URI should begin. Any item in the
list will provide a match.
- When C(type) is C(all_traffic), the system removes all existing conditions from
this rule.
type: str
required: True
choices:
- http_uri
- all_traffic
- http_host
- ssl_extension
path_begins_with_any:
description:
- A list of strings of characters the HTTP URI should start with.
- This parameter is only valid with the C(http_uri) type.
type: list
elements: str
host_is_any:
description:
- A list of strings of characters the HTTP Host should match.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
host_is_not_any:
description:
- A list of strings of characters the HTTP Host should not match.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
host_begins_with_any:
description:
- A list of strings of characters the HTTP Host should start with.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
server_name_is_any:
description:
- A list of strings of characters the SSL Extension should match.
- This parameter is only valid with the C(ssl_extension) type.
type: list
elements: str
event:
description:
- Events on which conditions such as SSL Extension can be triggered.
type: str
state:
description:
      - When C(present), ensures the rule exists in the policy. When
        C(absent), ensures the rule is removed from the policy. If the rule
        is currently in use, the module will not be able to remove it.
type: str
choices:
- present
- absent
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5networks.f5_modules.f5
requirements:
- BIG-IP >= v12.1.0
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
- Greg Crosby (@crosbygw)
- Nitin Khanna (@nitinthewiz)
- Andrey Kashcheev (@andreykashcheev)
'''
EXAMPLES = r'''
- name: Create policies
bigip_policy:
name: Policy-Foo
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add a rule to the new policy
bigip_policy_rule:
policy: Policy-Foo
name: rule3
conditions:
- type: http_uri
path_begins_with_any:
- /ABC
actions:
- type: forward
pool: pool-svrs
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add multiple rules to the new policy
bigip_policy_rule:
policy: Policy-Foo
name: "{{ item.name }}"
conditions: "{{ item.conditions }}"
actions: "{{ item.actions }}"
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
loop:
- name: rule1
actions:
- type: forward
pool: pool-svrs
conditions:
- type: http_uri
path_begins_with_any:
- /euro
- name: rule2
actions:
- type: forward
pool: pool-svrs
conditions:
- type: http_uri
path_begins_with_any:
- /HomePage/
- name: rule3
actions:
- type: set_variable
variable_name: user-agent
expression: tcl:[HTTP::header User-Agent]
event: request
conditions:
- type: http_uri
path_begins_with_any:
- /HomePage/
- name: Remove all rules and conditions from the rule
bigip_policy_rule:
policy: Policy-Foo
name: rule1
conditions:
- type: all_traffic
actions:
- type: ignore
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
actions:
description: The new list of actions applied to the rule.
returned: changed
type: complex
contains:
type:
description: The action type.
returned: changed
type: str
sample: forward
pool:
description: Pool for forwarding to.
returned: changed
type: str
sample: foo-pool
sample: hash/dictionary of values
conditions:
description: The new list of conditions applied to the rule.
returned: changed
type: complex
contains:
type:
description: The condition type.
returned: changed
type: str
sample: http_uri
path_begins_with_any:
description: List of strings that the URI begins with.
returned: changed
type: list
sample: [foo, bar]
sample: hash/dictionary of values
description:
description: The new description of the rule.
returned: changed
type: str
sample: My rule
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ansible.module_utils.six import iteritems
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, fq_name
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
    """Base parameter mapping shared by the API- and module-side views
    of a BIG-IP LTM policy rule."""

    # REST attribute name -> module-side attribute name.
    api_map = {
        'actionsReference': 'actions',
        'conditionsReference': 'conditions',
    }

    # Attributes that are sent to the BIG-IP REST API.
    api_attributes = [
        'description',
        'actions',
        'conditions',
    ]

    # Attributes compared when computing what changed.
    updatables = [
        'actions',
        'conditions',
        'description',
    ]

    # Attributes reported back to the user in the module result.
    returnables = [
        'description',
        'action',
        'conditions'
    ]

    @property
    def name(self):
        # Missing key and explicit None both yield None.
        return self._values.get('name')

    @property
    def description(self):
        return self._values.get('description')

    @property
    def policy(self):
        # The original early-returned on None; a direct read is equivalent.
        return self._values['policy']
class ApiParameters(Parameters):
    """Parameter view built from a BIG-IP REST API response.

    Normalizes the API's actions/conditions payloads into the flat dicts
    the module compares against user-supplied values.
    """
    def _remove_internal_keywords(self, resource):
        # Strip REST bookkeeping keys that must not take part in comparisons.
        items = [
            'kind', 'generation', 'selfLink', 'poolReference', 'offset',
        ]
        for item in items:
            try:
                del resource[item]
            except KeyError:
                pass
    @property
    def actions(self):
        """Normalize API action items; an empty/missing set maps to
        ``[{'type': 'ignore'}]``. Result is sorted by the 'name' key."""
        result = []
        if self._values['actions'] is None or 'items' not in self._values['actions']:
            return [dict(type='ignore')]
        for item in self._values['actions']['items']:
            action = dict()
            self._remove_internal_keywords(item)
            # Each API marker key is folded into a module-side 'type'.
            if 'forward' in item:
                action.update(item)
                action['type'] = 'forward'
                del action['forward']
            elif 'enable' in item:
                action.update(item)
                action['type'] = 'enable'
                del action['enable']
            elif 'redirect' in item:
                action.update(item)
                action['type'] = 'redirect'
                del action['redirect']
            elif 'setVariable' in item:
                # NOTE(review): assumes the API always returns these keys for
                # setVariable actions; a missing key would raise KeyError.
                action.update(item)
                action['type'] = 'set_variable'
                del action['fullPath']
                del action['code']
                del action['expirySecs']
                del action['length']
                del action['port']
                del action['status']
                del action['vlanId']
                del action['timeout']
            elif 'shutdown' in item:
                action.update(item)
                action['type'] = 'reset'
                del action['shutdown']
            # Deliberately a separate 'if': 'persist' can override the branch above.
            if 'persist' in item:
                action.update(item)
                action['type'] = 'persist'
                del action['persist']
            result.append(action)
        result = sorted(result, key=lambda x: x['name'])
        return result
    @property
    def conditions(self):
        """Normalize API condition items; an empty/missing set maps to
        ``[{'type': 'all_traffic'}]``. Result is sorted by the 'name' key."""
        result = []
        if self._values['conditions'] is None or 'items' not in self._values['conditions']:
            return [dict(type='all_traffic')]
        for item in self._values['conditions']['items']:
            action = dict()
            self._remove_internal_keywords(item)
            if 'httpUri' in item:
                action.update(item)
                action['type'] = 'http_uri'
                del action['httpUri']
                # Converts to common stringiness
                #
                # The tuple set "issubset" check that happens in the Difference
                # engine does not recognize that a u'foo' and 'foo' are equal "enough"
                # to consider them a subset. Therefore, we cast everything here to
                # whatever the common stringiness is.
                if 'values' in action:
                    action['values'] = [str(x) for x in action['values']]
            elif 'httpHost' in item:
                # NOTE(review): unlike 'httpUri', the marker keys 'httpHost'
                # and 'sslExtension' are not deleted below — confirm whether
                # that asymmetry is intentional.
                action.update(item)
                action['type'] = 'http_host'
                if 'values' in action:
                    action['values'] = [str(x) for x in action['values']]
            elif 'sslExtension' in item:
                action.update(item)
                action['type'] = 'ssl_extension'
                if 'values' in action:
                    action['values'] = [str(x) for x in action['values']]
            result.append(action)
        # Names contains the index in which the rule is at.
        result = sorted(result, key=lambda x: x['name'])
        return result
class ModuleParameters(Parameters):
    @property
    def actions(self):
        """Normalize the module's 'actions' option into API-ready dicts.

        Each entry gets a 'name' (explicit, or its list index) that is later
        used for stable ordering; a type-specific helper expands the rest.
        Returns None when no actions were supplied.
        """
        result = []
        if self._values['actions'] is None:
            return None
        for idx, item in enumerate(self._values['actions']):
            action = dict()
            if 'name' in item:
                action['name'] = str(item['name'])
            else:
                # Fall back to the positional index as the action name.
                action['name'] = str(idx)
            if item['type'] == 'forward':
                self._handle_forward_action(action, item)
            elif item['type'] == 'set_variable':
                self._handle_set_variable_action(action, item)
            elif item['type'] == 'enable':
                self._handle_enable_action(action, item)
            elif item['type'] == 'ignore':
                # 'ignore' replaces every other action on the rule.
                return [dict(type='ignore')]
            elif item['type'] == 'redirect':
                self._handle_redirect_action(action, item)
            elif item['type'] == 'reset':
                self._handle_reset_action(action, item)
                # NOTE(review): assumes _handle_reset_action (not visible in
                # this chunk) sets a 'shutdown' key that must be stripped here.
                del action['shutdown']
            elif item['type'] == 'persist':
                self._handle_persist_action(action, item)
            result.append(action)
        result = sorted(result, key=lambda x: x['name'])
        return result
    @property
    def conditions(self):
        """Normalize the module's 'conditions' option into API-ready dicts.

        Mirrors ``actions``: entries get a 'name' (explicit or list index)
        and are expanded by a type-specific helper. Returns None when no
        conditions were supplied; 'all_traffic' replaces everything else.
        """
        result = []
        if self._values['conditions'] is None:
            return None
        for idx, item in enumerate(self._values['conditions']):
            action = dict()
            if 'name' in item:
                action['name'] = str(item['name'])
            else:
                # Fall back to the positional index as the condition name.
                action['name'] = str(idx)
            if item['type'] == 'http_uri':
                self._handle_http_uri_condition(action, item)
            elif item['type'] == 'http_host':
                self._handle_http_host_condition(action, item)
            elif item['type'] == 'ssl_extension':
                self._handle_ssl_extension_condition(action, item)
            elif item['type'] == 'all_traffic':
                # 'all_traffic' replaces every other condition on the rule.
                return [dict(type='all_traffic')]
            result.append(action)
        result = sorted(result, key=lambda x: x['name'])
        return result
def _handle_http_host_condition(self, action, item):
action['type'] = 'http_host'
if 'host_begins_with_any' in item and item['host_begins_with_any'] is not None:
if isinstance(item['host_begins_with_any'], list):
values = item['host_begins_with_any']
else:
values = [item['host_begins_with_any']]
action.update(dict(
host=True,
startsWith=True,
values=values
))
elif 'host_is_any' in item and item['host_is_any'] is not None:
if isinstance(item['host_is_any'], list):
values = item['host_is_any']
else:
values = [item['host_is_any']]
action.update(dict(
equals=True,
host=True,
values=values
))
elif 'host_is_not_any' in item and item['host_is_not_any'] is not None:
if isinstance(item['host_is_not_any'], list):
values = item['host_is_not_any']
else:
values = [item['host_is_not_any']]
action.update({
'equals': True,
'host': True,
'not': True,
'values': values
})
def _handle_http_uri_condition(self, action, item):
"""Handle the nuances of the forwarding type
Right now there is only a single type of forwarding that can be done. As that
functionality expands, so-to will the behavior of this, and other, methods.
Therefore, do not be surprised that the logic here is so rigid. It's deliberate.
:param action:
:param item:
:return:
"""
action['type'] = 'http_uri'
if 'path_begins_with_any' not in item:
raise F5ModuleError(
"A 'path_begins_with_any' must be specified when the 'http_uri' type is used."
)
if isinstance(item['path_begins_with_any'], list):
values = item['path_begins_with_any']
else:
values = [item['path_begins_with_any']]
action.update(dict(
path=True,
startsWith=True,
values=values
))
def _handle_ssl_extension_condition(self, action, item):
action['type'] = 'ssl_extension'
if 'server_name_is_any' in item:
if isinstance(item['server_name_is_any'], list):
values = item['server_name_is_any']
else:
values = [item['server_name_is_any']]
action.update(dict(
equals=True,
serverName=True,
values=values
))
if 'event' not in item:
raise F5ModuleError(
"An 'event' must be specified when the 'ssl_extension' condition is used."
)
elif 'ssl_client_hello' in item['event']:
action.update(dict(
sslClientHello=True
))
elif 'ssl_server_hello' in item['event']:
action.update(dict(
sslServerHello=True
))
def _handle_forward_action(self, action, item):
"""Handle the nuances of the forwarding type
Right now there is only a single type of forwarding that can be done. As that
functionality expands, so-to will the behavior of this, and other, methods.
Therefore, do not be surprised that the logic here is so rigid. It's deliberate.
:param action:
:param item:
:return:
"""
action['type'] = 'forward'
if not any(x for x in ['pool', 'virtual', 'node'] if x in item):
raise F5ModuleError(
"A 'pool' or 'virtual' or 'node' must be specified when the 'forward' type is used."
)
if item.get('pool', None):
action['pool'] = fq_name(self.partition, item['pool'])
elif item.get('virtual', None):
action['virtual'] = fq_name(self.partition, item['virtual'])
elif item.get('node', None):
action['node'] = item['node']
def _handle_set_variable_action(self, action, item):
"""Handle the nuances of the set_variable type
:param action:
:param item:
:return:
"""
if 'expression' not in item and 'variable_name' not in item:
raise F5ModuleError(
"A 'variable_name' and 'expression' must be specified when the 'set_variable' type is used."
)
if 'event' in item and item['event'] is not None:
action[item['event']] = True
else:
action['request'] = True
action.update(dict(
type='set_variable',
expression=item['expression'],
tmName=item['variable_name'],
setVariable=True,
tcl=True
))
def _handle_enable_action(self, action, item):
"""Handle the nuances of the enable type
:param action:
:param item:
:return:
"""
action['type'] = 'enable'
if 'asm_policy' not in item:
raise F5ModuleError(
"An 'asm_policy' must be specified when the 'enable' type is used."
)
action.update(dict(
policy=fq_name(self.partition, item['asm_policy']),
asm=True
))
def _handle_redirect_action(self, action, item):
"""Handle the nuances of the redirect type
:param action:
:param item:
:return:
"""
action['type'] = 'redirect'
if 'location' not in item:
raise F5ModuleError(
"A 'location' must be specified when the 'redirect' type is used."
)
action.update(
location=item['location'],
httpReply=True,
)
def _handle_reset_action(self, action, item):
"""Handle the nuances of the reset type
:param action:
:param item:
:return:
"""
action['type'] = 'reset'
if 'event' not in item:
raise F5ModuleError(
"An 'event' must be specified when the 'reset' type is used."
)
elif 'ssl_client_hello' in item['event']:
action.update(dict(
sslClientHello=True,
connection=True,
shutdown=True
))
def _handle_persist_action(self, action, item):
"""Handle the nuances of the persist type
:param action:
:param item:
:return:
"""
action['type'] = 'persist'
if 'cookie_insert' not in item:
raise F5ModuleError(
"A 'cookie_insert' must be specified when the 'persist' type is used."
)
elif 'cookie_expiry' in item:
action.update(
cookieInsert=True,
tmName=item['cookie_insert'],
expiry=str(item['cookie_expiry'])
)
else:
action.update(
cookieInsert=True,
tmName=item['cookie_insert']
)
class Changes(Parameters):
    """Parameter view used to report values back to the Ansible user."""
    def to_return(self):
        """Collect every returnable attribute into a filtered dict."""
        collected = {}
        for key in self.returnables:
            try:
                collected[key] = getattr(self, key)
                collected = self._filter_params(collected)
            except Exception:
                raise
        return collected
class ReportableChanges(Changes):
    """Re-translates API-formatted actions/conditions back into the
    user-facing option names for the module's return payload."""
    returnables = [
        'description', 'actions', 'conditions'
    ]
    @property
    def actions(self):
        """Reverse-map API action dicts to module-style dicts."""
        result = []
        if self._values['actions'] is None:
            # An absent action list is reported as the implicit 'ignore'.
            return [dict(type='ignore')]
        for item in self._values['actions']:
            action = dict()
            # Each branch keys off the API boolean flag for that action kind,
            # copies the item, sets the user-facing 'type' and strips the
            # API-only keys.
            if 'forward' in item:
                action.update(item)
                action['type'] = 'forward'
                del action['forward']
            elif 'set_variable' in item:
                action.update(item)
                action['type'] = 'set_variable'
                del action['set_variable']
            elif 'enable' in item:
                action.update(item)
                action['type'] = 'enable'
                del action['enable']
            elif 'redirect' in item:
                action.update(item)
                action['type'] = 'redirect'
                del action['redirect']
                del action['httpReply']
            elif 'reset' in item:
                action.update(item)
                action['type'] = 'reset'
                del action['connection']
                del action['shutdown']
            elif 'persist' in item:
                action.update(item)
                action['type'] = 'persist'
                # 'tmName'/'expiry' come back as cookie_insert/cookie_expiry.
                action['cookie_insert'] = action['tmName']
                if 'expiry' in item:
                    action['cookie_expiry'] = int(action['expiry'])
                    del action['expiry']
                del action['tmName']
                del action['persist']
                del action['cookieInsert']
            result.append(action)
        result = sorted(result, key=lambda x: x['name'])
        return result
    @property
    def conditions(self):
        """Reverse-map API condition dicts to module-style dicts."""
        result = []
        if self._values['conditions'] is None:
            # An absent condition list is reported as 'all_traffic'.
            return [dict(type='all_traffic')]
        for item in self._values['conditions']:
            action = dict()
            if 'httpUri' in item:
                action.update(item)
                action['type'] = 'http_uri'
                del action['httpUri']
            elif 'httpHost' in item:
                action.update(item)
                action['type'] = 'http_host'
                del action['httpHost']
            elif 'sslExtension' in item:
                action.update(item)
                action['type'] = 'ssl_extension'
                del action['sslExtension']
            result.append(action)
        # Names contains the index in which the rule is at.
        result = sorted(result, key=lambda x: x['name'])
        return result
class UsableChanges(Changes):
    """Final translation step before sending changes to the device: turns
    the internal 'type' discriminator into the boolean flag the API wants."""
    @property
    def actions(self):
        """Replace each action's 'type' key with its API boolean flag."""
        if self._values['actions'] is None:
            return None
        result = []
        for action in self._values['actions']:
            if 'type' not in action:
                continue
            if action['type'] == 'forward':
                action['forward'] = True
                del action['type']
            elif action['type'] == 'enable':
                action['enable'] = True
                del action['type']
            elif action['type'] == 'set_variable':
                action['setVariable'] = True
                action['tcl'] = True
                del action['type']
            elif action['type'] == 'ignore':
                # 'ignore' means "send no actions at all".
                result = []
                break
            elif action['type'] == 'redirect':
                action['httpReply'] = True
                action['redirect'] = True
                del action['type']
            elif action['type'] == 'reset':
                action['shutdown'] = True
                action['connection'] = True
                del action['type']
            elif action['type'] == 'persist':
                action['persist'] = True
                del action['type']
            result.append(action)
        return result
    @property
    def conditions(self):
        """Replace each condition's 'type' key with its API boolean flag."""
        if self._values['conditions'] is None:
            return None
        result = []
        for condition in self._values['conditions']:
            if 'type' not in condition:
                continue
            if condition['type'] == 'http_uri':
                condition['httpUri'] = True
                del condition['type']
            elif condition['type'] == 'http_host':
                condition['httpHost'] = True
                del condition['type']
            elif condition['type'] == 'ssl_extension':
                condition['sslExtension'] = True
                del condition['type']
            elif condition['type'] == 'all_traffic':
                # 'all_traffic' means "send no conditions at all".
                result = []
                break
            result.append(condition)
        return result
class Difference(object):
    """Computes which updatable parameters differ between the desired state
    ('want') and the state currently on the device ('have')."""
    updatables = [
        'actions', 'conditions', 'description'
    ]
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        """Return the changed value for ``param``, or None when unchanged.

        Dispatches to a same-named property on this class when one exists,
        otherwise falls back to simple inequality via ``__default``.
        """
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        # Generic comparison: report want's value only when it differs.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # 'have' lacks the attribute entirely; treat it as changed.
            return attr1
    def to_tuple(self, items):
        """Flatten a list of dicts into (str(key), str(value)) pairs."""
        result = []
        for x in items:
            tmp = [(str(k), str(v)) for k, v in iteritems(x)]
            result += tmp
        return result
    def _diff_complex_items(self, want, have):
        """Diff two lists of dicts; None means "no change required"."""
        if want == [] and have is None:
            return None
        if want is None:
            return None
        if have is None:
            # BUGFIX: previously fell through to to_tuple(None), raising
            # TypeError. Wanting items where the device has none is a change.
            return want
        w = self.to_tuple(want)
        h = self.to_tuple(have)
        if set(w).issubset(set(h)):
            return None
        else:
            return want
    @property
    def actions(self):
        result = self._diff_complex_items(self.want.actions, self.have.actions)
        if self._conditions_missing_default_rule_for_asm(result):
            raise F5ModuleError(
                "Valid options when using an ASM policy in a rule's 'enable' action include all_traffic, http_uri, or http_host."
            )
        return result
    @property
    def conditions(self):
        result = self._diff_complex_items(self.want.conditions, self.have.conditions)
        return result
    def _conditions_missing_default_rule_for_asm(self, want_actions):
        # ASM 'enable' actions are only valid alongside a small set of
        # condition types; detect violations in whichever action list applies.
        if want_actions is None:
            actions = self.have.actions
        else:
            actions = want_actions
        if actions is None:
            return False
        if any(x for x in actions if x['type'] == 'enable'):
            conditions = self._diff_complex_items(self.want.conditions, self.have.conditions)
            if conditions is None:
                return False
            if any(y for y in conditions if y['type'] not in ['all_traffic', 'http_uri', 'http_host']):
                return True
        return False
class ModuleManager(object):
    """Drives the module lifecycle: diff desired vs. device state, then
    create/update/delete the policy rule through the iControl REST API.

    Rules can only be modified on a *draft* of their policy, so write
    operations create a draft when none exists and publish it afterwards.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _update_changed_options(self):
        """Compute the changed parameter set; True when anything changed."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def exec_module(self):
        """Entry point: apply the desired state and build the result dict."""
        start = datetime.now().isoformat()
        version = tmos_version(self.client)
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        # Anonymous usage telemetry.
        send_teem(start, self.module, version)
        return result
    def _announce_deprecations(self, result):
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def update(self):
        """Update an existing rule; returns True when a change was made."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        # Only create (and later publish) a draft if one doesn't exist yet;
        # an existing draft is assumed to be managed by the caller.
        if self.draft_exists():
            redraft = True
        else:
            redraft = False
            self._create_existing_policy_draft_on_device()
        self.update_on_device()
        if redraft is False:
            self.publish_on_device()
        return True
    def remove(self):
        """Delete the rule (via a policy draft); True when removed."""
        if self.module.check_mode:
            return True
        if self.draft_exists():
            redraft = True
        else:
            redraft = False
            self._create_existing_policy_draft_on_device()
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        if redraft is False:
            self.publish_on_device()
        return True
    def create(self):
        """Create the rule on the device (via a policy draft)."""
        # Populates self.changes from self.want; return value is irrelevant
        # here because creation always sends the full desired state.
        self.should_update()
        if self.module.check_mode:
            return True
        if self.draft_exists():
            redraft = True
        else:
            redraft = False
            self._create_existing_policy_draft_on_device()
        self.create_on_device()
        if redraft is False:
            self.publish_on_device()
        return True
    def exists(self):
        """Check whether the rule exists (in the draft when one is open).

        NOTE(review): falls through (returning None, i.e. falsy) for any
        HTTP status not covered by the checks below -- confirm intended.
        """
        if self.draft_exists():
            uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
                self.want.name
            )
        else:
            uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.policy),
                self.want.name
            )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        errors = [401, 403, 409, 500, 501, 502, 503, 504]
        if resp.status in errors or 'code' in response and response['code'] in errors:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def draft_exists(self):
        """Check whether a draft of the target policy already exists."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.policy, sub_path='Drafts')
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        errors = [401, 403, 409, 500, 501, 502, 503, 504]
        if resp.status in errors or 'code' in response and response['code'] in errors:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def _create_existing_policy_draft_on_device(self):
        """PATCH the published policy with createDraft=True to open a draft."""
        params = dict(createDraft=True)
        uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.policy)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True
    def publish_on_device(self):
        """Publish the policy draft so the rule changes go live."""
        params = dict(
            name=fq_name(self.want.partition,
                         self.want.policy,
                         sub_path='Drafts'
                         ),
            command="publish"
        )
        uri = "https://{0}:{1}/mgmt/tm/ltm/policy/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True
    def create_on_device(self):
        """POST the new rule into the policy draft; returns its selfLink."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']
    def update_on_device(self):
        """PATCH the changed parameters onto the rule in the draft."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
            self.want.name
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def remove_from_device(self):
        """DELETE the rule from the policy draft."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
            self.want.name
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
    def read_current_from_device(self):
        """GET the rule (draft version when a draft exists) as ApiParameters."""
        if self.draft_exists():
            uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
                self.want.name
            )
        else:
            uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.policy),
                self.want.name
            )
        # Inline the actions/conditions subcollections in one request.
        query = "?expandSubcollections=true"
        resp = self.client.api.get(uri + query)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Declarative Ansible argument spec for the policy-rule module,
    merged on top of the shared f5_argument_spec."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            description=dict(),
            actions=dict(
                type='list',
                elements='dict',
                options=dict(
                    type=dict(
                        choices=[
                            'forward',
                            'enable',
                            'ignore',
                            'redirect',
                            'reset',
                            'persist',
                            'set_variable'
                        ],
                        required=True
                    ),
                    pool=dict(),
                    node=dict(),
                    asm_policy=dict(),
                    virtual=dict(),
                    location=dict(),
                    event=dict(),
                    cookie_insert=dict(),
                    cookie_expiry=dict(type='int'),
                    expression=dict(),
                    variable_name=dict()
                ),
                # Each action targets exactly one destination kind.
                mutually_exclusive=[
                    ['pool', 'asm_policy', 'virtual', 'location', 'cookie_insert', 'node']
                ]
            ),
            conditions=dict(
                type='list',
                elements='dict',
                options=dict(
                    type=dict(
                        choices=[
                            'http_uri',
                            'http_host',
                            'ssl_extension',
                            'all_traffic'
                        ],
                        required=True
                    ),
                    path_begins_with_any=dict(
                        type='list',
                        elements='str',
                    ),
                    host_begins_with_any=dict(
                        type='list',
                        elements='str',
                    ),
                    host_is_any=dict(
                        type='list',
                        elements='str',
                    ),
                    host_is_not_any=dict(
                        type='list',
                        elements='str',
                    ),
                    server_name_is_any=dict(
                        type='list',
                        elements='str',
                    ),
                    event=dict()
                ),
            ),
            name=dict(required=True),
            policy=dict(required=True),
            state=dict(
                default='present',
                choices=['absent', 'present']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Shared provider/connection options first, module options on top.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, and report the
    outcome (or failure) through Ansible's JSON exit protocol."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        results = ModuleManager(module=module).exec_module()
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
    else:
        module.exit_json(**results)
if __name__ == '__main__':
    main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
ff2b2b75ab60f2bcdf63d75afe4f428eebcd8f6a | 4488e3c26de4291da447d8251c491b43cb810f7c | /orgstruct_partner_zip/__openerp__.py | a3cafaa4ab0bc9f4d686889567eae43e20dccf4d | [] | no_license | smart-solution/odoo-crm-80 | b19592ce6e374c9c7b0a3198498930ffb1283018 | 85dfd0cc37f81bcba24d2a0091094708a262fe2c | refs/heads/master | 2016-09-06T06:04:35.191924 | 2015-07-14T12:48:28 | 2015-07-14T12:48:28 | 33,174,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: the loader evaluates this bare dict literal.
{
'name': 'Organisation Structure Partner Zip',
'version': '1.0',
'category': 'Base',
'description': """
Manage organisation structure
""",
# NOTE(review): 'Smart Solotution' looks like a typo for 'Smart Solution';
# left untouched because it is runtime data read by the addon loader.
'author': 'Smart Solotution',
'website': 'http://www.smartsolution.be',
'depends': ['orgstruct'],
# XML views loaded when the addon is installed/updated.
'data': [
'orgstruct_partner_zip_view.xml',
],
'installable': True,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"fabian.semal@smartsolution.be"
] | fabian.semal@smartsolution.be |
a782f727f4bbde0baba6b3633f25d08a31fcf491 | 5b535c4fce945748343f3f6efba253cbd009672b | /project136.py | a25225f81dc73d57b23706ecd93aa79478c09602 | [] | no_license | MannSangoi1508/project136 | 93846c99d626d5f8f6e64553d73378915776b915 | 48c712d745678f8a324dabd6cb18e032ccddb29b | refs/heads/main | 2023-08-29T14:20:02.578723 | 2021-10-13T02:51:53 | 2021-10-13T02:51:53 | 416,567,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from data import data
from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the full dataset along with a success message."""
    payload = {
        "data": data,
        "message": "success"
    }
    return jsonify(payload), 200
@app.route("/star")
def planet():
    """Return the record whose ``name`` equals the ``name`` query parameter.

    BUGFIX: the previous next(...) without a default raised StopIteration
    (an HTTP 500) when the parameter was missing or no record matched;
    respond with a clean 404 instead.  The success response is unchanged.
    """
    name = request.args.get("name")
    star_data = next((item for item in data if item["name"] == name), None)
    if star_data is None:
        return jsonify({
            "data": None,
            "message": "not found"
        }), 404
    return jsonify({
        "data": star_data,
        "message": "success"
    }), 200
if __name__ == "__main__":
app.run() | [
"noreply@github.com"
] | MannSangoi1508.noreply@github.com |
eea03d6e5f4873d9c691ecaf02857e84011b620d | b954dda666c5a0c143174ccebec02b1912caccbf | /pipeline/vehicle_reid/.ipynb_checkpoints/evaluate-checkpoint.py | 80bd3eb60bd5a0fc42954883df1c842acf0d909e | [] | no_license | ngocminhbui/ai19_track2_hcmus | f3260a593f903cdd7668a528fbfb3bf35cb56bf9 | 5ed000202c9e3106c8c49ea7b98f640dd7f6b061 | refs/heads/master | 2020-05-30T05:40:16.441919 | 2019-05-31T16:35:31 | 2019-05-31T16:35:31 | 189,566,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,621 | py | import common
import loss
from importlib import import_module
from itertools import count
import os
import h5py
import json
import numpy as np
from sklearn.metrics import average_precision_score
from shutil import rmtree
import tensorflow as tf
from shutil import rmtree
import common
import loss
from vehicle_reid.query_extention import *
from vehicle_reid.common import *
def calculate_ap(fid, pid_match, score):
    """Compute average precision over the 100 highest-scoring gallery items.

    :param fid: query file id (used only in the diagnostic message).
    :param pid_match: boolean array, True where a gallery item shares the
        query's pid.
    :param score: per-gallery-item similarity, higher is better.
    :return: (ap, k) -- the average precision of the top-100 ranking and the
        0-based rank of the first correct match; (0.0, 100) when the top 100
        contain no correct match.
    """
    # Indices of the 100 best-scoring gallery items, best first.
    val_top = np.argsort(score)[-100:][::-1]
    # BUGFIX: rank by the actual similarity values.  The old code passed
    # np.arange(100) as y_score, which -- ascending over a descending
    # ranking -- effectively evaluated the list in reverse order.
    ap = average_precision_score(pid_match[val_top], score[val_top])
    try:
        k = np.where(pid_match[val_top])[0][0]
    except IndexError:  # no correct match within the top 100
        print("Wrong!")
        k = 100
        ap = 0.0
    if np.isnan(ap):
        print()
        print("WARNING: encountered an AP of NaN!")
        print("This usually means a person only appears once.")
        print("In this case, it's because of {}.".format(fid))
        print("I'm excluding this person from eval and carrying on.")
        print()
    return ap, k
def save_test_img_index(result_folder, ques, aps, query_root):
    """Write index.csv into ``result_folder``: one '<dir>/<query>,<ap>'
    line per query, where <dir> is the second-to-last path segment of
    ``query_root``."""
    prefix = query_root.split("/")[-2]
    target = os.path.join(result_folder, "index.csv")
    with open(target, "w") as fo:
        for que, ap in zip(ques, aps):
            fo.write("{},{:.5f}\n".format(os.path.join(prefix, que), ap))
    return
def save_predict_results(val_top, result_folder, score, pid, fid, pid_match,
                         gallery_pids, gallery_fids, gallery_views, gal_root):
    """Dump one CSV row per gallery image to <result_folder>/<fid>.txt:
    first the ranked top list, then any gallery image of the same pid
    that fell outside it (so recall misses are visible in the dump)."""
    # All gallery indices whose pid matches this query.
    relevant = np.argwhere(gallery_pids == pid)
    # Relevant indices that are NOT in the ranked top list.
    outside_mask = np.isin(relevant, val_top, invert=True)
    outside = relevant[outside_mask[:, 0]][:, 0]
    def _row(idx):
        # path,score,is_match,camera_view
        return "{:s},{:5f},{},{}\n".format(
            os.path.join(gal_root, gallery_fids[idx]),
            score[idx],
            pid_match[idx],
            gallery_views[idx])
    target = os.path.join(result_folder, fid.replace('.jpg', '.txt'))
    with open(target, "w") as fo:
        for idx in val_top:
            fo.write(_row(idx))
        for idx in outside:
            fo.write(_row(idx))
def save_submission(file, top100, gallery_fids):
    """Append one ranked line to the submission file: the integer id of
    each gallery filename stem, space separated; non-numeric stems
    (distractor/junk images) are written as -1."""
    ids = []
    for fid in gallery_fids[top100]:
        stem = os.path.basename(fid).split('.')[0]
        ids.append(int(stem) if stem.isdigit() else -1)
    for img_id in ids[:-1]:
        file.write("{} ".format(img_id))
    file.write('{}\n'.format(ids[-1]))
def get_value_from_h5_file(h5file, dataset):
    """Read ``dataset`` from the HDF5 file and return it as an in-memory array.

    BUGFIX: the previous version returned a live h5py Dataset backed by a
    file handle it never closed -- the handle leaked, and the returned
    object becomes unusable once the file is eventually closed.  Materialize
    the values with numpy and close the file deterministically instead.
    """
    with h5py.File(h5file, 'r') as hf:
        return np.asarray(hf.get(dataset))
class Evaluator:
    """Runs vehicle re-id evaluation: loads query/gallery embeddings,
    scores every query against the gallery (optionally with query
    extension + tracklet re-ranking), writes per-query result dumps and a
    track2 submission file, and prints mAP/CMC metrics."""
    def __init__(self, exp_root, gpu_id, batch_size = 128):
        self.exp_root = exp_root
        self.gpu_id = gpu_id
        self.batch_size = batch_size
    def do_evaluate_with_config(self,config, query_extention = True):
        """Resolve all paths from ``config``, reset the results folder,
        load embeddings and run the full query loop."""
        self.query_dataset = config.QUE_FILE
        self.gallery_dataset = config.GAL_FILE
        print("QUE: ",config.QUE_FILE)
        print("GAL: ",config.GAL_FILE)
        self.query_embeddings=os.path.join(
            self.exp_root,config.QUE_EMB_FILE)
        self.gallery_embeddings=os.path.join(
            self.exp_root,config.GAL_EMB_FILE)
        self.query_root = config.QUE_IMG_ROOT
        self.gal_root = config.GAL_IMG_ROOT
        self.result_folder = config.RESULTS_ROOT
        self.gal_view_point = os.path.join(self.exp_root, config.GAL_VIEW_POINT)
        self.que_view_point = os.path.join(self.exp_root, config.QUE_VIEW_POINT)
        #Remove result folder:
        if (os.path.exists(self.result_folder)):
            rmtree(self.result_folder)
        os.mkdir(self.result_folder)
        self.load_embed_files()
        self.exe_query(query_extention)
    def calculate_distances(self, Q, G):
        """Compute the full |Q| x |G| euclidean distance matrix in batches
        using a TF1 one-shot iterator over Q."""
        metric = 'euclidean'
        batch_embs = tf.data.Dataset.from_tensor_slices(
            (Q)).batch(self.batch_size).make_one_shot_iterator().get_next()
        batch_distances = loss.cdist(batch_embs, G , metric=metric)
        distances = np.zeros((len(Q), len(G)), np.float32)
        with tf.Session() as sess:
            for start_idx in count(step=self.batch_size):
                try:
                    dist = sess.run(batch_distances)
                    distances[start_idx:start_idx + len(dist)] = dist
                except tf.errors.OutOfRangeError:
                    # Iterator exhausted: all batches processed.
                    print() # Done!
                    break
        print(distances.shape)
        return distances
    def load_embed_files(self):
        """Load query/gallery id lists and their embedding matrices, and
        sanity-check that both embedding spaces have the same dimension."""
        print("Load: ", self.query_dataset)
        self.query_pids, self.query_fids, self.query_views = common.load_dataset(self.query_dataset, None)
        print("Load: ", self.gallery_dataset)
        self.gallery_pids, self.gallery_fids, self.gallery_views = common.load_dataset(self.gallery_dataset, None)
        self.gallery_views = self.gallery_views.astype(int)
        self.query_views = self.query_views.astype(int)
        print("Load: ", self.query_embeddings)
        with h5py.File(self.query_embeddings, 'r') as f_query:
            self.query_embs = np.array(f_query['emb'])
        print("Load: ", self.gallery_embeddings)
        with h5py.File(self.gallery_embeddings, 'r') as f_gallery:
            self.gallery_embs = np.array(f_gallery['emb'])
        query_dim = self.query_embs.shape[1]
        gallery_dim = self.gallery_embs.shape[1]
        if query_dim != gallery_dim:
            raise ValueError('Shape mismatch between query ({}) and gallery ({}) '
                             'dimension'.format(query_dim, gallery_dim))
        print("==========================")
    def select_top(self, inp_arr, selected):
        """Return (selected, i) where i is the lowest-distance index in
        ``inp_arr`` not already in ``selected``; i is appended in place."""
        for i in np.argsort(inp_arr):
            if (i not in selected):
                selected.append(i)
                return selected, i
    def select_NN(self, top1_view, tracklet_dists, k = 4):
        """Greedily chain 4 nearest tracklets starting from ``top1_view``
        (each hop picks the nearest unvisited tracklet of the previous).
        NOTE(review): the ``k`` parameter is unused; the chain length is
        hard-coded to 4 -- confirm intent."""
        selected = [top1_view]
        selected, top2_view = self.select_top(tracklet_dists[top1_view], selected)
        selected, top3_view = self.select_top(tracklet_dists[top2_view], selected)
        selected, top4_view = self.select_top(tracklet_dists[top3_view], selected)
        return selected
    def track_re_ranking(self, top_list, tracklet_mapper, score):
        """Boost scores for each tracklet in ``top_list`` with decaying
        weight (1.0, 0.95, 0.90, ...)."""
        tmp = score
        for i, top_track in enumerate(top_list):
            tmp = re_ranking_v2(tracklet_mapper[top_track], tmp, self.gallery_views, 1.0 - 0.05 * i)
        return tmp
    def exe_query(self, query_extention = True):
        """Score every query against the gallery, write per-query dumps and
        the submission file, then print mAP/CMC."""
        aps = []
        ques = []
        # cmc[k] counts queries whose first correct match is at rank <= k.
        cmc = np.zeros(len(self.gallery_pids), dtype=np.int32)
        # NOTE(review): these unique view counts are computed but never used.
        gallery_views_id, gallery_views_count = np.unique(self.gallery_views, return_counts=True)
        metric = 'euclidean'
        print(self.gallery_embs.shape)
        print(self.query_embs.shape)
        batch_pids, batch_fids, batch_embs = tf.data.Dataset.from_tensor_slices(
            (self.query_pids, self.query_fids, self.query_embs)).batch(self.batch_size).make_one_shot_iterator().get_next()
        batch_distances = loss.cdist(batch_embs, self.gallery_embs, metric=metric)
        self.submission_file = "track2.txt"
        print("Total queries: ", len(self.query_fids))
        print("Results folder: ", self.result_folder)
        print("Submission file: ", self.submission_file)
        # Precomputed tracklet-to-tracklet distances used for re-ranking.
        dist_h5_file = "results_dists/test798x798.h5"
        tracklet_dists = load_h5(dist_h5_file)
        tracklet_mapper = load_h5(dist_h5_file, "mapper")
        # NOTE(review): identity mapping -- trklet_dict[i] == i for all i.
        trklet_dict = {}
        for i, trid in enumerate(tracklet_mapper):
            trklet_dict[i] = i
        with tf.Session() as sess, open(self.submission_file, "w") as f_sub:
            for start_idx in count(step=self.batch_size):
                try:
                    if (query_extention):
                        top1_view = tf_get_top1_view(batch_distances, self.gallery_views)
                        que_ext_re_ranking = tf_query_extention(top1_view, self.gallery_views, self.gallery_embs)
                        top1_views, distances, pids, fids = sess.run([top1_view, que_ext_re_ranking, batch_pids, batch_fids])
                    else:
                        # NOTE(review): this branch never assigns top1_views,
                        # which is read below -- NameError if
                        # query_extention=False.  Confirm/fix before use.
                        distances, pids, fids = sess.run([batch_distances, batch_pids, batch_fids])
                        top1_view = np.zeros(fids.shape, dtype=int)
                    print('\rCalculating batch {}-{}/{}'.format( start_idx, start_idx + len(fids), len(self.query_fids)), flush=True, end='')
                except tf.errors.OutOfRangeError:
                    print() # Done!
                    break
                pids, fids = np.array(pids, '|U'), np.array(fids, '|U')
                # pid_matches[i, j]: gallery item j has the same pid as query i.
                pid_matches = self.gallery_pids[None] == pids[:,None]
                # Convert distances to similarities in (0, 1].
                scores = 1 / (1 + distances)
                for i in range(len(distances)):
                    fid = fids[i]
                    pid = pids[i]
                    pid_match = pid_matches[i,:]
                    score = scores[i]
                    top1_view = top1_views[i]
                    top1_view = trklet_dict[top1_view]
                    if(query_extention):
                        selected = self.select_NN(top1_view, tracklet_dists)
                        score = self.track_re_ranking(selected, tracklet_mapper, score)
                    top100 = np.argsort(score)[-100:][::-1]
                    #Save submission file
                    save_submission(f_sub, top100, self.gallery_fids)
                    #Save predict results:
                    save_predict_results(top100, self.result_folder,score, pid, fid, pid_match, self.gallery_pids, self.gallery_fids, self.gallery_views, self.gal_root.split("/")[-2])
                    #Calculate AP:
                    ap, k = calculate_ap(fid, pid_match, score)
                    cmc[k:] += 1
                    aps.append(ap)
                    ques.append(fid)
        # Save index.csv
        save_test_img_index(self.result_folder,ques,aps, self.query_root)
        # Compute the actual cmc and mAP values
        cmc = cmc / len(self.query_pids)
        mean_ap = np.mean(aps)
        print('mAP: {:.2%} | top-1: {:.2%} top-2: {:.2%} | top-5: {:.2%} | top-10: {:.2%}'.format(
            mean_ap, cmc[0], cmc[1], cmc[4], cmc[9]))
| [
"bnminh.96@gmail.com"
] | bnminh.96@gmail.com |
7646b91fdd47af06c8ff4fe87a2cac43c2633522 | f9ddd2426a37bec716d568aa91ce9cde2adb4792 | /selentest7_1.py | 4e698c332c0a10e0004599a6801e69716e5d3cfe | [] | no_license | Vika-Domaskina/test | 2aeb76671e1d4c5bc86f5dbea330eec51ed76999 | 09fe20d2c0dcbbe8be8b0708a73ce8802921033f | refs/heads/master | 2021-01-10T15:25:36.145443 | 2016-01-19T18:54:38 | 2016-01-19T18:54:38 | 49,298,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,417 | py | import unittest, time, sys
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC #
from selenium.webdriver.common.by import By
import configparser
from selenium.webdriver.common.action_chains import ActionChains
class Braw(unittest.TestCase):
myMessXpath="//li[@id='l_msg']/a"
def __init__(self):
self.driver = webdriver.Firefox()
def click_by_xpath(self,xpath):
elem=self.driver.find_element_by_xpath(xpath)
elem.click()
return elem
def login_vk(self,login,passw):
self.driver.get("http://www.vk.com")
elem = self.click_by_xpath("//input[@id='quick_email']")
elem.send_keys(login)
elem = self.click_by_xpath("//input[@id='quick_pass']")
elem.send_keys(passw)
self.click_by_xpath("//button[@id='quick_login_button']")
def wait_by_xpath(self,xpath,time=5):
wait=WebDriverWait(self.driver,time)
#element=wait.until(EC.element_to_be_clickable((By.XPATH,xpath)))
element=wait.until(EC.presence_of_element_located((By.XPATH,xpath)))
def open_my_messages(self):
self.wait_by_xpath(self.myMessXpath)
self.click_by_xpath(self.myMessXpath)
class VkTest2(unittest.TestCase):
def setUp(self):
print("setUp")
self.driver= Braw()
self.driver2= Braw()
def test1_vk_different(self):
config = configparser.ConfigParser()
config.read('config.ini')
driver = self.driver
driver2 = self.driver2
driver.login_vk(config['Vk.com']['login1'],config['Vk.com']['paswd1'])
driver2.login_vk(config['Vk.com']['login2'],config['Vk.com']['paswd2'])
driver.open_my_messages()
driver2.open_my_messages()
driver.click_by_xpath("//li[@id='l_msg']") # self.driver == driver
driver.wait_by_xpath("//div[@id='im_bar']")
driver.driver.find_element_by_xpath("//div[@id='im_bar']")
el=driver.click_by_xpath("//div[@id='im_filter_out']")
el.send_keys(config['Vk.com']['user_name2']) # find user to write a message
driver.wait_by_xpath("//div[@id='im_friends']")
driver.click_by_xpath("//div[@id='im_friends']/div[position()=1]") # select user
driver.wait_by_xpath("//div[@id='im_controls_wrap']")
elem=driver.click_by_xpath("//div[@id='im_controls_wrap']//div[@id='im_peer_controls']/table/tbody/tr/td[@id='im_write_form']/div[@id='im_texts']") # send messages
elem.send_keys("Hi,Vika!")
self.driver.click_by_xpath("//div[@id='im_send_wrap']/button[@id='im_send']")
driver2.wait_by_xpath("//span[text()='+1']",10)
elem2=driver2.click_by_xpath(driver2.myMessXpath)
driver2.wait_by_xpath("//div[contains(@id, 'im_dialog') and contains(@class,'dialogs_new_msg')]")
elem3= driver2.click_by_xpath("//div[contains(@id, 'im_dialog') and contains(@class,'dialogs_new_msg')]") # find unread message
if len(driver2.driver.find_elements(By.XPATH,driver2.myMessXpath)) > 0:
print('find')
driver.click_by_xpath("//li[@id='l_msg']")
time.sleep(5)
driver.click_by_xpath("//div[@id='im_dialogs']//div[position()=2]") # find first dialog
text_l1=driver2.driver.find_element_by_xpath("//div[@class='im_rows im_peer_rows'and not(@style='display: none;')]//table[@class='im_log_t']/tbody/tr[last()]//div[@class='im_msg_text']").text
text_l2=driver.driver.find_element_by_xpath("//div[@class='im_rows im_peer_rows'and not(@style='display: none;')]//table[@class='im_log_t']/tbody/tr[last()]//div[@class='im_msg_text']").text
assert text_l1==text_l2,'Sended and recived messages are not equal!'
time.sleep(20)
def test2_vk_like(self):
config = configparser.ConfigParser()
config.read('config.ini')
driver = self.driver
driver2 = self.driver2
driver.login_vk(config['Vk.com']['login1'],config['Vk.com']['paswd1'])
driver2.login_vk(config['Vk.com']['login2'],config['Vk.com']['paswd2'])
time.sleep(3)
driver.click_by_xpath("//div[@id='side_bar']/ol/li[@id='l_fr']")
driver.wait_by_xpath("//div[@id='friends_search']//input[@id='s_search']")
elem=driver.click_by_xpath("//div[@id='friends_search']//input[@id='s_search']")
elem.send_keys(config['Vk.com']['user_name2']) # friends_search
driver.click_by_xpath("//div[@id='friends_search']/button[@id='invite_button']")
driver.click_by_xpath("//div[@id='list_content']/div/div[position()=1]/div[@class='info fl_l']/div[position()=1]") #friend page
driver.wait_by_xpath("//div[@id='page_body']/div[@id='wrap3']//div[@id='page_avatar']/a[@id='profile_photo_link']")
driver.click_by_xpath("//div[@id='page_body']/div[@id='wrap3']//div[@id='page_avatar']/a[@id='profile_photo_link']") #friend avatar
driver.wait_by_xpath("//div[@id='pv_wide']/div[@id='pv_like_wrap']/span[@id='pv_like_link']")
driver.driver.execute_script("window.scrollTo(0, 150)")
driver.click_by_xpath("//div[@id='pv_wide']/div[@id='pv_like_wrap']/span[@id='pv_like_link']") #like friends foto
action=webdriver.ActionChains(driver.driver) # move to list
batton_like=WebDriverWait(driver.driver, 10).until(EC.visibility_of_element_located((By.XPATH,"//div[@id='pv_wide']/div[@id='pv_like_wrap']/span[@id='pv_like_link']")))
action.move_to_element(batton_like).perform()
list_likes=WebDriverWait(driver.driver, 10).until(EC.visibility_of_element_located((By.XPATH,"//div[@class='like_head_wrap']/span[position()=1]")))
action.move_to_element(batton_like).click(list_likes).perform()
driver.wait_by_xpath("//div[@id='wk_likes_content']/div[@id='wk_likes_rows']/div/div[@class='wk_likes_liker_name']/a")
find_like_users=driver.driver.find_elements_by_xpath("//div[@id='wk_likes_content']/div[@id='wk_likes_rows']/div/div[@class='wk_likes_liker_name']/a")
name_user1=config['Vk.com']['user_name1']
for i in find_like_users:
name=i.text
if name == name_user1:
print('This user:',name,'liked foto')
break
else:
raise ValueError ('Not found user who liked foto')
driver.wait_by_xpath("//div[@id='wk_box']/a[@id='wk_close_link']")
driver.click_by_xpath("//div[@id='wk_box']/a[@id='wk_close_link']")
driver.wait_by_xpath("//div[@id='pv_wide']/div[@id='pv_like_wrap']/span[@id='pv_like_link']") #dislike foto
driver.driver.execute_script("window.scrollTo(0, 150)")
driver.click_by_xpath("//div[@id='pv_wide']/div[@id='pv_like_wrap']/span[@id='pv_like_link']")
time.sleep(10)
def tearDown(self):
print("tearDown")
self.driver.driver.close()
self.driver2.driver.close()
if __name__ == "__main__":
unittest.main() | [
"v1ktir1ya.domask1na@gmail.com"
] | v1ktir1ya.domask1na@gmail.com |
3a53cb314b58082e61aced67aaaa888078c41c10 | 4349c9bea560b094c9c84540b539b612bef40953 | /subliminal/plugins/Subtitulos.py | bcb26d7660b42c8945d9ef49c23251f6c806f8e1 | [] | no_license | fgel/subliminal | 456c263603cbe5143e6b6343930222ece9c465dc | 3cf265f6c978506d02e74c87cadd0a8e6c6419fe | refs/heads/master | 2021-01-18T09:21:04.687551 | 2011-11-11T19:37:19 | 2011-11-11T19:37:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,288 | py | # -*- coding: utf-8 -*-
#
# Subliminal - Subtitles, faster than your thoughts
# Copyright (c) 2008-2011 Patrick Dessalle <patrick@dessalle.be>
# Copyright (c) 2011 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of Subliminal.
#
# Subliminal is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from BeautifulSoup import BeautifulSoup
import guessit
import urllib2
import unicodedata
import re
import PluginBase
from subliminal.classes import Subtitle
class Subtitulos(PluginBase.PluginBase):
site_url = 'http://www.subtitulos.es'
site_name = 'Subtitulos'
server_url = 'http://www.subtitulos.es'
api_based = False
_plugin_languages = {u'English (US)': 'en', u'English (UK)': 'en', u'English': 'en', u'French': 'fr', u'Brazilian': 'pt-br',
u'Portuguese': 'pt', u'Español (Latinoamérica)': 'es', u'Español (España)': 'es', u'Español': 'es', u'Italian': 'it',
u'Català': 'ca'}
def __init__(self, config_dict=None):
super(Subtitulos, self).__init__(self._plugin_languages, config_dict, True)
self.release_pattern = re.compile('Versión (.+) ([0-9]+).([0-9])+ megabytes')
def list(self, filepath, languages):
possible_languages = self.possible_languages(languages)
if not possible_languages:
return []
guess = guessit.guess_file_info(filepath, 'autodetect')
if guess['type'] != 'episode':
self.logger.debug(u'Not an episode')
return []
# add multiple things to the release group set
release_group = set()
if 'releaseGroup' in guess:
release_group.add(guess['releaseGroup'].lower())
else:
if 'title' in guess:
release_group.add(guess['title'].lower())
if 'screenSize' in guess:
release_group.add(guess['screenSize'].lower())
if 'series' not in guess or len(release_group) == 0:
self.logger.debug(u'Not enough information to proceed')
return []
self.release_group = release_group # used to sort results
return self.query(guess['series'], guess['season'], guess['episodeNumber'], release_group, filepath, possible_languages)
def query(self, name, season, episode, release_group, filepath, languages):
sublinks = []
searchname = name.lower().replace(' ', '-')
if isinstance(searchname, unicode):
searchname = unicodedata.normalize('NFKD', searchname).encode('ascii','ignore')
searchurl = '%s/%s/%sx%.2d' % (self.server_url, urllib2.quote(searchname), season, episode)
self.logger.debug(u'Searching in %s' % searchurl)
try:
req = urllib2.Request(searchurl, headers={'User-Agent': self.user_agent})
page = urllib2.urlopen(req, timeout=self.timeout)
except urllib2.HTTPError as inst:
self.logger.info(u'Error: %s - %s' % (searchurl, inst))
return []
except urllib2.URLError as inst:
self.logger.info(u'TimeOut: %s' % inst)
return []
soup = BeautifulSoup(page.read())
for subs in soup('div', {'id': 'version'}):
version = subs.find('p', {'class': 'title-sub'})
sub_teams = self.listTeams([self.release_pattern.search('%s' % version.contents[1]).group(1).lower()], ['.', '_', ' ', '/', '-'])
self.logger.debug(u'Team from website: %s' % sub_teams)
self.logger.debug(u'Team from file: %s' % release_group)
if not release_group.intersection(sub_teams): # On wrong team
continue
for html_language in subs.findAllNext('ul', {'class': 'sslist'}):
sub_language = self.getRevertLanguage(html_language.findNext('li', {'class': 'li-idioma'}).find('strong').contents[0].string.strip())
if not sub_language in languages: # On wrong language
continue
html_status = html_language.findNext('li', {'class': 'li-estado green'})
sub_status = html_status.contents[0].string.strip()
if not sub_status == 'Completado': # On not completed subtitles
continue
sub_link = html_status.findNext('span', {'class': 'descargar green'}).find('a')['href']
result = Subtitle(filepath, self.getSubtitlePath(filepath, sub_language), self.__class__.__name__, sub_language, sub_link, teams=sub_teams)
sublinks.append(result)
sublinks.sort(self._cmpReleaseGroup)
return sublinks
def download(self, subtitle):
self.downloadFile(subtitle.link, subtitle.path)
return subtitle
| [
"diaoulael@gmail.com"
] | diaoulael@gmail.com |
91f79fb9fb400ec780afd516d32f2506c0c22632 | e0ef2b9fbababa5b5aee11969ff5abd7092671a9 | /LC5 - Longest Palindromic Subsequence.py | 01a1f7288b49b60d7e053f43a199ee19d015a2e6 | [
"CC0-1.0"
] | permissive | karthyvenky/LeetCode-Challenges | fc87898305e179d20fee9c9f932c82ae2ba512fc | 7015c741db3d84917915d6989669f9ffcaad9bd2 | refs/heads/main | 2023-05-07T10:13:27.385210 | 2021-02-19T00:36:40 | 2021-02-19T00:36:40 | 340,215,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | #%%
#Subsequence
def subsequence(string):
if len(string) == 0:
return " "
parts = []
for part in subsequence(string[1:]):
parts.append(part)
parts.append(string[0]+ part)
return parts
# %%
subsequence("NET")
# %%
# Longest Common Subsequence - by recursion
def LCS_recursion(X,Y):
if len(X) == 0 or len(Y) == 0:
return 0
for i in range(len(X)):
for j in range(len(Y)):
if X[i] == Y[j]:
return 1 + LCS_recursion(X[i+1:], Y[j+1:])
else:
return max(LCS_recursion(X[i+1:],Y[j:]), LCS_recursion(X[i:], Y[j+1:]))
# %%
X = "AGGTAB"
Y = "GXTXAYB"
LCS_recursion(X,Y)
# %%
# Longest Common Subsequence - by memoization
def LCS_memoization(X,Y,T):
if len(X) == 0 or len(Y) == 0:
return 0
for i in range(len(X)):
for j in range(len(Y)):
if X[i] == Y[j]:
T[i][j] = 1 + LCS_memoization(X[i+1:], Y[j+1:], T)
else:
T[i][j] = max(LCS_memoization(X[i+1:],Y[j:],T), LCS_memoization(X[i:], Y[j+1:],T))
return T[i][j]
# %%
X = "AGGTAB"
Y = "GXTXAYB"
rows = len(X)
cols = len(Y)
T = [[0 for i in range(cols)] for j in range(rows)]
LCS_memoization(X,Y,T)
# %%
#Longest common subsequence - Dynamic Programming
def LCS_Dynamic(X, Y):
X = " " + X # add additional space for padding
Y = " " + Y
rows = len(X)
cols = len(Y)
T = [[0 for i in range(cols)] for j in range(rows)]
maxlen = 0
for i in range(1, rows):
for j in range(1, cols):
if X[i] == Y[j]:
T[i][j] = 1 + T[i-1][j-1]
else:
T[i][j] = max(T[i-1][j], T[i][j-1])
maxlen = T[rows-1][cols-1]
substr = ""
for i in range(1,rows):
for j in range(1,cols):
if T[i][j] > T[i-1][j] and T[i][j] > T[i][j-1] \
and T[i][j] == T[i-1][j-1]+1:
substr += X[i]
print(maxlen, substr)
for v in T:
print(v)
# %%
X = "abcdef"
Y = "pqrbrceuf"
LCS_Dynamic(X,Y)
# %%
# Longest Palindromic subsequence
X = "aaaabbaa"
Y = X[::-1]
LCS_Palindromic_Dynamic(X,Y)
# %%
#Longest palindromic subsequence - Dynamic Programming
#Using longest common subsequence method
def LCS_Palindromic_Dynamic(X, Y):
X = " " + X # add additional space for padding
Y = " " + Y
rows = len(X)
cols = len(Y)
T = [[0 for i in range(cols)] for j in range(rows)]
maxlen = 0
for i in range(1, rows):
for j in range(1, cols):
if X[i] == Y[j]:
T[i][j] = 1 + T[i-1][j-1]
else:
T[i][j] = max(T[i-1][j], T[i][j-1])
maxlen = T[rows-1][cols-1]
substr = ""
for i in range(1,rows):
if T[i][] == T[i-1][j-1]+1:
substr += X[i]
print(maxlen, substr)
for v in T:
print(v) | [
"noreply@github.com"
] | karthyvenky.noreply@github.com |
6ec582b45c915e09dd744f84899c6718fc1c86f7 | f6c6e0ebc18b7b1a28c23367f62c960e86194c88 | /fileIO/hdf5/backup/plot_h5.py | 0190bfb940e43963a0d6738e3a4fb4c64a2e2b2f | [] | no_license | TheGrim1/python_work | 9316d6fbb71a4be9bd901f104e939949dfd91174 | 5b34277aed4c06b62276644160e0aa97a4260233 | refs/heads/master | 2021-01-11T13:54:54.366575 | 2019-03-12T12:38:39 | 2019-03-12T12:38:39 | 94,876,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | from __future__ import print_function
# home: /data/id13/inhouse2/AJ/skript/fileIO/hdf5/open_h5.py
# global imports
import h5py
import sys, os
import matplotlib.pyplot as plt
import time
# local imports
from open_h5 import open_h5
def plot_h5(data,
index=0,
title = "Title"):
dimension = len(data.shape)
print("dimension of data to be plotted is %s" % dimension)
if dimension == 3:
plt.imshow(data[index,:,:], interpolation = 'none')
elif dimension == 2:
plt.imshow(data[:,:], interpolation = 'none')
elif dimension not in (2,3):
print("invalid data for plotting \ntitle : %s\n%s" % (title, dimension))
# plt.clim(0,0.001)
plt.show()
plt.title(title)
def plotmany_h5(data,index):
ax1 = plt.subplot(1,1,1,axisbg = (0.9, 0.9, 0.95))
ax1.figure.set_size_inches(10,10)
title = "Plotting image no %s of %s"
ax1.title(title % (0, 0))
ax1.ion()
if dimension == 3:
toplot = data[index,:,:]
elif dimension == 2:
toplot = data[:,:]
elif dimension not in (2,3):
print("invalid data for plotting \ntitle : %s\n%s" % (title, dimension))
ax1.pcolor(toplot,norm=LogNorm(vmin=max(data.min(),0.0001),vmax=max(data.max(),0.01)), cmap='PuBu')
nimages = data.shape[0]
plt.show()
def main(args):
'also does the plotting'
for fname in args:
data = open_h5(fname, framelist = [529,530,532], threshold = 5000000)
# print 'the data shape is:'
# print data.shape
if data.ndim != 2:
plotmany_h5(data)
else:
plot_h5(data, title = os.path.basename(fname))
if __name__ == '__main__':
usage =""" \n1) python <thisfile.py> <arg1> <arg2> etc.
\n2) python <thisfile.py> -f <file containing args as lines>
\n3) find <*yoursearch* -> arg1 etc.> | python <thisfile.py>
"""
args = []
if len(sys.argv) > 1:
if sys.argv[1].find("-f")!= -1:
f = open(sys.argv[2])
for line in f:
args.append(line.rstrip())
else:
args=sys.argv[1:]
else:
f = sys.stdin
for line in f:
args.append(line.rstrip())
# print args
main(args)
| [
"opid13@nanofocus.esrf.fr"
] | opid13@nanofocus.esrf.fr |
bca28e38fcd7944d329ae326037eaa937382d563 | 032144d039ead151804d56910a4059d0fc48c374 | /civpy/structures/node_load.py | f1c958aa95696c84c41c95e1f4f0d620e2bcf427 | [
"BSD-3-Clause"
] | permissive | mpewsey/civpy | 038f5bd4a22971864cceb7c4f9568fdcca40a147 | bbf74b1c04ca9f7604831f5280cc80d796240e67 | refs/heads/master | 2022-02-26T06:45:40.087975 | 2019-05-05T04:48:47 | 2019-05-05T04:48:47 | 170,751,326 | 16 | 3 | BSD-3-Clause | 2022-02-11T02:30:16 | 2019-02-14T20:09:32 | Python | UTF-8 | Python | false | false | 5,178 | py | """
Copyright (c) 2019, Matt Pewsey
"""
import weakref
import numpy as np
__all__ = ['NodeLoad']
class NodeLoad(np.ndarray):
"""
A class representing a load applied to a node.
Parameters
----------
node : str
The name of the node to which the load will be applied.
fx, fy, fz : float
The applied global node forces.
mx, my, mz : float
The applied global moments.
dx, dy, dz : float
The applied node deflections.
rx, ry, rz : float
The applied node rotations.
"""
def __new__(cls, node, fx=0, fy=0, fz=0, mx=0, my=0, mz=0,
dx=0, dy=0, dz=0, rx=0, ry=0, rz=0):
obj = np.array([fx, fy, fz, mx, my, mz,
dx, dy, dz, rx, ry, rz], dtype='float').view(cls)
obj.node = node
return obj
def __array_finalize__(self, obj):
if obj is None: return
self.node = getattr(obj, 'node', '')
self.node_ref = None
def node():
def fget(self):
return self._node
def fset(self, value):
if not isinstance(value, str):
value = str(value)
self._node = value
def fdel(self):
del self._node
return locals()
node = property(**node())
def node_ref():
def fget(self):
value = self._node_ref
if value is None:
return value
return value()
def fset(self, value):
if value is not None:
value = weakref.ref(value)
self._node_ref = value
def fdel(self):
del self._node_ref
return locals()
node_ref = property(**node_ref())
def fx():
def fget(self):
return self[0]
def fset(self, value):
self[0] = value
return locals()
fx = property(**fx())
def fy():
def fget(self):
return self[1]
def fset(self, value):
self[1] = value
return locals()
fy = property(**fy())
def fz():
def fget(self):
return self[2]
def fset(self, value):
self[2] = value
return locals()
fz = property(**fz())
def mx():
def fget(self):
return self[3]
def fset(self, value):
self[3] = value
return locals()
mx = property(**mx())
def my():
def fget(self):
return self[4]
def fset(self, value):
self[4] = value
return locals()
my = property(**my())
def mz():
def fget(self):
return self[5]
def fset(self, value):
self[5] = value
return locals()
mz = property(**mz())
def dx():
def fget(self):
return self[6]
def fset(self, value):
self[6] = value
def fdel(self):
del self._dx
return locals()
dx = property(**dx())
def dy():
def fget(self):
return self[7]
def fset(self, value):
self[7] = value
def fdel(self):
del self._dy
return locals()
dy = property(**dy())
def dz():
def fget(self):
return self[8]
def fset(self, value):
self[8] = value
def fdel(self):
del self._dz
return locals()
dz = property(**dz())
def rx():
def fget(self):
return self[9]
def fset(self, value):
self[9] = value
def fdel(self):
del self._rx
return locals()
rx = property(**rx())
def ry():
def fget(self):
return self[10]
def fset(self, value):
self[10] = value
def fdel(self):
del self._ry
return locals()
ry = property(**ry())
def rz():
def fget(self):
return self[11]
def fset(self, value):
self[11] = value
def fdel(self):
del self._rz
return locals()
rz = property(**rz())
def __repr__(self):
s = [
'node={!r}'.format(self.node),
'forces={!r}'.format((self.fx, self.fy, self.fz)),
'moments={!r}'.format((self.mx, self.my, self.mz)),
'defl={!r}'.format((self.dx, self.dy, self.dz)),
'rot={!r}'.format((self.rx, self.ry, self.rz))
]
return '{}({})'.format(type(self).__name__, ', '.join(s))
def forces(self):
"""Returns the applied force and moment matrix."""
return self[:6]
def deflections(self):
"""Returns the applied deflection and rotation matrix."""
return self[6:]
def get_node(self):
"""Gets the referenced node."""
if self.node_ref is None:
raise ValueError('Node has not been set.')
return self.node_ref
def set_node(self, ndict):
"""
Sets the node reference.
Parameters
----------
ndict : dict
A dictionary mapping node names to node objects.
"""
self.node_ref = ndict[self.node]
| [
"mattpewsey@gmail.com"
] | mattpewsey@gmail.com |
7996b0260e16f6a5f2b53bf673c0e97d691983cd | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/39/usersdata/136/13246/submittedfiles/dec2bin.py | f1bd3e0804d034f78294918ab7ae45017384299e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | n = int(input("Digite um número decimal:"))
i = 0
j = 1
d = n%2
while n>0:
d = n%2
n = n/2
i = i+(d*j)
j = j*10
print("%d"%i) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b9f65ae6c1eeeaf446d14f71ca47f60802fcc57b | 0639b9986cfdff6ccaf883388282fce45c57c08a | /util.py | 5625a7f50a7ceaf80b333511ae870b805b910068 | [
"MIT"
] | permissive | masterLei/alkaid | 159b27f0285693f664766b33af8264a62e424468 | 2548f46826e2ad58ab1787dfac29314b4aaad8cb | refs/heads/master | 2021-08-24T15:49:18.155786 | 2017-12-10T09:19:40 | 2017-12-10T09:19:40 | 113,736,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | #encoding:utf-8
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
import tushare as ts
import datetime
import numpy as np
import pandas as pd
from matplotlib.dates import date2num
df = ts.get_hist_data('601318', ktype='D', start='2017-01-01')
df = df.sort_index(axis=0,ascending=True)
i = 0
result = pd.DataFrame(columns=('date','price','label'))
for index,row in df.iterrows():
if(i == 4):
array = df.iloc[0:i, 1]#low
ind = array.argmin()
result.loc[0] = {'date':ind,'price':array[ind],'label':'low'}
if(i!= 0 and i%5 == 0 and i!=4):
array = df.iloc[i-5:i,1]
if result.iloc[len(result) - 1].label == 'low':
ind = array.argmax()
result.loc[len(result)] = {'date': ind, 'price': array[ind], 'label': 'high'}
else:
ind = array.argmin()
result.loc[len(result)] = {'date': ind, 'price': array[ind], 'label': 'low'}
i +=1
times = []
lows = []
for index,row in result.iterrows():
date_time = datetime.datetime.strptime(row.date, '%Y-%m-%d')
t = date2num(date_time)
times.append(t)
lows.append(row.price)
print("result",result)
x = np.array(times)
y = np.array(lows)
plt.gca().xaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d'))
plt.plot(x,y)
plt.show()
| [
"515051058@qq.com"
] | 515051058@qq.com |
e499d64a713706ac756077b6e2c5e825b687d3c7 | ff941fe046a593189050b6fdf77d44ade0925e8a | /lesson_python_basics/default_parameters2.py | 7a862b12f45c6e47912aa620b568a03c7691b53e | [] | no_license | hamk-webdev-intip19x6/petrikuittinen_assignments | c0dd02d3465ebf29f4387ab2805b12858c22110b | 68dc154fbc571d8bc85f8eec0130b49e143c1e51 | refs/heads/master | 2021-11-11T04:08:45.963836 | 2021-10-04T15:47:03 | 2021-10-04T15:47:03 | 233,399,492 | 0 | 9 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | def ask(question, choices, correct, retries=2):
i = 1
print(question)
for c in choices:
print(i, c)
i += 1
while retries>0:
try:
guess = int(input("?"))
except ValueError:
continue
if guess==correct:
print("right")
break
print("wrong guess")
retries -= 1
else:
print("the correct reply was", correct, choices[correct-1])
ask("What is the capital of Australia?", \
("London", "Sydney", "Canberra", "Victoria"), 3)
ask("When Finland gained independence?", \
("1900", "1917", "1919", "1939"), 2, 1)
ask(question="What is the chemical symbol of Iron?", \
correct=1, choices=("Fe", "R", "Ir", "I"))
ask("How to delete a variable in Python?", \
("delete", "del", "remove", "destroy"), \
retries=3, correct=2)
| [
"pkuittinen@ubuntupetrikuittinen.xbp5jv35e4rujjrlarqjj32eqf.fx.internal.cloudapp.net"
] | pkuittinen@ubuntupetrikuittinen.xbp5jv35e4rujjrlarqjj32eqf.fx.internal.cloudapp.net |
e5ad761d2f6d6b27a19e5e97fac519795356918e | e37695acd4e1090165d517ec6822834826240e0a | /muBot/acqCPE_texp_CAR.py | 8fd71dbe74d256ca080c48fddcf42f38a37b41a3 | [] | no_license | LouCimmino/muStang | 94fe92227746549a7793de22f67ed61fd03eb6a8 | c89dfc90db7d77318db7a9182cbdb87652dd5422 | refs/heads/master | 2020-08-06T01:44:29.828307 | 2019-10-30T17:57:47 | 2019-10-30T17:57:47 | 212,586,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,946 | py | import sys
import os
import subprocess
import time
import zmq
import curses
#skList = ['2', '3', '13', '9', '5', '0', '17', '7', '8', '12', '4', '1']
skList = [sys.argv[1], '17']
dac10List = ['600','600']
zero = []
unoA = []
unoB = ['17',sys.argv[1]]
for Sk in skList :
inputF = open('Core/EASI_Probe.txt', 'r')
outputF = open('Conf/EASI_Probe_' + Sk + '.txt', 'w')
s = inputF.readline()
y = int(Sk)<<10
w = "{:04x}".format(256+y)
outputF.write(w + '\n')
while s:
s = inputF.readline()
outputF.write(s)
inputF.close()
outputF.close()
inputF = open('Core/EASI_Hold_Pot_46ns.txt', 'r')
outputF = open('Conf/EASI_Hold_Pot_46ns_' + Sk + '.txt', 'w')
s = inputF.readline()
y = int(Sk)<<10
w = "{:04x}".format(736+y)
outputF.write(w + '\n')
s = inputF.readline()
y = int(sys.argv[2])
ht = "{:04x}".format(255-y)
outputF.write(ht + '\n')
while s:
s = inputF.readline()
outputF.write(s)
inputF.close()
outputF.close()
inputF = open('Core/EASI_TimeOut_Pot_300ns.txt', 'r')
outputF = open('Conf/EASI_TimeOut_Pot_300ns_' + Sk + '.txt', 'w')
s = inputF.readline()
y = int(Sk)<<10
w = "{:04x}".format(704+y)
outputF.write(w + '\n')
s = inputF.readline()
y = int(sys.argv[3])
to = "{:04x}".format(255-y)
outputF.write(to + '\n')
while s:
s = inputF.readline()
outputF.write(s)
inputF.close()
outputF.close()
subprocess.call("./Reset")
for dac10 in range (0,1,1):
dac8 = 255
while (dac8 >= 255):
print ('----------------------------')
print ('DAC8 value : ' + str(dac8))
print ('DAC10 value : ' + str(dac10))
print ('----------------------------')
inputF = open('EASIprog.c', 'r')
outputF = open('EASIprog.out', 'w')
s = inputF.readline()
while s:
outputF.write(s)
#if ('//DAC8' in s):
# outputF.write('for (i=0; i<32; i++) error = DACbiasSC_EASI(SC_EASI, i, ' + str(dac8) + ');')
# outputF.write('\n')
# s = inputF.readline()
if ('//DAC10' in s) :
outputF.write('\tDAC10thrsSC_EASI(SC_EASI,' + str(dac10) +');\n')
s = inputF.readline()
s = inputF.readline()
inputF.close()
outputF.close()
arg = ["mv", "EASIprog.out", "EASIprog.c"]
subprocess.call(arg)
arg = ["gcc", "-O2", "EASIprog.c", "libreriaSC_EASI.c", "-o", "EASIprog"]
subprocess.call(arg)
subprocess.call("./EASIprog")
subprocess.call('./Init')
for Sk in skList :
arg = ["./ResetSlave", Sk]
subprocess.call(arg)
skCounter = 0
for Sk in skList :
inputF = open('Core/EASI_Slow_Control.txt', 'r')
outputF = open('Conf/EASI_Slow_Control_' + Sk + '.txt', 'w')
s = inputF.readline()
y = int(Sk)<<10
w = "{:04x}".format(224 + y)
outputF.write(w + '\n')
s = inputF.readline()
outputF.write(s)
s = inputF.readline()
y = int(dac10List[skCounter])<<2
w = "{:04x}".format(61443 + y)
outputF.write(w + '\n')
while s:
s = inputF.readline()
outputF.write(s)
inputF.close()
outputF.close()
skCounter = skCounter + 1
for Sk in skList :
subprocess.call('./Init')
arg = ["./SendFSlaves", "Conf/EASI_Probe_" + Sk + ".txt"]
subprocess.call(arg)
arg = ["./SendFSlaves", "Conf/EASI_Hold_Pot_46ns_" + Sk + ".txt"]
subprocess.call(arg)
arg = ["./SendFSlaves", "Conf/EASI_TimeOut_Pot_300ns_" + Sk + ".txt"]
subprocess.call(arg)
arg = ["./SendFSlaves", "Conf/EASI_Slow_Control_" + Sk + ".txt"]
subprocess.call(arg)
print ('\nConfiguring Triggers...')
arg = ["./SendFMaster", "MasterCMD/EN_MUX0_S.txt"]
subprocess.call(arg)
arg = ["./SendFMaster", "MasterCMD/EN_MUX1_S.txt"]
subprocess.call(arg)
arg = ["./SendFMaster", "MasterCMD/EN_MUX2_S.txt"]
subprocess.call(arg)
arg = ["./SendFMaster", "MasterCMD/EN_MUX3_S.txt"]
subprocess.call(arg)
arg = ["./SendFMaster", "MasterCMD/EN_MUX4_S.txt"]
subprocess.call(arg)
subprocess.call('./Reset')
runCounter = 0
evts = 1000
numEvts = 5
#pedEvts = 10000
print("Ready to go!\n")
#while (runCounter<int(numRuns)):
subprocess.call("./Reset")
adcCounter = 0
print("Run " + str(runCounter) + " :: Now reading")
while(adcCounter < numEvts):
outputF = open('/home/DatiTB/slaveData', 'w')
outputF.write(str(int(round(time.time()*1000))) + '\n')
outputF.close()
adcCounter = adcCounter + 1
arg = ['./ReadSlave', str(evts) , skList[0], skList[1]]
subprocess.call(arg)
outputF = open('/home/DatiTB/slaveData', 'a')
outputF.write(str(int(round(time.time()*1000))))
outputF.close()
arg = ['mv', '/home/DatiTB/slaveData', '/home/texp/slaveData_punti' + str(numEvts) + '_Sk' + skList[0] + '_ns' + str(adcCounter+3)]
subprocess.call(arg)
input('!!! Change Delay and Invio to go! Ctrl+C to exit !!!')
dac8 = dac8 - 64
if (dac8 == -1):
dac8 = 0
print ('\n--- Shutting Down System\n')
| [
"lcimmin@gmail.com"
] | lcimmin@gmail.com |
74a7db8e6aecf3838c5580179296a816ce6d3c0f | 771ac2901a1c14745093a90ea272df9bb1e5a2ef | /manage.py | 9cb650fc07101436e8ee94d793ea95e03d923ae3 | [] | no_license | miraiakagawa/pinger | 47a59bc5e8e3e012c0dbc28a65fd617984941578 | 37916cd4987367f51986a732f999f62c27507218 | refs/heads/master | 2022-12-10T13:03:13.829711 | 2013-09-11T18:16:33 | 2013-09-11T18:16:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pinger.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"mirai418@me.com"
] | mirai418@me.com |
1b7ef58ff701c085ead9ee493e21e53abfca2806 | 250b997d715c168315a927e28124cf24c77048c0 | /python3基础/9.Python修炼第九层/day9预习/11 生产者消费者模型.py | c20f15d6206f6793cd0c499148df156b6e894097 | [] | no_license | cuitianfeng/Python | c78077e5dcad01ee5fe44c0aa8b61bbc2fa388cf | 9c9f10f13311116ce0bc60ec128f765ff2ca3078 | refs/heads/master | 2023-01-10T23:25:57.158141 | 2020-11-17T15:39:36 | 2020-11-17T15:39:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,299 | py | # from multiprocessing import Process,Queue
# import time,os
# def producer(q,name):
# for i in range(3):
# time.sleep(1)
# res='%s%s' %(name,i)
# q.put(res)
# print('\033[45m<%s> 生产了 [%s]\033[0m' %(os.getpid(),res))
#
#
# def consumer(q):
# while True:
# res=q.get()
# if res is None:break
# time.sleep(1.5)
# print('\033[34m<%s> 吃了 [%s]\033[0m' % (os.getpid(), res))
#
# if __name__ == '__main__':
# q=Queue()
# #生产者们:即厨师们
# p1=Process(target=producer,args=(q,'包子'))
# p2=Process(target=producer,args=(q,'饺子'))
# p3=Process(target=producer,args=(q,'馄饨'))
#
# #消费者们:即吃货们
# c1=Process(target=consumer,args=(q,))
# c2=Process(target=consumer,args=(q,))
#
# p1.start()
# p2.start()
# p3.start()
# c1.start()
# c2.start()
#
# p1.join()
# p2.join()
# p3.join()
# q.put(None)
# q.put(None)
#
# print('主')
# from multiprocessing import Process, JoinableQueue
# import time, os
#
#
# def producer(q, name):
# for i in range(3):
# time.sleep(1)
# res = '%s%s' % (name, i)
# q.put(res)
# print('\033[45m<%s> 生产了 [%s]\033[0m' % (os.getpid(), res))
# q.join()
#
# def consumer(q):
# while True:
# res = q.get()
# time.sleep(1.5)
# print('\033[34m<%s> 吃了 [%s]\033[0m' % (os.getpid(), res))
# q.task_done()
#
# if __name__ == '__main__':
# q = JoinableQueue()
#
# # 生产者们:即厨师们
# p1 = Process(target=producer, args=(q, '包子'))
# p2 = Process(target=producer, args=(q, '饺子'))
# p3 = Process(target=producer, args=(q, '馄饨'))
#
# # 消费者们:即吃货们
# c1 = Process(target=consumer, args=(q,))
# c2 = Process(target=consumer, args=(q,))
#
# c1.daemon=True
# c2.daemon=True
# p1.start()
# p2.start()
# p3.start()
# c1.start()
# c2.start()
#
#
# p1.join()
#
# print('主')
# -----------------
# from multiprocessing import Process,Queue
# import time
# import os
#
# def producer(q,name):
# for i in range(3):
# time.sleep(1)
# res='%s%s' %(name,i)
# q.put(res)
# print('\033[45m<%s> 生产了 [%s]\033[0m' %(os.getpid(),res))
#
#
# def consumer(q):
# while True:
# res=q.get()
# if res is None:break
# time.sleep(1.5)
# print('\033[34m<%s> 吃了 [%s]\033[0m' % (os.getpid(), res))
#
# if __name__ == '__main__':
# q=Queue()
# #生产者们:即厨师们
# p1=Process(target=producer,args=(q,'包子'))
# p2=Process(target=producer,args=(q,'饺子'))
# p3=Process(target=producer,args=(q,'馄饨'))
#
# #消费者们:即吃货们
# c1=Process(target=consumer,args=(q,))
# c2=Process(target=consumer,args=(q,))
#
# p1.start()
# p2.start()
# p3.start()
#
# c1.start()
# c2.start()
#
# p1.join()
# p2.join()
# p3.join()
# q.put(None)
# q.put(None)
#
# print('主')
#
# Producer/consumer demo with multiprocessing.JoinableQueue:
# each producer's q.join() blocks until consumers have task_done()'d every
# item it put, so the daemonized consumers can be left running forever and
# die automatically with the main process.
from multiprocessing import Process,JoinableQueue
import time
import os
def producer(q,name):
    """Put three '<name><i>' items on the queue, then wait until every
    one of them has been acknowledged (task_done) by a consumer."""
    for i in range(3):
        time.sleep(1)
        res='%s%s' %(name,i)
        q.put(res)
        print('\033[45m<%s> 生产了 [%s]\033[0m' %(os.getpid(),res))
    q.join()
def consumer(q):
    """Loop forever: take an item, 'eat' it, and acknowledge with task_done()."""
    while True:
        res=q.get()
        time.sleep(1.5)
        print('\033[34m<%s> 吃了 [%s]\033[0m' % (os.getpid(), res))
        q.task_done()
if __name__ == '__main__':
    q=JoinableQueue()
    # producers: the cooks
    p1=Process(target=producer,args=(q,'包子'))
    p2=Process(target=producer,args=(q,'饺子'))
    p3=Process(target=producer,args=(q,'馄饨'))
    # consumers: the eaters
    c1=Process(target=consumer,args=(q,))
    c2=Process(target=consumer,args=(q,))
    # daemon consumers are killed automatically when the main process exits
    c1.daemon=True
    c2.daemon=True
    p1.start()
    p2.start()
    p3.start()
    c1.start()
    c2.start()
    # waiting for the producers implies the queue has been fully drained,
    # because each producer returns only after its q.join() succeeds
    p1.join()
    p2.join()
    p3.join()
    print('主')
| [
"zhang.hongyang@mydreamplus.com"
] | zhang.hongyang@mydreamplus.com |
56e5c49696fd2ab9d295abcdc27255368e6e7461 | ea83d172b211dad5a5d3680a537e4d2d538f42d9 | /week2_priority_queues_and_disjoint_sets/1_make_heap/test_build_heap_cases.py | a7a62dbb8dc4ded4e681d2d59664d2f5a4b798b0 | [] | no_license | FluffyFu/UCSD_Algorithms_Course_2 | 9e17e696be14b70da0d221802e4fb8527aeab0aa | f56aeee174f89cebffe5df6abb3930bda1fd4709 | refs/heads/master | 2022-12-07T00:38:28.499483 | 2020-08-27T15:39:36 | 2020-08-27T15:39:36 | 285,307,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from build_heap import build_heap
# Regression check for build_heap: run it on test case 04 and compare the
# produced swap list against the expected answer file.
with open('tests/04') as f:
    n = int(f.readline())
    data = list(map(int, f.readline().split()))
# Expected answer: number of swaps on the first line, then one (i, j)
# swap pair per subsequent line.
with open('tests/04.a') as f:
    n_swaps = int(f.readline())
    results = []
    for line in f.readlines():
        results.append(tuple(map(int, line.split())))
my_results = build_heap(data)
# my_results = [(b, a) for a, b in my_results]
assert my_results == results, 'my results len: {}, truth len: {}'.format(
    len(my_results), len(results))
# Show a short prefix of both lists for eyeballing on success.
print('my_results: ', my_results[:10])
print('truth: ', results[:10])
| [
"fluffyfu400@gmail.com"
] | fluffyfu400@gmail.com |
13be3ecc964be4550e4fc545cc9a1bc72f317b82 | dd6971bd42253e94336a4b84ecc7dfc3bce27b4a | /src/api/migrations/0004_alter_analysisticket_task_id.py | eae8b098e1553ce62f3c7a7cd37ad40bde499b41 | [
"MIT"
] | permissive | Codes-sources-de-la-Justice/TRISTAN | 13217f2dfa2873e810e4bcee177911b0e584956f | 76b0f33b888dea5b5bd5bf46555d6abfc93bb594 | refs/heads/master | 2023-07-16T17:28:55.027779 | 2022-07-07T13:51:29 | 2022-07-07T13:51:29 | 432,159,229 | 1 | 0 | MIT | 2023-03-08T03:01:10 | 2021-11-26T11:45:53 | Nix | UTF-8 | Python | false | false | 401 | py | # Generated by Django 3.2.8 on 2021-10-20 16:03
from django.db import migrations, models
# Schema migration: make AnalysisTicket.task_id nullable with a None default.
class Migration(migrations.Migration):
    # Must be applied after the previous `api` migration.
    dependencies = [
        ('api', '0003_auto_20211020_1600'),
    ]
    operations = [
        migrations.AlterField(
            model_name='analysisticket',
            name='task_id',
            field=models.UUIDField(default=None, null=True),
        ),
    ]
| [
"ryan.lahfa@justice.gouv.fr"
] | ryan.lahfa@justice.gouv.fr |
52711e1807d241e44a9416e4f87c838a799444fb | 44956e0d322f97ea26a96a4be8f1d0b39dc5f0c7 | /ejercicios_basicos/Ejercicio1.py | f9c0cf9bc8c7a0f851c4e1f2ddefc24d7d87c9aa | [] | no_license | landrea-velez/pyton-45 | 3c4c86761d383ba478402dcbd5ec2b1a5156b90e | ff00c14389fd05ce42a14048453ce9cad5cd51fd | refs/heads/main | 2023-08-19T23:59:16.786144 | 2021-10-27T13:33:24 | 2021-10-27T13:33:24 | 350,850,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | # Ejercicio 1: Escribir un programa que muestre por pantalla la cadena ¡Hola Mundo!.
# Exercise 1: print the classic greeting to stdout.
print("¡Hola mundo!")
| [
"landrea.velez@gmail.com"
] | landrea.velez@gmail.com |
48c1ad6412f958ebfe1d30c7dfd44939152eb862 | e2ae714619949c8210ed52b722cb5023d786f305 | /25-reverse-nodes-in-k-group.py | e3dc991be3fcdc07e46870ac5b846c54c4bc0878 | [] | no_license | nanli-7/algorithms | ce7c0bf2368834c12843cc004405b55f7d23bf36 | 6bee015dac47603253018fd773920e62b29f3f20 | refs/heads/master | 2020-04-15T23:12:37.691828 | 2019-07-06T20:54:55 | 2019-07-06T20:54:55 | 165,097,698 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | """ 25. Reverse Nodes in k-Group - Hard
Topic: linked list
Related: 24. Swap Nodes in Pairs - Medium
Given a linked list, reverse the nodes of a linked list k at a time and return
its modified list.
k is a positive integer and is less than or equal to the length of the linked
list. If the number of nodes is not a multiple of k then left-out nodes in the
end should remain as it is.
Example:
Given this linked list: 1->2->3->4->5
For k = 2, you should return: 2->1->4->3->5
For k = 3, you should return: 3->2->1->4->5
Note:
Only constant extra memory is allowed.
You may not alter the values in the list's nodes, only nodes itself may be changed.
"""
# Definition for singly-linked list.
class ListNode(object):
    """Node of a singly-linked list."""

    def __init__(self, x):
        self.val = x      # payload carried by this node
        self.next = None  # successor node, or None at the tail

    def __repr__(self):
        # Recurses through `self.next`: formatting the successor renders the
        # remainder of the list, terminating with "None" at the tail.
        # (The original guarded this with a vacuous `if self:` that let
        # __repr__ fall through and implicitly return None otherwise.)
        return "{} -> {}".format(self.val, self.next)
class Solution(object):
    def reverseKGroup(self, head, k):
        """
        Reverse the nodes of the list k at a time (LeetCode 25); a trailing
        group shorter than k is left in its original order.
        O(n) time, O(1) extra space; only links are rewired, never values.
        (The original left this method unimplemented.)

        :type head: ListNode
        :type k: int
        :rtype: ListNode
        """
        if head is None or k <= 1:
            return head
        # Count the nodes so we know how many full groups to reverse.
        length = 0
        node = head
        while node is not None:
            length += 1
            node = node.next
        new_head = head
        prev_group_tail = None   # tail of the previous (already reversed) group
        node = head              # first node of the current group
        remaining = length
        while remaining >= k:
            # Reverse exactly k nodes starting at `node`.
            prev = None
            cur = node
            for _ in range(k):
                nxt = cur.next
                cur.next = prev
                prev = cur
                cur = nxt
            # `prev` now heads the reversed group; `node` is its tail.
            if prev_group_tail is None:
                new_head = prev          # first group: its head becomes the list head
            else:
                prev_group_tail.next = prev
            node.next = cur              # provisionally link to the untouched rest
            prev_group_tail = node
            node = cur
            remaining -= k
        return new_head
| [
"nanli-7@users.noreply.github.com"
] | nanli-7@users.noreply.github.com |
69d26ec03967cdb395829e2ec32f85cc823c399d | 96c17d18e5f04134e3865078700b7438d5ed66f7 | /tests/test_header.py | 7b4020ac91db01237e65e5e9e2738f1ce33a2055 | [
"MIT"
] | permissive | PSSST-Protocol/pypssst | 5e8defaf8b776e7db1d80a3c9d5a5ba0cf0464b9 | ade5b2d3d8964d628ce6e74aa92ffcb2de6a538c | refs/heads/master | 2022-12-10T16:46:10.541904 | 2020-09-13T21:53:49 | 2020-09-13T21:53:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | import pssst
def test_make_header():
    """A freshly constructed Header has the documented defaults."""
    header = pssst.Header()
    assert not header.reply, "Default reply flag not false"
    assert not header.client_auth, "Default client_auth flag not false"
    assert header.cipher_suite == pssst.CipherSuite.NONE, "Default cipher suite not NONE"
def test_header_accessors():
    """Each Header property can be assigned and read back."""
    header = pssst.Header()
    # the reply flag round-trips in both directions
    header.reply = True
    assert header.reply, "reply flag not True"
    header.reply = False
    assert not header.reply, "reply flag not False"
    # the client_auth flag round-trips in both directions
    header.client_auth = True
    assert header.client_auth, "client_auth flag not True"
    header.client_auth = False
    assert not header.client_auth, "client_auth flag not False"
    # cipher suite assignment is reflected by the getter
    header.cipher_suite = pssst.CipherSuite.X25519_AESGCM128
    assert header.cipher_suite == pssst.CipherSuite.X25519_AESGCM128, "Failed to set cipher suite"
def _check_bytes(hdr):
    """Serialize `hdr`, parse it back, and require identical packet bytes."""
    encoded = hdr.packet_bytes
    decoded = pssst.Header.from_packet(encoded)
    assert encoded == decoded.packet_bytes, "Header bytes failed round-trip"
def test_header_bytes():
    """Headers built with each flag/suite variation survive a byte round-trip."""
    variations = (
        {},
        {'cipher_suite': pssst.CipherSuite.X25519_AESGCM128},
        {'reply': True},
        {'client_auth': True},
    )
    for kwargs in variations:
        _check_bytes(pssst.Header(**kwargs))
def test_repr():
    """repr() of a default Header matches the canonical textual form."""
    expected = "Header(cipher_suite=CipherSuite.NONE, reply=False, client_auth=False)"
    assert repr(pssst.Header()) == expected, "String representation incorrect"
| [
"nicko@nicko.org"
] | nicko@nicko.org |
a2226ea5ebc150cb9ce008821713b63f430c45dd | 909b2554166c20759fdfbf2f6295f4c7ab51116b | /58.py | 50b421caf738dd48a17a54a7ff90101aec55169f | [] | no_license | emil79/project-euler | da790bbde7b6c42fd34b7f841aa52d4947ac86da | f63e3ea0acb273202037fc5a8b4f85ef86bb2fc5 | refs/heads/master | 2016-09-06T18:53:55.515215 | 2013-07-28T22:20:09 | 2013-07-28T22:20:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | from math import sqrt
def is_prime(n):
    """Trial-division primality test.

    Fixes the original's acceptance of 0, 1 and negatives, for which the
    divisor range was empty and the test vacuously passed.
    """
    if n < 2:
        return False
    return not any(n % i == 0 for i in range(2, int(sqrt(n) + 1)))
# Project Euler 58: find the side length of the square (Ulam) spiral at which
# the ratio of primes along both diagonals first drops below 10%.
# Seed values: a spiral of side 7 has 13 diagonal numbers, 8 of them prime.
# The four corners of the ring with side i are i**2 - (i - 1)*j for j = 0..3;
# j == 0 is the odd square i**2 (never prime for i > 1), so only j = 1..3
# are tested each ring.
prime_count, total = 8, 13
i = 7
while prime_count * 1.0 / total >= 0.1:
    i += 2
    prime_count += sum(1 for j in range(1, 4)
                       if is_prime(i ** 2 - (i - 1) * j))
    total += 4
# NOTE: Python 2 print statement; this script predates Python 3.
print i
| [
"emil79@gmail.com"
] | emil79@gmail.com |
a02d45050310f3c24a2a11ccd49405ea65bc5f5e | 14749b6709fe4691e6cd84bac4a94b36145c9283 | /io/hdf5.py | 2e4e46cfea329be76d3648d63fa72f58408d545d | [] | no_license | hujh08/datapy | 301a0603f1e52e61e2a66b50a702e0a70886849e | 6d455e06d37865dc623d22c1c543c5bffab9c9c4 | refs/heads/master | 2023-07-22T22:37:32.383458 | 2023-07-15T10:05:34 | 2023-07-15T10:05:34 | 84,442,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,451 | py | #!/usr/bin/env python3
'''
io of HDF5 file
data in HDF5 file is represented by dict in routine
'''
import h5py
import pandas as pd
__all__=['load_hdf5', #'load_hdf5_attrs',
'save_to_hdf5',
'load_hdf5_pds', 'save_to_hdf5_pds',]
# load hdf5
def load_hdf5(path_or_obj, fields=None, key_attrs=None, ignore_dset_attrs=True,
              dict_for_list_fields=None):
    '''
    load data from HDF5

    Groups are represented by dict, Datasets by array; when attrs of a
    Dataset are fetched, a tuple `(data, attrs)` or `(attrs,)` is returned.

    Parameters:
        path_or_obj: filename, h5py.File, or h5py.Group
            HDF5 file (opened read-only) or object to load from
        fields: None (or bool True), field name, item-like, list of names,
                nested dict or list
            which data to return:
            - None or bool True: all data under `path_or_obj`
            - str/bytes: one sub-group/dataset by name
            - slice / list of indices / tuple of them: item selection
              inside a Dataset, used like np.ndarray getitem
            - list: several fields; returned as dict keyed by the entries
              or as a plain list, depending on `dict_for_list_fields`
            - dict: nested query; keys name sub-groups/datasets, values
              give their sub-fields (a bool False value skips that key)
            attrs of a Dataset may be queried via the raw string
            `key_attrs`, a dict `{key_attrs: attrs-fields}`, or a
            2-tuple `(item-like, {key_attrs: ...})` for data and attrs
            together
        key_attrs: None, bool, or str, default None
            key under which attrs appear in the output dict;
            None or bool False disables all attrs loading,
            bool True selects the default key 'attrs'
        ignore_dset_attrs: bool, default True
            whether to skip Dataset attrs on complete loads,
            unless an explicit attrs query is given in `fields`
        dict_for_list_fields: None or bool, default None
            whether a list `fields` yields a dict (keyed by the entries)
            or a plain list; None auto-chooses dict only when every
            entry is a name (str/bytes)
    '''
    # normalize bool `key_attrs`: True -> default key name, False -> disabled
    if isinstance(key_attrs, bool):
        key_attrs='attrs' if key_attrs else None
    kws=dict(key_attrs=key_attrs,
             dict_for_list_fields=dict_for_list_fields,
             ignore_dset_attrs=ignore_dset_attrs)
    # filename: open read-only and load from the root group
    if isinstance(path_or_obj, (str, bytes)):
        with h5py.File(path_or_obj, 'r') as h5f:
            return _get_data_from_group(h5f, fields=fields, **kws)
    # h5py object; explicit check instead of `assert` so the validation
    # survives `python -O`
    if not isinstance(path_or_obj, (h5py.File, h5py.Group)):
        raise TypeError('`path_or_obj` must be a file name, h5py.File '
                        'or h5py.Group, got %s' % type(path_or_obj).__name__)
    return _get_data_from_group(path_or_obj, fields=fields, **kws)
def load_hdf5_attrs(path_or_obj, fields):
    '''
    only load attrs from HDF5 file or instance
    '''
    # TODO: planned attrs-only counterpart of `load_hdf5`; not written yet,
    # so any call fails loudly rather than silently returning nothing
    raise NotImplementedError('to implement later')
## real work place
### get data from Dataset
def _get_data_from_dataset(dset, fields=None, key_attrs=None, ignore_dset_attrs=True):
    '''
    get data from h5py.Dataset
    return tuple if attrs got
        otherwise ndarray
    `key_attrs` would be ignored if arg `fields` is not for attrs query
    :param fields: None, slice, list of int, tuple of indices list, or dict (or special 2-tuple)
        dict is used to get attrs, see `_get_attrs_field_of_hdf5` for detail
            only support key by `key_attrs`
        special tuple with len==2 and second is dict
            also used to query attrs and data together
            the dict element is same as previous with only key `key_attrs`
            and if first element is bool False, ignore data
    :param ignore_dset_attrs: bool, default True
        whether to ignore attrs of dataset
            except explicit attrs query in passed `fields`
    '''
    # attrs query: raw string, dict, or 2-tuple whose second element is a dict
    if isinstance(fields, str) or isinstance(fields, dict) or \
       (isinstance(fields, tuple) and
            len(fields)==2 and isinstance(fields[1], dict)):
        if key_attrs is None:
            raise ValueError('got an attrs query for Dataset. '
                             'But `key_attrs` is None.')
        # raw string: attrs only, and the string must be `key_attrs` itself
        if isinstance(fields, str):
            if fields!=key_attrs:
                raise ValueError(f'only allow `key_attrs` "{key_attrs}" as str `fields`, '
                                 f'but got "{fields}".')
            return (_get_attrs_field_of_hdf5(dset.attrs),)
        # split data query (kd) and attrs query dict (ka)
        if isinstance(fields, tuple):
            kd, ka=fields  # 2-tuple: (data query, attrs dict)
        else:
            ka=fields   # dict: attrs only
            kd=False
        # the attrs dict must have exactly one key: `key_attrs`
        keys=list(ka.keys())
        if len(keys)!=1 or keys[0]!=key_attrs:
            raise ValueError( 'wrong format for dict `fields` of Dataset. '
                             f'only allow `key_attrs` "{key_attrs}" as key')
        k=key_attrs
        # BUGFIX: look the attrs sub-fields up in `ka` (the dict part), not
        # in `fields`; for a 2-tuple query, `fields[k]` raised
        # `TypeError: tuple indices must be integers`
        a=_get_attrs_field_of_hdf5(dset.attrs, ka[k])
        if isinstance(kd, bool) and not kd:
            return (a,)
        # query data alongside attrs
        d=_get_data_from_dataset_only_data(dset, kd)
        return (d, a)
    # only query data
    res=_get_data_from_dataset_only_data(dset, fields)
    # attach attrs only for a full load with dataset attrs enabled
    if (not ignore_dset_attrs) and \
       (key_attrs is not None) and \
       (fields is None or (isinstance(fields, bool) and fields)):
        if dset.attrs:
            a=_get_attrs_field_of_hdf5(dset.attrs)
            res=(res, a)
    return res
def _get_data_from_dataset_only_data(dset, fields=None):
    '''
    fetch raw data from a dataset, ignoring any attrs
    :param fields: None, slice, list of int, or tuple of indices list
        None or bool True selects the whole dataset
    '''
    load_all = fields is None or (isinstance(fields, bool) and fields)
    if load_all:
        return dset[()]
    return dset[fields]
### get data from Group
def _get_data_from_group(grp, fields=None, dict_for_list_fields=None, **kwargs):
    '''
    recursively load data from an h5py.Group, dispatching on the type of
    `fields`; attrs of `grp` itself are included only for a complete load
    (`fields` given as None or bool True)
    '''
    load_all = fields is None or (isinstance(fields, bool) and fields)
    if load_all:
        # complete load of the group, attrs included
        return _get_data_from_group_total(grp, **kwargs)
    if isinstance(fields, (str, bytes)):
        # single named sub-group/dataset
        return _get_data_from_group_by_name(grp, fields, **kwargs)
    # nested query: dict or list of sub-fields
    kws = dict(kwargs, dict_for_list_fields=dict_for_list_fields)
    if isinstance(fields, dict):
        return _get_data_from_group_by_dict(grp, fields, **kws)
    return _get_data_from_group_by_list(grp, list(fields), **kws)
#### get from Group by different types for `fields`
def _get_data_from_group_total(grp, key_attrs=None, **kwargs):
    '''
    get data from Group in total
    return a dict
    '''
    res={}
    for k in grp.keys():
        d=grp[k]
        if isinstance(d, h5py.Group):
            # sub-group: recurse for its complete contents
            res[k]=_get_data_from_group_total(d, key_attrs=key_attrs, **kwargs)
            continue
        # sub-dataset
        res[k]=_get_data_from_dataset(d, key_attrs=key_attrs, **kwargs)
    # attrs: stored under `key_attrs`, which must not clash with a child name
    if key_attrs is not None and grp.attrs:
        if key_attrs in res:
            s=(f'name exists as `key_attrs` "{key_attrs}" '
               f'in group of HDF5 file: {grp.file}')
            raise ValueError(s)
        res[key_attrs]=dict(grp.attrs)
    return res
def _get_data_from_group_by_name(grp, name, key_attrs=None, **kwargs):
    '''
    get sub-Group/Dataset with name `name`
    (or the group's attrs, when `name` equals `key_attrs`)
    '''
    obj=_get_sub_or_attrs_of_group(grp, name, key_attrs=key_attrs)
    # attrs manager: return all attrs as a plain dict
    if isinstance(obj, h5py.AttributeManager):
        return _get_attrs_field_of_hdf5(obj)
    # sub-Dataset or -Group: load it completely
    if isinstance(obj, h5py.Dataset):
        return _get_data_from_dataset(obj, key_attrs=key_attrs, **kwargs)
    return _get_data_from_group_total(obj, key_attrs=key_attrs, **kwargs)
def _get_data_from_group_by_dict(grp, fields, key_attrs=None,
                                 dict_for_list_fields=None, **kwargs):
    '''
    get data by dict given for `fields`
        key: name of sub-dataset/group in root `grp`
        value: fields for the key, any type valid in `_get_data_from_group`
            if None or bool True, get all data
            if bool False, ignore it
    :param dict_for_list_fields: None, bool
        passed to sub-fields with type `list`
    '''
    res={}
    for k, v in fields.items():
        # bool False value: caller asked to skip this key entirely
        if isinstance(v, bool) and not v:
            continue
        d=_get_sub_or_attrs_of_group(grp, k, key_attrs=key_attrs)
        # attrs: `v` selects which attribute names to copy
        if isinstance(d, h5py.AttributeManager):
            res[k]=_get_attrs_field_of_hdf5(d, v)
            continue
        # sub-dataset: `v` is forwarded as the item/attrs sub-query
        if isinstance(d, h5py.Dataset):
            res[k]=_get_data_from_dataset(d, v, key_attrs=key_attrs, **kwargs)
            continue
        # sub-group: recurse with `v` as the nested fields
        res[k]=_get_data_from_group(d, v, **kwargs, key_attrs=key_attrs,
                                    dict_for_list_fields=dict_for_list_fields)
    return res
def _get_data_from_group_by_list(grp, fields, dict_for_list_fields=None, **kwargs):
    '''
    load several fields given as a list
    :param dict_for_list_fields: None or bool
        if True, return a dict keyed by the field entries;
        if False, return a plain list with one element per entry;
        if None, auto-choose: dict only when every entry is a name
        (str/bytes), otherwise a list (nested-list case)
    '''
    if dict_for_list_fields is None:
        # auto-detect: dict output only for a flat list of names
        dict_for_list_fields = all(isinstance(entry, (str, bytes))
                                   for entry in fields)
    kws = dict(kwargs, dict_for_list_fields=dict_for_list_fields)
    # load each entry recursively, relative to the current group
    datas = [_get_data_from_group(grp, entry, **kws) for entry in fields]
    if not dict_for_list_fields:
        return datas
    return dict(zip(fields, datas))
#### auxiliary funcs of group
def _get_sub_or_attrs_of_group(grp, name, key_attrs=None):
    '''
    get sub-group/dataset or dataset for a group
    falls back to the group's AttributeManager when `name` is absent
    but equals `key_attrs`
    '''
    if name not in grp:
        if key_attrs is not None and name==key_attrs:  # only load attrs
            return grp.attrs
        s=f'name "{name}" not exists in group of HDF5 file: {grp.file}'
        raise ValueError(s)
    return grp[name]
def _is_list_type_fields(fields):
    '''
    whether `fields` is a list-like collection:
    iterable, but not str/bytes/dict
    '''
    if isinstance(fields, (str, bytes, dict)):
        return False
    return hasattr(fields, '__iter__')
### get attrs
def _get_attrs_field_of_hdf5(attrs, fields=None):
    '''
    extract HDF5 attrs as a plain dict
    :param fields: None, bool True, or list
        None or True copies every attribute; a list picks those keys only
    '''
    load_all = fields is None or (isinstance(fields, bool) and fields)
    if load_all:
        return dict(attrs)
    return {name: attrs[name] for name in fields}
# save data to hdf5
def save_to_hdf5(datas, path_or_obj, name=None, mode='w', key_attrs=None):
    '''
    save datas to HDF5

    Parameters:
        datas: dict
            data to save; nested dicts map to Groups, leaf values to
            Datasets:
                array-like, e.g. np.ndarray: plain data
                tuple `(np.ndarray, dict)` or `(dict,)`: data plus attrs,
                    or attrs only
            a dict value stored under `key_attrs` is written as the
            enclosing Group's attrs
        path_or_obj: str, h5py.File or h5py.Group
            hdf5 instance or file name to dump data in
        name: None or str
            optional root name; `datas` is nested one level deeper
        mode: str 'r+', 'w', 'w-', 'x', or 'a'
            file-open mode, used only when a file name is given:
                r+      Read/write, file must exist
                w       Create file, truncate if exists
                w- or x Create file, fail if exists
                a       Modify if exists, create otherwise
        key_attrs: None, bool, or str
            key in `datas` that holds attrs; None or bool False disables
            attrs, bool True selects the default key 'attrs'

    Invalid `mode`, target object or `datas` raise ValueError/TypeError
    (explicit raises instead of `assert`, so the checks survive `python -O`).
    '''
    if mode not in ('r+', 'w', 'w-', 'x', 'a'):
        raise ValueError("invalid `mode` %r; expected one of "
                         "'r+', 'w', 'w-', 'x', 'a'" % (mode,))
    # file name: open with the requested mode, then recurse on the File
    if isinstance(path_or_obj, (str, bytes)):
        with h5py.File(path_or_obj, mode) as h5f:
            return save_to_hdf5(datas, h5f, name=name, key_attrs=key_attrs)
    grp=path_or_obj
    if not isinstance(grp, (h5py.Group, h5py.File)):
        raise TypeError('`path_or_obj` must be a file name, h5py.File or '
                        'h5py.Group, got %s' % type(grp).__name__)
    # normalize bool `key_attrs`: True -> default key name, False -> disabled
    if isinstance(key_attrs, bool):
        key_attrs='attrs' if key_attrs else None
    if not isinstance(datas, dict):
        raise TypeError('`datas` must be a dict, got %s' % type(datas).__name__)
    # optional root name: nest the whole payload one level deeper
    if name is not None:
        datas={name: datas}
    _save_data_to_hdf5_group(grp, datas, key_attrs=key_attrs)
## real work place
### set HDF5 dataset
def _is_append_mode(grp):
    '''
    whether the file owning `grp` was opened in a mode that allows
    replacing existing entries ('r+' or 'a')
    '''
    return grp.file.mode in ('r+', 'a')
def _save_data_to_hdf5_dataset(grp, name, datas):
    '''
    set data as a Dataset in parent Group
    :param name: str
        name of dataset to save data
    :param datas: array-like, or tuple
        data (and attrs) to set in dataset
        tuple: `(np.ndarray, dict)` or `(dict,)`
    '''
    # split optional attrs off the data payload
    attrs=None
    if isinstance(datas, tuple) and isinstance(datas[-1], dict):
        if len(datas)==1:
            attrs=datas[0]
            datas=None # only attrs, no data
        elif len(datas)!=2:
            s=f'only allow 1-/2-tuple for `datas`, but got {len(datas)}-tuple'
            raise ValueError(s)
        else:
            datas, attrs=datas
    if name not in grp:
        # new dataset: there must be actual data to write
        if datas is None:
            raise ValueError('no data given to create new Dataset')
        grp.create_dataset(name, data=datas)
    elif datas is not None:
        # existing dataset: in append-like modes drop it first so the new
        # data may have a different shape/dtype
        if name in grp and _is_append_mode(grp):
            del grp[name]
        grp[name]=datas
    dset=grp[name]
    if not isinstance(dset, h5py.Dataset):
        raise ValueError( 'only support to save data to `Dataset,` '
                        f'but got `{type(dset).__name__}`')
    # attrs are merged into (not replacing) any existing attrs
    if attrs is not None:
        dset.attrs.update(attrs)
### set HDF5 group
def _save_data_to_hdf5_group(grp, datas, key_attrs=None):
    '''
    save data to HDF5 group recursively
    :param key_attrs: None or str
        key in `datas` whose dict value is written as this group's attrs
        instead of a sub-group; ignored when None
    '''
    for k, d in datas.items():
        # non-dict leaf: save data to a dataset
        if not isinstance(d, dict):
            _save_data_to_hdf5_dataset(grp, k, d)
            continue
        # dict value: either this group's attrs, or a (new) sub-group
        if k not in grp:
            if key_attrs is not None and k==key_attrs:
                # dict under `key_attrs`: merge into the group's attrs
                grp.attrs.update(d)
                continue
            grp1=grp.create_group(k) # create group
        else:
            grp1=grp[k]
            assert isinstance(grp1, h5py.Group)
        # set group recursively
        _save_data_to_hdf5_group(grp1, d, key_attrs=key_attrs)
# cooperate pandas
def save_to_hdf5_pds(pdobjs, path_or_obj, mode='w'):
    '''
    dump pandas instances (pd.DataFrame or pd.Series) to an HDF5 file

    Parameters:
        pdobjs: dict of pd instances
            instances to store; nested dicts are flattened to
            '/'-joined key paths inside the store
        path_or_obj: str or pd.HDFStore
            file name of HDF5 to dump in, or an already-open store
            (the original docstring wrongly named this `filename`)
        mode: 'w' or 'a'
            create/truncate or append; used only with a file name

    Invalid `mode` or non-pandas values raise ValueError/TypeError
    (explicit raises instead of `assert`, so the checks survive
    `python -O`).
    '''
    if mode not in ('w', 'a'):
        raise ValueError("invalid `mode` %r; expected 'w' or 'a'" % (mode,))
    if isinstance(path_or_obj, str):  # filename
        with pd.HDFStore(path_or_obj, mode) as store:
            return save_to_hdf5_pds(pdobjs, store)
    store=path_or_obj
    # dump via pd.HDFStore, flattening nested dicts to path-like keys
    for k, p in _squeeze_dict(pdobjs).items():
        if not isinstance(p, (pd.DataFrame, pd.Series)):
            raise TypeError('values must be pd.DataFrame or pd.Series, '
                            'got %s for key %r' % (type(p).__name__, k))
        store[k]=p
def load_hdf5_pds(path_or_obj, names=None, squeeze=False):
    '''
    load HDF5 to dict of pd instances
    :param names: None or list
        names of pds to load
    :param squeeze: bool, default False
        if True, store all pds in one dict with path in HDF5
        otherwise, use nested dict
    '''
    if isinstance(path_or_obj, str): # filename
        with pd.HDFStore(path_or_obj, 'r') as store:
            kwargs=dict(squeeze=squeeze, names=names)
            return load_hdf5_pds(store, **kwargs)
    hdfstore=path_or_obj
    if names is None:
        names=list(hdfstore.keys())
    else:
        # normalize user-given names to absolute HDF5 paths ('/...')
        names=['/'+k if not k.startswith('/') else k for k in names]
    # squeeze: one flat dict keyed by full path (leading '/' stripped)
    if squeeze:
        return {k.lstrip('/'): hdfstore[k] for k in names}
    # nested: rebuild the store's group hierarchy as nested dicts
    res={}
    stores_grp={'': res} # stores for different groups
    for path, grps, leafs in hdfstore.walk():
        gstore=stores_grp[path]
        # leaves: copy only the requested names
        for l in leafs:
            k=f'{path}/{l}'
            if k not in names:
                continue
            gstore[l]=hdfstore[k]
        # create a container dict for each child group up front
        for g in grps:
            gstore[g]={}
            stores_grp[f'{path}/{g}']=gstore[g]
    # drop groups left empty after the `names` filter
    return _del_empty_dict(res)
## auxiliary functions
def _squeeze_dict(data):
    '''
    flatten a nested dict into a single level, joining keys with '/'
    '''
    flat={}
    for key, val in data.items():
        key=key.lstrip('/')
        assert key and not key.endswith('/')  # not empty and not ends with '/'
        if not isinstance(val, dict):
            assert key not in flat
            flat[key]=val
            continue
        # recurse, then prefix each child key with the current key
        for subkey, subval in _squeeze_dict(val).items():
            joined=f'{key}/{subkey}'
            assert joined not in flat
            flat[joined]=subval
    return flat
def _del_empty_dict(data):
    '''
    drop entries whose value is a (recursively) empty dict
    '''
    cleaned={}
    for key, val in data.items():
        if isinstance(val, dict):
            # prune children first; skip the key when nothing remains
            val=_del_empty_dict(val) if val else val
            if not val:
                continue
        cleaned[key]=val
    return cleaned
| [
"hu.jianhong_2008@163.com"
] | hu.jianhong_2008@163.com |
524e15d26173cc7af5c8c1f14c308629ae72c693 | 4642e9e6c5bce8196b08686b66b0d8ddd092ea81 | /PythonProblems.py | b2017ce83919d7375a20a91cdf84715782f6ef6d | [] | no_license | Angelyr/InterviewQuestions | e8f4600c51323ba1ef050e8246afab34e5e5af8c | 4655b7a975ae09218ed60f2fb2f1c36bb91b2500 | refs/heads/master | 2020-06-10T01:30:55.030252 | 2019-09-04T03:56:54 | 2019-09-04T03:56:54 | 193,545,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,455 | py | #https://leetcode.com/problems/jewels-and-stones/
def numJewelsInStones(J, S):
    """LC 771: count the characters of stone string S whose type appears in J.

    Uses a set of jewel types for O(len(J) + len(S)) time instead of the
    original nested loops' O(len(J) * len(S)); also robust to duplicate
    letters in J, which the nested loops would double-count.
    """
    jewels = set(J)
    return sum(1 for stone in S if stone in jewels)
#https://leetcode.com/problems/max-increase-to-keep-city-skyline/
def maxIncreaseKeepingSkyline(grid):
    """LC 807: total height increase possible without changing either skyline.

    Every cell grid[i][j] may be raised to min(max of row i, max of column j);
    the answer is the sum of those gaps.  Two O(n*m) passes and, unlike the
    original, the caller's grid is NOT mutated in place.
    """
    row_max = [max(row) for row in grid]        # skyline seen from the side
    col_max = [max(col) for col in zip(*grid)]  # skyline seen from top/bottom
    return sum(min(row_max[i], col_max[j]) - cell
               for i, row in enumerate(grid)
               for j, cell in enumerate(row))
#https://leetcode.com/problems/reverse-string/
def reverseString(s):
    """Return a reversed copy of the sequence `s` (string or list alike)."""
    return s[::-1]
#https://leetcode.com/problems/maximum-depth-of-binary-tree/
def maxDepth(root):
    """Height of a binary tree: 0 for an empty tree, else 1 + taller subtree."""
    if root is None:
        return 0
    left_depth = maxDepth(root.left)
    right_depth = maxDepth(root.right)
    return 1 + (left_depth if left_depth > right_depth else right_depth)
#https://leetcode.com/problems/single-number/
def singleNumber(nums):
    """XOR-fold the list: paired values cancel, leaving the unique element."""
    acc = 0
    for value in nums:
        acc = acc ^ value
    return acc
#https://leetcode.com/problems/fizz-buzz/
def fizzBuzz(n):
    """Classic FizzBuzz: the strings for 1..n inclusive."""
    result = []
    for value in range(1, n + 1):
        word = ""
        if value % 3 == 0:
            word += "Fizz"
        if value % 5 == 0:
            word += "Buzz"
        # fall back to the number itself when neither divisor matched
        result.append(word or str(value))
    return result
#https://leetcode.com/problems/binary-tree-inorder-traversal/
def inorderTraversal(root):
    """Iterative inorder walk (left, node, right) without recursion."""
    result, stack = [], []
    node = root
    while stack or node is not None:
        if node is not None:
            # descend as far left as possible, remembering ancestors
            stack.append(node)
            node = node.left
        else:
            # visit the deepest unvisited node, then go right
            node = stack.pop()
            result.append(node.val)
            node = node.right
    return result
#https://leetcode.com/problems/two-sum/
def twoSum(nums, target):
    """Return indices [i, j] (i < j) with nums[i] + nums[j] == target.

    Single pass with a complement->index map.  Returns False when fewer
    than two numbers are supplied and (implicitly) None when no pair
    sums to target, matching the original's contract.
    """
    if len(nums) <= 1:
        return False
    seen = {}
    for index, value in enumerate(nums):
        if value in seen:
            return [seen[value], index]
        seen[target - value] = index
| [
"noreply@github.com"
] | Angelyr.noreply@github.com |
ebf006c8185185dddd4a84f9ef15bb8c06bb38ac | 07622a0fb38e843ab0eef4f69bb8fb25d107c06d | /pretrained_mol_sim/Theano-master/theano/tests/main.py | 97fea16ecc4d8d8763b25fdd0fb1d970cab187e3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | andreeadeac22/graph_coattention | fa59d77252625e4bee1cb9670e4a0fd0fec98135 | 23781fedaa942ca5614054f965cb7b6543e533fa | refs/heads/master | 2023-08-08T01:51:51.368457 | 2020-02-19T04:56:59 | 2020-02-19T04:56:59 | 207,414,336 | 15 | 4 | MIT | 2023-07-22T15:47:39 | 2019-09-09T22:13:34 | Python | UTF-8 | Python | false | false | 6,222 | py | from __future__ import absolute_import, print_function, division
import os
import unittest
import sys
from numpy.testing.nosetester import NoseTester
# This class contains code adapted from NumPy,
# numpy/testing/nosetester.py,
# Copyright (c) 2005-2011, NumPy Developers
class TheanoNoseTester(NoseTester):
    """
    Nose test runner.
    This class enables running nose tests from inside Theano,
    by calling theano.test().
    This version is more adapted to what we want than Numpy's one.
    """
    def _test_argv(self, verbose, extra_argv):
        """
        Generate argv for nosetest command
        :type verbose: int
        :param verbose: Verbosity value for test outputs, in the range 1-10.
                        Default is 1.
        :type extra_argv: list
        :param extra_argv: List with any extra arguments to pass to nosetests.
        """
        # self.package_path = os.path.abspath(self.package_path)
        argv = [__file__, self.package_path]
        argv += ['--verbosity', str(verbose)]
        if extra_argv:
            argv += extra_argv
        return argv
    def _show_system_info(self):
        # imported lazily so merely loading this module does not force theano
        import theano
        print("Theano version %s" % theano.__version__)
        theano_dir = os.path.dirname(theano.__file__)
        print("theano is installed in %s" % theano_dir)
        super(TheanoNoseTester, self)._show_system_info()
    def prepare_test_args(self, verbose=1, extra_argv=None, coverage=False,
                          capture=True, knownfailure=True):
        """
        Prepare arguments for the `test` method.
        Takes the same arguments as `test`.
        """
        import nose.plugins.builtin
        # compile argv
        argv = self._test_argv(verbose, extra_argv)
        # numpy way of doing coverage
        if coverage:
            argv += ['--cover-package=%s' % self.package_name,
                     '--with-coverage', '--cover-tests',
                     '--cover-inclusive', '--cover-erase']
        # Capture output only if needed
        if not capture:
            argv += ['-s']
        # construct list of plugins
        plugins = []
        if knownfailure:
            from numpy.testing.noseclasses import KnownFailure
            plugins.append(KnownFailure())
        plugins += [p() for p in nose.plugins.builtin.plugins]
        return argv, plugins
    def test(self, verbose=1, extra_argv=None, coverage=False, capture=True,
             knownfailure=True):
        """
        Run tests for module using nose.
        :type verbose: int
        :param verbose: Verbosity value for test outputs, in the range 1-10.
                        Default is 1.
        :type extra_argv: list
        :param extra_argv: List with any extra arguments to pass to nosetests.
        :type coverage: bool
        :param coverage: If True, report coverage of Theano
                         code. Default is False.
        :type capture: bool
        :param capture: If True, capture the standard output of the tests, like
                        nosetests does in command-line. The output of failing
                        tests will be displayed at the end. Default is True.
        :type knownfailure: bool
        :param knownfailure: If True, tests raising KnownFailureTest will
                             not be considered Errors nor Failure, but reported as
                             "known failures" and treated quite like skipped tests.
                             Default is True.
        :returns: Returns the result of running the tests as a
                  ``nose.result.TextTestResult`` object.
        """
        from nose.config import Config
        from nose.plugins.manager import PluginManager
        from numpy.testing.noseclasses import NumpyTestProgram
        # Many Theano tests suppose device=cpu, so we need to raise an
        # error if device==gpu.
        if not os.path.exists('theano/__init__.py'):
            try:
                from theano import config
                if config.device != "cpu":
                    raise ValueError("Theano tests must be run with device=cpu."
                                     " This will also run GPU tests when possible.\n"
                                     " If you want GPU-related tests to run on a"
                                     " specific GPU device, and not the default one,"
                                     " you should use the init_gpu_device theano flag.")
            except ImportError:
                pass
        # cap verbosity at 3 because nose becomes *very* verbose beyond that
        verbose = min(verbose, 3)
        self._show_system_info()
        cwd = os.getcwd()
        if self.package_path in os.listdir(cwd):
            # The tests give weird errors if the package to test is
            # in current directory.
            raise RuntimeError((
                "This function does not run correctly when, at the time "
                "theano was imported, the working directory was theano's "
                "parent directory. You should exit your Python prompt, change "
                "directory, then launch Python again, import theano, then "
                "launch theano.test()."))
        argv, plugins = self.prepare_test_args(verbose, extra_argv, coverage,
                                               capture, knownfailure)
        # The "plugins" keyword of NumpyTestProgram gets ignored if config is
        # specified. Moreover, using "addplugins" instead can lead to strange
        # errors. So, we specify the plugins in the Config as well.
        cfg = Config(includeExe=True, plugins=PluginManager(plugins=plugins))
        t = NumpyTestProgram(argv=argv, exit=False, config=cfg)
        return t.result
return t.result
def main(modulename):
    """Run the unit tests of *modulename* based on the command line.

    With exactly one extra argument ``--debug`` the suite is executed via
    ``tests.debug()`` (stops on the first error); with no extra arguments it
    runs under a verbose ``TextTestRunner``; anything else prints usage.

    :param modulename: importable name of the module whose tests to run.
    """
    # The original had a dead `if 0: unittest.main()` branch; removed.
    if len(sys.argv) == 2 and sys.argv[1] == "--debug":
        module = __import__(modulename)
        tests = unittest.TestLoader().loadTestsFromModule(module)
        tests.debug()
    elif len(sys.argv) == 1:
        module = __import__(modulename)
        tests = unittest.TestLoader().loadTestsFromModule(module)
        unittest.TextTestRunner(verbosity=2).run(tests)
    else:
        print("options: [--debug]")
| [
"andreeadeac22@gmail.com"
] | andreeadeac22@gmail.com |
6735abd8c732b1c3f3fc41bd37381e40e139269d | ac8946e2e2b6faf3fb1f67094b65102ba2d8959b | /scripts/kj21.py | a60ffe975b7ad51f3b927a068bc52d25cb3adbf0 | [] | no_license | Alibhji/3D-Vehicle-detection-from-front-view-camera | aabc54b47959f3321cb1ebfaae9c1b8883f925df | 857984c2580295533badef6886545bd76a5d5501 | refs/heads/master | 2022-12-09T02:45:17.704962 | 2020-09-01T05:12:24 | 2020-09-01T05:12:24 | 235,441,734 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,861 | py | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
from tqdm import tqdm#_notebook as tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
import os
from scipy.optimize import minimize
#import plotly.express as px
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torchvision import models
from torchvision import transforms, utils
import pretrainedmodels
import pretrainedmodels.utils as utils
import time
from collections import OrderedDict
import gc
from collections import namedtuple
import gc
import os
import pickle
import segmentation_models_pytorch as smp
import torch
from segmentation_models_pytorch.encoders import get_preprocessing_fn
import numpy as np
from math import sin, cos
# --- run configuration --------------------------------------------------
BATCH_SIZE = 32
n_epochs = 40
save_dir= './kj21'  # checkpoints and training history are written here
# mode='eval'
mode='train'
model_pretrained_name= os.path.join(save_dir , '_ep_10.model')  # used when mode=='eval'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
PATH=os.getcwd()
# PATH =os.path.abspath(PATH)
PATH='./data'  # NOTE(review): unconditionally overrides the cwd assigned above
# --- dataset CSVs and image path templates ('{}' is the ImageId) --------
train = pd.read_csv(os.path.join(PATH , 'train.csv'))
test = pd.read_csv(os.path.join(PATH , 'sample_submission.csv'))
train_images_dir = os.path.join(PATH , 'train_images','{}.jpg')
test_images_dir = os.path.join(PATH , 'test_images','{}.jpg')
train_masks_dir= os.path.join(PATH , 'train_masks','{}.jpg')
Train_Masks= os.path.join(PATH,'train_masks')
Test_Masks= os.path.join(PATH,'test_masks')
# Replace the directory paths with the list of mask ImageIds (extension stripped).
Train_Masks=[item.split('.')[0] for item in os.listdir(Train_Masks)]
Test_Masks= [item.split('.')[0] for item in os.listdir(Test_Masks)]
# From camera.zip
# Intrinsic camera matrix (focal lengths and principal point) and its inverse.
camera_matrix = np.array([[2304.5479, 0, 1686.2379],
                          [0, 2305.8757, 1354.9849],
                          [0, 0, 1]], dtype=np.float32)
camera_matrix_inv = np.linalg.inv(camera_matrix)
outliers_xyx = ['ID_001a12fb2',
'ID_012aef661',
'ID_018444fd9',
'ID_01c850b27',
'ID_0260428a6',
'ID_02b082818',
'ID_030fb4808',
'ID_039502422',
'ID_03e1e30f0',
'ID_0400854c6',
'ID_042da2f13',
'ID_04b8de41e',
'ID_04f676276',
'ID_058133001',
'ID_05b615ca7',
'ID_066750276',
'ID_06b7202be',
'ID_07375a6d5',
'ID_075c0e11e',
'ID_07836399f',
'ID_07d3e4bf7',
'ID_0865cb745',
'ID_08b42e64c',
'ID_090811cf5',
'ID_09658d7d6',
'ID_0b0be29be',
'ID_0b4cab808',
'ID_0b606383c',
'ID_0b6d81cce',
'ID_0beb902ad',
'ID_0c6f8b911',
'ID_0d1612e2f',
'ID_0db064a7c',
'ID_0ddda042d',
'ID_0e30ba42f',
'ID_0e396e0e9',
'ID_0e6c355d9',
'ID_0f3d9104c',
'ID_0f7c7228a',
'ID_105f25107',
'ID_1073fe35b',
'ID_12104686a',
'ID_12267fd01',
'ID_149647474',
'ID_151fa0874',
'ID_15a8550da',
'ID_1606f0fcb',
'ID_161640772',
'ID_166c05a91',
'ID_1677e1977',
'ID_16c9192c8',
'ID_16d084b43',
'ID_175ade1ed',
'ID_181adab76',
'ID_1895b56ec',
'ID_1963b1cc3',
'ID_19b92bcc5',
'ID_1a10eff59',
'ID_1a4107469',
'ID_1b53f57ae',
'ID_1c5a74ad4',
'ID_1c7ce36a4',
'ID_1cc8e893f',
'ID_1d9f515e5',
'ID_1e468bfb0',
'ID_1ea8b0007',
'ID_1edd4d337',
'ID_1fdccb4bb',
'ID_20bda7f69',
'ID_229a5f2ee',
'ID_237fe5c9d',
'ID_23f4a113e',
'ID_240e204c5',
'ID_2423f0e3d',
'ID_243ea86e6',
'ID_24418b6a6',
'ID_27846370b',
'ID_27acaaa8a',
'ID_27f514d1b',
'ID_27f6938ee',
'ID_28ffad68c',
'ID_297d3bc4c',
'ID_2acf84328',
'ID_2b3ff35b0',
'ID_2b59c384d',
'ID_2d2ce3d39',
'ID_2d2fff4cc',
'ID_2e9d7873d',
'ID_2ee82c148',
'ID_2f620ed54',
'ID_2f99afb16',
'ID_30a7c2a64',
'ID_30abb829e',
'ID_317b0b0ce',
'ID_31903da16',
'ID_32289333c',
'ID_324d92b99',
'ID_327979358',
'ID_332e05c70',
'ID_337ddc495',
'ID_33cec9b19',
'ID_33d42e307',
'ID_34821aed0',
'ID_3507e3624',
'ID_357b01084',
'ID_3583071f6',
'ID_362d921bc',
'ID_36465e439',
'ID_36830dcd4',
'ID_36ca6234f',
'ID_36cc638b0',
'ID_36db1e0ca',
'ID_36e56f51c',
'ID_374e97d7a',
'ID_37be44db9',
'ID_37dd679dc',
'ID_37f431d28',
'ID_3891e1ef4',
'ID_38adf5c8e',
'ID_39f28052d',
'ID_3a5ef288d',
'ID_3a7358d13',
'ID_3a75649f5',
'ID_3ae397744',
'ID_3b2c71812',
'ID_3c2fbeb77',
'ID_3c31d847f',
'ID_3cf1e870e',
'ID_3d09a432b',
'ID_3d8d6dd1d',
'ID_3e075cfb4',
'ID_3e29faeb0',
'ID_3e34024dc',
'ID_3ea678dcc',
'ID_3f1c8f602',
'ID_3f2d2f39c',
'ID_3f658c1f9',
'ID_3f8a89a03',
'ID_400e4348a',
'ID_40a669408',
'ID_4199f4cd5',
'ID_422d5d2d1',
'ID_424d8de4b',
'ID_427c943c9',
'ID_42fff9c89',
'ID_4314cce16',
'ID_441886a76',
'ID_44e4eb246',
'ID_44fa78daa',
'ID_456cbcf28',
'ID_46ac698e4',
'ID_46e6b4e59',
'ID_48789938b',
'ID_48b974f73',
'ID_499da4132',
'ID_4a2e2e15b',
'ID_4a9cdac42',
'ID_4aa239dcc',
'ID_4b76d0f7a',
'ID_4e758566d',
'ID_501da7aca',
'ID_50903cfac',
'ID_50997c24e',
'ID_50e5a474e',
'ID_511911aad',
'ID_512074fcf',
'ID_517a14054',
'ID_51f3c0953',
'ID_5212cff85',
'ID_5325864a1',
'ID_534ba0ac4',
'ID_5463219ec',
'ID_55fef460c',
'ID_565c40c14',
'ID_567a39a77',
'ID_57085da35',
'ID_573e54a2d',
'ID_577d1a984',
'ID_57f52dbf4',
'ID_585cc849d',
'ID_593a711f9',
'ID_5a6def0fd',
'ID_5bb4d6d29',
'ID_5be0a526c',
'ID_5bf8b511d',
'ID_5c97d93d9',
'ID_5ce45c160',
'ID_5d4ab3dd2',
'ID_5d9a9a830',
'ID_5dc0d941d',
'ID_5e19e6af1',
'ID_5eb939315',
'ID_5f18e86f3',
'ID_5f6526a36',
'ID_5f8f50a1b',
'ID_605cf1d46',
'ID_612e8cd9a',
'ID_616bdd8aa',
'ID_626d7776d',
'ID_62b4bed34',
'ID_6301a5ee3',
'ID_6367c3fff',
'ID_63cfda92f',
'ID_63d93ffd8',
'ID_648c471c2',
'ID_64c55c11b',
'ID_6502e3a9d',
'ID_657eb589c',
'ID_65dd833a7',
'ID_66114a2bb',
'ID_66520c664',
'ID_666ed008e',
'ID_6682cb3ca',
'ID_675fc5d8a',
'ID_6790d653a',
'ID_688d64128',
'ID_689760cdb',
'ID_68ab23939',
'ID_68db82c42',
'ID_6a4fa3d45',
'ID_6a6d565ca',
'ID_6a8c65399',
'ID_6b2854770',
'ID_6b8e7dbb7',
'ID_6bb20ff30',
'ID_6bcfbe419',
'ID_6c4cb7231',
'ID_6ce9299a5',
'ID_6d0a129ab',
'ID_6d58a0364',
'ID_6da537078',
'ID_6e21495b6',
'ID_6e2f713ca',
'ID_6e8591a14',
'ID_6e9fe6af1',
'ID_6f67a0c55',
'ID_6ff92a2e4',
'ID_701ebdc56',
'ID_704e13e8c',
'ID_70cbd9f23',
'ID_7147e4e12',
'ID_72283017f',
'ID_722c0043b',
'ID_72475c36b',
'ID_729a8ef7b',
'ID_72f7c5a14',
'ID_7344a6126',
'ID_7344dc5fe',
'ID_738a1d889',
'ID_74418d18e',
'ID_745dce7e9',
'ID_749c4d3bd',
'ID_74e94a2db',
'ID_750c68912',
'ID_754e6c384',
'ID_75584dd71',
'ID_7621ea7f6',
'ID_7705b405f',
'ID_77a720d93',
'ID_77d0f1fd1',
'ID_77eb01dff',
'ID_780b7ca82',
'ID_787485a68',
'ID_789fe31ca',
'ID_78ede13b5',
'ID_7aa5be52c',
'ID_7b81dab6e',
'ID_7b8967cb0',
'ID_7c1a543f9',
'ID_7c861e895',
'ID_7d0ab7438',
'ID_7d49a1db9',
'ID_7d9239a52',
'ID_7d97e26ae',
'ID_7dcacedd5',
'ID_7e321c4e5',
'ID_7e4d94572',
'ID_7ec29eaad',
'ID_7f6f07350',
'ID_807337723',
'ID_80aecd428',
'ID_817333c1c',
'ID_817e2ef01',
'ID_819575215',
'ID_8199e5af1',
'ID_8231155b1',
'ID_82d727486',
'ID_82f97f58d',
'ID_83037d345',
'ID_83cff8701',
'ID_84047de00',
'ID_849a2ae15',
'ID_86ec7de88',
'ID_88833c3ee',
'ID_88a99396a',
'ID_8abf3818f',
'ID_8ad1639d4',
'ID_8c00fc538',
'ID_8c5850283',
'ID_8c61b6f15',
'ID_8cef10e05',
'ID_8d5cbc1e6',
'ID_8da41522e',
'ID_8dcb9b2e2',
'ID_8e41c194b',
'ID_8e61da13b',
'ID_8eafe31e3',
'ID_8eb2669b5',
'ID_8ff75c1aa',
'ID_901117b49',
'ID_901fa9e6c',
'ID_906f44587',
'ID_90d3e0e80',
'ID_90e909712',
'ID_912ad6db8',
'ID_91b2a1bb9',
'ID_91dae45bc',
'ID_920fbfaf1',
'ID_937edca6c',
'ID_94a4784ef',
'ID_94d690cbd',
'ID_952360a4e',
'ID_95276c148',
'ID_956fa5a43',
'ID_9587b899b',
'ID_95f0f9f7d',
'ID_96a20096b',
'ID_96f7fd567',
'ID_97157765e',
'ID_973bfd1fb',
'ID_97445a4aa',
'ID_97fd761ea',
'ID_983c1e248',
'ID_98aee2a8e',
'ID_99f8189f6',
'ID_9a44a546e',
'ID_9aadf2e49',
'ID_9aeb19745',
'ID_9bd2e72b9',
'ID_9ca47e157',
'ID_9cd22db32',
'ID_9d1f97fb1',
'ID_9dc0252ab',
'ID_9e2174dfe',
'ID_9e4c3af75',
'ID_9e6e1ad85',
'ID_9e71e2a10',
'ID_9ea507b62',
'ID_9ea9b90a0',
'ID_9eef06273',
'ID_9f3f0a78a',
'ID_9fdd4f9a9',
'ID_a0d7b5db9',
'ID_a0e1b638a',
'ID_a1147b159',
'ID_a13c0ea5d',
'ID_a1e4a213c',
'ID_a20df07ec',
'ID_a27f01e8d',
'ID_a2a4dad88',
'ID_a337525c3',
'ID_a38d7e70d',
'ID_a4057390a',
'ID_a4138397b',
'ID_a4c30e644',
'ID_a516085b6',
'ID_a5c1e2b3d',
'ID_a6bf8f541',
'ID_a6f146710',
'ID_a7f98119b',
'ID_a822f3885',
'ID_a89688b6e',
'ID_a9273c5a9',
'ID_a93677394',
'ID_a94be1fba',
'ID_a97d9b416',
'ID_a9d36e8db',
'ID_aa51f342c',
'ID_aa645dfab',
'ID_ab3e1ad9f',
'ID_ac121d381',
'ID_ac946f5f9',
'ID_ac9e2fdca',
'ID_aca3a70d4',
'ID_accefc9c9',
'ID_acd20715a',
'ID_ad0b4b072',
'ID_ad4474603',
'ID_ad50e86d0',
'ID_ad77c0df0',
'ID_ad98734fb',
'ID_af0603b16',
'ID_b02cee673',
'ID_b06a5c779',
'ID_b1008ef7c',
'ID_b1aea0800',
'ID_b266869b4',
'ID_b3be748fc',
'ID_b405f63d7',
'ID_b42125d61',
'ID_b4b15b8f9',
'ID_b4d6b176b',
'ID_b5ebe839b',
'ID_b63275d1b',
'ID_b69ce0b4c',
'ID_b6c16c7fb',
'ID_b6e911d41',
'ID_b745d0bf8',
'ID_b77a0da76',
'ID_b8e105ca2',
'ID_ba0126346',
'ID_ba069e4ae',
'ID_ba0a13999',
'ID_ba123226f',
'ID_ba5963dd1',
'ID_bae889e7f',
'ID_bb333df1b',
'ID_bbe62ed9c',
'ID_bc0ddb93b',
'ID_bceb075be',
'ID_bd25ccccf',
'ID_bdd08f0f8',
'ID_be01e23e7',
'ID_be4698ce8',
'ID_be5faedf0',
'ID_be8a2ce07',
'ID_beaf86342',
'ID_befcefadb',
'ID_bf2e58fbc',
'ID_bfd83f639',
'ID_bff365e13',
'ID_c0d3f4329',
'ID_c1dc1e7df',
'ID_c1edc9dea',
'ID_c2a614edc',
'ID_c2f349103',
'ID_c316009a9',
'ID_c31679c55',
'ID_c34d42a95',
'ID_c39aa96e7',
'ID_c3e6c3231',
'ID_c46c05e9f',
'ID_c4f92b64e',
'ID_c555f0edd',
'ID_c5bf3135e',
'ID_c607d49fc',
'ID_c60b3eea2',
'ID_c671f55b5',
'ID_c6af78321',
'ID_c6d9ad796',
'ID_c714ce672',
'ID_c73a44ceb',
'ID_c754c08ad',
'ID_c7861ca2c',
'ID_c7d63e9d3',
'ID_c89179341',
'ID_c8d2801b8',
'ID_c9087b6bb',
'ID_caa84eb2c',
'ID_cb5d35aee',
'ID_cbcf128fc',
'ID_cc37fa969',
'ID_cc7290959',
'ID_cd09cc695',
'ID_cd4604d5d',
'ID_cd84cf31a',
'ID_cdb292aa4',
'ID_cdbc00f91',
'ID_ce786198b',
'ID_cf1ab9cc0',
'ID_cf9eaf994',
'ID_cfc444477',
'ID_d005526a7',
'ID_d0cf6475e',
'ID_d1c16e5ff',
'ID_d1e61e771',
'ID_d200ac60c',
'ID_d20edb804',
'ID_d25e0d5ab',
'ID_d25e32e67',
'ID_d2a3f748f',
'ID_d2ae00381',
'ID_d2cdb591b',
'ID_d2f80a8de',
'ID_d2fc3a7c4',
'ID_d30bd5337',
'ID_d404f979f',
'ID_d44b9a2c6',
'ID_d44bbe32c',
'ID_d53fd940a',
'ID_d54fc9d86',
'ID_d6331ccf9',
'ID_d6390f085',
'ID_d6e26d4cf',
'ID_d739de2bf',
'ID_d78489179',
'ID_d7ffe5830',
'ID_d83c8cf22',
'ID_d87068c8e',
'ID_dc906a5d2',
'ID_dca8288ec',
'ID_dcecaf2dc',
'ID_dcf4928d7',
'ID_dd151a1b8',
'ID_dd1bd7316',
'ID_dd6592d06',
'ID_de53777e3',
'ID_df134dd43',
'ID_df2f7a255',
'ID_dfd1abfab',
'ID_e05bd3172',
'ID_e1407b0f5',
'ID_e21e6fa83',
'ID_e2bd06410',
'ID_e35cf2bba',
'ID_e463040d2',
'ID_e4bef04bb',
'ID_e54dfa981',
'ID_e5548f512',
'ID_e56d83468',
'ID_e58264b7f',
'ID_e59604e7e',
'ID_e5d1c1ebb',
'ID_e63e99a6b',
'ID_e662fcb78',
'ID_e67437f6e',
'ID_e8842ee5d',
'ID_e8cb5d892',
'ID_ea0ca7caf',
'ID_ea87260d5',
'ID_eafe67422',
'ID_eb117ac6b',
'ID_eb37b53de',
'ID_ec0e083f4',
'ID_edc1b60d4',
'ID_edd28a56a',
'ID_ef8228a54',
'ID_efa93b990',
'ID_efcff451a',
'ID_f1354858c',
'ID_f1fcf51f5',
'ID_f2aeeab18',
'ID_f2b92b119',
'ID_f32a96913',
'ID_f388a7b94',
'ID_f45e0c82e',
'ID_f4f80c710',
'ID_f64511e66',
'ID_f67cc13ff',
'ID_f79e23bf5',
'ID_f7c3b39db',
'ID_f865e2a0d',
'ID_f8cfb1759',
'ID_f918cb91b',
'ID_fa021e056',
'ID_fa1d1426c',
'ID_fa2998277',
'ID_fa61068ea',
'ID_fc949ae29',
'ID_fce05fc08',
'ID_fe5acc7a0',
'ID_fee391a62',
'ID_ffb67214c']
# Drop the images listed as outliers above.
train=train[~train['ImageId'].isin(outliers_xyx)]
len(train)  # NOTE(review): no-op outside a notebook cell
# --- geometry of the crop / downscale pipeline --------------------------
ORG_IMG_HEIGHT = 2710
ORG_IMG_WIDTH = 3384
INPUT_HEIGHT = 224
INPUT_WIDTH = 864
INPUT_SCALE = 4   # network input is 1/4 of the cropped frame
MASK_SCALE = 16   # target mask is 1/16 of the cropped frame
# MASK_SCALE = 24 #--> mask size is 112*140
CUT_from_TOP = (22 + 896 + (896* 3)//4) # pixels # 1590
CUT_from_Down = (896* 1)//4
CUTTED_PIXEL_DOWN_LOCATION = ORG_IMG_HEIGHT - CUT_from_Down
CUT_from_RIGHT= 56 # pixels
CUTTED_PIXEL_RIGHT_LOCATION = ORG_IMG_WIDTH - CUT_from_RIGHT
# [height, width] of the network input and of the target mask, after cropping.
INPUT_SIZE= [(ORG_IMG_HEIGHT - ( CUT_from_TOP + CUT_from_Down)) // INPUT_SCALE , (ORG_IMG_WIDTH - CUT_from_RIGHT )// INPUT_SCALE]
MASK_SIZE = [(ORG_IMG_HEIGHT - ( CUT_from_TOP + CUT_from_Down)) // MASK_SCALE , (ORG_IMG_WIDTH - CUT_from_RIGHT )// MASK_SCALE]
print('Orginal_SIZE :' , [ORG_IMG_HEIGHT , ORG_IMG_WIDTH])
print('INPUT_SIZE :' ,INPUT_SIZE)
print('MASK_SIZE :' ,MASK_SIZE)
def imread(path, fast_mode=False):
    """Load an image with OpenCV.

    Unless *fast_mode* is set, 3-channel images are converted from OpenCV's
    BGR channel order to RGB (as a fresh array). Returns ``None`` when
    OpenCV cannot read *path*.
    """
    image = cv2.imread(path)
    needs_rgb = (not fast_mode) and image is not None and len(image.shape) == 3
    if needs_rgb:
        image = np.array(image[:, :, ::-1])
    return image
def str2coords(s, names=['id', 'yaw', 'pitch', 'roll', 'x', 'y', 'z']):
    '''
    Input:
        s: PredictionString (e.g. from train dataframe)
        names: array of what to extract from the string
    Output:
        list of dicts with keys from `names`
    '''
    coords = []
    # Every 7 whitespace-separated tokens describe one car.
    for group in np.array(s.split()).reshape([-1, 7]):
        entry = dict(zip(names, group.astype('float')))
        if 'id' in entry:
            # The model id is an integer label, not a float.
            entry['id'] = int(entry['id'])
        coords.append(entry)
    return coords
# %% [code] {"_kg_hide-input":true}
# inp = train['PredictionString'][0]
# print('Example input:\n', inp)
# print()
# print('Output:\n', str2coords(inp))
# Flatten every pose column of the training PredictionStrings into one
# row-per-car dataframe (used below for the y-from-(x,z) linear fit).
# NOTE(review): each PredictionString is re-parsed once per column (6x).
points_df = pd.DataFrame()
for col in ['x', 'y', 'z', 'yaw', 'pitch', 'roll']:
    arr = []
    for ps in train['PredictionString']:
        coords = str2coords(ps)
        arr += [c[col] for c in coords]
    points_df[col] = arr
print('total number of the objects in the train data set is:', len(points_df))
def rotate(x, angle):
    """Add *angle* to *x* and wrap the sum back into the [-pi, pi) range."""
    shifted = x + angle
    return shifted - (shifted + np.pi) // (2 * np.pi) * 2 * np.pi
def get_img_coords(s):
    '''
    Project the world-space car positions of a PredictionString onto the
    image plane using the global camera intrinsics.

    Input is a PredictionString (e.g. from train dataframe)
    Output is two arrays:
        xs: x coordinates in the image (row)
        ys: y coordinates in the image (column)
    '''
    coords = str2coords(s)
    # (3, N) matrix of world-space [x, y, z] columns.
    world = np.array([[c['x'], c['y'], c['z']] for c in coords]).T
    projected = np.dot(camera_matrix, world).T
    # Perspective divide by depth (z = distance from the camera).
    projected[:, 0] /= projected[:, 2]
    projected[:, 1] /= projected[:, 2]
    return projected[:, 0], projected[:, 1]
# Collect the projected image coordinates of every training car.
xs, ys = [], []
for ps in train['PredictionString']:
    x, y = get_img_coords(ps)
    xs += list(x)
    ys += list(y)
# %% [code]
# Linear models relating world-space y to z (and to x and z) — the xz model
# is used later to refine predicted heights.
zy_slope = LinearRegression()
X = points_df[['z']]
y = points_df['y']
zy_slope.fit(X, y)
print('MAE without x:', mean_absolute_error(y, zy_slope.predict(X)))
# Will use this model later
xzy_slope = LinearRegression()
X = points_df[['x', 'z']]
y = points_df['y']
xzy_slope.fit(X, y)
print('MAE with x:', mean_absolute_error(y, xzy_slope.predict(X)))
print('\ndy/dx = {:.3f}\ndy/dz = {:.3f}'.format(*xzy_slope.coef_))
def _regr_preprocess(regr_dict, flip=False):
    """Normalise one car's regression-target dict in place.

    Scales translations toward unit range, shifts roll by pi, replaces pitch
    by its (sin, cos) encoding, and drops the model id. Mutates and returns
    *regr_dict*.
    """
    if flip:
        # Mirror the pose for a horizontally flipped image.
        regr_dict['x'] = -regr_dict['x']
        regr_dict['pitch'] = -regr_dict['pitch']
        regr_dict['roll'] = -regr_dict['roll']
    # for name in ['x', 'y', 'z']:
    #     regr_dict[name] = regr_dict[name] / 100
    regr_dict['x'] /= 40
    regr_dict['y'] /= 30
    regr_dict['z'] /= 180
    regr_dict['roll'] = rotate(regr_dict['roll'], np.pi)
    # (sin, cos) encoding avoids the angular wrap-around discontinuity.
    regr_dict['pitch_sin'] = sin(regr_dict['pitch'])
    regr_dict['pitch_cos'] = cos(regr_dict['pitch'])
    del regr_dict['pitch']
    del regr_dict['id']
    return regr_dict
def preprocess_image(img, flip=False):
    """Crop, downscale and normalise a raw frame to the network input.

    Crops the top/bottom margins and the rightmost strip, resizes by
    1/INPUT_SCALE, optionally mirrors horizontally, and scales pixel
    values into [0, 1] as float32.
    """
    img = img[CUT_from_TOP: CUTTED_PIXEL_DOWN_LOCATION, : CUTTED_PIXEL_RIGHT_LOCATION]
    new_h = img.shape[0] // INPUT_SCALE
    new_w = img.shape[1] // INPUT_SCALE
    img = cv2.resize(img, (new_w, new_h))
    if flip:
        img = img[:, ::-1]
    return (img / 255).astype('float32')
def Gus_kernel_at(mask, xs, ys, peak=1):
    """Splat a small Gaussian bump centred at (xs, ys) onto *mask*.

    Cells where the bump is positive are incremented by the bump value;
    the modified *mask* is returned. (Currently unused by the pipeline —
    see the commented call in get_mask_and_regr.)
    """
    grid_x, grid_y = np.meshgrid(
        np.linspace(0, MASK_SIZE[1] - 1, MASK_SIZE[1]),
        np.linspace(0, MASK_SIZE[0] - 1, MASK_SIZE[0]))
    # NOTE(review): *peak* scales both the distance and the amplitude here —
    # presumably only the amplitude was intended; confirm before reuse.
    dist = peak * np.sqrt(((grid_x - xs) ** 2) + ((grid_y - ys) ** 2))
    sigma, mu = 1.0, 0
    bump = peak * np.exp(-((dist - mu) ** 2 / (2.0 * sigma ** 2)))
    positive = bump > 0
    mask[positive] = bump[positive] + mask[positive]
    return mask
def get_mask_and_regr(img, labels, flip=False):
    """Build the centre-point mask and 7-channel regression map for one frame.

    Returns (mask, regr) at MASK_SIZE resolution; cars whose projected
    centre falls outside the cropped region are skipped.
    """
    mask = np.zeros([MASK_SIZE[0] , MASK_SIZE[1]], dtype='float32')
    regr_names = ['x', 'y', 'z', 'yaw', 'pitch', 'roll']
    regr = np.zeros([MASK_SIZE[0] , MASK_SIZE[1] ,7], dtype='float32')
    coords = str2coords(labels)
    # print(coords)
    xs, ys = get_img_coords(labels)
    for y, x, regr_dict in zip(xs, ys, coords):
        # Swap so that (x, y) are (row, column)-style image coordinates.
        x , y = y ,x
        # print(x,y, regr_dict)
        # NOTE: xs/ys are rebound here as cell indices; the zip iterator
        # above was built from the original arrays and is unaffected.
        xs = int((x ) // MASK_SCALE)
        # print('y',ys)
        ys = int(( y - CUT_from_TOP ) // MASK_SCALE )
        # print('y',ys)
        # print(x,y , ( CUTTED_PIXEL_RIGHT_LOCATION // MASK_SCALE ) , (CUTTED_PIXEL_DOWN_LOCATION // MASK_SCALE))
        if xs >= 0 and (xs < ( CUTTED_PIXEL_RIGHT_LOCATION // MASK_SCALE ))and ys >= 0 and ys < ((CUTTED_PIXEL_DOWN_LOCATION - CUT_from_TOP) // MASK_SCALE):
            # mask = Gus_kernel_at (mask ,xs, ys)
            mask[ys, xs] = 1
            regr_dict = _regr_preprocess(regr_dict, flip)
            # print((regr_dict))
            # Channels are stored in alphabetical key order.
            regr[ys, xs] = [regr_dict[n] for n in sorted(regr_dict)]
    # if flip:
    #     mask = np.array(mask[:,::-1])
    #     regr = np.array(regr[:,::-1])
    return mask, regr
# Smoke-check the preprocessing pipeline on one frame.
# NOTE(review): the mask is built from the *first* PredictionString row,
# which may not correspond to the image loaded above — confirm.
img = imread(train_images_dir.format('ID_8a6e65317'))
img = preprocess_image(img)
mask, regr = get_mask_and_regr(img, train['PredictionString'][0])
print('img.shape', img.shape, 'std:', np.std(img))
print('mask.shape', mask.shape, 'std:', np.std(mask))
print('regr.shape', regr.shape, 'std:', np.std(regr))
class CarDataset(Dataset ):
    """Car dataset.

    Yields [image, mask, regression-map] triples (or [image, ImageId] when
    *skip_masks* is set) built on the fly from the dataframe rows.
    """
    def __init__(self, dataframe, root_dir, training=False, transform=None , skip_masks=False):
        # dataframe: rows of (ImageId, PredictionString)
        # root_dir:  image path template with '{}' for the ImageId
        # training:  enables the random horizontal-flip augmentation
        self.df = dataframe
        self.root_dir = root_dir
        self.transform = transform  # NOTE(review): stored but never applied
        self.training = training
        self.skip_masks = skip_masks

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Get image name
        idx, labels = self.df.values[idx]
        img_name = self.root_dir.format(idx)
        # Augmentation
        flip = False
        if self.training:
            # ~10% chance of a horizontal flip.
            flip = np.random.randint(10) == 1
        # Read image
        img0 = imread(img_name)
        imgg = preprocess_image(img0, flip=flip)
        # HWC -> CHW for PyTorch.
        imgg = np.rollaxis(imgg, 2, 0)
        # Get mask and regression maps
        mask, regr = get_mask_and_regr(img0, labels, flip=flip)
        regr = np.rollaxis(regr, 2, 0)
        if (self.skip_masks):
            return [imgg, idx]
        return [imgg, mask, regr]
# 99/1 train/dev split of the filtered training set.
df_train, df_dev = train_test_split(train, test_size=0.01, random_state=42)
df_test = test
# Create dataset objects
# NOTE(review): training=False on train_dataset disables flip augmentation.
train_dataset = CarDataset(df_train, train_images_dir, training=False)
dev_dataset = CarDataset(df_dev, train_images_dir, training=False)
test_dataset = CarDataset(df_test, test_images_dir, training=False ,skip_masks = True)
test_dataset_all = CarDataset(train, train_images_dir, training=False , skip_masks = True )
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
dev_loader = DataLoader(dataset=dev_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
train_loader_All = DataLoader(dataset=test_dataset_all, batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
class double_conv(nn.Module):
    '''Two (conv3x3 => BN => ReLU => maxpool/2) stages followed by a final
    conv3x3 + BN projecting to 8 channels.

    The spatial size shrinks by 4x overall and the output channel count is
    always 8, independent of *out_ch*.
    '''
    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        # Layer order (and hence state_dict keys) kept stable for checkpoints.
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            nn.Conv2d(out_ch, 8, 3, padding=1),
            nn.BatchNorm2d(8)
        )

    def forward(self, x):
        return self.conv(x)
class MyModel(torch.nn.Module):
    """U-Net (segmentation_models_pytorch) with a resnet152 encoder whose
    segmentation head is replaced by ``double_conv``, so the network emits
    an 8-channel map (1 centre-mask logit + 7 regression channels) at 1/16
    of the input resolution.
    """
    def __init__(self):
        super(MyModel, self).__init__()
        # Encoders tried during experimentation:
        # 'resnet34', 'xception', 'mobilenet_v2', 'vgg19', 'resnet18'
        self.model_name = 'resnet152'
        print('model_name: ', self.model_name)
        # Fix: build the U-Net once. The original constructed a default
        # smp.Unet() and immediately overwrote it, wasting time and memory.
        self.base_model = smp.Unet(self.model_name, encoder_weights='imagenet')
        # self.base_model = smp.Unet(self.model_name, classes=3, activation='softmax')
        # Replace the stock head: 16 decoder channels in, 8 map channels out.
        self.base_model.segmentation_head = double_conv(16, 64)

    def forward(self, x):
        return self.base_model(x)
# print( model )
model = MyModel().to(device)
# NOTE(review): hard-codes 4 GPUs; fails on machines with fewer devices.
model = torch.nn.DataParallel(model, device_ids=[0, 1, 2,3])
optimizer = optim.Adam(model.parameters(), lr=0.001)
# step_size is counted in *batches* (the scheduler is stepped per batch),
# decaying the LR by 10x roughly twice over the whole run.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=max(n_epochs, 10) * len(train_loader) // 3, gamma=0.1)
# Shape sanity check with a dummy batch.
inp= torch.rand([1,3, 224, 832]).to(device)
print('input ---->',inp.shape)
print('output ---->',model(inp).shape)
def criterion(prediction, mask, regr, size_average=True):
    """Detection loss: binary cross-entropy on the centre-point channel plus
    a masked L1 loss on the 7 regression channels.

    prediction: (B, 8, H, W) raw output, channel 0 is the centre logit.
    mask:       (B, H, W) ground-truth centre mask.
    regr:       (B, 7, H, W) ground-truth regression targets.
    With size_average=False the loss is scaled back up by the batch size.
    """
    # Binary cross-entropy on the sigmoided centre channel.
    pred_mask = torch.sigmoid(prediction[:, 0])
    # mask_loss = mask * (1 - pred_mask)**2 * torch.log(pred_mask + 1e-12) + (1 - mask) * pred_mask**2 * torch.log(1 - pred_mask + 1e-12)
    bce = mask * torch.log(pred_mask + 1e-12) \
        + (1 - mask) * torch.log(1 - pred_mask + 1e-12)
    mask_loss = -bce.mean(0).sum()

    # L1 on the regression channels, averaged over ground-truth centres only.
    pred_regr = prediction[:, 1:]
    per_image = (torch.abs(pred_regr - regr).sum(1) * mask).sum(1).sum(1) / mask.sum(1).sum(1)
    regr_loss = per_image.mean(0)

    loss = mask_loss + regr_loss
    if not size_average:
        loss *= prediction.shape[0]
    return loss
def train_model(epoch, history=None):
    """Run one training epoch over train_loader.

    Appends the per-batch loss to the *history* dataframe (indexed by
    fractional epoch) when given. Uses the module-level model, optimizer,
    scheduler and device.
    """
    ep_since = time.time()  # NOTE(review): assigned but never used
    model.train()
    running_loss = 0.0
    for batch_idx, (img_batch, mask_batch, regr_batch) in enumerate(tqdm(train_loader)):
        since = time.time()
        img_batch = img_batch.to(device)
        mask_batch = mask_batch.to(device)
        regr_batch = regr_batch.to(device)
        optimizer.zero_grad()
        output = model(img_batch)
        loss = criterion(output, mask_batch, regr_batch)
        if history is not None:
            history.loc[epoch + batch_idx / len(train_loader), 'train_loss'] = loss.data.cpu().numpy()
        loss.backward()
        optimizer.step()
        # Scheduler stepped per *batch*; its step_size is defined in batches.
        exp_lr_scheduler.step()
        time_elapsed = time.time() - since
        print('Ep:{:2d} - {:3d}/{:3d} loss: {:.5f} , time: {:.4f} '.format(epoch,batch_idx,len(train_loader),loss.item(),time_elapsed))
        running_loss += loss.item() * img_batch.size(0)
    # Reports the *last* batch's loss, not the epoch mean (running_loss is
    # accumulated but never reported).
    print('Train Epoch: {} \tLR: {:.6f}\tLoss: {:.6f}'.format(
        epoch,
        optimizer.state_dict()['param_groups'][0]['lr'],
        loss.data))
def evaluate_model(epoch, history=None):
    """Compute the mean per-sample dev-set loss; record it in *history* if given."""
    model.eval()
    loss = 0
    with torch.no_grad():
        for img_batch, mask_batch, regr_batch in dev_loader:
            img_batch = img_batch.to(device)
            mask_batch = mask_batch.to(device)
            regr_batch = regr_batch.to(device)
            output = model(img_batch)
            # size_average=False so that dividing by the dataset size below
            # yields a true per-sample mean across uneven batch sizes.
            loss += criterion(output, mask_batch, regr_batch, size_average=False).data
    loss /= len(dev_loader.dataset)
    if history is not None:
        history.loc[epoch, 'dev_loss'] = loss.cpu().numpy()
    print('Dev loss: {:.4f}'.format(loss))
history = pd.DataFrame()
if(mode == 'eval'):
    torch.cuda.empty_cache()
    gc.collect()
    model.load_state_dict(torch.load(model_pretrained_name))
    print("Model is loaded.")
    evaluate_model(1)
if(mode == 'train'):
    for epoch in range(n_epochs):
        torch.cuda.empty_cache()
        gc.collect()
        train_model(epoch, history)
        evaluate_model(epoch, history)
        # Checkpoint the weights and the loss history after every epoch.
        torch.save(model.state_dict(), os.path.join(save_dir, '_ep_{}.model'.format(epoch)))
        with open(os.path.join(save_dir, 'history.pkl'), 'wb') as handle:
            pickle.dump(history, handle, protocol=pickle.HIGHEST_PROTOCOL)
# %% [code]
# Final weights (DataParallel-wrapped state dict).
torch.save(model.state_dict(), './model.pth')
"40876922+Alibhji@users.noreply.github.com"
] | 40876922+Alibhji@users.noreply.github.com |
e434f9c2a610eb2c094d73f1a9712b433276b975 | e1013a8613903dc187b26c806022005c40022f03 | /Menu/testsuite_Regression_Menu.py | 0e7f12e9175086d27eadf96fa469d71c2deccd93 | [] | no_license | jresmedina/RobotFramework-Selenium | 2e2330154afded20d4b5da78c3e26684383c4c79 | c1ec33f7d1221d4e404728c85d175ffcdbe546d6 | refs/heads/master | 2021-01-10T09:26:45.197745 | 2016-03-15T09:09:26 | 2016-03-15T09:09:26 | 53,927,059 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | URL = "https://example.com"
BASE_MENU_ELEMENT = "//div[@class='container container-menu']/div[2]/ul/li"
HOME_ID = 1
MENU01_ID = 2
MENU02_ID = 3
MENU03_ID = 4
MENU04_ID = 5
MENU_LIST = []
| [
"jresmedina@gmail.com"
] | jresmedina@gmail.com |
cbdaaa967126972432bc6090cd67e96988f39286 | 7d449d1105974f65d0112e97c68d8fe95c050b34 | /ROS/catkin_ws/build/arbotix_ros/arbotix_controllers/catkin_generated/pkg.installspace.context.pc.py | 980081c6ec8471c6003bca5769ad4faab4b978bc | [] | no_license | Nexxel/Practicas_Programacion_Robots | 3e39a5398a8e043b14664eaeedecfc75e83f5844 | 67b848d3d4f49a41561b38eae68eaf1650cdab24 | refs/heads/master | 2020-03-09T19:51:10.926129 | 2018-06-02T12:52:11 | 2018-06-02T12:52:11 | 128,244,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "arbotix_controllers"
PROJECT_SPACE_DIR = "/home/viki/catkin_ws/install"
PROJECT_VERSION = "0.10.0"
| [
"sergiogonzalezmuriel@gmail.com"
] | sergiogonzalezmuriel@gmail.com |
ef2e44fd61cee65de22d86008beaca9a7e445c87 | c5ea14333ad364e3b1a698c54e84cd8980ef0497 | /OpenCV-Face-detection/备份/DF&Use Own data.py | 1e746ff991881242afc61279a6725fb79083d219 | [
"MIT"
] | permissive | lingdantiancai/face-FD-FR | 2d0d5a48b9bf9d8dc5393723604a04ab85a91c9c | 48f1acafb4a4fc767c8d389a28e4b4e73246a7ea | refs/heads/master | 2020-03-16T01:08:21.910897 | 2018-05-29T08:41:25 | 2018-05-29T08:41:25 | 132,433,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | import cv2
import matplotlib.pyplot as plt
import time
import os
import numpy as np
from PIL import Image
haar_face_cascade = cv2.CascadeClassifier('classifier\mallick_cascades-master\haarcascades\haarcascade_frontalface_alt2.xml') #LEP分类器
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('gesture.yml')
img_path ='myface/test'
for images in os.listdir(img_path):
if images[-2:] == 'py':
continue
predict_image_pil =Image.open(img_path+'/'+images).convert('L')
predict_image = np.array(predict_image_pil,'uint8')
faces = haar_face_cascade.detectMultiScale(predict_image,scaleFactor=1.1,minNeighbors=7,minSize=(75,75) )
for (x,y,w,h) in faces:
nbr_predicted, conf = recognizer.predict(predict_image[y:y+h,x:x+w])
print("This picture is similiar to %s,and the value of similiarity is %s"%(nbr_predicted,conf))
print("Note:The lower of the similiarity,the more similiarity of picture")
cv2.rectangle(predict_image,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('Face',predict_image)
cv2.waitKey(1)
#经过了两次测试,发现还算挺准,但是好慢啊
| [
"lingdantiancai@163.com"
] | lingdantiancai@163.com |
54a8d1148a47715d9bf5f13933dd1376b40d2d27 | 3f2cdf7d2dcbae419ce863bb92e8e636e1ea5bed | /rest-api-python-scripts/central_modules/module_utils.py | 32be86a531288f341597981f104d1d170e943749 | [
"MIT"
] | permissive | Joe-Neville/central-examples-only | 5615436eae15bbf3f4883907be6a9f9ca5c1fb72 | d697d26364ef94b2506494538e718a742b33029f | refs/heads/master | 2021-01-07T03:50:49.674250 | 2020-08-06T11:11:47 | 2020-08-06T11:11:47 | 259,703,755 | 0 | 0 | MIT | 2020-04-28T17:18:37 | 2020-04-28T17:18:36 | null | UTF-8 | Python | false | false | 3,626 | py | import os
import sys
import json, yaml
import logging
import glob
from os.path import dirname, basename, isfile, join
try:
from pip import get_installed_distributions
except:
from pip._internal.utils.misc import get_installed_distributions
# Numeric values of the standard logging levels, keyed by level name
# (mirrors the logging module's own constants).
C_LOG_LEVEL = {
    "CRITICAL": 50,
    "ERROR": 40,
    "WARNING": 30,
    "INFO": 20,
    "DEBUG": 10,
    "NOTSET": 0
}
# Human-readable outcome labels keyed by stringified result code.
C_RES_CODE = {
    "-1": "FAILED",
    "0": "SKIPPED",
    "1": "SUCCESS"
}
# ANSI escape sequences for coloured terminal output.
C_COLORS = {
    "RED": "\033[1;31m",
    "BLUE": "\033[1;34m",
    "CYAN": "\033[1;36m",
    "GREEN": "\033[0;32m",
    "RESET": "\033[0;0m",
    "BOLD": "\033[;1m",
    "REVERSE": "\033[;7m"
}
def update_sys_path(path):
    """
    Summary: Insert *path* at position 1 of ``sys.path`` so modules under it
    (the Aruba Central library) become importable.
    """
    sys.path.insert(1, path)
def get_subdir_list(dir_name, with_path=True):
    """
    Summary: Return the immediate sub-directories of *dir_name*, either as
    full paths (default) or as bare directory names.
    """
    entries = os.listdir(dir_name)
    if with_path:
        return [os.path.join(dir_name, entry) for entry in entries
                if os.path.isdir(os.path.join(dir_name, entry))]
    return [entry for entry in entries
            if os.path.isdir(os.path.join(dir_name, entry))]
def get_files_from_dir(dir_name=".", file_type=".py"):
    """
    Summary: Return the full paths of the files directly under *dir_name*
    whose names end with *file_type*.
    """
    matches = []
    for entry in os.listdir(dir_name):
        full = join(dir_name, entry)
        if isfile(full) and entry.endswith(file_type):
            matches.append(full)
    return matches
def get_file_content(file_name):
    """
    Summary: Parse *file_name* as JSON or YAML (chosen by extension) and
    return the resulting object. On any failure — unreadable file, parse
    error, or unsupported extension — the process exits with a message,
    preserving the original CLI-oriented behaviour.
    """
    try:
        with open(file_name, "r") as fp:
            _, file_ext = os.path.splitext(file_name)
            if ".json" in file_ext:
                # json.load streams from the file object directly.
                return json.load(fp)
            elif file_ext in ['.yaml', '.yml']:
                return yaml.safe_load(fp.read())
            else:
                raise UserWarning("Provide valid inventory file "
                                  "format/extension [.json/.yaml/.yml]!")
    except Exception as err:
        # sys.exit is reliable even when the `site` builtins (exit) are
        # absent; SystemExit is not caught by the except above.
        sys.exit("exiting.. Unable to open file " + \
                 "%s with error %s!" % (file_name, err))
def console_logger(name, level="DEBUG"):
    """
    Summary: This method create an instance of console logger.
    Parameters:
        name (str): Parent name for log
        level (str): One valid logging level [CRITICAL, ERROR, WARNING, INFO,
                     DEBUG, NOTSET]. All logs above and equal to provided
                     level will be processed.
    Returns:
        logger (class logging): An instance of class logging
    """
    import importlib.util

    channel_handler = logging.StreamHandler()
    fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    date_fmt = '%Y-%m-%d %H:%M:%S'
    # Detect colorlog without importing pip internals:
    # pip.get_installed_distributions() was removed in modern pip releases,
    # which made the previous detection crash at import/run time.
    if importlib.util.find_spec('colorlog') is not None:
        import colorlog
        cformat = '%(log_color)s' + fmt
        formatter = colorlog.ColoredFormatter(
            cformat, date_fmt,
            log_colors={'DEBUG': 'bold_cyan', 'INFO': 'blue',
                        'WARNING': 'yellow', 'ERROR': 'red',
                        'CRITICAL': 'bold_red'})
    else:
        formatter = logging.Formatter(fmt, date_fmt)
    channel_handler.setFormatter(formatter)
    logger = logging.getLogger(name)
    logger.setLevel(C_LOG_LEVEL[level])
    logger.addHandler(channel_handler)
    return logger
| [
"noreply@github.com"
] | Joe-Neville.noreply@github.com |
dd9342b5d54c739cf9849c117a9e29a645ae9176 | 5884ceea5e7f2d6dfa1e2bcfb08af7229923fc1f | /test9/.env/bin/easy_install-3.6 | fb04d2eed2eb425ccfe10daa0996f2e4754018e7 | [] | no_license | Durant21/test9a | 118716614c3e8c45fea3ea28babf8613c7cbb6c0 | 8979cb3fd1bb770dca3a1078a43ca395a14e2c10 | refs/heads/master | 2020-07-11T14:21:23.703613 | 2019-08-26T21:52:49 | 2019-08-26T21:52:49 | 204,566,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | 6 | #!/home/dante/Projects/test9/test9/.env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".pyw"/".exe" suffix from argv[0] so the
    # program name matches what easy_install's main() expects, then delegate.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"durant.crimson@icloud.com"
] | durant.crimson@icloud.com |
6372135fc16eea5d5a7e51601a76f80eaa89bac6 | c61b1c1b65034215e85266db918c39946a3bafe7 | /athletes/serializers.py | 474ff1d5651ddb2dd3fc4d48d21a27ada56cb54d | [] | no_license | enias-oliveira/olympicHistory | 06cdc64f0ef06cf6b33472872539a0c57831f52c | e02f648d2bd127c8ae16c976fb8610005ac27604 | refs/heads/master | 2023-08-16T03:36:33.261214 | 2021-10-05T18:11:52 | 2021-10-05T18:11:52 | 411,797,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | from rest_framework import serializers
from medals.models import Medal
from medals.serializers import MedalSerializer
from games.models import Event
from games.serializers import EventSerializer
from .models import Athlete, Country
class AthleteMedalSerializer(MedalSerializer):
    # Slimmed-down medal representation used when medals are nested inside an
    # athlete payload (only id, class and event are exposed).
    class Meta:
        model = Medal
        fields = ["id", "medal_class", "event"]
class CountrySerializer(serializers.ModelSerializer):
    # Standalone country representation: id, full name and NOC code.
    class Meta:
        model = Country
        fields = ["id", "name", "noc"]
class AthleteCountrySerializer(CountrySerializer):
    # Country representation when nested under an athlete: drops the id.
    class Meta:
        model = Country
        fields = ["noc", "name"]
class AthleteEventSerializer(EventSerializer):
    # Render the related competition by its name rather than its primary key.
    competition = serializers.SlugRelatedField(
        slug_field="name",
        read_only=True,
    )
    class Meta:
        model = Event
        fields = "__all__"
class AthleteRelatedField(serializers.RelatedField):
    # Base class for nested relations on Athlete: reads render the full
    # serialized object, writes accept a primary key.  Subclasses must set
    # ``queryset`` and ``serializer_class``.
    def to_representation(self, value):
        serialized_data = self.serializer_class(value)
        return serialized_data.data
    def to_internal_value(self, data):
        # ``data`` is expected to be a primary key of the related object.
        return self.queryset.get(pk=data)
class AthleteCountryField(AthleteRelatedField):
    queryset = Country.objects.all()
    serializer_class = AthleteCountrySerializer
    def to_internal_value(self, data):
        # Countries are written by name (not pk), overriding the base lookup.
        return self.queryset.get(name=data)
class AthleteMedalsField(AthleteRelatedField):
    # Nested medal relation: read as AthleteMedalSerializer, written by pk.
    queryset = Medal.objects.all()
    serializer_class = AthleteMedalSerializer
class AthleteEventsField(AthleteRelatedField):
    queryset = Event.objects.all()
    serializer_class = AthleteEventSerializer
    def to_representation(self, value):
        # Graft the sport name onto the event instance before serializing so
        # the nested representation includes it without an extra field class.
        value.sport = value.competition.sport.name
        serialized_data = self.serializer_class(value)
        return serialized_data.data
class AthleteSerializer(serializers.ModelSerializer):
    # Full athlete payload with nested medals and country.
    medals = AthleteMedalsField(many=True, required=False)
    country = AthleteCountryField()
    class Meta:
        model = Athlete
        fields = "__all__"
    def to_representation(self, instance):
        # Replace the stored sex code (e.g. "M") with its human-readable
        # label from Athlete.SEX_CHOICES.
        current_representation = super().to_representation(instance)
        current_representation["sex"] = dict(Athlete.SEX_CHOICES)[
            current_representation["sex"]
        ]
        return current_representation
    def to_internal_value(self, data):
        # Inverse mapping: accept the human-readable label and store the code.
        # NOTE(review): this returns the raw dict without calling
        # super().to_internal_value(), so DRF field validation is skipped --
        # confirm this is intentional.
        if data.get("sex"):
            season_full_to_short = {v: k for k, v in Athlete.SEX_CHOICES}
            data["sex"] = season_full_to_short[data["sex"]]
        return data
| [
"eniasoliveira27@gmail.com"
] | eniasoliveira27@gmail.com |
52599c15446db56b3ff6fe002afcd2b7a6d62ef2 | f023b5cb59c4b41e28617294b458822968c3aa4c | /lab05/main.py | 1c31e73a6ed9bcc18a25f9768bc6cb008ec367a6 | [] | no_license | wcdbmv/IS | 4efd24bc850740ba6e8a86d8d1bfcb3a36ca7e3b | d81125830b95b19446dd4bff6f99f16155d452ec | refs/heads/master | 2023-01-24T23:27:38.226840 | 2020-12-09T20:58:24 | 2020-12-09T20:58:24 | 297,056,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,806 | py | import argparse
from sys import exit
from typing import Tuple
from Crypto import Random
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
def parse_args() -> argparse.Namespace:
    """Build and evaluate the command line of the digital-signature tool.

    Sub-commands:
      * keygen <pubkey_filename> <prtkey_filename>
      * sign   <filename> <prtkey_filename> <signature_filename>
      * check  <filename> <pubkey_filename> <signature_filename> [-q/--quiet]
    """
    parser = argparse.ArgumentParser(prog='Digital Signature')
    subparsers = parser.add_subparsers(dest='mode', required=True, help='sub-command help')

    keygen_cmd = subparsers.add_parser('keygen')
    for positional in ('pubkey_filename', 'prtkey_filename'):
        keygen_cmd.add_argument(positional)

    sign_cmd = subparsers.add_parser('sign')
    for positional in ('filename', 'prtkey_filename', 'signature_filename'):
        sign_cmd.add_argument(positional)

    check_cmd = subparsers.add_parser('check')
    for positional in ('filename', 'pubkey_filename', 'signature_filename'):
        check_cmd.add_argument(positional)
    check_cmd.add_argument('-q', '--quiet', action='store_true')

    return parser.parse_args()
def keygen() -> Tuple[bytes, bytes]:
    """Generate a fresh 2048-bit RSA key pair.

    Returns:
        (public_key, private_key): both PEM-encoded bytes.
    """
    random_generator = Random.new().read
    key_pair = RSA.generate(2048, random_generator)
    return key_pair.publickey().export_key('PEM'), key_pair.export_key('PEM')
def sign(msg: bytes, prtkey: bytes) -> bytes:
    """Sign *msg* with the PEM-encoded private key.

    Uses PKCS#1 v1.5 over a SHA-256 digest; returns the raw signature bytes.
    """
    sha256_hash = SHA256.new(msg)
    signer = pkcs1_15.new(RSA.import_key(prtkey))
    signature = signer.sign(sha256_hash)
    return signature
def check(msg: bytes, pubkey: bytes, signature: bytes) -> bool:
    """Return True iff *signature* is a valid PKCS#1 v1.5 / SHA-256
    signature of *msg* under the PEM-encoded public key."""
    sha256_hash = SHA256.new(msg)
    verifier = pkcs1_15.new(RSA.import_key(pubkey))
    try:
        verifier.verify(sha256_hash, signature)
        return True
    except ValueError:
        # pkcs1_15.verify raises ValueError when the signature does not match.
        return False
def read(filename: str) -> bytes:
    """Return the entire contents of *filename* as bytes."""
    with open(filename, 'rb') as infile:
        return infile.read()
def write(filename: str, content: bytes) -> None:
    """Overwrite *filename* with *content* (binary mode)."""
    with open(filename, 'wb') as outfile:
        outfile.write(content)
def main() -> None:
    """Dispatch the selected sub-command: keygen, sign, or check.

    check exits with status 1 on an invalid signature; -q/--quiet
    suppresses the result message in either case.
    """
    args = vars(parse_args())
    if args['mode'] == 'keygen':
        # Generate a key pair and persist both halves to the given paths.
        pubkey, prtkey = keygen()
        write(args['pubkey_filename'], pubkey)
        write(args['prtkey_filename'], prtkey)
    elif args['mode'] == 'sign':
        msg = read(args['filename'])
        prtkey = read(args['prtkey_filename'])
        signature = sign(msg, prtkey)
        write(args['signature_filename'], signature)
    elif args['mode'] == 'check':
        msg = read(args['filename'])
        pubkey = read(args['pubkey_filename'])
        signature = read(args['signature_filename'])
        if check(msg, pubkey, signature):
            if not args['quiet']:
                print('Valid signature')
        else:
            if not args['quiet']:
                print('Invalid signature')
            # Non-zero exit so shell scripts can test the verification result.
            exit(1)
if __name__ == '__main__':
main()
| [
"kerimov.dev@yandex.ru"
] | kerimov.dev@yandex.ru |
dd176ed7dd72195f2957482392238c1148615ab4 | 2b4905234339a9ba4c11e1f094672cc8c8d4bf59 | /CS475-Final-main/goFish.py | d78ab6bb89b9281fa1b74e7fd66ad0e7bf541c04 | [] | no_license | fcgriff-html/socket-program | 0e082f877dfdcc12b17c505109dbe1f84dbedd95 | 872d300f11fce02dbb7f26a9ba711d94e88f37d3 | refs/heads/main | 2023-06-25T15:20:53.242117 | 2021-07-22T21:48:14 | 2021-07-22T21:48:14 | 388,605,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,295 | py | #each player needs 7 out of 52 unique cards
#each 52-14 = 38 fishing cards
#turn counter
#major if statement: if [request] is in [players][deck] then send [request] card to [opponet][deck]
#else:
#go fish
#functions: generate deck and fish pile
#generate deck and separate
#creates array w 38 spots, two "hands" with 7 cards each
#randomly populate the hands
#deck = 2Darray [4][12]
#pick 14 random cards and sort them into temp array[14]
#if index of temp is even, send it to player, if odd send to opponet
#copy deck into fishing pile unless the card has been drawn,
#go_fish function to draw random card and update the fishing pile
#game stops when the pile counter hits zero or the quit command is entered
# the game is one by the player with the most "books"
# "book" = four cards of the same rank, i.e. for Queens or four 8s
#skeleton code
import numpy as np
import socket as sock
import random
#each player needs 7 out of 52 unique cards
#each 52-14 = 38 fishing cards
#turn counter
#major if statement: if [request] is in [players][deck] then send [request] card to [opponet][deck]
#else:
#go fish
#functions: generate deck and fish pile
#generate deck and separate
#creates array w 38 spots, two "hands" with 7 cards each
#randomly populate the hands
#deck = 2Darray [4][52]
#pick 14 random cards and sort them into temp array[14]
#if index of temp is even, send it to player, if odd send to opponet
#copy deck into fishing pile unless the card has been drawn,
#go_fish function to draw random card and update the fishing pile
#game stops when the pile counter hits zero or the quit command is entered
#skeleton code
#constructor to generate 2dArray
#deck counter
class deck():
    """A deck of playing cards for Go Fish.

    NOTE(review): the original class body carried two large blocks of dead
    prototype code preserved as triple-quoted string statements; they had no
    runtime effect and have been removed.
    """

    def __init__(self):
        # Build one Card per (suit, rank) combination.
        # Bug fix: the original called ``card(suit, rank)`` -- a NameError,
        # since the class defined below is ``Card``.
        # NOTE(review): ranges mirror the original (4 suits x 12 ranks) even
        # though Card.ranks lists 13 names -- confirm the intended deck size.
        self.cards = []
        for suit in range(4):
            for rank in range(12):
                self.cards.append(Card(suit, rank))

    def contains(self, card):
        """Return True if a card with the same rank and suit is in the deck.

        Bug fixes: the original signature lacked ``self`` (so bound calls
        raised TypeError) and it scanned an undefined global ``cards``; it
        also kept looping after a match instead of returning early.
        """
        for current_card in self.cards:
            if current_card.isSameCard(card):
                return True
        return False

    def removeCard(self, card):
        """Remove the first card matching *card* from the deck, if present.

        Added because getRandomCard() calls deck.removeCard(), which did not
        exist (AttributeError at runtime).
        """
        for i, current_card in enumerate(self.cards):
            if current_card.isSameCard(card):
                del self.cards[i]
                return
class Card(object):
    """A single playing card identified by (suit, rank) indices into the
    ``suits`` and ``ranks`` name tables."""

    def __init__(self, suit, rank):
        self.suit = suit
        self.rank = rank

    suits = ['Clubs', 'Diamonds', 'Hearts', 'Spades']
    ranks = ['Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace']

    # checks if two cards are of the same rank (suit ignored)
    def isSameRank(self, otherCard):
        return self.rank == otherCard.rank

    # checks if two cards are the same card (same rank AND same suit)
    def isSameCard(self, otherCard):
        # Bug fix: the original returned the tuple
        # ``(self.rank == otherCard.rank, self.suit)``, which is always
        # truthy, so every card compared "equal" to every other card.
        return self.rank == otherCard.rank and self.suit == otherCard.suit
class Player(object):
    """A Go Fish participant with a hand of cards and a book score."""

    def __init__(self):
        # Hand starts empty; score is incremented once per completed book.
        self.hand = []
        self.score = 0

    def dealHand(self, deck):
        """Draw random cards from *deck* until this hand holds 7 cards."""
        while len(self.hand) < 7:
            drawn = getRandomCard(deck)
            self.hand.append(drawn)
#Method to generate random card
def getRandomCard(deck):
    """Draw a uniformly random card still present in *deck*.

    Generates random (suit, rank) pairs until one matches a card in the
    deck, removes that card from the deck, and returns it.
    """
    # Bug fix: the original built candidates via ``__init__(card, suit, rank)``
    # -- ``__init__`` is undefined at module level; a card must be
    # constructed with Card(suit, rank).
    suit = random.randint(0, 3)
    rank = random.randint(0, 11)
    newCard = Card(suit, rank)
    # Re-roll until the candidate is actually in the deck.
    while not (deck.contains(newCard)):
        suit = random.randint(0, 3)
        rank = random.randint(0, 11)
        newCard = Card(suit, rank)
    # Remove the drawn card so it cannot be drawn twice.
    deck.removeCard(newCard)
    return newCard
| [
"noreply@github.com"
] | fcgriff-html.noreply@github.com |
2ba268102fe8f16978356394926d7b179fdf9dc9 | 69819f1d554e48b6d1f280e4d4785b35734e6235 | /py_learning/test3_4.py | 55da6b2b3bb96778e1fca613dd93c9a228e22b41 | [] | no_license | SamKaiYang/python_code | 8b60b4b68ab7cd4261e237972c0f6e45fc2dced5 | fb16471aa96082efc906fd8129cecd3a8b19e8a0 | refs/heads/master | 2020-04-26T03:26:38.041377 | 2019-04-11T06:38:09 | 2019-04-11T06:38:09 | 173,267,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,202 | py | import numpy
###--------------------ch.3-------------------
years_list = [1996 ,1997 ,1998 ,1999 ,2000]
print("----------------ch3----------------------------")
print("3.1:",years_list)
print("-----------------------------------------------")
print("3.2:",years_list[2])
print("-----------------------------------------------")
print("3.3:",years_list[-1])
print("-----------------------------------------------")
thing = ["mozzarella" , "cinderella" ,"salmonella"]
print("3.4:",thing)
print("-----------------------------------------------")
print("3.5:",thing[1].capitalize())
print("-----------------------------------------------")
thing[0].capitalize()
thing[0] = thing[0].upper()
print("3.6:",thing)
print("-----------------------------------------------")
del thing[2]
print("3.7:",thing)
print("-----------------------------------------------")
surprise = ["Croucho","Chico","Harpo"]
print("3.8:",surprise)
print("-----------------------------------------------")
def reverse(str):
    """Return *str* with its characters in reverse order.

    The parameter name ``str`` (shadowing the builtin) is kept for
    compatibility with existing callers.
    """
    # Idiomatic O(n) slice instead of the original char-by-char ``+=`` loop,
    # which built the result quadratically.
    return str[::-1]
surprise[2] =surprise[2].lower()
surprise[2] =reverse(surprise[2])
surprise[2]= surprise[2].capitalize()
print("3.9:",surprise)
print("-----------------------------------------------")
e2f = {"dog": 'chien', "cat": 'chat', "walrus": 'morse'}
print("3.10:",e2f)
print("-----------------------------------------------")
print("3.11:",e2f["walrus"])
print("-----------------------------------------------")
f2e = {v: k for k, v in e2f.items()}
print("3.12:",e2f.items())
print("-----------------------------------------------")
print("3.13:",f2e["chien"])
print("-----------------------------------------------")
print("3.14:",e2f.keys())
print("-----------------------------------------------")
life = {"animals": {'cats':["Henri","Grumpy","Lucy"],'octopi':None,'emus':None},"plants": None, "other": None, }
print("3.15:",life)
###--------------------ch.4------------------
print("-----------------ch4---------------------------")
guess_me = 7
print("4.1:")
if guess_me < 7 :
print("too low")
elif guess_me >7:
print("too high")
elif guess_me == 7:
print("just righ")
print("-----------------------------------------------")
print("4.2:")
start = 1
while(1):
if start < guess_me:
print("too low")
elif start == guess_me:
print("found it")
break
elif start > guess_me:
print("Oops!!")
break
start += 1
print("-----------------------------------------------")
num = [3,2,1,0]
for i in range(len(num)):
print("4.3:",num[i])
print("-----------------------------------------------")
number_list = [i for i in range(0,10) if i%2==0]
print("4.4:",number_list)
print("-----------------------------------------------")
word = 'squares'
letter_count = {letter:pow(word.count(letter),2) for letter in set(word)}
print("4.5:", letter_count)
print("-----------------------------------------------")
set1 = {number for number in range(10) if number%2 == 1}
print("4.6:", set1)
print("-----------------------------------------------")
def good():
    """Return the fixed list of names used by exercise 4.8."""
    return ['Harry', 'Ron', 'hermione']
print("4.8:",good()) | [
"tt00621212@gmail.com"
] | tt00621212@gmail.com |
f80ff49f69e510f0cee9ed67a6c941c5079b240f | 307b69f77edc6e3979893633341a24e7317a3d71 | /deploy-etcd-cluster.py | 1459c8c0c7c7299d6ba3a3587c93d74259ebeff2 | [] | no_license | ngocngv/mesos-based | b373ce69417d65ae26ec1f19135b64cc52da895c | 627a73fbd77a8329f27a67b6239e5e4e625b0c3c | refs/heads/master | 2021-06-11T06:48:05.201741 | 2016-12-13T09:14:21 | 2016-12-13T09:14:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,073 | py |
# CoreOS Etcd – A highly available key-value store for shared configuration and service discovery.
# CoreOS Etcd is used by Calico to store network configurations.
# create directory /var/lib/etcd, /etc/etcd, add the etcd user and group:
mkdir /var/lib/etcd
mkdir /etc/etcd
groupadd -r etcd
useradd -r -g etcd -d /var/lib/etcd -s /sbin/nologin -c "etcd user" etcd
chown -R etcd:etcd /var/lib/etcd
#
https://github.com/coreos/etcd/releases/download/v3.0.15/etcd-v3.0.15-linux-amd64.tar.gz
tar xvf etcd-*.tar.gz
cp etcd /usr/local/bin/
cp etcdctl /usr/local/bin/
# https://docs.onegini.com/cim/idp/2.39.01-SNAPSHOT/installation/etcd.html
# https://n40lab.wordpress.com/2016/08/01/installing-coreos-etcd-server-on-centos-7/
# http://severalnines.com/blog/mysql-docker-multi-host-networking-mysql-containers-part-2-calico
#
# edit /etc/etcd/etcd.conf
#----------------------------------------------------------------------------------
cat << '__EOF__' | tee /etc/etcd/etcd.conf
# [member]
ETCD_NAME="etcd_1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
#ETCD_SNAPSHOT_COUNT="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_LISTEN_PEER_URLS="http://127.0.0.1:2380"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
#ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379"
#ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://172.17.42.1:2379,http://172.16.181.132:2379"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
# [cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://127.0.0.1:2380"
#ETCD_INITIAL_CLUSTER="etcd_1=http://127.0.0.1:2380,etcd_2=http://172.17.42.1:2380,etcd_3=http://172.16.181.132:2380"
ETCD_INITIAL_CLUSTER="etcd_1=http://127.0.0.1:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-1"
#
#ETCD_ADVERTISE_CLIENT_URLS="http://127.0.0.1:2379"
#ETCD_ADVERTISE_CLIENT_URLS="http://127.0.0.1:2379,http://172.17.42.1:2379,http://172.16.181.132:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_STRICT_RECONFIG_CHECK="false"
#[proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#[security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
[logging]
ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
ETCD_LOG_PACKAGE_LEVELS="info"
#[profiling]
#ETCD_ENABLE_PPROF="false"
__EOF__
# edit /usr/lib/systemd/system/etcd.service
#------------------------------------------------------------------------------------
cat << '__EOF__' | tee /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Cluster
After=network.target
After=network.target
Wants=network.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/local/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\""
#ExecStart=/usr/local/bin/etcd
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
__EOF__
# Make sure to use all our CPUs, because etcd can block a scheduler thread
# export GOMAXPROCS=`nproc`
# systemctl daemon-reload
# systemctl enable etcd
# systemctl start etcd
# systemctl status etcd
# etcd master:
./etcd --name calico0 --initial-advertise-peer-urls http://192.168.56.100:2380 \
--listen-peer-urls http://192.168.56.100:2380 \
--listen-client-urls http://192.168.56.100:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://192.168.56.100:2379 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster calico0=http://192.168.56.100:2380,calico1=http://192.168.56.101:2380 \
--initial-cluster-state new
# etcd slave:
./etcd --name calico1 --initial-advertise-peer-urls http://192.168.56.101:2380 \
--listen-peer-urls http://192.168.56.101:2380 \
--listen-client-urls http://192.168.56.101:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://192.168.56.101:2379 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster calico0=http://192.168.56.100:2380,calico1=http://192.168.56.101:2380 \
--initial-cluster-state new
# master
export ETCD_AUTHORITY=192.168.56.100:2379
calicoctl node --ip=192.168.56.100
# slave
export ETCD_AUTHORITY=192.168.56.100:2379
calicoctl node --ip=192.168.56.101
# Environment
# 172.17.42.30 kube-master
# 172.17.42.31 kube-node1
# 172.17.42.32 kube-node2
# Start calico on master and node:
export ETCD_AUTHORITY=172.17.42.30:2379
calicoctl node --ip=172.17.42.31
#
export ETCD_AUTHORITY=172.17.42.30:2379
calicoctl node --ip=172.17.42.32
# calicoctl status
| [
"noreply@github.com"
] | ngocngv.noreply@github.com |
25342e1430ff6fac61ef251498dfe15a338fe8bb | f0efdee65fa6335aa92873bc37651c57a2bf4730 | /Problems/Easy/257. binary-tree-paths.py | 1e16b1534cd00dfe3c15817d625a9c52a14ff0c0 | [] | no_license | deekshak012/LeetCode | cb45a02926c33d2e77ac72619362ddf0e666a849 | 149fed15556d7e231de79c65d12ab33a0dd7a96b | refs/heads/master | 2021-11-20T20:00:40.649994 | 2021-08-20T19:11:34 | 2021-08-20T19:11:34 | 243,912,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def binaryTreePaths(self, root):
        """Return every root-to-leaf path of *root* as 'a->b->c' strings.

        Paths are emitted left subtree before right subtree, matching the
        original stack-based traversal order.  An empty tree yields [].
        """
        if not root:
            return []
        paths = []

        def walk(node, prefix):
            label = prefix + str(node.val)
            if node.left is None and node.right is None:
                paths.append(label)
                return
            if node.left:
                walk(node.left, label + "->")
            if node.right:
                walk(node.right, label + "->")

        walk(root, "")
        return paths
| [
"deekshak012@gmail.com"
] | deekshak012@gmail.com |
905a119de0d7d34cc786486287e9cd094818c475 | c82cefee68d557b0790483500b58e7e2988bd33b | /lib/web.py | 2b001db7cb369359b02c9788fdcdc55fce7b4de8 | [
"MIT"
] | permissive | amadeobrands/electrum | c5def78731faf6908cf48342ce3d291547d1a52f | 459100eda0f3cdcf6d75e304d08345a627364078 | refs/heads/master | 2021-08-29T11:38:29.221991 | 2017-12-13T11:00:16 | 2017-12-13T21:05:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,964 | py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from decimal import Decimal
import os
import re
import shutil
import threading
import urllib
import urllib.parse
import urllib.request

from .address import Address
from . import bitcoin
from .networks import NetworkConstants
# bh2u / format_satoshis_plain / print_error are referenced below but were
# never imported; pull them in from the package's util module.
from .util import bh2u, format_satoshis_plain, print_error
mainnet_block_explorers = {
'Blockchair.com': ('https://blockchair.com/bitcoin-cash',
Address.FMT_LEGACY,
{'tx': 'transaction', 'addr': 'address'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBCC',
Address.FMT_LEGACY,
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
Address.FMT_LEGACY,
{'tx': 'tx', 'addr': 'address'}),
}
def BE_info():
    """Return the block-explorer table for the active chain."""
    # Pick the testnet table when the network constants say so.
    return testnet_block_explorers if NetworkConstants.TESTNET else mainnet_block_explorers
def BE_tuple(config):
    """Look up the configured explorer's (url_base, addr_fmt, parts) tuple,
    or None if the configured name is unknown."""
    explorer_name = BE_from_config(config)
    return BE_info().get(explorer_name)
def BE_from_config(config):
    """Return the block-explorer name stored in *config*, defaulting to
    Blockchair.com when none is set."""
    default_explorer = 'Blockchair.com'
    return config.get('block_explorer', default_explorer)
def BE_URL(config, kind, item):
    """Build a block-explorer URL for *item*.

    kind is 'tx' or 'addr'; item is a txid string or an Address instance.
    Returns None when no explorer is configured or *kind* is unsupported.
    """
    be_tuple = BE_tuple(config)
    if not be_tuple:
        return
    url_base, addr_fmt, parts = be_tuple
    kind_str = parts.get(kind)
    if not kind_str:
        return
    if kind == 'addr':
        # Addresses must be rendered in the explorer's expected format.
        assert isinstance(item, Address)
        item = item.to_string(addr_fmt)
    return "/".join([url_base, kind_str, item])
def BE_sorted_list():
    """Return the available block-explorer names in alphabetical order."""
    return sorted(BE_info().keys())
def create_URI(addr, amount, message):
    """Serialize a payment request into a 'bitcoincash:' URI.

    amount is in satoshis; message, if given, is percent-encoded into the
    query string.  Returns "" when *addr* is not an Address instance.
    """
    if not isinstance(addr, Address):
        return ""
    path = addr.to_ui_string()
    query = []
    if amount:
        # NOTE(review): format_satoshis_plain is not defined in this file --
        # presumably imported from the package's util module; confirm.
        query.append('amount=%s'%format_satoshis_plain(amount))
    if message:
        query.append('message=%s'%urllib.parse.quote(message))
    p = urllib.parse.ParseResult(scheme='bitcoincash', netloc='', path=path, params='', query='&'.join(query), fragment='')
    return urllib.parse.urlunparse(p)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
    """Parse a 'bitcoincash:' payment URI (or bare address) into a dict.

    Recognised query keys: amount (decimal BTC or '<mantissa>X<exponent>'
    form, converted to satoshis), message/memo, time, exp, sig, r, name.
    When *on_pr* is given and the URI references a payment request (an 'r'
    key, or 'name' plus 'sig'), the request is fetched on a daemon thread
    and passed to on_pr.

    Raises an exception for a wrong scheme, a malformed address/amount, or
    a duplicated query key.
    """
    if ':' not in uri:
        # Bare address: validate it and return early.
        Address.from_string(uri)
        return {'address': uri}
    u = urllib.parse.urlparse(uri)
    if u.scheme != 'bitcoincash':
        # Bug fix: was ``raise BaseException`` -- BaseException is the root
        # of KeyboardInterrupt/SystemExit too and should not be raised.
        raise Exception("Not a bitcoincash URI")
    address = u.path
    # python for android fails to parse query
    if address.find('?') > 0:
        address, query = u.path.split('?')
        pq = urllib.parse.parse_qs(query)
    else:
        pq = urllib.parse.parse_qs(u.query)
    for k, v in pq.items():
        if len(v) != 1:
            raise Exception('Duplicate Key', k)
    out = {k: v[0] for k, v in pq.items()}
    if address:
        Address.from_string(address)
        out['address'] = address
    if 'amount' in out:
        am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
        if m:
            # '<mantissa>X<exp>' form: exponent is relative to 10^8 satoshis.
            k = int(m.group(2)) - 8
            amount = Decimal(m.group(1)) * pow(10, k)
        else:
            amount = Decimal(am) * bitcoin.COIN
        out['amount'] = int(amount)
    if 'message' in out:
        out['message'] = out['message']
        out['memo'] = out['message']
    if 'time' in out:
        out['time'] = int(out['time'])
    if 'exp' in out:
        out['exp'] = int(out['exp'])
    if 'sig' in out:
        # NOTE(review): bh2u is not defined in this file -- presumably from
        # the package's util module; confirm.
        out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if on_pr and (r or (name and sig)):
        def get_payment_request_thread():
            from . import paymentrequest as pr
            if name and sig:
                # Request is embedded in the URI itself.
                s = pr.serialize_request(out).SerializeToString()
                request = pr.PaymentRequest(s)
            else:
                # Request must be fetched from the 'r' URL.
                request = pr.get_payment_request(r)
            if on_pr:
                on_pr(request)
        t = threading.Thread(target=get_payment_request_thread)
        t.setDaemon(True)
        t.start()
    return out
def check_www_dir(rdir):
    """Ensure the local web root *rdir* exists and holds the static assets.

    Creates the directory, copies the bundled index.html on first run, and
    downloads any missing third-party JS/CSS files from their CDNs.
    """
    if not os.path.exists(rdir):
        os.mkdir(rdir)
    index = os.path.join(rdir, 'index.html')
    if not os.path.exists(index):
        # NOTE(review): print_error is not defined in this file -- presumably
        # from the package's util module; confirm.
        print_error("copying index.html")
        src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
        shutil.copy(src, index)
    files = [
        "https://code.jquery.com/jquery-1.9.1.min.js",
        "https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
        "https://code.jquery.com/ui/1.10.3/jquery-ui.js",
        "https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
    ]
    for URL in files:
        # Derive the local filename from the URL path; only fetch if absent.
        path = urllib.parse.urlsplit(URL).path
        filename = os.path.basename(path)
        path = os.path.join(rdir, filename)
        if not os.path.exists(path):
            print_error("downloading ", URL)
            urllib.request.urlretrieve(URL, path)
| [
"kyuupichan@gmail.com"
] | kyuupichan@gmail.com |
b47a49230bb76f8679dff137a79ff5e845e66162 | 0ad56af74f51280e0f48415e93f4ce80d78e9294 | /tango_with_django_project/rango/migrations/0002_auto_20161103_0137.py | e1e4e55a882d8194b7ef45975f833853d5655b2b | [] | no_license | danmcv/dream_command | 1ebf81ed74a1e9581d9b257a8a2594c147ebabfe | bb568a2f2d447e2b0e3840df68f886476c80d643 | refs/heads/master | 2021-01-13T13:12:29.026101 | 2016-11-03T18:46:50 | 2016-11-03T18:46:50 | 72,687,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-11-03 01:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: fixes Category's plural label in the admin
    # ("Categories" instead of "Categorys") and adds a like counter to Page.

    dependencies = [
        ('rango', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='category',
            options={'verbose_name_plural': 'Categories'},
        ),
        migrations.AddField(
            model_name='page',
            name='likes',
            # Existing rows get 0 likes by default.
            field=models.IntegerField(default=0),
        ),
    ]
| [
"danmcv5@gmail.com"
] | danmcv5@gmail.com |
8bfaef7ec980dd715efb97bc3942464fa7a91481 | 387b4a53485b175d2c8c7bca7f3429ad2abbb4f0 | /single_stage_model/detect_head/Anchor_utils.py | 473632f0947213666ba958459333e1c0f70b1d1d | [] | no_license | liangzhao123/IOU-SSD | 50cf3a52e8b306b024d0396b76bd3931c8a15434 | b53a1659ffe197da8eeca0f4a35a4a4571db22f4 | refs/heads/main | 2023-06-25T08:15:03.553378 | 2021-07-31T17:40:49 | 2021-07-31T17:40:49 | 321,982,002 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | import torch
import numpy as np
class AnchorGenertor(object):
def __init__(self,anchor_range,anchor_generator_config):
super().__init__()
self.anchor_generator_cfg = anchor_generator_config
self.anchor_range = anchor_range
self.anchor_sizes = [config['sizes'] for config in anchor_generator_config]
self.anchor_rotations = [config['rotations'] for config in anchor_generator_config]
self.anchor_heights = [config['anchor_bottom_heights'] for config in anchor_generator_config]
self.align_center = [config.get('align_center', False) for config in anchor_generator_config]
assert len(self.anchor_sizes) == len(self.anchor_rotations) == len(self.anchor_heights)
self.num_of_anchor_sets = len(self.anchor_sizes)
def generator(self,grid_sizes,torch_enable=False):
assert len(grid_sizes) == self.num_of_anchor_sets
all_anchors = []
num_anchors_per_location = []
for grid_size, anchor_size, anchor_rotation, anchor_height, align_center in zip(
grid_sizes, self.anchor_sizes, self.anchor_rotations, self.anchor_heights, self.align_center):
num_anchors_per_location.append(len(anchor_rotation) * len(anchor_size) * len(anchor_height))
if align_center:
x_stride = (self.anchor_range[3] - self.anchor_range[0]) / grid_size[0]
y_stride = (self.anchor_range[4] - self.anchor_range[1]) / grid_size[1]
x_offset, y_offset = x_stride / 2, y_stride / 2
else:
x_stride = (self.anchor_range[3] - self.anchor_range[0]) / (grid_size[0] - 1)
y_stride = (self.anchor_range[4] - self.anchor_range[1]) / (grid_size[1] - 1)
x_offset, y_offset = 0, 0
x_shifts = torch.arange(
self.anchor_range[0] + x_offset, self.anchor_range[3] + 1e-5, step=x_stride, dtype=torch.float32,
).cuda()
y_shifts = torch.arange(
self.anchor_range[1] + y_offset, self.anchor_range[4] + 1e-5, step=y_stride, dtype=torch.float32,
).cuda()
z_shifts = x_shifts.new_tensor(anchor_height)
num_anchor_size, num_anchor_rotation = anchor_size.__len__(), anchor_rotation.__len__()
anchor_rotation = x_shifts.new_tensor(anchor_rotation)
anchor_size = x_shifts.new_tensor(anchor_size)
x_shifts, y_shifts, z_shifts = torch.meshgrid([
x_shifts, y_shifts, z_shifts
]) # [x_grid, y_grid, z_grid]
anchors = torch.stack((x_shifts, y_shifts, z_shifts), dim=-1) # [x, y, z, 3]
anchors = anchors[:, :, :, None, :].repeat(1, 1, 1, anchor_size.shape[0], 1)
anchor_size = anchor_size.view(1, 1, 1, -1, 3).repeat([*anchors.shape[0:3], 1, 1])
anchors = torch.cat((anchors, anchor_size), dim=-1)
anchors = anchors[:, :, :, :, None, :].repeat(1, 1, 1, 1, num_anchor_rotation, 1)
anchor_rotation = anchor_rotation.view(1, 1, 1, 1, -1, 1).repeat(
[*anchors.shape[0:3], num_anchor_size, 1, 1])
anchors = torch.cat((anchors, anchor_rotation), dim=-1) # [x, y, z, num_size, num_rot, 7]
anchors = anchors.permute(2, 1, 0, 3, 4, 5).contiguous()
# anchors = anchors.view(-1, anchors.shape[-1])
anchors[..., 2] += anchors[..., 5] / 2 # shift to box centers
if torch_enable:
all_anchors.append(anchors)
else:
all_anchors.append(anchors.cpu().numpy())
return all_anchors, num_anchors_per_location
| [
"1094036832@qq.com"
] | 1094036832@qq.com |
dc3c45f84143ef22b653215e6778974cfd0638b6 | 9fed13fac9609711cf7d7357e346cfb4699256ee | /Pipeline_Code/ROSMAP_data_validation_experiments_helpers.py | d427f6a94f59bfd9a56228b72ce578411b0318e1 | [
"MIT"
] | permissive | suinleelab/MD-AD | b6129a293b61b5d192d068d1d0a7651f47abd78d | b7fc786527e6a1462b3bb67b0ee0a20dcc43e167 | refs/heads/main | 2023-05-27T01:31:43.107272 | 2023-05-16T18:35:10 | 2023-05-16T18:35:10 | 322,092,256 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,710 | py | import h5py
import pandas as pd
import numpy as np
from models import multitask_mlp
import keras
from keras.callbacks import CSVLogger
from keras import backend as K
import gc
from sklearn.model_selection import ParameterGrid
import os
from configs import *
class DataValidationExperiment:
    """Cross-dataset validation experiment for the MD-AD multitask MLP.

    Loads one pre-split fold of expression data (ACT/MSBB-RNA/ROSMAP, PCA
    transformed), restricts train/validation samples to the requested source
    datasets, then trains a multitask model over six neuropathology phenotypes
    and saves per-epoch validation predictions to HDF5.
    """
    def __init__(self, fold_idx, train_datasets, test_datasets):
        # fold_idx: index of the pre-computed CV split file (<fold_idx>.h5)
        # train_datasets / test_datasets: dataset filenames used to filter samples
        self.fold_idx = fold_idx
        self.train_datasets = train_datasets
        self.test_datasets = test_datasets
        X_train, X_valid, y_train, y_valid = self.load_data_for_fold(fold_idx)
        self.X_train = X_train
        self.X_valid = X_valid
        self.y_train = y_train
        self.y_valid = y_valid
    def load_data_for_fold(self, fold_idx):
        """Load, filter, shuffle and normalize one fold.

        Returns (X_train, X_valid, y_train, y_valid) where the y's are dicts
        mapping phenotype name -> float array (categorical phenotypes are
        rescaled to [0, 1] by dividing by num_categories - 1).
        """
        # Leaving this hardcoded for now. I assume it won't change anytime soon (or ever)
        phenotypes = ["CERAD", "PLAQUES", "ABETA_IHC", "BRAAK", "TANGLES", "TAU_IHC"]
        num_components = 500
        num_cats = {"CERAD": 4, "BRAAK": 6}
        data_form = "ACT_MSBBRNA_ROSMAP_PCASplit"
        path_to_folders = path_to_MDAD_data_folders  # from configs (star import)
        path_to_split_data = path_to_folders + data_form
        # ------------ LOAD DATA ---------------------------------------------------- #
        with h5py.File(path_to_split_data + "/" + str(fold_idx) + ".h5", 'r') as hf:
            if "PCA" in data_form or "KMeans" in data_form:
                # transformed features: keep only the first num_components columns
                X_train = hf["X_train_transformed"][:, :num_components].astype(np.float64)
                X_valid = hf["X_valid_transformed"][:, :num_components].astype(np.float64)
            else:
                X_train = hf["X_train"][:].astype(np.float64)
                X_valid = hf["X_valid"][:].astype(np.float64)
            labels_train = hf["y_train"][:]
            labels_valid = hf["y_valid"][:]
            labels_names = hf["labels_names"][:]
        # ------------ PROCESS DATA FOR MODEL: TRAIN/TEST SETS + LABELS----------------- #
        labels_df_train = pd.DataFrame(labels_train, columns=labels_names.astype(str))
        labels_df_valid = pd.DataFrame(labels_valid, columns=labels_names.astype(str))
        # keep only samples whose source file is in the requested dataset lists
        # (HDF5 stores filenames as bytes, hence the .str.decode)
        training_points_in_datasets = labels_df_train.filename.str.decode("utf-8").isin(self.train_datasets)
        X_train = X_train[training_points_in_datasets]
        labels_df_train = labels_df_train[training_points_in_datasets]
        valid_points_in_datasets = labels_df_valid.filename.str.decode("utf-8").isin(self.test_datasets)
        X_valid = X_valid[valid_points_in_datasets]
        labels_df_valid = labels_df_valid[valid_points_in_datasets]
        # shuffle features and labels with the same permutation
        shuffle_idx_train = np.random.permutation(range(len(labels_df_train)))
        shuffle_idx_valid = np.random.permutation(range(len(labels_df_valid)))
        X_train = X_train[shuffle_idx_train]
        X_valid = X_valid[shuffle_idx_valid]
        labels_df_train = labels_df_train.reset_index(drop=True).loc[shuffle_idx_train]
        labels_df_valid = labels_df_valid.reset_index(drop=True).loc[shuffle_idx_valid]
        y_train = {}
        y_valid = {}
        for phen in phenotypes:
            if phen in num_cats.keys():
                # categorical phenotype: rescale stage index into [0, 1]
                y_train[phen] = labels_df_train[phen].astype(float).values / (num_cats[phen] - 1)
                y_valid[phen] = labels_df_valid[phen].astype(float).values / (num_cats[phen] - 1)
            else:
                y_train[phen] = labels_df_train[phen].astype(float).values
                y_valid[phen] = labels_df_valid[phen].astype(float).values
        return X_train, X_valid, y_train, y_valid
    def run_experiment(self):
        """Train the multitask model and persist logs, checkpoints and predictions.

        NOTE(review): the `break` at the bottom of the loop means only the
        FIRST hyperparameter combination from the grid is ever run — confirm
        whether the full grid search is intentionally disabled.
        """
        hyperparams = {"epochs": [200],
                       "nonlinearity": ["relu"],
                       "hidden_sizes_shared": [[500, 100]],
                       "hidden_sizes_separate": [[50, 10]],
                       "dropout": [.1],
                       "k_reg": [.00001, .001],
                       "learning_rate": [.0001, .001],
                       "loss_weights": [[1, 1]],
                       "grad_clip_norm": [.01, .1],
                       "batch_size": [20]}
        hy_dict_list = list(ParameterGrid(hyperparams))
        for hy_dict in hy_dict_list:
            num_epochs = hy_dict["epochs"]
            nonlinearity = hy_dict["nonlinearity"]
            hidden_sizes_shared = hy_dict["hidden_sizes_shared"]
            hidden_sizes_separate = hy_dict["hidden_sizes_separate"]
            dropout = hy_dict["dropout"]
            k_reg = hy_dict["k_reg"]
            learning_rate = hy_dict["learning_rate"]
            loss_weights = hy_dict["loss_weights"]
            grad_clip_norm = hy_dict["grad_clip_norm"]
            batch_size = hy_dict["batch_size"]
            # output folders are keyed by the training-dataset list, not by the
            # hyperparameters, so successive combinations would overwrite each other
            title = str(self.train_datasets)
            #title = "%d_%s_%s_%s_%f_%f_%f_%s_%f_%d" % (
            #    num_epochs, nonlinearity, str(hidden_sizes_shared), str(hidden_sizes_separate),
            #    dropout, k_reg, learning_rate, str(loss_weights), grad_clip_norm, batch_size
            #)
            path_to_results = "MODEL_OUTPUTS/results/md-ad_data_validation/"
            path_to_preds = "MODEL_OUTPUTS/predictions/md-ad_data_validation/"
            path_to_models = "MODEL_OUTPUTS/models/md-ad_data_validation/"
            data_form = "ACT_MSBBRNA_ROSMAP_PCASplit"
            model = multitask_mlp(self.X_train, hy_dict)
            # https://stackoverflow.com/questions/36895627/python-keras-creating-a-callback-with-one-prediction-for-each-epoch
            X_valid = self.X_valid
            class prediction_history(keras.callbacks.Callback):
                # records full validation predictions after every epoch
                def __init__(self):
                    self.predhis = []
                def on_epoch_end(self, epoch, logs={}):
                    self.predhis.append(model.predict(X_valid))
            predictions = prediction_history()
            res_dest = path_to_results + "/" + data_form + "/" + title + "/"
            if not os.path.isdir(res_dest):
                os.makedirs(res_dest)
            preds_dest = path_to_preds + "/" + data_form + "/" + title + "/"
            if not os.path.isdir(preds_dest):
                os.makedirs(preds_dest)
            modelpath = path_to_models + data_form + "/" + title + "/" + str(self.fold_idx) + "/"
            if not os.path.isdir(modelpath):
                os.makedirs(modelpath)
            csv_logger = CSVLogger(res_dest + '%d.log' % self.fold_idx)
            print("Fitting model")
            History = model.fit(x={'main_input': self.X_train},
                                y={'BRAAK_out': self.y_train["BRAAK"], 'CERAD_out': self.y_train["CERAD"],
                                   'PLAQUES_out': self.y_train["PLAQUES"], 'TANGLES_out': self.y_train["TANGLES"],
                                   "ABETA_IHC_out": self.y_train["ABETA_IHC"], "TAU_IHC_out": self.y_train["TAU_IHC"]},
                                validation_data=({'main_input': self.X_valid},
                                                 {'BRAAK_out': self.y_valid["BRAAK"], 'CERAD_out': self.y_valid["CERAD"],
                                                  'PLAQUES_out': self.y_valid["PLAQUES"], 'TANGLES_out': self.y_valid["TANGLES"],
                                                  "ABETA_IHC_out": self.y_valid["ABETA_IHC"],
                                                  "TAU_IHC_out": self.y_valid["TAU_IHC"]}),
                                verbose=0, epochs=num_epochs, batch_size=batch_size, callbacks=[csv_logger, predictions,
                                    keras.callbacks.ModelCheckpoint(
                                        modelpath + "{epoch:02d}.hdf5",
                                        monitor='val_loss',
                                        verbose=0,
                                        save_best_only=False,
                                        save_weights_only=False,
                                        mode='auto',
                                        period=100)])
            # SAVE PREDICTIONS
            with h5py.File(preds_dest + "%d.h5" % self.fold_idx, 'w') as hf:
                # loop through epochs -- one group is made per epoch
                for i, ep in enumerate(predictions.predhis):
                    # within each group created for each epoch, we save a dataset of predictions for the validation set
                    for j, phenotype in enumerate(["BRAAK", "CERAD", "PLAQUES", "TANGLES",
                                                   "ABETA_IHC", "TAU_IHC"]):
                        if "/%s/%s" % (str(i), phenotype) in hf:
                            del hf["/%s/%s" % (str(i), phenotype)]
                        hf.create_dataset("/%s/%s" % (str(i), phenotype), data=predictions.predhis[i][j],
                                          dtype=np.float32)
                # save true values to /y_true/phenotype
                for phenotype in ["BRAAK", "CERAD", "PLAQUES", "TANGLES",
                                  "ABETA_IHC", "TAU_IHC"]:
                    if "/y_true/" + phenotype in hf:
                        del hf["/y_true/" + phenotype]
                    hf.create_dataset("/y_true/" + phenotype, data=self.y_valid[phenotype], dtype=np.float32)
            # free GPU/graph memory between runs
            K.clear_session()
            gc.collect()
            break
"nbbwang@gmail.com"
] | nbbwang@gmail.com |
99851ffa805c3ed010783c1fa4bcefd5dc0b55af | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/f05c02c4b8d4e423e57d453c4bd699dc5ff7eaa7-<test_quote_value>-fix.py | b76461e1c58bbd3cbb3da8d06195ffab39dbfdfe | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | def test_quote_value(self):
import MySQLdb
editor = connection.schema_editor()
tested_values = [('string', "'string'"), (42, '42'), (1.754, ('1.754e0' if (MySQLdb.version_info >= (1, 3, 14)) else '1.754')), (False, (b'0' if (MySQLdb.version_info >= (1, 4, 0)) else '0'))]
for (value, expected) in tested_values:
with self.subTest(value=value):
self.assertEqual(editor.quote_value(value), expected) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
f4ecab5300f9f46903856c5e5d9d97513587a53c | a517388a5f8a8d63cbd08451d3c3e17dfd89fd2e | /pocasi_webscraping.py | e5453cd2f0133f01605cba7383c3018b7fd7cf0b | [] | no_license | JandovKa/projekt | 33b3c85ef55e2a6ebb09db3f0e140e88beac7956 | a0152edcac18d735855e83c1620bbc056f0859a1 | refs/heads/main | 2023-01-19T07:11:41.410619 | 2020-11-29T18:10:15 | 2020-11-29T18:10:15 | 316,820,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,352 | py | #!/usr/bin/env python3
import csv
from datetime import date, timedelta
import json
import re
import requests
from requests.adapters import HTTPAdapter
from requests_html import HTML
from urllib3.util import Retry
# www.in-pocasi.cz obcas prestane na chvili odpovidat, protoze posilame moc dotazu
# Proto potrebujeme specialni session pro knihovnu requests, ktera opakuje chybujici requesty
# Adaptovano z https://www.peterbe.com/plog/best-practice-with-retries-with-requests
def requests_retry_session(
    retries=10,
    backoff_factor=0.3,
    status_forcelist=(500, 502, 504),
    session=None,
):
    """Return a requests Session that automatically retries failed requests.

    www.in-pocasi.cz occasionally stops responding under load, so both HTTP
    and HTTPS traffic is routed through an adapter with a retry policy.
    Adapted from https://www.peterbe.com/plog/best-practice-with-retries-with-requests
    """
    if session is None:
        session = requests.Session()
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry_policy)
    # mount the same adapter for both schemes
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
retry_session = requests_retry_session()
# Stahne seznam stanic z www.in-pocasi.cz
# U kazde vrati jeji ID, nazev a polohu
def stahni_seznam_stanic():
    """Download the station list from www.in-pocasi.cz.

    Returns a list of dicts, one per station, with its id, name and position.
    """
    print('Stahuji seznam stanic...')
    odpoved = retry_session.get('https://www.in-pocasi.cz/aktualni-pocasi/ajax/stations.json.php')
    zaznamy = json.loads(odpoved.text)['points']
    return [
        {
            'id_stanice': zaznam['id'],
            'nazev_stanice': zaznam['name'],
            'lat': zaznam['lat'],
            'lon': zaznam['lng'],
        }
        for zaznam in zaznamy
    ]
# Nascrapuje pocasi daneho regionu v dany den z www.in-pocasi.czc
# Vrati seznam udaju ze vsech stanic a dni obsahujici id_stanice, datum, max_teplota, srazky a naraz_vetru
def stahni_pocasi_regionu(cislo_regionu, datum):
    """Scrape the weather of one region on one day from www.in-pocasi.cz.

    Returns a list of dicts (one per station row) with id_stanice, datum,
    max_teplota, srazky and naraz_vetru; missing values become None.
    """
    # NOTE(review): "®ion" in the URL looks like mojibake of "&region"
    # ("&reg" decoded as the (R) entity) -- verify against the live site.
    req = retry_session.get(f'https://www.in-pocasi.cz/archiv/archiv.php?historie={datum.isoformat()}®ion={cislo_regionu}')
    html = HTML(html=req.text)
    # The first two tables matching this selector hold the climatic and the
    # private stations, respectively.
    tabulky_s_pocasim = html.find('.page table tbody')[:2]
    pocasi_na_stanicich = []
    # Walk the tables row by row and pull the relevant cells.
    for tabulka in tabulky_s_pocasim:
        for radek in tabulka.find('tr'):
            bunky = radek.find('td')
            # The station ID is embedded in the first cell's link href;
            # extract the last path segment with a regex.
            adresa_stanice = bunky[0].find('a', first=True).attrs['href']
            id_stanice = re.search(r'/([^/]+)/$', adresa_stanice).group(1)
            # Max temperature is in the second cell, formatted like "-12.3 °C".
            if bunky[1].text != '-':
                max_teplota = bunky[1].text[:-3]
            else:
                max_teplota = None
            # Wind gust is in the fourth cell, formatted like "12.3 km/h".
            if bunky[3].text != '-':
                naraz_vetru = bunky[3].text[:-5]
            else:
                naraz_vetru = None
            # Precipitation is in the fifth cell, formatted like "12.3 mm".
            if bunky[4].text != '-':
                srazky = bunky[4].text[:-3]
            else:
                srazky = None
            pocasi_na_stanicich.append({
                'id_stanice': id_stanice,
                'datum': datum,
                'max_teplota': max_teplota,
                'srazky': srazky,
                'naraz_vetru': naraz_vetru
            })
    return pocasi_na_stanicich
# Nascrapuje seznam stanic do souboru stanice.csv
# a pocasi v zadanem casovem rozpeti v zadanych regionech do souboru pocasi.csv
def stahni_pocasi(datum_od, datum_do, regiony):
    """Scrape the station list into stanice.csv and the weather for the given
    date range and regions into pocasi.csv."""
    # Station list -> stanice.csv
    seznam_stanic = stahni_seznam_stanic()
    with open('stanice.csv', 'w', newline='') as stanice_csv:
        fieldnames = ['id_stanice', 'nazev_stanice', 'lat', 'lon']
        writer = csv.DictWriter(stanice_csv, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(seznam_stanic)
    # Build the list of dates to download (both endpoints inclusive).
    pocet_dni = (datum_do - datum_od).days + 1
    datumy = [datum_od + timedelta(days=posun) for posun in range(pocet_dni)]
    # Weather per day/region -> pocasi.csv
    with open('pocasi.csv', 'w', newline='') as pocasi_csv:
        fieldnames = ['datum', 'id_stanice', 'max_teplota', 'srazky', 'naraz_vetru']
        writer = csv.DictWriter(pocasi_csv, fieldnames=fieldnames)
        writer.writeheader()
        for datum in datumy:
            for cislo_regionu in regiony:
                print(f'\rStahuji pocasi ze dne {datum.isoformat()} z regionu {cislo_regionu}...', end='')
                writer.writerows(stahni_pocasi_regionu(cislo_regionu, datum))
    print()
# Run the main scraping routine when the script is executed directly.
if __name__ == "__main__":
    # Input parameters for the download
    datum_od = date(2016, 1, 1)
    datum_do = date(2020, 6, 30)
    regiony = [
        2, # Jihomoravsky
        4, # Kralovehradecky
        10, # Stredocesky
        11, # Ustecky
    ]
    stahni_pocasi(datum_od, datum_do, regiony)
| [
"noreply@github.com"
] | JandovKa.noreply@github.com |
28ba746bf355c7edae5e15647a26b12e4f0101f4 | 96d4c78773e6651d6379ca6ff3506b5c79055c32 | /src/shovel/pit.py | 52510c95525ba82ed1d8786a55dbfb92b8e76536 | [] | no_license | jonathan-eckel/shovel | 6cb30be9452ed7099c8e21e19f93d9219282aba0 | 6119c12a71e77355f7acd4ab2e5376ca40113f20 | refs/heads/master | 2020-06-26T23:49:59.095459 | 2017-07-12T13:23:41 | 2017-07-12T13:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | # -*- coding: utf-8 -*-
from shovel import exceptions, s3
def _version_parser(version):
error_msg = 'versions should be in the format f"v{int(version[1:])}"'
if version[0] != 'v':
raise exceptions.VersionError(error_msg, version)
try:
return int(version[1:])
except ValueError:
raise exceptions.VersionError(error_msg, version)
def _get_latest_version(versions):
    """Return the newest version label, comparing by numeric suffix."""
    return max(versions, key=lambda label: _version_parser(label))
class Pit(object):
    """S3-backed artifact store ("pit") laid out as
    <root>/<project>/<dataset>/<version>/<objects...>."""
    def __init__(self, bucket, root):
        # bucket: S3 bucket name; root: key prefix all pit data lives under
        self.bucket = bucket
        self.root = root
    def bury(self, project, name, version, working_path):
        """Upload the contents of the target path to the pit."""
        _version_parser(version)  # validate the label before touching S3
        if self.list_contents(project, name, version):
            raise exceptions.VersionExists
        s3.put_objects(working_path, self.bucket, self.root, project, name, version)
    def dig(self, project, name, version, working_path):
        """Download the contents of the target dataset from the pit."""
        _version_parser(version)
        if not self.list_contents(project, name, version):
            raise exceptions.VersionDoesNotExist
        s3.get_objects(working_path, self.bucket, self.root, project, name, version)
    def list_projects(self):
        """Return list of projects"""
        return list(s3.list_nodes(self.bucket, self.root))
    def list_datasets(self, project):
        """Return list of datasets for specified project"""
        return list(s3.list_nodes(self.bucket, self.root, project))
    def list_versions(self, project, dataset):
        """Return list of versions for specified dataset, sorted numerically"""
        return sorted(s3.list_nodes(self.bucket, self.root, project, dataset), key=_version_parser)
    def list_contents(self, project, dataset, version):
        """Return list of objects stored under the specified dataset version"""
        return list(s3.list_objects(self.bucket, self.root, project, dataset, version))
| [
"calvin@lyst.com"
] | calvin@lyst.com |
f40508f4b3c67757c27f1cbdcf6a8463522f9b8b | 01e9dc6798446d86a9fe13872c563d5a74b85860 | /clientSocket.py | 05f697cf6b0a3660ee1c671c49ac3d8b61e49637 | [] | no_license | trejonh/Swift-Proton | 120be72d93e5bd1997af2ad2e83d3155b354fc0b | 45a6a23e12486ff0567a1d8bc0edd98ab6a33e7b | refs/heads/master | 2021-01-18T18:39:30.658319 | 2017-08-05T00:36:12 | 2017-08-05T00:36:12 | 86,869,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # Echo client program
import socket
HOST = '192.168.0.18' # The remote host
PORT = 9000 # The same port as used by the server
# Open a TCP connection, send a fixed greeting, read up to 1 KiB back, close.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.sendall('Hello, world')
data = s.recv(1024)
s.close()
# NOTE(review): Python 2 print statement (and str payload to sendall) --
# this script will not run under Python 3 as written.
print 'Received', repr(data)
"houset@msoe.edu"
] | houset@msoe.edu |
9c2648dd3be5bbea12c5a064c93e35dec1b054f1 | 18c3b2c148d5582225185704145629a593fef894 | /poi_flag_email.py | cc2a60dc264266002f8d18777ab616b01f7066c7 | [] | no_license | lk235/Machine_Learning | 6a8f51078f4e471d9de6044f3e990b18fb6bbf7f | a33a76a6da6d5d47e864ea578427eb414dcd7f70 | refs/heads/master | 2021-08-30T10:58:27.170473 | 2017-12-17T15:48:03 | 2017-12-17T15:48:03 | 112,315,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,408 | py | #!/usr/bin/python
###
### in poiFlagEmail() below, write code that returns a boolean
### indicating if a given email is from a POI
###
import sys
import reader
import poi_emails
def getToFromStrings(f):
    '''
    Extract the To:, From: and CC: address lists from an open e-mail file.
    reader.getAddresses() scans the opening header lines for the raw To:,
    From: and CC: strings; reader.parseAddresses() turns each raw string
    into a list of individual e-mail addresses.
    '''
    f.seek(0)
    raw_to, raw_from, raw_cc = reader.getAddresses(f)
    parsed = [reader.parseAddresses(raw) for raw in (raw_to, raw_from, raw_cc)]
    to_emails, from_emails, cc_emails = parsed
    return to_emails, from_emails, cc_emails
### POI flag an email
def poiFlagEmail(f):
    """ given an email file f,
        return a trio of booleans for whether that email is
        to, from, or cc'ing a poi """
    to_emails, from_emails, cc_emails = getToFromStrings(f)
    # poi_emails.poiEmails() returns a list of all POIs' email addresses;
    # a set gives O(1) membership tests instead of scanning the list.
    poi_addresses = set(poi_emails.poiEmails())
    # Each flag is True when at least one address in the corresponding header
    # belongs to a POI. `or []` keeps the original guard against a None/empty
    # header, and any() over an empty iterable is simply False — this replaces
    # the three manual counter-driven while loops.
    to_poi = any(addr in poi_addresses for addr in to_emails or [])
    from_poi = any(addr in poi_addresses for addr in from_emails or [])
    cc_poi = any(addr in poi_addresses for addr in cc_emails or [])
    return to_poi, from_poi, cc_poi
"cool.lk235@icloud.com"
] | cool.lk235@icloud.com |
6be679848ea5764a6096f5a81da5ca39c5a9ca05 | 5d158ae2556f38f409422081907f110d80bd91a3 | /app.py | 84fa8b8cb23f3903649adc2005e1b6bec3b616a4 | [] | no_license | donnyramadhan-dr/crud-tornado-api | 2bf0349a891cbcce9a81f5b920692ec49b4d36c1 | 7ad8ede87a8729b3a8a20e9213d80551bd6853a2 | refs/heads/master | 2023-01-11T00:04:40.777435 | 2020-10-23T06:49:25 | 2020-10-23T06:49:25 | 306,552,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import define, options
import tornado.autoreload
from app.routes import route
# Register the --port command-line option (default 4000).
define('port', default=4000, help= 'port to listen on')
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # route() (from app.routes) builds the Tornado Application with all handlers.
    app = route()
    http_server = HTTPServer(app)
    http_server.listen(options.port)
    print("======================================")
    print('App listening at: http://localhost:%i/' % options.port)
    # Development aid: watch source files and restart on change.
    tornado.autoreload.start()
    IOLoop.current().start()
# run : pipenv run python app.py
"ramadhandonny60@gmail.com"
] | ramadhandonny60@gmail.com |
ba174d7c045383a77c4d188a4434a1ef5461d1b1 | fda7d21dea4a6c5c478804cf3664729c64820176 | /rpmci/util.py | d7f419bd754fcbac254ca7c9aaef565d4e980d9c | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | msehnout/rpmci | 8c3eb4d1865c0c85dac0730294911a3dab6c5048 | e679226951e5a1a377c32853a78cd2c534a67fea | refs/heads/main | 2023-04-07T10:39:13.335099 | 2020-12-07T10:52:38 | 2020-12-07T10:52:38 | 301,734,388 | 0 | 0 | null | 2020-10-06T13:22:39 | 2020-10-06T13:22:39 | null | UTF-8 | Python | false | false | 1,223 | py | """rpmci/util - Utility Helpers
The `util` module provides basic utility helpers that extend the python
standard library. Each set of helpers is self-contained and documented as an
entity. They are meant to augment other libraries where they lack in features.
"""
# pylint: disable=invalid-name,too-few-public-methods
import contextlib
import signal
import subprocess
@contextlib.contextmanager
def manage_process(proc, *, timeout=0):
    """Context-manager for subprocesses

    This opens a context for the given process @proc. It yields the process
    object back to the caller. Once the context is exited, this manager takes
    care to terminate the process. By default, the process is forcibly
    terminated. If the timeout is set to anything but 0, a graceful termination
    is attempted for that period in blocking mode.
    """
    try:
        yield proc
    finally:
        if proc.poll() is None:
            if timeout == 0:
                proc.send_signal(signal.SIGKILL)
                # Reap the killed child so it does not linger as a zombie and
                # so proc.returncode is populated for the caller.
                proc.wait()
            else:
                try:
                    proc.terminate()
                    proc.wait(timeout=timeout)
                except subprocess.TimeoutExpired:
                    proc.send_signal(signal.SIGKILL)
                    proc.wait()
| [
"david.rheinsberg@gmail.com"
] | david.rheinsberg@gmail.com |
9701cbdf3e4083bae3a341d9cf78debd7c7df5b1 | 401cd29e9202a807a7f40be399fbfb8c960b3a2c | /pitch_guessing.py | dbd4b5a7f3c5f6113153f52367eb6554a3703328 | [] | no_license | nikcheerla/euphonia | 1b83f4b85f7dea6bd62114dd70d459d8a8b557c2 | bd98e2aa2938d76f068fa0e7a49cd85108fdbf3c | refs/heads/master | 2021-01-01T05:51:09.337184 | 2017-01-02T09:41:32 | 2017-01-02T09:41:32 | 39,851,182 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,435 | py | #code for data generation from heatmaps
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
plt.ioff()
import glob, sys, os, random, time, logging, threading
from sklearn.cross_validation import train_test_split
from scipy import stats
import progressbar
from representations import NoteMatrix, StateMatrix, ExpandedStateMatrix, NoteList
from generators import AbstractTrainSplitGenerator
from models import AbstractModel
from utils import sample
from constants import *
from keras.layers import LSTM, GRU, TimeDistributedDense, Input, Dense, Embedding
from keras.models import Model
from keras.optimizers import adadelta
import IPython
"""2-layer Embedding RNN model (implements AbstractModel from models.py) that predicts next pitch
from sequence of previous pitches. Previous pitches played are represented as integer values, which
are turned to length-32 vectors by the Embedding layer (word2vec style). """
class EmbeddedSequencePredictClass(AbstractModel):
    """Embedding + 2-layer GRU classifier over pitch sequences.

    Input: a window of `window_size` integer pitch indices; output: a softmax
    over `vector_size` pitch classes predicting the next pitch.
    """
    def __init__(self, *args, **kwargs):
        super(EmbeddedSequencePredictClass, self).__init__(*args, **kwargs)
    def build_model(self):
        """Build and compile the Keras model (uses self.window_size / self.vector_size)."""
        input_img = Input(shape=(self.window_size,))
        # word2vec-style embedding: each pitch index becomes a dense 32-dim vector
        x = Embedding(self.vector_size, 32, dropout=0.2) (input_img)
        x = GRU(128, dropout_W=0.2, dropout_U=0.2, return_sequences=True) (x)
        x = GRU(128, dropout_W=0.2, dropout_U=0.2, return_sequences=False) (x)
        x = Dense(64, activation="relu") (x)
        x = Dense(self.vector_size, activation="softmax") (x)
        model = Model(input_img, x)
        model.summary()
        model.compile(optimizer=adadelta(lr=0.08, clipvalue=1000), loss='categorical_crossentropy',
            metrics=['categorical_accuracy', 'top_k_categorical_accuracy'])
        return model
"""Generator that yields train_test batches, based on AbstractTrainSplitGenerator from
generators.py. It uses NoteList to get the list of notes, then takes
a random position and returns the pitch sequence history and the target pitch to be predicted."""
class PitchGuessingGenerator(AbstractTrainSplitGenerator):
    """Batch generator yielding (pitch-history window, one-hot next pitch) pairs."""
    def __init__(self, window_size, **kwargs):
        super(PitchGuessingGenerator, self).__init__(window_size, **kwargs)
    def gen_sample_pair(self, files_list):
        """Sample one (window, target) pair from the cached note list.

        NOTE(review): the parsed song is cached and only reloaded on the first
        call or with ~1/11 probability, so `music_file` chosen here is usually
        ignored — consecutive samples come from the same song by design.
        """
        music_file = random.choice(files_list)
        if self.data is None or random.randint(0, 10) == 1:
            self.data = NoteList.load(music_file)
        data = self.data
        # random window start, leaving room for the target note after it
        x = random.randint(0, len(data) - self.window_size - 1)
        # input is sequential integers representing pitch, to be used in embedding
        window = (data[x:(x + self.window_size), 1]).astype(int)
        target_idx = (data[x + self.window_size, 1]).astype(int)
        target = np.zeros(upperBound)  # upperBound comes from constants (star import)
        target[target_idx] = 1  # categorical prediction for target
        return window, target
# Create generator and model, then train model on generator
generator = PitchGuessingGenerator(window_size=50, samples_per_epoch=1000, batch_size=5)
model = EmbeddedSequencePredictClass(window_size=50, vector_size=upperBound)
# NOTE(review): this loop never terminates on its own, so the generation code
# below is only reached if training is interrupted externally — confirm intent.
while True:
    model.train(generator, epochs=8, checkpoint="results/pitch_guess_model.h5")
# Use trained model to read in song and predict next states
song = NoteList.load("music/beethoven_opus10_1.mid")
for idx in range(50, 200):
    # predict note idx from the 50 preceding pitches (which include our own
    # earlier predictions once idx > 100, so the song continues itself)
    window = song[(idx - 50):idx, 1]
    target_pred = model.predict(np.array([window]))[0]
    print (target_pred)
    # low temperature -> conservative sampling close to the argmax
    target_idx = sample(target_pred, temperature=0.2)
    song[idx, 1] = target_idx
NoteList.save(song[0:200], file_name="results/pitch_guess_song.mid")
| [
"nikcheerla@gmail.com"
] | nikcheerla@gmail.com |
cc885a87c1893d6f6b618cbc9c0be82ba02df536 | ec45bee420713f64d2d00a5d1c15a9a5f66a940b | /EasyDeep/utils/feature_resnet_utils.py | 936df9547f93debb69e781eaa4dfa16a80f677e4 | [
"MIT"
] | permissive | strawsyz/straw | a7dc5afef9525eeb3b1a471b5a90d869a3ba5084 | cdf785856941f7ea546aee56ebcda8801cbb04de | refs/heads/master | 2023-06-08T17:18:53.073514 | 2023-06-05T05:51:41 | 2023-06-05T05:51:41 | 253,447,370 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,262 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/2/3 14:15
# @Author : strawsyz
# @File : feature_util.py
# @desc:
import os
# from keras.applications.resnet import preprocess_input
import numpy as np
import imutils # pip install imutils
from tqdm import tqdm
import logging
import cv2
import moviepy.editor
# TODO 环境问题需要修改,其他基本没问题
from pytorch_i3d import InceptionI3d
def getDuration(video_path):
    """Get the duration (in seconds) for a video.

    Keyword arguments:
    video_path -- the path of the video
    """
    clip = moviepy.editor.VideoFileClip(video_path)
    return clip.duration
class FrameCV():
    """Grab a video's frames with OpenCV at a reduced FPS, optionally
    cropping/resizing each frame to 224x224.

    Fixes over the previous revision:
    - ``__iter__(self, index)`` had an indexing signature (it broke both
      iteration and indexing); it is now ``__getitem__``, which also makes
      plain ``for frame in obj`` work via the sequence protocol.
    - ``frame[off:-off]`` produced an EMPTY array whenever an offset was 0
      (e.g. a frame already exactly 224 wide); slicing is now shape-based.
    """
    def __init__(self, video_path, FPS=2, transform=None, start=None, duration=None):
        """Create a list of frame from a video using OpenCV.

        Keyword arguments:
        video_path -- the path of the video
        FPS -- the desired FPS for the frames (default:2)
        transform -- "resize256crop224", "crop", "resize" or None
        start -- the desired starting time for the list of frames (default:None)
        duration -- the desired duration time for the list of frames (default:None)
        """
        self.FPS = FPS
        self.transform = transform
        self.start = start
        self.duration = duration
        # read video and its nominal FPS
        vidcap = cv2.VideoCapture(video_path)
        self.fps_video = vidcap.get(cv2.CAP_PROP_FPS)
        # read duration (seconds) via moviepy
        self.time_second = getDuration(video_path)
        # Loop until the number of frames actually read is consistent with the
        # number expected from duration * fps; otherwise re-estimate the fps
        # from the observed frame count and read again.
        good_number_of_frames = False
        while not good_number_of_frames:
            vidcap = cv2.VideoCapture(video_path)
            self.numframe = int(self.time_second * self.fps_video)
            # keep roughly one frame out of every `drop_extra_frames`
            drop_extra_frames = self.fps_video / self.FPS
            self.frames = []
            pbar = tqdm(range(self.numframe), desc='Grabbing Video Frames', unit='frame')
            i_frame = 0
            ret, frame = vidcap.read()
            while ret:
                pbar.update(1)
                i_frame += 1
                # skip until starting time
                if self.start is not None:
                    if i_frame < self.fps_video * self.start:
                        ret, frame = vidcap.read()
                        continue
                # skip after duration time
                if self.duration is not None:
                    if i_frame > self.fps_video * (self.start + self.duration):
                        ret, frame = vidcap.read()
                        continue
                if (i_frame % drop_extra_frames < 1):
                    if self.transform == "resize256crop224":
                        # resize keeping aspect ratio, then crop the central 224x224
                        frame = imutils.resize(frame, height=256)
                        off_h = int((frame.shape[0] - 224) / 2)
                        off_w = int((frame.shape[1] - 224) / 2)
                        # shape-based end index: correct even when an offset is 0
                        frame = frame[off_h:frame.shape[0] - off_h,
                                      off_w:frame.shape[1] - off_w, :]
                    elif self.transform == "crop":
                        # keep aspect ratio, crop the sides down to width 224
                        frame = imutils.resize(frame, height=224)
                        off_side = int((frame.shape[1] - 224) / 2)
                        frame = frame[:, off_side:frame.shape[1] - off_side, :]
                    elif self.transform == "resize":
                        # plain resize: loses the aspect ratio
                        frame = cv2.resize(frame, (224, 224),
                                           interpolation=cv2.INTER_CUBIC)
                    self.frames.append(frame)
                ret, frame = vidcap.read()
            # accept when the expected and observed frame counts agree (±1)
            if self.numframe - (i_frame + 1) <= 1:
                logging.debug("Video read properly")
                good_number_of_frames = True
            else:
                logging.debug("Video NOT read properly, adjusting fps and read again")
                self.fps_video = (i_frame + 1) / self.time_second
        # (num_frames, H, W, 3) array
        self.frames = np.array(self.frames)
    def __len__(self):
        """Return number of frames."""
        return len(self.frames)
    def __getitem__(self, index):
        """Return frame at given index."""
        return self.frames[index]
def extract_features(video_path, feature_path, model, start=None, duration=None, overwrite=False, FPS=2,
                     transform="crop"):
    """Extract features for one video with a CUDA torch model and save them.

    Frames are grabbed with FrameCV, stacked into a (1, C, T, H, W) tensor,
    pushed through `model`, and the resulting features are saved to
    `feature_path` as a .npy file.

    Returns the number of feature frames, or None when `feature_path`
    already exists and `overwrite` is False (callers must handle both).

    NOTE(review): this function previously appeared twice, byte-for-byte
    identical; the second definition shadowed the first, so only one copy
    is kept.
    """
    print("extract video", video_path, "from", start, duration)
    if os.path.exists(feature_path) and not overwrite:
        return
    videoLoader = FrameCV(video_path, FPS=FPS, transform=transform, start=start,
                          duration=duration)
    frames = videoLoader.frames
    if duration is None:
        duration = videoLoader.time_second
    print("frames", frames.shape, "fps=", frames.shape[0] / duration)
    from torch.autograd import Variable
    frames = Variable(torch.from_numpy(frames))
    # (T, H, W, C) -> (C, T, H, W), then add a batch dimension
    frames = frames.permute([3, 0, 1, 2])
    frames = frames.unsqueeze(0)
    frames = frames.cuda()
    out = model(frames)
    out = out.squeeze(0)
    features = out.cpu().detach().numpy()
    num_frames = features.shape[0]
    # Bug fix: os.makedirs("") raises when feature_path has no directory
    # component (e.g. "1.npy", as used in __main__) -- only create the
    # directory when there is one.
    target_dir = os.path.dirname(feature_path)
    if target_dir:
        os.makedirs(target_dir, exist_ok=True)
    np.save(feature_path, features)
    print(f"Save features at {feature_path}")
    return num_frames
def analyze(self, data: list):
    """Print summary statistics (min, max, mean, median) of `data`.

    `self` is unused; the parameter is kept so existing callers that pass
    an instance keep working.
    """
    print(f"min : {min(data)}")
    print(f"max : {max(data)}")
    print(f"mean : {np.mean(data)}")
    # Bug fix: NumPy has no `np.middle`; the median was clearly intended.
    print(f"median : {np.median(data)}")
def get_i3d_model(model_path=r"C:\(lab\OtherProjects\pytorch-i3d-master\models\rgb_imagenet.pt"):
    """Build an InceptionI3d network, load pretrained weights and move it
    to the GPU.  The logits layer is replaced with 157 outputs before the
    state dict is loaded."""
    net = InceptionI3d(400, in_channels=3)
    net.replace_logits(157)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    return net
if __name__ == "__main__":
    # TimeSformer import kept for parity with the original script even though
    # only the I3D path is exercised below.
    from timesformer.models.vit import TimeSformer
    import torch

    # Extract I3D features for one sample clip and save them next to the script.
    video_path = r"C:\(lab\datasets\UCF101\train\ApplyEyeMakeup\v_ApplyEyeMakeup_g08_c04.avi"
    output_path = r"1.npy"
    model = get_i3d_model()
    num_frames = extract_features(video_path=video_path, feature_path=output_path, model=model)
| [
"syzyuzhi1119@gmail.com"
] | syzyuzhi1119@gmail.com |
b6a5ed0416da616a2c7f584e7ad77d7f96bb9d7a | 65e73c6c4a9e66715be2cbdd93339ebcab93976e | /windmill/fundo/migrations/0032_vertice_corretora.py | a3eb1329352877a6515e34208cf08afdae56e014 | [] | no_license | AnimaTakeshi/windmill-django | 3577f304d5e7f74750c7d95369e87d37209f1ac6 | 78bde49ace1ed215f6238fe94c142eac16e164dc | refs/heads/master | 2022-12-13T11:13:21.859012 | 2019-02-07T20:50:01 | 2019-02-07T20:50:01 | 150,470,109 | 0 | 0 | null | 2022-12-08T01:29:36 | 2018-09-26T18:13:54 | Python | UTF-8 | Python | false | false | 493 | py | # Generated by Django 2.0 on 2018-11-29 19:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds an optional ``corretora``
    foreign key (PROTECT on delete) to the ``vertice`` model."""

    dependencies = [
        ('fundo', '0031_auto_20181129_1104'),
    ]

    operations = [
        migrations.AddField(
            model_name='vertice',
            name='corretora',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='fundo.Corretora'),
        ),
    ]
| [
"33721822+AnimaTakeshi@users.noreply.github.com"
] | 33721822+AnimaTakeshi@users.noreply.github.com |
1af6075d430656e7cec546d15abcbd17fbae6e6e | c4c78f84cecfd2fc96c044883b937acbacc0ef01 | /billing/bill/migrations/0004_remind.py | 81e62fc436768c471695edbcdfd1c2f3ba85e63f | [] | no_license | ektaarora3501/budget_manager | e56a6ced06f7f71829c224714348cd6c93ec514d | c595de9318077276e8f46ec5579007ce88ee1aec | refs/heads/master | 2022-02-19T20:22:59.511935 | 2019-09-30T19:07:08 | 2019-09-30T19:07:08 | 198,245,060 | 2 | 2 | null | 2019-09-30T19:07:09 | 2019-07-22T14:54:31 | Python | UTF-8 | Python | false | false | 787 | py | # Generated by Django 2.2.2 on 2019-08-04 16:18
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the ``remind`` model
    (username, payee, amount, due date)."""

    dependencies = [
        ('bill', '0003_budget_date'),
    ]

    operations = [
        migrations.CreateModel(
            name='remind',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=100)),
                ('pay', models.CharField(max_length=100)),
                ('amount', models.DecimalField(decimal_places=4, default=0.0, max_digits=8, max_length=100)),
                ('date', models.DateField(default=datetime.date.today, verbose_name='Date')),
            ],
        ),
    ]
| [
"ektaarora3501@gmail.com"
] | ektaarora3501@gmail.com |
dbdd02976dd342da01954fbbebcbd958e352edd8 | ad61fc6bd7b1e11c44dce624459684cd3f2106ca | /WeatherApp/forms.py | b4d6e84c37aa6118b71207bacc757a79aa5d04bc | [] | no_license | kapil-garg/WeatherForecast_project | bb5dec0a75b9e925a6acefc25530882beabdf0a1 | 781453f931547322a093cab14ecdf2ae14fd7482 | refs/heads/master | 2023-05-07T01:05:37.236146 | 2021-05-30T06:05:37 | 2021-05-30T06:05:37 | 371,983,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from django.forms import ModelForm, TextInput, forms
from .models import City
class CityForm(ModelForm):
    """Single-field form for entering a city name, backed by the City model."""
    class Meta:
        model = City
        fields = ['name']
        widgets = {
            'name': TextInput(attrs={'class': 'input', 'placeholder': 'City Name'}),
        }
| [
"garg.kapil01798@gmail.com"
] | garg.kapil01798@gmail.com |
8c4736ff731c0f45c7fcbe734a61b43dc25e6413 | 5f2953f604b5caf12f29042cf91c111ec9932d69 | /Automation/automation_resources_todo/template/LocalFiles/recurfile.py | da5798c7dd4aee3bbefc4f8ba3a88941a6863000 | [] | no_license | danielphilipjohnson/Computer-Science | cd92fa54f5b873345c5a883f452ae3421d389909 | 634498f764803fe11610d1b031bbbff0b5345d9c | refs/heads/master | 2023-02-02T19:13:16.728174 | 2021-06-26T09:52:06 | 2021-06-26T09:52:06 | 245,429,998 | 1 | 0 | null | 2023-01-26T23:06:04 | 2020-03-06T13:42:52 | CSS | UTF-8 | Python | false | false | 383 | py | import os
# First pass: print the path of every file under the current directory.
for base, _subdirs, names in os.walk('.'):
    for name in names:
        print(os.path.join(base, name))

# Second pass: print only PDF files.
for base, _subdirs, names in os.walk('.'):
    for name in names:
        if name.endswith('.pdf'):
            print(os.path.join(base, name))
| [
"noreply@github.com"
] | danielphilipjohnson.noreply@github.com |
f67cdbfe0e4394de9e3098f44b044ab56b238f57 | a9fbbfc990ad79f412d8078d27b8937e5ef00bde | /inheritance/exercise/problem_03/knight.py | 334ddf65c77ff19a4a263ea75168b1dd9a6af0cf | [
"MIT"
] | permissive | BoyanPeychinov/object_oriented_programming | e2d23ec0ff681ca2c6cf1805e581af3d601aafee | a960721c7c17710bd7b151a9025647e953435962 | refs/heads/main | 2023-03-31T16:19:20.239216 | 2021-03-30T19:43:42 | 2021-03-30T19:43:42 | 342,281,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | from problem_03.hero import Hero
class Knight(Hero):
    """Knight hero type; inherits all behavior from Hero unchanged."""
    pass
| [
"BoyanPeychinov@gmail.com"
] | BoyanPeychinov@gmail.com |
7806e1b3eef24b8ec3b153502139320fc9c9a305 | bc4b6e30b505c684aaa593b2fa4d5763a7132ac1 | /apps/auth/migrations/0007_auto_20190228_1142.py | 7471f1576cdec2c1c3c4db40575f826279b09091 | [] | no_license | arao/workflow-api | e9890faab6c14e9f4b9e6c8359bca7ee4b36172a | bedb1d7cf25188619d4afc748d17b7ffe20b6992 | refs/heads/master | 2022-11-29T00:51:37.018500 | 2019-07-17T19:57:46 | 2019-07-17T19:57:46 | 197,452,667 | 0 | 0 | null | 2022-11-22T03:35:01 | 2019-07-17T19:47:46 | Python | UTF-8 | Python | false | false | 594 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-02-28 11:42
from __future__ import unicode_literals
import apps.auth.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: makes ``user.profile_photo``
    optional, with a fallback image and a custom upload path."""

    dependencies = [
        ('workflow_auth', '0006_auto_20190206_1807'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='profile_photo',
            field=models.ImageField(blank=True, default='user/profile/fallback.png', help_text='User profile photo', upload_to=apps.auth.models.usr_profil_dir),
        ),
    ]
| [
"akhilesh.rao@joshtechnologygroup.com"
] | akhilesh.rao@joshtechnologygroup.com |
e391758e06144caaf3135d1b38b6e1d1d36a6270 | 5cd08f5c00b60f809c258d34e3ad1321d93568a1 | /cookiecutter/resource/{{cookiecutter.resource_name}}/__init__.py | cea7f9bb5b6918a88a2e29af1471065a02f7eada | [] | no_license | cheickram/sowba | d4ca6b2c3d78f17c7db2547f538552dffdac258e | 47afb30fb83a0f624a1d7ba9cd9c3ae446d6487b | refs/heads/master | 2022-12-01T17:15:44.438956 | 2020-08-06T02:32:12 | 2020-08-06T02:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from sowba.resources.{{ cookiecutter.resource_name }}.crud import router
from sowba.resources.{{ cookiecutter.resource_name }}.crud import {{ cookiecutter.resource_name|capitalize }}
from sowba.resources.{{ cookiecutter.resource_name }}.api import {{ cookiecutter.resource_name }}_service
PATH_PREFIX = "/{{ cookiecutter.resource_name }}"
| [
"sekou.omar.kone@gmail.com"
] | sekou.omar.kone@gmail.com |
a164d0312f3a5559e47223aa73fad366d912287a | ce6739d9e76f16d1cbdfbb3292cdabac62d23ee1 | /mod_2/lesson_8/homework_2/main.py | 388347f374c64f6a6f86d8e8dd8ed5640d330d2c | [] | no_license | adrbed/python_poczatek | 1a863c0dc40c2315ced8985cf0b31d1b84e3f940 | 134626ccbe2255b0a28183e7d5d212db12c7deb4 | refs/heads/master | 2023-02-10T15:06:51.888544 | 2021-01-09T18:23:56 | 2021-01-09T18:23:56 | 328,218,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py |
# Add discount-policy support to the program.
# Implement functions representing the discount policy and pass them to the order's constructor.
# If a policy was passed, apply the discount while computing the order's total amount.
# If not, compute the total amount as before.
# Implement two discount policies:
# For loyal customers: 5% off every order position
# Christmas discount: 20 PLN off orders totalling more than 100 PLN
import random
from shop.order_element import OrderElement
from shop.product import Product
from shop.discount_policy import loyal_customer_policy, christmas_policy
from shop.order import Order
def generate_order_elements():
    """Create five demo order positions with random unit prices (1-30 PLN)
    and random quantities (1-10)."""
    elements = []
    for product_number in range(5):
        product = Product(f"Produkt-{product_number}", "Inne", random.randint(1, 30))
        quantity = random.randint(1, 10)
        elements.append(OrderElement(product, quantity))
    return elements
def run_homework():
    """Build one order per discount policy and print each of them."""
    order_elements = generate_order_elements()
    orders = [
        Order("Mikołaj", "Lewandowski", order_elements),
        Order("Mikołaj", "Lewandowski", order_elements, discount_policy=loyal_customer_policy),
        Order("Mikołaj", "Lewandowski", order_elements, discount_policy=christmas_policy),
    ]
    for order in orders:
        print(order)
if __name__ == '__main__':
    # Script entry point.
    run_homework()
| [
"adrianbednarczyk1@gmail.com"
] | adrianbednarczyk1@gmail.com |
e9e9799b02db2f0a528239390a1408c0943ce0df | c3e776b885ac9552ad4b96b63594ab759f12cc3a | /test/test_shape.py | c641e71e3eca4afe9db6faef214267b55670c856 | [] | no_license | wing328/petstore-python | e9ff3c2a5ff0fdddb1e6b0f12fc7e1d48f65590a | fa65c111bb2a040ebb0ed0db3ff9c52c8821922e | refs/heads/master | 2021-10-20T22:22:33.677235 | 2021-10-17T10:33:24 | 2021-10-17T10:33:24 | 54,948,706 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import petstore_api
from petstore_api.model.quadrilateral import Quadrilateral
from petstore_api.model.triangle import Triangle
globals()['Quadrilateral'] = Quadrilateral
globals()['Triangle'] = Triangle
from petstore_api.model.shape import Shape
class TestShape(unittest.TestCase):
    """Shape unit test stubs"""

    def setUp(self):
        # No fixtures required for this generated stub.
        pass

    def tearDown(self):
        pass

    def testShape(self):
        """Test Shape"""
        # FIXME: construct object with mandatory attributes with example values
        # model = Shape()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"wing328hk@gmail.com"
] | wing328hk@gmail.com |
5ee86744d7a992c96220b7bf91c50fc30ffd4288 | f17aae896577ee167d890eecb476ffbe0ebbde7f | /musica/migrations/0007_auto_20170115_0447.py | 0c6be5e6ef0505edf73aa2d7d03b063e4071cc4b | [] | no_license | kenca23/proy_la | e0c2c8a7b46402c443da36315abd938576ecef2d | c27340fdacb783e38d1264f5b8165ece78799b8f | refs/heads/master | 2021-01-15T12:02:37.060449 | 2018-02-23T01:40:04 | 2018-02-23T01:40:04 | 99,645,591 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-15 10:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: changes ``disco.imagen`` to an
    ImageField uploading to 'media/Discos/'."""

    dependencies = [
        ('musica', '0006_auto_20170115_0406'),
    ]

    operations = [
        migrations.AlterField(
            model_name='disco',
            name='imagen',
            field=models.ImageField(upload_to='media/Discos/'),
        ),
    ]
| [
"kenca23@gmail.com"
] | kenca23@gmail.com |
f864a08fb990e254e5d91632ab4a69183a3350fc | a91f472e4babaf73b044ce5663ea9574cd91a9eb | /test_lsh_h5/modules/classifypage/ClassifyPageTest.py | c99f683f35ec291bb92fc82ae85d2d36f5dfc32b | [] | no_license | lsh-test/test-lsh-market | f052265c49d30d7d4b790c042f42339c6c31d5f7 | e67ab6b26cce389ceaa06c656df5a826ecfb9e15 | refs/heads/master | 2021-01-22T13:36:25.666105 | 2017-09-30T03:47:31 | 2017-09-30T03:47:31 | 100,672,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,614 | py | #-*- coding: utf-8 -*-
import time
import os
from xlutils.copy import copy
from test_lsh_h5.base.TestCase import TestCase
from test_lsh_h5.base.RequestRule import RequestRule
from test_lsh_h5.base.H5Basic import H5Basic
requestRule = RequestRule()
class ClassifyPageTest():
def __init__(self,enverionment,host,h5ConfPath,testCasePath,testCaseDoc,testResultsPath):
self.enverionment = enverionment
self.host = host
self.h5ConfPath = appConfPath
self.testCasePath = testCasePath
self.testCaseDoc = testCaseDoc
self.testResultsPath = testResultsPath
print self.testResultsPath
def ClassifyPageTest(self):
print "---------------分类列表页面接口测试开始---------------"
h5Basic = H5Basic(self.enverionment,self.h5ConfPath)
session = h5Basic.getCookie()
testCase = TestCase()
excel = testCase.getAppTestCase(self.testCasePath,self.testCaseDoc)
sheet = excel.sheets()[0]
nrows = sheet.nrows
wb = copy(excel)
ws = wb.get_sheet(0)
amount = 0
for i in range(1, nrows):
url = sheet.cell(i, 3).value
# post请求
if sheet.cell(i, 2).value == 'post':
params = eval(sheet.cell(i, 4).value)
results = requestRule.post(session,self.host, url, params)
# get请求
elif sheet.cell(i, 2).value == 'get':
params = sheet.cell(i, 4).value
results = requestRule.get(session,self.host, url,params)
resultTime = results[0]
resultStatus = results[1]
resultText = results[2]
ws.write(i, 7, resultTime)
status = sheet.cell(i, 5).value
if resultStatus == status:
print "第%d条用例:pass" % i
ws.write(i, 6, "pass")
amount += 1
else:
print "第%d条用例:failure" % i
ws.write(i, 6, resultText)
a = (amount / float(i))*100
ws.write(i, 9, "%.2f" % a + "%")
print "case通过率为:%.2f" % a + "%"
resultTime = time.strftime('%Y-%m-%d')
#wb.save(os.path.dirname(os.getcwd()) + '/appTestResults/loginTestResult' + resultTime + '.xls')
wb.save(self.testResultsPath + 'ClassifyPageTestResult_' + resultTime + '.xls')
print "---------------分类列表页面接口测试结束---------------"
| [
"zhaoyanbin@lsh123.com"
] | zhaoyanbin@lsh123.com |
e25ae70c32caa15609a723290c0dc024ad0b509e | 29be3f39c4d2a05780a8e2f477ace77c7ecbc54d | /tempy/translate.py | 6eeaa5dba8e98961b4d104893cc0f1e3209d844c | [
"Apache-2.0"
] | permissive | Algy/tempy | 0342c0bbc09c65c5132b13e2a2ed504e879585e4 | 3fe3da6380628a6ab4af56e5ad738efbcb9eaedb | refs/heads/master | 2020-05-21T00:03:38.568050 | 2015-03-08T08:41:53 | 2015-03-08T08:41:53 | 23,047,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112,109 | py | from tag import is_tag_name, HTML_TAGS
from lisn import loads, loads_file, LISNSyntaxException
from lisn.utils import LISNVisitor
from lisn.match import LISNPattern
from functools import wraps
from copy import copy
from pprint import pprint
from errors import TempyCompileError, TempySyntaxError, CompileError
'''
Utils
'''
def identity(x):
    """Return the argument unchanged."""
    return x
def NOT_REACHABLE():
    # Marks logically impossible branches; always raises.
    raise Exception("Not reachable")
def dotify(name_or_name_list):
    """Join a list of name parts with dots; pass a plain string through
    unchanged."""
    if isinstance(name_or_name_list, basestring):
        return name_or_name_list
    return ".".join(name_or_name_list)
class Promise:
    '''
    Deferred computation: wraps a callable plus its arguments and only
    invokes it when force() (or the instance itself) is called.

    Not to be confused with Premise class!  This class is for lazy
    evaluation, while Premise is used for translation.
    '''
    def __init__(self, fun, args=(), kwds={}):
        self.fun = fun
        self.args = args
        self.kwds = kwds

    def __call__(self):
        # Calling the promise is the same as forcing it.
        return self.force()

    def force(self):
        fun, args, kwds = self.fun, self.args, self.kwds
        return fun(*args, **kwds)
def delay(*app_args, **app_kwds):
    '''
    Decorator factory: `@delay(args...)` turns the decorated function into
    a Promise that will call it with those arguments when forced.
    '''
    # NOTE(review): wraps `delay` itself, not the decorated function --
    # looks suspicious; confirm the intent before changing.
    @wraps(delay)
    def decorator(fun):
        return Promise(fun, app_args, app_kwds)
    return decorator
def is_delayed(obj):
    """True if `obj` is a Promise (i.e. a deferred computation)."""
    return isinstance(obj, Promise)
'''
Addition Utils for LISN object
'''
def suite_to_node_list(suite):
    """Pull the `param` node out of each expression entry of a suite."""
    nodes = []
    for entry in suite["exprs"]:
        nodes.append(entry["param"])
    return nodes
def check_multi_xexpr(node, head_label=None):
    """True when `node` is an xexpr carrying a head label (optionally a
    specific one).  Short-circuit values match the original chained `and`."""
    if node["type"] != "xexpr":
        return False
    flag = node["has_head_label"]
    if not flag:
        return flag
    if head_label is None:
        return True
    return node["head_label"] == head_label
def force_name_to_head_expr(node):
    """Return the head expression's name for a labeled xexpr, else None."""
    if check_multi_xexpr(node):
        return force_name(node["head_expr"])
    return None
def force_name(node):
    """Extract the identifier from a name node; None for any other node."""
    return node["name"] if node["type"] == "name" else None
def force_dotted_name(node):
    '''
    DottedName ::= Name
                 | DottedName "." Name

    Returns -
        string list of the dotted parts, in source order
        None when the node is not a plain dotted name
    '''
    # Walk down the attr-trailer chain iteratively, collecting attribute
    # names, until the base of the chain is reached.
    attrs = []
    cur = node
    while cur is not None and \
          cur["type"] == "trailer" and \
          cur["trailer_type"] == "attr":
        attrs.append(cur["attr"])
        cur = cur["scope"]
    # The chain must bottom out at a simple name node.
    if cur is None or cur["type"] != "name":
        return None
    attrs.append(cur["name"])
    attrs.reverse()
    return attrs
def force_one_parg(xexpr):
    """Return the sole positional argument of an xexpr node.

    Gives None unless the node is an xexpr whose argument info holds
    exactly one positional argument and nothing else (no keyword args,
    no star/double-star, no amp/double-amp arguments).
    """
    if xexpr["type"] != "xexpr":
        return None
    info = xexpr["arg_info"]
    has_extra = (info["kargs"] or
                 info["has_star"] or
                 info["has_dstar"] or
                 info["has_amp"] or
                 info["has_damp"])
    if has_extra:
        return None
    pargs = info["pargs"]
    return pargs[0] if len(pargs) == 1 else None
'''
MetaID conversion rule
--
local | argument | function | lambda | immediate
use convert-env (dict)
1. preserve name of all global-scope variables
2. preserve function name: (original), (original)_f#
3. preserve local name: (original), (original)_#
4. preserve argument name: (original), (original)_arg_#
5. name immediate locals: _imd_#
'''
'''
Python AST Classes & Meta ID Conversion tools
'''
class PyStmt:
    """Abstract base for generated python statements."""
    def to_string(self, indent, acc_indent):
        # indent: spaces added per nesting level; acc_indent: current column.
        raise NotImplementedError

    def convert_meta_id(self, driver, local_dict):
        # Rewrite PyMetaID placeholders into final names (in place).
        raise NotImplementedError
class PyMetaComment(PyStmt):
    """A `#` comment pseudo-statement; multi-line text becomes one comment
    line per source line."""
    def __init__(self, cmt_str):
        self.cmt_str = cmt_str

    def to_string(self, indent, acc_indent):
        pad = " " * acc_indent
        if '\n' in self.cmt_str:
            lines = [pad + "# " + ln for ln in self.cmt_str.splitlines()]
            return "\n".join(lines) + "\n"
        return pad + "# " + self.cmt_str + "\n"

    def convert_meta_id(self, driver, local_dict):
        # Comments contain no identifiers to rewrite.
        pass
def stmt_list_to_string(stmt_list, indent, acc_indent):
    """Render each statement one indent level deeper and concatenate."""
    rendered = (stmt.to_string(indent, acc_indent + indent)
                for stmt in stmt_list)
    return "".join(rendered)
class PyDefun(PyStmt):
    """A `def` statement: function name, positional/keyword arguments,
    optional *star/**dstar parameters, and a statement body."""
    def __init__(self, fun_name, pos_args, kwd_args, stmt_list, star=None, dstar=None, docstring=""):
        '''
        Argument -
            kwd_args: (string | PyMetaID, PyExpr) list
        '''
        self.fun_name = fun_name # string or PyMetaID
        self.pos_args = pos_args or []
        self.kwd_args = kwd_args or []
        self.star = star
        self.dstar = dstar
        self.docstring = docstring
        self.stmt_list = stmt_list

    def convert_meta_id(self, driver, local_dict):
        # A def introduces a fresh scope: deliberately ignore the caller's
        # local_dict and start a new one for this function body.
        local_dict = {}

        if isinstance(self.fun_name, PyMetaID):
            self.fun_name = self.fun_name.convert_meta_id(driver, local_dict).name
        # Arguments may be plain strings (kept as-is) or PyMetaIDs (renamed).
        self.pos_args = [(pos_arg.convert_meta_id(driver, local_dict).name
                            if isinstance(pos_arg, PyMetaID)
                            else pos_arg)
                          for pos_arg in self.pos_args]
        self.kwd_args = [(keyword.convert_meta_id(driver, local_dict).name
                            if isinstance(keyword, PyMetaID)
                            else keyword,
                          kexpr.convert_meta_id(driver, local_dict))
                          for keyword, kexpr in self.kwd_args]
        if self.star:
            if isinstance(self.star, PyMetaID):
                self.star = self.star.convert_meta_id(driver, local_dict).name
        if self.dstar:
            if isinstance(self.dstar, PyMetaID):
                self.dstar = self.dstar.convert_meta_id(driver, local_dict).name
        meta_convert_stmt_list(self.stmt_list, driver, local_dict)

    def to_string(self, indent, acc_indent):
        # Build the rendered argument list: positionals, keyword=default,
        # then *star and **dstar.
        arglst = []
        arglst += [(pos_arg.to_string() if isinstance(pos_arg, PyMetaID) else pos_arg)
                    for pos_arg in self.pos_args]
        for keyword, arg_expr in self.kwd_args:
            if isinstance(keyword, PyMetaID):
                keyword = keyword.to_string()
            arglst.append(keyword + "=" + arg_expr.to_string())

        if self.star is not None:
            star = self.star
            if isinstance(star, PyMetaID):
                star = star.to_string()
            arglst.append("*" + star)
        if self.dstar is not None:
            dstar = self.dstar
            if isinstance(dstar, PyMetaID):
                dstar = dstar.to_string()
            arglst.append("**" + dstar)
        if isinstance(self.fun_name, PyMetaID):
            fun_name = self.fun_name.to_string()
        else:
            fun_name = self.fun_name

        return " "*acc_indent + "def {0}({1}):\n{2}" \
               .format(fun_name,
                       ", ".join(arglst),
                       stmt_list_to_string(self.stmt_list, indent, acc_indent))
class PyReturn(PyStmt):
    """A `return [expr]` statement."""
    def __init__(self, ret_expr=None):
        self.ret_expr = ret_expr

    def to_string(self, indent, acc_indent):
        # NOTE: a bare return still renders with a trailing space
        # ("return \n"), matching the original emitter exactly.
        expr_str = "" if self.ret_expr is None else self.ret_expr.to_string()
        return " " * acc_indent + "return " + expr_str + "\n"

    def convert_meta_id(self, driver, local_dict):
        if self.ret_expr is not None:
            self.ret_expr = self.ret_expr.convert_meta_id(driver, local_dict)
class PyBreak(PyStmt):
    """The `break` statement."""
    def to_string(self, indent, acc_indent):
        return "%sbreak\n" % (" " * acc_indent)

    def convert_meta_id(self, driver, local_dict):
        # No identifiers to rewrite.
        pass
class PyContinue(PyStmt):
    """The `continue` statement."""
    def to_string(self, indent, acc_indent):
        return "%scontinue\n" % (" " * acc_indent)

    def convert_meta_id(self, driver, local_dict):
        # No identifiers to rewrite.
        pass
class PyPass(PyStmt):
    """The `pass` statement."""
    def to_string(self, indent, acc_indent):
        return "%spass\n" % (" " * acc_indent)

    def convert_meta_id(self, driver, local_dict):
        # No identifiers to rewrite.
        pass
class PyRaise(PyStmt):
    """A `raise [expr]` statement; renders a bare `raise` when no
    expression is given."""
    def __init__(self, to_be_throwed=None):
        self.to_be_throwed = to_be_throwed

    def to_string(self, indent, acc_indent):
        pieces = [" " * acc_indent, "raise"]
        if self.to_be_throwed is not None:
            pieces.append(" ")
            pieces.append(self.to_be_throwed.to_string())
        pieces.append("\n")
        return "".join(pieces)

    def convert_meta_id(self, driver, local_dict):
        if self.to_be_throwed is not None:
            self.to_be_throwed = self.to_be_throwed.convert_meta_id(driver, local_dict)
def meta_convert_stmt_list(stmt_list, driver, local_dict):
    """Run meta-ID conversion over every statement in the list (in place)."""
    for statement in stmt_list:
        statement.convert_meta_id(driver, local_dict)
class PyForStmt(PyStmt):
    """A `for <elem> in <expr>:` statement with a body."""
    def __init__(self, elem_name, _in, stmt_list):
        # elem_name can be either string, PyMetaID or PyTupleExpr
        assert isinstance(elem_name, (str, PyMetaID, PyTupleExpr))
        self.elem_name = elem_name
        self._in = _in
        self.stmt_list = stmt_list

    def to_string(self, indent, acc_indent):
        # Bug fix: `elem_name` was only assigned when the target was a
        # PyExpr, so a plain-string target raised UnboundLocalError.
        if isinstance(self.elem_name, PyExpr):
            elem_name = self.elem_name.to_string()
        else:
            elem_name = self.elem_name
        return " "*acc_indent + \
               "for {0} in {1}:\n{2}"\
               .format(elem_name,
                       self._in.to_string(),
                       stmt_list_to_string(self.stmt_list, indent, acc_indent))

    def convert_meta_id(self, driver, local_dict):
        if isinstance(self.elem_name, PyExpr):
            self.elem_name = self.elem_name.convert_meta_id(driver, local_dict)
        self._in = self._in.convert_meta_id(driver, local_dict)
        meta_convert_stmt_list(self.stmt_list, driver, local_dict)
class PyWhileStmt(PyStmt):
    """A `while <cond>:` loop with a statement body."""
    def __init__(self, cond_expr, stmt_list):
        self.cond_expr = cond_expr
        self.stmt_list = stmt_list

    def to_string(self, indent, acc_indent):
        head = " " * acc_indent + "while " + self.cond_expr.to_string() + ":\n"
        body = stmt_list_to_string(self.stmt_list, indent, acc_indent)
        return head + body

    def convert_meta_id(self, driver, local_dict):
        self.cond_expr = self.cond_expr.convert_meta_id(driver, local_dict)
        meta_convert_stmt_list(self.stmt_list, driver, local_dict)
class PyIfStmt(PyStmt):
    """An if/elif/else statement."""
    def __init__(self, if_pair, elif_pairs=None, else_stmt_list=None):
        '''
        Arguments -
            if_pair: (expr, stmt list)
            elif_pairs: (expr, stmt list) list
            else_stmt_list: stmt list
        '''
        self.if_pair = if_pair
        self.elif_pairs = elif_pairs or []
        self.else_stmt_list = else_stmt_list or []

    def convert_meta_id(self, driver, local_dict):
        # Convert each body first, then its condition, rebuilding the pair.
        meta_convert_stmt_list(self.if_pair[1], driver, local_dict)
        self.if_pair = (self.if_pair[0].convert_meta_id(driver, local_dict),
                        self.if_pair[1])

        new_elif_pairs = []
        for elif_cond_expr, elif_stmt_list in self.elif_pairs:
            meta_convert_stmt_list(elif_stmt_list, driver, local_dict)
            new_elif_pairs.append((elif_cond_expr.convert_meta_id(driver,
                                                                  local_dict),
                                   elif_stmt_list))
        self.elif_pairs = new_elif_pairs
        meta_convert_stmt_list(self.else_stmt_list, driver, local_dict)

    def to_string(self, indent, acc_indent):
        if_expr, if_stmt_list = self.if_pair
        acc_str = " "*acc_indent + \
                  "if %s:\n%s"%(if_expr.to_string(),
                                stmt_list_to_string(if_stmt_list,
                                                    indent,
                                                    acc_indent))
        if self.elif_pairs:
            # Render one `elif` chunk per (condition, body) pair.
            def elif_chunk(elif_expr, elif_expr_stmt_list):
                res = " "*acc_indent + \
                      "elif %s:\n%s"%(elif_expr.to_string(),
                                      stmt_list_to_string(elif_expr_stmt_list,
                                                          indent,
                                                          acc_indent))
                return res
            acc_str += "".join([elif_chunk(l, r) for l, r in self.elif_pairs])

        if self.else_stmt_list:
            acc_str += " "*acc_indent + \
                       "else:\n%s"%stmt_list_to_string(self.else_stmt_list,
                                                       indent,
                                                       acc_indent)
        return acc_str
class PyImportStmt(PyStmt):
    """An `import <module>[ as <alias>]` statement."""
    def __init__(self, name_or_name_list, alias):
        self.name_or_name_list = name_or_name_list
        self.alias = alias # string or PyMetaID

    def to_string(self, indent, acc_indent):
        if isinstance(self.alias, PyMetaID):
            # Bug fix: PyMetaID.to_string() already yields the final name
            # string (cf. PyDefun/PyAssignment); the old
            # `.to_string().name` raised AttributeError on str.
            alias_str = self.alias.to_string()
        elif isinstance(self.alias, basestring):
            alias_str = self.alias
        else:
            alias_str = ""
        if alias_str:
            alias_str = " as " + alias_str
        return " "*acc_indent + "import " + dotify(self.name_or_name_list) + alias_str + "\n"

    def convert_meta_id(self, driver, local_dict):
        if isinstance(self.alias, PyMetaID):
            self.alias = self.alias.convert_meta_id(driver, local_dict).name
class PyImportFromStmt(PyStmt):
    """A `from <module> import a, b as c, ...` statement.

    `import_names` holds plain strings or (src, dest) tuples for aliases.
    """
    def __init__(self, name_or_name_list, import_names):
        self.name_or_name_list = name_or_name_list
        self.import_names = import_names

    def to_string(self, indent, acc_indent):
        result = " "*acc_indent + \
                 "from " + dotify(self.name_or_name_list) + \
                 " import "
        addhoc = ""
        for name_or_pair in self.import_names:
            if addhoc:
                addhoc += ", "
            if isinstance(name_or_pair, tuple):
                src, dest = name_or_pair
                if isinstance(dest, PyMetaID):
                    # Bug fix: to_string() returns the name string itself;
                    # the old `.to_string().name` raised AttributeError.
                    dest = dest.to_string()
                addhoc += "%s as %s"%(src, dest)
            else:
                addhoc += name_or_pair
        return result + addhoc + "\n"

    def convert_meta_id(self, driver, local_dict):
        new_ins = []
        for item in self.import_names:
            if isinstance(item, tuple):
                src, dest = item
                new_ins.append((src, dest.convert_meta_id(driver, local_dict).name))
            else:
                new_ins.append(item)
        self.import_names = new_ins
def PyAssignmentToName(name, expr):
    # Convenience constructor: `name = expr`.
    return PyAssignment(PyAssignment.ASSIGN_NAME, name, None, None, None, expr)


def PyAssignmentToAttr(scope_expr, attr_name, expr):
    # Convenience constructor: `scope.attr = expr`.
    return PyAssignment(PyAssignment.ASSIGN_ATTR,
                        None, scope_expr, attr_name, None, expr)


def PyAssignmentToItem(scope_expr, index_expr, expr):
    # Convenience constructor: `scope[index] = expr`.
    return PyAssignment(PyAssignment.ASSIGN_ITEM,
                        None, scope_expr, None, index_expr, expr)
class PyAssignment(PyStmt):
    """Assignment to a name, an attribute, or a subscripted item, selected
    by `_type` (prefer the PyAssignmentTo* helper constructors)."""
    ASSIGN_NAME = 0
    ASSIGN_ATTR = 1
    ASSIGN_ITEM = 2

    def __init__(self, _type, name, scope_expr, attr_name, item_expr, expr):
        self._type = _type
        self.name = name # it can be either string or PyMetaID
        self.scope_expr = scope_expr
        self.attr_name = attr_name
        self.item_expr = item_expr
        self.expr = expr

    def to_string(self, indent, acc_indent):
        if self._type == PyAssignment.ASSIGN_NAME:
            name_str = self.name.to_string() \
                         if isinstance(self.name, PyExpr) else self.name
            result = " "*acc_indent
            result += "%s = %s\n"%(name_str, self.expr.to_string())
            return result
        elif self._type == PyAssignment.ASSIGN_ATTR:
            # Throwaway parent node so the scope expression is parenthesized
            # exactly as it would be under a real attribute access.
            virtual_parent = PyAttrAccess(self.scope_expr, self.name)
            result = " "*acc_indent
            result += "%s.%s = %s\n"%(expr_to_string(self.scope_expr, virtual_parent),
                                      self.attr_name,
                                      self.expr.to_string())
            return result
        elif self._type == PyAssignment.ASSIGN_ITEM:
            virtual_parent = PyItemAccess(self.scope_expr, self.item_expr)
            result = " "*acc_indent
            result += "%s[%s] = %s\n"%(expr_to_string(self.scope_expr, virtual_parent),
                                       self.item_expr.to_string(),
                                       self.expr.to_string())
            return result
        else:
            raise Exception("NOT REACHABLE")

    def convert_meta_id(self, driver, local_dict):
        if self._type == PyAssignment.ASSIGN_NAME:
            if isinstance(self.name, PyMetaID):
                self.name = self.name.convert_meta_id(driver, local_dict).name
        elif self._type == PyAssignment.ASSIGN_ATTR:
            self.scope_expr = self.scope_expr.convert_meta_id(driver,
                                                              local_dict)
        elif self._type == PyAssignment.ASSIGN_ITEM:
            self.scope_expr = self.scope_expr.convert_meta_id(driver,
                                                              local_dict)
            self.item_expr = self.item_expr.convert_meta_id(driver,
                                                            local_dict)
        else:
            raise Exception("NOT REACHABLE")
        self.expr = self.expr.convert_meta_id(driver, local_dict)
self.expr = self.expr.convert_meta_id(driver, local_dict)
class PyExprStmt(PyStmt):
    """A statement that evaluates a single expression for its effect."""
    def __init__(self, expr):
        assert isinstance(expr, PyExpr)
        self.expr = expr

    def to_string(self, indent, acc_indent):
        return "%s%s\n" % (" " * acc_indent, self.expr.to_string())

    def convert_meta_id(self, driver, local_dict):
        self.expr = self.expr.convert_meta_id(driver, local_dict)
class PyExpr:
    """Abstract base for generated python expressions."""
    def get_expr_pred(self):
        '''
        Expression Precedence
        --
        1. tuple, list, dictionary, string quotion, name ...
        2. attr, array, slice, call
        3. operator
        4. if-else expression
        5. lambda
        '''
        raise NotImplementedError

    def should_put_par(self, under):
        # TODO: somehow conservative condition (in case of the same pred)
        return self.get_expr_pred() <= under.get_expr_pred()

    def may_have_side_effect(self):
        # Conservative default: assume evaluation may have effects.
        return True

    def to_string(self):
        raise NotImplementedError

    def convert_meta_id(self, driver, local_dict):
        raise NotImplementedError
class PyDataReprExpr(PyExpr):
    """Base for literal/display expressions (tuple, list, dict):
    tightest-binding precedence tier."""
    def get_expr_pred(self):
        return 1
class PyTupleExpr(PyDataReprExpr):
    """Tuple display, e.g. `(a, b)`; a single element renders as `(x, )`."""
    def __init__(self, exprs):
        self.exprs = exprs

    def may_have_side_effect(self):
        return any(e.may_have_side_effect() for e in self.exprs)

    def to_string(self):
        rendered = [e.to_string() for e in self.exprs]
        if len(rendered) == 1:
            # Trailing comma keeps the rendered value a tuple.
            return "(" + rendered[0] + ", )"
        return "(" + ", ".join(rendered) + ")"

    def convert_meta_id(self, driver, local_dict):
        return PyTupleExpr([e.convert_meta_id(driver, local_dict)
                            for e in self.exprs])
class PyListExpr(PyDataReprExpr):
    """List display, e.g. `[a, b, c]`."""
    def __init__(self, exprs):
        self.exprs = exprs

    def may_have_side_effect(self):
        for e in self.exprs:
            if e.may_have_side_effect():
                return True
        return False

    def to_string(self):
        return "[%s]" % ", ".join(e.to_string() for e in self.exprs)

    def convert_meta_id(self, driver, local_dict):
        converted = [e.convert_meta_id(driver, local_dict)
                     for e in self.exprs]
        return PyListExpr(converted)
class PyDictExpr(PyDataReprExpr):
    """Dict display, e.g. `{k: v, ...}`."""
    def __init__(self, expr_dict):
        self.expr_dict = expr_dict

    def may_have_side_effect(self):
        return any(k.may_have_side_effect() or v.may_have_side_effect()
                   for k, v in self.expr_dict.items())

    def to_string(self):
        entries = ["%s: %s" % (k.to_string(), v.to_string())
                   for k, v in self.expr_dict.items()]
        return "{" + ", ".join(entries) + "}"

    def convert_meta_id(self, driver, local_dict):
        # Convert key before value for each pair (matches original order).
        pairs = [(k.convert_meta_id(driver, local_dict),
                  v.convert_meta_id(driver, local_dict))
                 for k, v in self.expr_dict.items()]
        return PyDictExpr(dict(pairs))
class PyOperatorExpr(PyExpr):
    """Base for unary/binary operator expressions (expression rank 3)."""
    # @implement PyExpr
    def get_expr_pred(self):
        return 3
    def operator_pred(self):
        """Operator-level precedence; subclasses must override."""
        raise NotImplementedError
    def should_put_par(self, under):
        if not isinstance(under, PyOperatorExpr):
            # coerce: fall back to expression-level comparison
            return PyExpr.should_put_par(self, under)
        return self.operator_pred() <= under.operator_pred()
def put_par(s):
    """Wrap the rendered expression *s* in parentheses."""
    return "(%s)" % (s,)
def expr_to_string(expr, parent=None):
    """Render *expr*, adding parentheses when *parent* requires them."""
    rendered = expr.to_string()
    if parent is not None and parent.should_put_par(expr):
        return put_par(rendered)
    return rendered
# Binary-operator precedence for emitted python code (lower = tighter).
BINOP_PRED = {
    "**": 1,
    "*": 3, "/": 3, "%": 3, "//": 3,
    "+": 4, "-": 4,
    ">>": 5, "<<": 5,
    "&": 6,
    "^": 7,
    "|": 8,
    "<=": 9, "<": 9, ">": 9, ">=": 9, "<>": 9, "==": 9, "!=": 9,
    "is": 9, "is not": 9, "in": 9, "not in": 9,
    "and": 11,
    "or": 12
}
# Binary operators that associate to the right:
# "a ** b ** c" groups as "a ** (b ** c)".
BINOP_RIGHT_ASSOC = set(["**"])
# Unary-operator precedence (lower = tighter).
UNOP_PRED = {
    "+": 2, "-": 2, "~": 2,
    "not": 10
}
class PyBinop(PyOperatorExpr):
    '''
    Binary operator expression.

    Operator precedence (tighter first) -
    1. **
    3. * / % //
    4. + -
    5. >> <<
    6. &
    7. ^
    8. |
    9. <= < > >= <> == != `is` `is not` `in` `not in`
    11. and
    12. or
    '''
    def __init__(self, op, lhs, rhs):
        assert op in BINOP_PRED
        self.op = op
        self.lhs = lhs
        self.rhs = rhs
    def operator_pred(self):
        return BINOP_PRED[self.op]
    def may_have_side_effect(self):
        return self.lhs.may_have_side_effect() or \
               self.rhs.may_have_side_effect()
    def should_put_par(self, under):
        # BUGFIX: the old code dropped parentheses around ANY same-op
        # child (it inspected self.lhs/self.rhs instead of *under*), so
        # a - (b - c) was printed as "a - b - c".  Decide by the child's
        # position and the operator's associativity; BINOP_RIGHT_ASSOC
        # was previously defined but never consulted.
        if isinstance(under, PyBinop) and under.op == self.op:
            if BINOP_PRED[self.op] == 9:
                # comparisons chain in python; always parenthesize
                return True
            if under is self.rhs:
                # the right child keeps its grouping only for right-assoc ops
                return self.op not in BINOP_RIGHT_ASSOC
            # the left child keeps its grouping only for left-assoc ops
            return self.op in BINOP_RIGHT_ASSOC
        # coerce
        return PyOperatorExpr.should_put_par(self, under)
    def to_string(self):
        return expr_to_string(self.lhs, self) + \
               " " + self.op + " " + \
               expr_to_string(self.rhs, self)
    def convert_meta_id(self, driver, local_dict):
        return PyBinop(self.op,
                       self.lhs.convert_meta_id(driver, local_dict),
                       self.rhs.convert_meta_id(driver, local_dict))
class PyUnop(PyOperatorExpr):
    '''
    Unary operator expression.
    2. ~ + -
    10. not
    '''
    def __init__(self, op, param):
        assert op in UNOP_PRED
        self.op = op
        self.param = param
    def operator_pred(self):
        return UNOP_PRED[self.op]
    def may_have_side_effect(self):
        return self.param.may_have_side_effect()
    def should_put_par(self, under):
        # BUGFIX: the original compared self.operator_pred() with itself
        # (always equal), so every nested unop skipped parentheses.
        # Compare against *under*'s precedence instead.
        if isinstance(under, PyUnop) and \
           self.operator_pred() == under.operator_pred():
            return False
        else:
            # coerce
            return PyOperatorExpr.should_put_par(self, under)
    def to_string(self):
        # "not" needs a separating space; symbolic ops do not
        space = " " if self.op == "not" else ""
        return self.op + space + expr_to_string(self.param, self)
    def convert_meta_id(self, driver, local_dict):
        return PyUnop(self.op, self.param.convert_meta_id(driver, local_dict))
class PyItemAccess(PyExpr):
    """Subscript access ``scope[item]`` (expression rank 2)."""
    # @implement PyExpr
    def get_expr_pred(self):
        return 2
    def __init__(self, scope_expr, item_expr):
        self.scope_expr = scope_expr
        self.item_expr = item_expr
    def may_have_side_effect(self):
        # __getitem__ may run arbitrary code
        return True
    def to_string(self):
        scope_str = expr_to_string(self.scope_expr, self)
        return scope_str + "[" + self.item_expr.to_string() + "]"
    def convert_meta_id(self, driver, local_dict):
        return PyItemAccess(
            self.scope_expr.convert_meta_id(driver, local_dict),
            self.item_expr.convert_meta_id(driver, local_dict))
class PyAttrAccess(PyExpr):
    """Attribute access ``scope.attr`` (expression rank 2)."""
    # @implement PyExpr
    def get_expr_pred(self):
        return 2
    def __init__(self, scope_expr, attr_name):
        assert isinstance(scope_expr, PyExpr)
        self.scope_expr = scope_expr
        self.attr_name = attr_name
    def may_have_side_effect(self):
        # properties / __getattr__ may run arbitrary code
        return True
    def to_string(self):
        return "{0}.{1}".format(expr_to_string(self.scope_expr, self),
                                self.attr_name)
    def convert_meta_id(self, driver, local_dict):
        new_scope = self.scope_expr.convert_meta_id(driver, local_dict)
        return PyAttrAccess(new_scope, self.attr_name)
class PyArraySlice(PyExpr):
    """Slice access ``scope[left:right]``; either bound may be absent."""
    # @implement PyExpr
    def get_expr_pred(self):
        return 2
    def __init__(self, scope_expr, left_slice=None, right_slice=None):
        self.scope_expr = scope_expr
        self.left_slice = left_slice
        self.right_slice = right_slice
    def may_have_side_effect(self):
        # __getitem__ may run arbitrary code
        return True
    def to_string(self):
        left = self.left_slice.to_string() if self.left_slice else ""
        right = self.right_slice.to_string() if self.right_slice else ""
        return "{0}[{1}:{2}]".format(expr_to_string(self.scope_expr, self),
                                     left,
                                     right)
    def convert_meta_id(self, driver, local_dict):
        def conv(bound):
            # absent bounds stay absent
            return bound.convert_meta_id(driver, local_dict) if bound else None
        return PyArraySlice(self.scope_expr.convert_meta_id(driver, local_dict),
                            conv(self.left_slice),
                            conv(self.right_slice))
class PyCall(PyExpr):
    """Call expression ``callee(args, kw=..., *star, **dstar)``."""
    # @implement PyExpr
    def get_expr_pred(self):
        return 2
    def __init__(self, callee_expr, arg_exprs, kw_exprs,
                 star_expr=None, dstar_expr=None):
        '''
        Arguments -
            arg_exprs: expr list
            kw_exprs: (string | PyMetaID, expr) list
        '''
        self.callee_expr = callee_expr
        self.arg_exprs = arg_exprs or []
        self.kw_exprs = kw_exprs or []
        self.star_expr = star_expr
        self.dstar_expr = dstar_expr
    def may_have_side_effect(self):
        # calls can do anything
        return True
    def to_string(self):
        pieces = [arg.to_string() for arg in self.arg_exprs]
        pieces += [kw + "=" + arg.to_string() for kw, arg in self.kw_exprs]
        if self.star_expr is not None:
            pieces.append("*" + self.star_expr.to_string())
        if self.dstar_expr is not None:
            pieces.append("**" + self.dstar_expr.to_string())
        return "{0}({1})".format(expr_to_string(self.callee_expr, self),
                                 ", ".join(pieces))
    def convert_meta_id(self, driver, local_dict):
        def conv(node):
            return node.convert_meta_id(driver, local_dict)
        def conv_kw(kw):
            # meta-id keywords collapse to their resolved name string
            return conv(kw).name if isinstance(kw, PyMetaID) else kw
        return PyCall(conv(self.callee_expr),
                      [conv(arg) for arg in self.arg_exprs],
                      [(conv_kw(kw), conv(arg)) for kw, arg in self.kw_exprs],
                      conv(self.star_expr) if self.star_expr else None,
                      conv(self.dstar_expr) if self.dstar_expr else None)
class PyLiteral(PyExpr):
    """A python value embedded directly as a literal."""
    # @implement PyExpr
    def get_expr_pred(self):
        return 1
    def __init__(self, literal):
        self.literal = literal  # the python value itself
    def may_have_side_effect(self):
        return False
    def to_string(self):
        # The compilation target is python itself, so repr() of the value
        # is already valid source for any literal (list, dict, string...).
        # Funny!
        return repr(self.literal)
    def convert_meta_id(self, driver, local_dict):
        # literals contain no meta ids
        return self
class PyMetaID(PyExpr):
    """Numeric-id placeholder name, resolved to a real name at the end."""
    # @implement PyExpr
    def get_expr_pred(self):
        return 1
    def __init__(self, _id):
        self._id = _id
    def may_have_side_effect(self):
        return False
    def to_string(self):
        return "__meta_id" + str(self._id) + "__"
    def convert_meta_id(self, driver, local_dict):
        # driver maps (id, local_dict) to the final python name
        return PyName(driver(self._id, local_dict))
class PyName(PyExpr):
    """A plain, already-final python identifier."""
    # @implement PyExpr
    def get_expr_pred(self):
        return 1
    def __init__(self, name):
        self.name = name
    def may_have_side_effect(self):
        return False
    def to_string(self):
        return self.name
    def convert_meta_id(self, driver, local_dict):
        # already concrete; nothing to resolve
        return self
class PyLambda(PyExpr):
    """Lambda expression ``lambda <args>: <expr>``.

    pos_args/star/dstar entries are strings or PyMetaID placeholders;
    kwd_args pairs a keyword (string | PyMetaID) with a default PyExpr.
    """
    def get_expr_pred(self):
        return 5
    def may_have_side_effect(self):
        # constructing a lambda evaluates nothing
        return False
    def __init__(self, pos_args, kwd_args, expr, star=None, dstar=None, docstring=""):
        '''
        Argument -
            kwd_args: (string, PyExpr) list
        '''
        self.pos_args = pos_args or []
        self.kwd_args = kwd_args or []
        self.star = star
        self.dstar = dstar
        self.docstring = docstring
        self.expr = expr
    def convert_meta_id(self, driver, local_dict):
        pos_args = [(pos_arg.convert_meta_id(driver, local_dict).name
                     if isinstance(pos_arg, PyMetaID)
                     else pos_arg)
                    for pos_arg in self.pos_args]
        kwd_args = [(keyword.convert_meta_id(driver, local_dict).name
                     if isinstance(keyword, PyMetaID) else keyword,
                     kexpr.convert_meta_id(driver, local_dict))
                    for keyword, kexpr in self.kwd_args]
        expr = self.expr.convert_meta_id(driver, local_dict)
        if self.star:
            # BUGFIX: this branch used to test isinstance(self.dstar, ...),
            # so a PyMetaID star parameter was never resolved to its name
            if isinstance(self.star, PyMetaID):
                star = self.star.convert_meta_id(driver, local_dict).name
            else:
                star = self.star
        else:
            star = None
        if self.dstar:
            if isinstance(self.dstar, PyMetaID):
                dstar = self.dstar.convert_meta_id(driver, local_dict).name
            else:
                dstar = self.dstar
        else:
            dstar = None
        return PyLambda(pos_args,
                        kwd_args,
                        expr,
                        star,
                        dstar,
                        self.docstring)
    def to_string(self):
        arglst = [(pos_arg.to_string() if isinstance(pos_arg, PyMetaID) else pos_arg)
                  for pos_arg in self.pos_args]
        for keyword, arg_expr in self.kwd_args:
            if isinstance(keyword, PyMetaID):
                keyword = keyword.to_string()
            arglst.append(keyword + "=" + arg_expr.to_string())
        if self.star is not None:
            star = self.star
            if isinstance(star, PyMetaID):
                star = star.to_string()
            arglst.append("*" + star)
        if self.dstar is not None:
            dstar = self.dstar
            if isinstance(dstar, PyMetaID):
                dstar = dstar.to_string()
            arglst.append("**" + dstar)
        return "lambda {0}: {1}" \
               .format(", ".join(arglst),
                       self.expr.to_string())
'''
Translation
'''
'''
Hinting Name
ID Hint
==
* duplication in original syntax
- closure
re-initialize
- let
<variable_name>_l#dup_depth
* immediate variable
1. imd_used_as_argument
- _imd_arg_1
- _imd_arg_2
2. imd_used_as_lhs_or_rhs
    - _imd_operand
3. else
- _imd_el_1
- _imd_el_2
- ...
* [lambda lifting]
<Lambda lifting's are not required due to python's lexical nature>
* reserved word
__line__, __tags__
python runtime names
add suffix _rwd_#seq as least seq as possible
* conflict each other finally
add suffix _cfl#seq
ID Hint Object
--
* Original Name
* source: "local" | "argument" | "function" | "lambda" | "immediate"
* 'let' Duplication Depth
* usage: string set
"return"
"local"
'''
'''
Compiling Procedures
Compiling Environments
--
* name_env: (string -> id, ref)
* external_globals
* extern_fun_map: string -> function
* name_syntax_map: string -> syntax_expander | syntax_compiler (only as global environment)
* dname_syntax_map: string -> dsyntax_expander | dsyntax_compiler (only as global environment)
Config Value
---
# config is global premise
* emit_line_info(li)
* source code comment verbosity(v): Always True in this revision
* expression_lifting_style (el): 'stack' | 'ssa' | 'stack_only_name' | 'stack_call_2' Always 'stack' in this revision
* remove name in the end of 'let' (letdel)
Premise Values (don't expect side-effect)
--
* use_return_value
* prebound_id
Conclusion (Return value, frozen)
--
* preseq_stmts: Stmt list
* result_expr: None | PyExpr
* comment: None | string
'''
'''
Data structures for compiling
'''
class NoMoreErrorAcceptable(Exception):
    """Internal signal: the error limit was reached, abort compilation."""
    pass
class IDHint:
    """Naming hint attached to an issued id, consulted when the final
    python name is chosen."""
    def __init__(self, original_name, name_source, usage):
        assert name_source in ("argument", "local", "immediate",
                               "lambda", "function")
        self.original_name = original_name
        self.name_source = name_source
        self.usage = usage
    def __repr__(self):
        return "<{0} ({1}, {2})>".format(self.original_name,
                                         self.name_source,
                                         self.usage)
class IDStorage:
    """Flat id -> info mapping plus a counter for fresh ids."""
    def __init__(self):
        self.id_dict = {}
        self.available_id = 0
    def get(self, _id):
        """Look up the info for *_id*; raises KeyError when absent."""
        return self.id_dict[_id]
class IDInfo:
    # Base class for everything an id can resolve to; provides
    # isinstance-based predicates used throughout the translators.
    def is_var(self):
        return isinstance(self, Var)
    def is_global_scope_var(self):
        return isinstance(self, GlobalScopeVar)
    def is_runtime_extern(self):
        return isinstance(self, RuntimeExtern)
    def is_expander(self):
        return isinstance(self, Expander)
    def is_converter(self):
        return isinstance(self, Converter)
class Var(IDInfo):
    """An ordinary variable id, carrying its naming hint."""
    def __init__(self, hint):
        self.hint = hint
    def __repr__(self):
        return "<Var {0}>".format(repr(self.hint))
class GlobalScopeVar(IDInfo):
    """A variable living in global scope, known by its final name."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return "<GlobalScope {0}>".format(self.name)
class RuntimeExtern(IDInfo):
    # A name resolved at runtime as an attribute of the runtime support
    # object (see nt_name's is_runtime_extern branch).
    def __init__(self, name):
        self.name = name
class Expander(IDInfo):
    '''
    Macro-like syntax object.

    expander -
        translator x lisn x Premise x Context -> (pre bound) lisn
    '''
    def __init__(self, expander, name=""):
        self.expander = expander
        self.name = name
    def expand(self, translator, lisn, premise, context):
        """Run the expander; the result is translated again by callers."""
        return self.expander(translator, lisn, premise, context)
    def __repr__(self):
        return "<Syntax expander {0}>".format(self.name)
class Converter(IDInfo):
    '''
    Direct-to-conclusion syntax object.

    converter -
        translator x lisn x Premise x Context -> Conclusion
    '''
    def __init__(self, converter, name=""):
        self.converter = converter
        self.name = name
    def convert(self, translator, lisn, premise, context):
        """Run the converter, producing a Conclusion directly."""
        return self.converter(translator, lisn, premise, context)
    def __repr__(self):
        return "<Syntax converter {0}>".format(self.name)
class EnvFrame:
    """Tag object for one scope frame of the local environment chain."""
    def __init__(self, frame_type):
        # "def" | "toplevel" | "let" | "lambda"
        assert frame_type in ("def", "toplevel", "let", "lambda")
        self.frame_type = frame_type
class Config:
    """Compiler-wide options (the 'global premise')."""
    def __init__(self, emit_line_info=True, expression_lifting_style="stack",
                 letdel=False, max_error_cnt=20, indent=4):
        # emit source line bookkeeping calls into the output
        self.emit_line_info = emit_line_info
        # 'stack' | 'ssa' | ... (only 'stack' is used in this revision)
        self.expression_lifting_style = expression_lifting_style
        # delete names at the end of a 'let'
        self.letdel = letdel
        # abort compilation after this many errors
        self.max_error_cnt = max_error_cnt
        # spaces per indent level in emitted code
        self.indent = indent
class CompEnv:
    """Name environment: a global name map plus a chain of local frames."""
    def __init__(self):
        '''
        Fields -
            local_env: None | LinkedDict
        '''
        self.global_env = {} # name -> id
        self.local_env = LinkedDict(EnvFrame("toplevel")) # LinkedDict
        self.id_info_dict = {}
        self.available_id = 0
    def issue_id(self, id_info):
        """Allocate a fresh id for *id_info* and register it."""
        assert isinstance(id_info, IDInfo)
        _id = self.available_id
        self.available_id += 1
        self.id_info_dict[_id] = id_info
        return _id
    def issue_local_immediate(self):
        """Allocate an anonymous temporary (immediate) variable id."""
        imd_id = self.issue_id(Var(IDHint("", "immediate", "local")))
        return imd_id
    def get_id_info(self, _id):
        return self.id_info_dict[_id]
    def add_local(self, name, id_info):
        """Bind *name* in the innermost local frame; returns the new id."""
        _id = self.issue_id(id_info)
        self.local_env.set_shallow(name, _id)
        return _id
    def add_global(self, name, id_info):
        """Bind *name* in the global environment; returns the new id."""
        _id = self.issue_id(id_info)
        self.global_env[name] = _id
        return _id
    def has_name(self, name):
        return self.local_env.has(name) or name in self.global_env
    def has_local_name(self, name, recursive=False):
        # recursive: search the whole local chain, not just the top frame
        if recursive:
            return self.local_env.has(name)
        else:
            return self.local_env.has_shallow(name)
    def lookup_name(self, name):
        '''
        Returns -
            (id, info)
            id: id of corresponding name
            info: IDInfo
        Exceptions -
            KeyError
        '''
        if self.local_env.has(name):
            _id = self.local_env.get(name)
        else:
            _id = self.global_env[name]
        return (_id, self.get_id_info(_id))
    def lookup_global_name(self, name):
        _id = self.global_env[name]
        return (_id, self.get_id_info(_id))
    def local_names(self, recursive=False):
        return self.local_env.keys(recursive=recursive)
    def setup_local_frame(self, frame_type):
        """Push a new local frame of the given type."""
        self.local_env = LinkedDict(EnvFrame(frame_type), prev=self.local_env)
    def contract_local_frame(self):
        """Pop the innermost local frame and return it.

        BUGFIX: the popped frame was stored in a local but never
        returned, so callers could not inspect it.
        """
        assert self.local_env is not None
        ret = self.local_env
        self.local_env = self.local_env.prev
        return ret
    def get_hint_dict(self):
        '''
        Dict
            (id -> string | IDHint)
        '''
        return dict([(_id, (info.hint if info.is_var() else info.name))
                     for _id, info in self.id_info_dict.items()
                     if info.is_var() or info.is_global_scope_var()])
def ensure_local_name(comp_env, name, id_hint):
    """Return the id bound to *name* in the innermost frame, creating a
    fresh Var with *id_hint* when the name is absent or currently bound
    to a non-variable (syntax object)."""
    if not comp_env.has_local_name(name, recursive=False):
        return comp_env.add_local(name, Var(id_hint))
    local_id, info = comp_env.lookup_name(name)
    if not info.is_var():
        # shadow the syntax binding with a real variable
        local_id = comp_env.add_local(name, Var(id_hint))
    return local_id
def ensure_local_var_name(comp_env, name):
    """ensure_local_name specialized for ordinary local variables."""
    return ensure_local_name(comp_env, name, IDHint(name, "local", "local"))

def ensure_lambda_name(comp_env, name):
    """ensure_local_name specialized for lifted lambda objects."""
    return ensure_local_name(comp_env, name, IDHint(name, "lambda", "local"))

def ensure_local_arg_name(comp_env, name):
    """ensure_local_name specialized for function arguments."""
    return ensure_local_name(comp_env, name, IDHint(name, "argument", "local"))

def ensure_function_name(comp_env, name):
    """ensure_local_name specialized for function definitions."""
    return ensure_local_name(comp_env, name, IDHint(name, "function", "local"))
class LinkedDict:
    """A chain of name -> id frames; lookups fall through to *prev*."""
    def __init__(self, env_data, initials=None, prev=None):
        # NOTE: default changed from a shared mutable {} to None
        # (same observable behavior, avoids the mutable-default pitfall)
        self.env_data = env_data
        self.prev = prev
        self.namemap = {}
        for k, v in (initials or {}).items():
            self.set_shallow(k, v)
    def keys(self, recursive=False):
        """Names bound in this frame (or the whole chain if recursive)."""
        name_set = set(self.namemap.keys())
        if self.prev and recursive:
            return name_set.union(self.prev.keys(recursive=True))
        else:
            return name_set
    def get_env_data(self):
        return self.env_data
    def set_shallow(self, name, _id):
        self.namemap[name] = _id
    def set(self, name, _id, shallow=True):
        """Shallow: always bind in this frame.  Deep: rebind wherever
        *name* is already bound, raising KeyError if it is bound nowhere."""
        if shallow:
            self.set_shallow(name, _id)
        else:
            if name in self.namemap:
                self.namemap[name] = _id
            elif self.prev:
                # BUGFIX: the recursion used the default shallow=True,
                # so a deep set always landed in the immediate parent
                # frame instead of the frame that actually binds *name*
                return self.prev.set(name, _id, shallow=False)
            else:
                # BUGFIX: report the missing name, not the new id
                raise KeyError(name)
    def has(self, name):
        if name not in self.namemap:
            if self.prev:
                return self.prev.has(name)
            else:
                return False
        else:
            return True
    def has_shallow(self, name):
        return name in self.namemap
    def who_has(self, name):
        """env_data of the frontmost frame binding *name*, or None."""
        if name not in self.namemap:
            if self.prev:
                return self.prev.who_has(name)
            else:
                return None
        else:
            return self.env_data
    def all_dict_with_name(self, name):
        '''
        Get matching names in several linked dict and return those dicts
        as a list. The first-encountered name is located in front.
        e.g)
            If a linked dict is constructed as shown below
            {a: ..} with data "A" -prev-> {b: ..} -prev-> {a: ..} with env_data "B"
             ^ front subdict                               ^ rear subdict
            The result of this function is [front subdict, rear subdict]
        '''
        # BUGFIX: recursed via a non-existent method "backtrace_name",
        # which raised AttributeError on any chain of depth > 1
        prev_result = self.prev.all_dict_with_name(name) if self.prev else []
        if name in self.namemap:
            return [self] + prev_result
        else:
            return prev_result
    def all_env_data_with_name(self, name):
        return [subdict.get_env_data()
                for subdict in self.all_dict_with_name(name)]
    def get(self, name):
        if name in self.namemap:
            return self.namemap[name]
        elif self.prev:
            return self.prev.get(name)
        else:
            raise KeyError(name)
class Premise:
    """What the caller expects from translating a node (treat as frozen)."""
    def __init__(self, use_return_value=False):
        self.use_return_value = use_return_value
    def copy(self):
        """Shallow copy, so a variant premise can be made safely."""
        return copy(self)
class Conclusion:
    """Result of translating one node: statements to run first, then an
    optional result expression (and an error/comment marker)."""
    def __init__(self, preseq_stmts, result_expr, error=False, comment=None):
        assert preseq_stmts is None or all(map(lambda x: isinstance(x, PyStmt), preseq_stmts))
        assert result_expr is None or isinstance(result_expr, PyExpr)
        self.preseq_stmts = preseq_stmts or []
        self.result_expr = result_expr
        self.error = error
        self.comment = comment
    def error_occurred(self):
        return self.error
    def has_result(self):
        return self.result_expr is not None
    def is_pure_expr(self):
        return len(self.preseq_stmts) == 0
    def make_stmt_list(self):
        """Flatten into a statement list; a side-effecting result is kept
        as a trailing expression statement."""
        if self.has_result() and self.result_expr.may_have_side_effect():
            return self.preseq_stmts + [PyExprStmt(self.result_expr)]
        # BUGFIX: previously fell through and returned None here;
        # return the (copied) preceding statements instead
        return list(self.preseq_stmts)
def expr_conclusion(expr, comment=None):
    """Wrap a pure expression as a Conclusion with no preceding stmts."""
    assert isinstance(expr, PyExpr)
    return Conclusion([],
                      expr,
                      error=False,
                      comment=comment)
def stmt_conclusion(stmts, comment=None):
    """Wrap statement(s) as a result-less Conclusion; a single PyStmt is
    promoted to a one-element list."""
    stmt_list = [stmts] if isinstance(stmts, PyStmt) else stmts
    return Conclusion(stmt_list,
                      None,
                      error=False,
                      comment=comment)
def stmt_result_conclusion(stmts, result_expr, comment=None):
    """Wrap statement(s) plus a result expression as a Conclusion."""
    assert isinstance(result_expr, PyExpr)
    stmt_list = [stmts] if isinstance(stmts, PyStmt) else stmts
    return Conclusion(stmt_list,
                      result_expr,
                      error=False,
                      comment=comment)
# Shared instance reused for the common comment-less error case.
_cached_error_conclusion = Conclusion(None,
                                      None,
                                      error=True,
                                      comment=None)

def error_conclusion(comment=None):
    """Return an error-marked Conclusion, cached when *comment* is None."""
    if comment is None:
        return _cached_error_conclusion
    return Conclusion(None,
                      None,
                      error=True,
                      comment=comment)
def noreturn_conclusion(conclusions):
    """Combine conclusions whose results are discarded (side effects only)."""
    if any(c.error_occurred() for c in conclusions):
        return error_conclusion()
    stmts = []
    for c in conclusions:
        stmts.extend(c.preseq_stmts)
        if c.has_result() and c.result_expr.may_have_side_effect():
            # the result may have side effects, so keep it as a statement
            stmts.append(PyExprStmt(c.result_expr))
    return stmt_conclusion(stmts)
def seq_conclusion(conclusions):
    """Sequence conclusions; only the last one's result is kept.

    Assumes *conclusions* is non-empty (indexing [-1] below).
    """
    if any(c.error_occurred() for c in conclusions):
        return error_conclusion()
    # (removed a dead `stmts = []` local that was never used)
    rest_concl = noreturn_conclusion(conclusions[:-1])
    last_concl = conclusions[-1]
    return stmt_result_conclusion(
        rest_concl.preseq_stmts + last_concl.preseq_stmts,
        last_concl.result_expr)
def make_integrator(allow_None):
    # Factory for integrate_conclusion / xintegrate_conclusion below;
    # allow_None decides whether None may appear among the conclusions.
    def integrate_conclusion(comp_env, result_proc, *conclusions):
        '''
        Collect sub-conclusions into one: hoist their preceding
        statements, spill side-effecting results into fresh immediates
        (preserving evaluation order), then hand the now-pure
        expressions to result_proc.

        result_proc: A x A x A x .. -> PyExpr | ((PyStmt | PyStmt list), (None | PyExpr))
        '''
        preseq_stmts = []
        success_box = [True]  # mutable flag shared with convert() below
        def convert(a):
            '''
            A ::= A list
            A ::= A tuple
            A ::= Conclusion
            A ::= None?
            '''
            if isinstance(a, list):
                return list(map(convert, a))
            elif isinstance(a, tuple):
                return tuple(map(convert, a))
            elif isinstance(a, Conclusion):
                if a.error_occurred():
                    success_box[0] = False
                    return None
                result_expr = a.result_expr
                if a.is_pure_expr():
                    return result_expr
                elif not result_expr.may_have_side_effect():
                    preseq_stmts.extend(a.preseq_stmts)
                    return result_expr
                else:
                    # spill into an immediate so evaluation order is kept
                    preseq_stmts.extend(a.preseq_stmts)
                    result_id = comp_env.issue_local_immediate()
                    preseq_stmts.extend(stmtify_expr(result_expr, True, result_id))
                    return PyMetaID(result_id)
            elif a is None:
                if allow_None:
                    return None
                else:
                    raise TypeError("NoneType is not allowed")
            else:
                raise TypeError("%s is not allowed"%a.__class__.__name__)
        argument_exprs = convert(conclusions)
        if not success_box[0]:
            return error_conclusion()
        result = result_proc(*argument_exprs)
        if isinstance(result, PyExpr):
            return stmt_result_conclusion(preseq_stmts, result)
        elif isinstance(result, tuple):
            # (additional stmts, optional result expr) form
            if len(result) != 2:
                raise ValueError("length of result tuple should be 2")
            if not isinstance(result[0], PyStmt) and not all(map(lambda x: isinstance(x, PyStmt), result[0])):
                raise TypeError("The first elem of result should be a PyStmt or a list of PyStmt")
            additional_stmts, result_expr = result
            if isinstance(additional_stmts, PyStmt):
                additional_stmts = [additional_stmts]
            if result_expr is not None and not isinstance(result_expr, PyExpr):
                raise TypeError("The second elem of result should be None or PyExpr")
            if result_expr is None:
                return stmt_conclusion(preseq_stmts + additional_stmts)
            else:
                return stmt_result_conclusion(preseq_stmts + additional_stmts, result_expr)
        else:
            raise TypeError("Invalid return type of 'result_proc'")
    return integrate_conclusion
# strict variant (no None allowed) and permissive variant
integrate_conclusion = make_integrator(False)
xintegrate_conclusion = make_integrator(True)
def integrate_list(comp_env, conclusions):
    '''
    Flatten a list of conclusions into one statement stream.

    CompEnv, Conclusion list ->
    Results -
        (
         bool,       # success?
         stmt list,  # preseq_stmts
         expr list,  # results
         expr list   # used_imd_ids
        )
    Side-effecting results are spilled into fresh immediates so that
    evaluation order is preserved; result-less conclusions contribute
    a literal None.
    '''
    success = True
    preseq_stmts = []
    result = []
    used_imd_ids = []
    for concl in conclusions:
        if concl.error_occurred():
            success = False
            continue
        preseq_stmts.extend(concl.preseq_stmts)
        if not concl.has_result():
            result.append(PyLiteral(None))
        elif concl.result_expr.may_have_side_effect():
            imd_id = comp_env.issue_local_immediate()
            used_imd_ids.append(imd_id)
            preseq_stmts.append(PyAssignmentToName(PyMetaID(imd_id),
                                                   concl.result_expr))
            result.append(PyMetaID(imd_id))
        else:
            result.append(concl.result_expr)
    return (success, preseq_stmts, result, used_imd_ids)
class RuntimeStore:
    """Ids of the runtime helper objects referenced by emitted code."""
    def __init__(self, runtime_obj_id, importer_id, line_info_id):
        self.runtime_obj_id = runtime_obj_id  # runtime support object
        self.importer_id = importer_id        # module importer helper
        self.line_info_id = line_info_id      # line-tracking helper
class Context:
    """Mutable compile-session state: environment, config, errors."""
    def __init__(self, comp_env, config, rt_store, filename):
        self.comp_env = comp_env
        self.config = config
        self.rt_store = rt_store
        self.errors = []
        self.filename = filename
    def add_error(self, error_obj):
        """Record an error; abort via NoMoreErrorAcceptable once the
        configured limit is reached."""
        self.errors.append(error_obj)
        if self.config.max_error_cnt <= len(self.errors):
            raise NoMoreErrorAcceptable
    def any_error(self):
        return len(self.errors) > 0
def set_comp_error(context, error_obj):
    """Register a compile error, defaulting its filename to the context's."""
    error_obj.filename = error_obj.filename or context.filename
    context.add_error(error_obj)
#
# Translators
#
# Dispatch table: one handler per LISN node type, registered below via
# the @node_translator.add.<node_type> decorators.
node_translator = LISNVisitor()
def stmtify_expr(expr, use_return_value, imd_id=None):
    '''
    Turn *expr* into a statement list; when the value is wanted it is
    assigned to the immediate *imd_id*, otherwise it becomes a bare
    expression statement.

    CompEnv x bool x id -> stmt list
    '''
    assert isinstance(expr, PyExpr)
    if not use_return_value:
        return [PyExprStmt(expr)]
    return [PyAssignmentToName(PyMetaID(imd_id), expr)]
def basic_emitter(fun):
    """Decorator: after translating a node, optionally prepend a call to
    the runtime line-tracking helper (when config.emit_line_info)."""
    @wraps(fun)
    def wrapper(translator, lisn, premise, context):
        conclusion = fun(translator, lisn, premise, context)
        assert isinstance(conclusion, Conclusion)
        if context.config.emit_line_info:
            locinfo = lisn["locinfo"]
            # BUGFIX: line_info_id lives on context.rt_store (see Context
            # / RuntimeStore), not directly on the context
            line_directive = \
                PyCall(PyMetaID(context.rt_store.line_info_id),
                       [PyLiteral(locinfo["sline"]),
                        PyLiteral(locinfo["eline"]),
                        PyLiteral(locinfo["scol"]),
                        PyLiteral(locinfo["ecol"] - 1)],
                       [])
            # BUGFIX: preseq_stmts holds statements, so the bare call
            # expression must be wrapped in PyExprStmt
            conclusion.preseq_stmts.insert(0, PyExprStmt(line_directive))
        return conclusion
    return wrapper
@node_translator.add.trailer
def nt_trailer(translator, lisn, premise, context):
    # Translate trailers: attribute access (scope.attr), item access
    # (scope[i]) and slices (scope[a:b]).  The scope part is always
    # compiled for its value.
    trailer_type = lisn["trailer_type"]
    scope = lisn["scope"]
    if scope is None:
        set_comp_error(context,
                       CompileError("Scope",
                                    "scope should be specified",
                                    lisn["locinfo"]))
        return error_conclusion()
    scope_concl = translator(scope,
                             Premise(True),
                             context)
    if trailer_type == "attr":
        def f(scope_expr):
            # NOTE: 'attr' is assigned after this def; the closure reads
            # the later binding, which exists by the time f is invoked
            return PyAttrAccess(scope_expr, attr)
        attr = lisn["attr"]
        return integrate_conclusion(context.comp_env, f, scope_concl)
    elif trailer_type == "array":
        index_param = lisn["index_param"]
        item_concl = translator(index_param,
                                Premise(True),
                                context)
        return integrate_conclusion(context.comp_env,
                                    PyItemAccess,
                                    scope_concl,
                                    item_concl)
    elif trailer_type == "slice":
        left_slice = lisn["left_slice"]
        right_slice = lisn["right_slice"]
        left_slice_concl = translator(left_slice,
                                      Premise(True),
                                      context) \
                           if left_slice else None
        right_slice_concl = translator(right_slice,
                                       Premise(True),
                                       context) \
                            if right_slice else None
        # four cases depending on which slice bounds are present
        if left_slice is None and right_slice is None:
            return integrate_conclusion(context.comp_env,
                                        lambda scope_expr: PyArraySlice(scope_expr, None, None),
                                        scope_concl
                                        )
        elif left_slice is None:
            return integrate_conclusion(context.comp_env,
                                        lambda scope_expr, right_expr: PyArraySlice(scope_expr, None, right_expr),
                                        scope_concl,
                                        right_slice_concl
                                        )
        elif right_slice is None:
            return integrate_conclusion(context.comp_env,
                                        lambda scope_expr, left_expr: PyArraySlice(scope_expr, left_expr, None),
                                        scope_concl,
                                        left_slice_concl
                                        )
        else:
            return integrate_conclusion(context.comp_env,
                                        lambda scope_expr, left_expr, right_expr: \
                                            PyArraySlice(scope_expr, left_expr, right_expr),
                                        scope_concl,
                                        left_slice_concl,
                                        right_slice_concl
                                        )
    else:
        NOT_REACHABLE()
def python_native_literal(name):
    """Map "True"/"False"/"None" to a PyLiteral; other names give None."""
    try:
        value = {"True": True, "False": False, "None": None}[name]
    except KeyError:
        return None
    return PyLiteral(value)
def python_control_name(name):
    """Map "break"/"continue" to the matching control statement node,
    or None for any other name."""
    if name == "break":
        return PyBreak()
    if name == "continue":
        return PyContinue()
    return None
@node_translator.add.name
def nt_name(translator, lisn, premise, context):
    """Translate a bare name node.

    Order of handling: python literals (True/False/None), 'pass',
    control names (break/continue), then ordinary variable lookup.
    """
    name = lisn["name"]
    use_return_value = premise.use_return_value
    def set_noreturn_error(name):
        set_comp_error(context, CompileError("NoReturnValue",
                                             "'%s' cannot have return value" % name,
                                             lisn["locinfo"]))
    native_literal = python_native_literal(name)
    if native_literal is not None:
        return expr_conclusion(native_literal)
    if name == "pass":
        if use_return_value:
            return stmt_result_conclusion([PyPass()], PyLiteral(None))
        else:
            return stmt_conclusion([PyPass()])
    control_stmt = python_control_name(name)
    if control_stmt is not None:
        if use_return_value:
            set_noreturn_error(name)
            return error_conclusion()
        else:
            return stmt_conclusion([control_stmt])
    # BUGFIX: lisn is a dict, so hasattr(lisn, "meta_id") was always
    # False and the pre-bound meta_id path was dead; test key membership
    if "meta_id" not in lisn:
        if context.comp_env.has_name(name):
            name_id, info = context.comp_env.lookup_name(name)
        else:
            set_comp_error(context,
                           CompileError("UnboundVariable",
                                        "Name '%s' is not found"%name,
                                        lisn["locinfo"]))
            return error_conclusion()
    else:
        name_id = lisn["meta_id"]
        info = context.comp_env.get_id_info(name_id)
    if info.is_var() or info.is_global_scope_var():
        return expr_conclusion(PyMetaID(name_id))
    elif info.is_runtime_extern():
        # BUGFIX: runtime_obj_id lives on context.rt_store
        return expr_conclusion(PyAttrAccess(PyMetaID(context.rt_store.runtime_obj_id), name))
    elif info.is_expander() or info.is_converter():
        err_obj = CompileError("IllegalName",
                               repr(info) + " cannot be used as a variable",
                               lisn["locinfo"])
        set_comp_error(context, err_obj)
        return error_conclusion()
    else:
        NOT_REACHABLE()
@node_translator.add.literal
def nt_literal(translator, lisn, premise, context):
    """Translate a literal node into a PyLiteral conclusion."""
    literal_type = lisn["literal_type"]
    content = lisn["content"]
    if literal_type == "string":
        return expr_conclusion(PyLiteral(content))
    if literal_type == "integer":
        return expr_conclusion(PyLiteral(int(content)))
    if literal_type == "float":
        return expr_conclusion(PyLiteral(float(content)))
    NOT_REACHABLE()
@node_translator.add.binop
def nt_binop(translator, lisn, premise, context):
    """Translate a binary operator node; && and || map to and/or."""
    pyop = {"&&": "and", "||": "or"}.get(lisn["op"], lisn["op"])
    lhs_concl = translator(lisn["lhs"], Premise(True), context)
    rhs_concl = translator(lisn["rhs"], Premise(True), context)
    return integrate_conclusion(context.comp_env,
                                lambda lhs_expr, rhs_expr:
                                    PyBinop(pyop, lhs_expr, rhs_expr),
                                lhs_concl,
                                rhs_concl)
@node_translator.add.unop
def nt_unop(translator, lisn, premise, context):
    """Translate a unary operator node; ! maps to not."""
    pyop = "not" if lisn["op"] == "!" else lisn["op"]
    operand_concl = translator(lisn["param"], Premise(True), context)
    return integrate_conclusion(context.comp_env,
                                lambda operand_expr: PyUnop(pyop, operand_expr),
                                operand_concl)
@node_translator.add.assign
def nt_assign(translator, lisn, premise, context):
    # Translate assignment to a name, an attribute, or an item.
    # The conclusion's result is always PyLiteral(None).
    op = lisn["op"]  # NOTE(review): read but currently unused below
    lvalue_type = lisn["lvalue_type"]
    param = lisn["param"]
    param_concl = translator(param, Premise(True), context)
    if lvalue_type == "name":
        lvalue_name = lisn["lvalue_name"]
        # refuse to rebind python literals/control keywords
        if python_native_literal(lvalue_name) or \
           python_control_name(lvalue_name):
            set_comp_error(context,
                           CompileError("IllegalAssignName",
                                        "cannot assign to %s"%lvalue_name,
                                        lisn["locinfo"]))
            return error_conclusion()
        local_id = ensure_local_var_name(context.comp_env, lvalue_name)
        return integrate_conclusion(context.comp_env,
                                    lambda param_expr: \
                                        (PyAssignmentToName(PyMetaID(local_id),
                                                            param_expr),
                                         PyLiteral(None)),
                                    param_concl)
    elif lvalue_type == "attr":
        # scope.attr = param
        lvalue_name = lisn["lvalue_name"]
        lvalue_scope = lisn["lvalue_scope"]
        scope_concl = translator(lvalue_scope,
                                 Premise(True),
                                 context)
        return integrate_conclusion(context.comp_env,
                                    lambda param_expr, scope_expr: \
                                        (PyAssignmentToAttr(scope_expr,
                                                            lvalue_name,
                                                            param_expr),
                                         PyLiteral(None)),
                                    param_concl,
                                    scope_concl)
    elif lvalue_type == "array":
        # scope[index] = param
        lvalue_scope = lisn["lvalue_scope"]
        lvalue_index = lisn["lvalue_index"]
        scope_concl = translator(lvalue_scope,
                                 Premise(True),
                                 context)
        index_concl = translator(lvalue_index,
                                 Premise(True),
                                 context)
        return integrate_conclusion(context.comp_env,
                                    lambda param_expr, scope_expr, index_expr: \
                                        (PyAssignmentToItem(scope_expr,
                                                            index_expr,
                                                            param_expr),
                                         PyLiteral(None)),
                                    param_concl,
                                    scope_concl,
                                    index_concl)
    else:
        NOT_REACHABLE()
@node_translator.add.suite
def nt_suite(translator, lisn, premise, context):
    # Suites are handled by the shared translate_suite helper.
    return translate_suite(translator, lisn, premise, context)
@node_translator.add.xexpr
def nt_xexpr(translator, lisn, premise, context):
    """Translate an x-expression.

    1. check if multi name is def, import, import_from (head labels)
    2. lookahead name part to check if name is expander/converter
    3. lastly treat label as just function application
    """
    if lisn["has_head_label"]:
        head_label = lisn["head_label"]
        if head_label == "def":
            return translate_def(translator, lisn, Premise(False), context)
        elif head_label == "import":
            return translate_import(translator, lisn, Premise(False), context)
        elif head_label == "import_from":
            return translate_import_from(translator, lisn, Premise(False), context)
        elif head_label == "pyimport":
            return translate_pyimport(translator, lisn, Premise(False), context)
        elif head_label == "pyimport_from":
            return translate_pyimport_from(translator, lisn, Premise(False), context)
        else:
            set_comp_error(context,
                           CompileError(
                               "UnknownHeadLabel",
                               "Unknown head label: %s"%head_label,
                               lisn["locinfo"]))
            return error_conclusion()
    else:
        head_expr = lisn["head_expr"]
        head_expr_name = force_name(head_expr)
        arg_info = lisn["arg_info"]
        # lookahead: a name bound to a macro is expanded/converted
        # instead of being compiled as an ordinary call
        if head_expr_name is not None and \
           context.comp_env.has_name(head_expr_name):
            _, info = context.comp_env.lookup_name(head_expr_name)
            if info.is_expander():
                return translator(info.expand(translator,
                                              lisn,
                                              premise,
                                              context),
                                  premise,
                                  context)
            elif info.is_converter():
                return info.convert(translator, lisn, premise, context)
        applicant_concl = translator(head_expr, Premise(True), context)
        parg_concls = [translator(parg, Premise(True), context) for parg in arg_info["pargs"]]
        karg_keywords = [k for k, _ in arg_info["kargs"]]
        karg_concls = [translator(karg, Premise(True), context) for _, karg in arg_info["kargs"]]
        star_concl = translator(arg_info["star"],
                                Premise(True),
                                context) if arg_info["has_star"] else None
        dstar_concl = translator(arg_info["dstar"],
                                 Premise(True),
                                 context) if arg_info["has_dstar"] else None
        amp_concl = translator(arg_info["amp"],
                               Premise(True),
                               context) if arg_info["has_amp"] else None
        damp_concl = translator(arg_info["damp"],
                                Premise(True),
                                context) if arg_info["has_damp"] else None
        # function-style xexpr: vertical suite entries become extra
        # positional args (__varg__) or keyword attrs (__vattr__)
        varg_concls = []
        vattr_concls = []
        vattr_keywords = []
        if lisn["has_vert_suite"]:
            for obj in lisn["vert_suite"]["exprs"]:
                param = obj["param"]
                concl = translator(param,
                                   Premise(True),
                                   context)
                if obj["is_arrow"]:
                    label = obj["arrow_lstring"]
                    vattr_keywords.append(label)
                    vattr_concls.append(concl)
                else:
                    varg_concls.append(concl)
        # (removed a dead `success` flag that was never set to False)
        def integrator(callee_expr, parg_exprs, karg_exprs, star_expr, dstar_expr,
                       amp_expr, damp_expr, varg_exprs, vattr_exprs):
            tuple_maker_id, _ = context.comp_env.lookup_global_name("tuple")
            dict_maker_id, _ = context.comp_env.lookup_global_name("dict")
            # BUGFIX: materialize zip() results as lists -- under
            # python 3 a zip object cannot be .append()ed to and is a
            # one-shot iterator (PyCall renders kw_exprs when printed)
            real_kargs = list(zip(karg_keywords, karg_exprs))
            preseq_stmts = []
            if amp_expr:
                amp_tup = PyCall(PyMetaID(tuple_maker_id), [amp_expr], None)
            if varg_exprs:
                varg_tup = PyTupleExpr(varg_exprs)
            if vattr_exprs:
                vattr_dict = PyCall(PyMetaID(dict_maker_id), [],
                                    list(zip(vattr_keywords, vattr_exprs)))
            if amp_expr is None and varg_exprs:
                real_kargs.append(("__varg__", varg_tup))
            elif amp_expr is not None and not varg_exprs:
                real_kargs.append(("__varg__", amp_tup))
            elif amp_expr is not None and varg_exprs:
                real_kargs.append(("__varg__",
                                   PyBinop("+",
                                           amp_tup,
                                           varg_tup)))
            if damp_expr is None and vattr_exprs:
                real_kargs.append(("__vattr__", vattr_dict))
            elif damp_expr is not None and not vattr_exprs:
                real_kargs.append(("__vattr__", damp_expr))
            elif damp_expr is not None and vattr_exprs:
                # merge the explicit **-dict with the vertical attrs at
                # runtime through a fresh immediate
                imd_id = context.comp_env.issue_local_immediate()
                preseq_stmts.append(PyAssignmentToName(PyMetaID(imd_id), damp_expr))
                preseq_stmts.append(PyExprStmt(PyCall(PyAttrAccess(PyMetaID(imd_id),
                                                                   "update"),
                                                      [vattr_dict],
                                                      None)))
                real_kargs.append(("__vattr__", PyMetaID(imd_id)))
            result_expr = PyCall(callee_expr,
                                 parg_exprs,
                                 real_kargs,
                                 star_expr,
                                 dstar_expr)
            return (preseq_stmts, result_expr)
        return xintegrate_conclusion(context.comp_env,
                                     integrator,
                                     applicant_concl,
                                     parg_concls,
                                     karg_concls,
                                     star_concl,
                                     dstar_concl,
                                     amp_concl,
                                     damp_concl,
                                     varg_concls,
                                     vattr_concls)
@LISNPattern
def branch_pat(case, default):
    '''
    Returns -
    (success, failure_reason, cond_pairs, else_pair_or_None)
    '''
    # Pattern matcher for branch ("if") forms.  The inner case docstrings
    # are LISN pattern-DSL specs parsed at runtime by @LISNPattern -- they
    # must not be edited as documentation.
    @case
    def f(predicates, consequents, **kwds):
        '''
        NAME$head>
            __kleene_plus__(predicates): $expr
            --
            __kleene_plus__(consequents): $expr
        '''
        # Unwrap pattern-group dicts down to raw expression nodes.
        predicates = [d["expr"] for d in predicates]
        consequents = [d["expr"] for d in consequents]
        if len(predicates) == len(consequents):
            # Every consequent has a guard: no else clause.
            return (True, "", zip(predicates, consequents), None)
        elif len(predicates) + 1 == len(consequents):
            # One trailing unguarded consequent acts as the else clause.
            return (True, "", zip(predicates, consequents[:-1]), consequents[-1])
        else:
            conseq_cnt = len(consequents)
            return (False, "Number of predicates is expected to be %d or %d" % (conseq_cnt - 1, conseq_cnt),
                    [], None)
    @default
    def el():
        return (False, "Bad Form", [], None)
def translate_branch(translator, lisn, premise, context):
    """Translate an if/branch form into a (possibly nested) PyIfStmt.

    When the caller's premise requires a value, each arm assigns its
    result into a fresh immediate id, which becomes the conclusion's
    result expression.  Predicates with pre-sequential statements force
    a nested PyIfStmt in the else branch (they cannot become elif pairs).
    """
    use_return_value = premise.use_return_value
    if use_return_value:
        result_id = context.comp_env.issue_local_immediate()
    else:
        result_id = None
    def conclusion_to_stmts(concl):
        # Flatten one arm's conclusion into a statement list, routing the
        # value into result_id when the branch produces a value.
        if use_return_value:
            return concl.preseq_stmts + \
                    stmtify_expr(concl.result_expr,
                                 True,
                                 result_id)
        elif concl.has_result():
            if concl.is_pure_expr() or \
                concl.result_expr.may_have_side_effect():
                return stmtify_expr(concl.result_expr, False, None)
            else:
                return concl.preseq_stmts + \
                        stmtify_expr(concl.result_expr, False, None)
        else:
            return concl.preseq_stmts
    def set_branch_error(lisn, msg):
        set_comp_error(context,
                       CompileError("Branch",
                                    msg,
                                    lisn["locinfo"]))
    success, error_reason, cond_pairs, else_lisn = branch_pat(lisn)
    if not success:
        set_branch_error(lisn, error_reason)
        return error_conclusion()
    preseq_stmts = []
    first = True
    success = True
    if_stmt = PyIfStmt(None)
    # iter_if_stmt tracks the innermost PyIfStmt we are appending arms to.
    iter_if_stmt = if_stmt
    for pred, conseq in cond_pairs:
        cur_success = True
        pred_concl = translator(pred, Premise(True), context)
        conseq_concl = translator(conseq, premise.copy(), context)
        if pred_concl.error_occurred():
            success = False
            cur_success = False
        elif conseq_concl.error_occurred():
            success = False
            cur_success = False
        if cur_success:
            if iter_if_stmt.if_pair is None:
                # First arm: its predicate's preseq stmts run before the if.
                preseq_stmts.extend(pred_concl.preseq_stmts)
                iter_if_stmt.if_pair = (pred_concl.result_expr,
                                        conclusion_to_stmts(conseq_concl))
            else:
                if pred_concl.is_pure_expr():
                    # Pure predicate can be a plain elif.
                    iter_if_stmt.elif_pairs.append(
                        (pred_concl.result_expr,
                         conclusion_to_stmts(conseq_concl)))
                else:
                    # Predicate needs setup statements: nest a new if in the
                    # else branch so the setup runs only when reached.
                    iter_if_stmt.else_stmt_list.extend(pred_concl.preseq_stmts)
                    nested_if_stmt = PyIfStmt((
                        pred_concl.result_expr,
                        conclusion_to_stmts(conseq_concl)))
                    iter_if_stmt.else_stmt_list.append(nested_if_stmt)
                    iter_if_stmt = nested_if_stmt
    if else_lisn:
        else_concl = translator(else_lisn, premise.copy(), context)
        if else_concl.error_occurred():
            success = False
    else:
        # No else clause: the branch yields None when no predicate holds.
        else_concl = expr_conclusion(PyLiteral(None))
    if not success:
        return error_conclusion()
    iter_if_stmt.else_stmt_list = conclusion_to_stmts(else_concl)
    preseq_stmts.append(if_stmt)
    if use_return_value:
        return stmt_result_conclusion(preseq_stmts,
                                      PyMetaID(result_id))
    else:
        return stmt_conclusion(preseq_stmts)
@LISNPattern
def cond_case_pat(case, default):
    '''
    Returns -
    (success, failure_reason, cond_pairs, else_pair_or_None)
    '''
    # Pattern matcher for cond/case forms.  Inner docstrings are LISN
    # pattern-DSL specs parsed by @LISNPattern -- do not edit them.
    @case
    def f(cases, else_opt):
        '''
        cond:
            __kleene_star__(cases):
                case($case_expr):
                    __kleene_plus__(case_body): $expr
            __optional__(else_opt):
                else:
                    __kleene_plus__(else_body): $expr
        '''
        # Each case becomes (condition expr node, list of body expr nodes).
        cases = [(case_obj["case_expr"],
                  [d["expr"] for d in case_obj["case_body"]])
                 for case_obj in cases]
        if else_opt:
            else_body = [d["expr"] for d in else_opt["else_body"]]
        else:
            else_body = None
        return (True, "", cases, else_body)
    @default
    def el():
        return (False, "Bad Form", None, None)
def is_def_node(node):
    """Return True when *node* is a multi-part xexpr whose head label is "def"."""
    head_label = "def"
    return check_multi_xexpr(node, head_label)
def xtranslate_seq(translator, node_list, premise, context):
    # Translate a sequence of sibling nodes.  def nodes get their function
    # name pre-bound (via child_premise.prebound_id) and their actual
    # translation delayed, so later siblings can refer to names defined
    # earlier in the same sequence.  Only the last node's value is used
    # when the caller requires a return value.
    use_return_value = premise.use_return_value
    success = True
    concls = []
    if node_list:
        last_node = node_list[-1]
        for node in node_list:
            # Only the final node of the sequence yields the value.
            child_premise = Premise(use_return_value and node is last_node)
            if is_def_node(node):
                def_name = force_name_to_head_expr(node)
                if def_name is None:
                    set_comp_error(context,
                                   CompileError("DefName",
                                                "Name of def node is not appropriate",
                                                node["locinfo"]))
                    success = False
                else:
                    # Pre-bind the function's name so siblings can call it,
                    # then delay the body translation.
                    function_id = ensure_function_name(context.comp_env, def_name)
                    child_premise.prebound_id = function_id # HACK?
                    @delay(node, child_premise, context)
                    def translation_promise(node, child_premise, context):
                        return translator(node, child_premise, context)
                    concls.append(translation_promise)
            else:
                concls.append(translator(node, child_premise, context))
        # force evaluation of translation of defs
        for idx in range(len(concls)):
            concl = concls[idx]
            if is_delayed(concl):
                concls[idx] = concl.force()
        if not success:
            return error_conclusion()
        if use_return_value:
            return seq_conclusion(concls)
        else:
            return noreturn_conclusion(concls)
    else:
        # empty suite
        if use_return_value:
            return expr_conclusion(PyLiteral(None))
        else:
            return stmt_conclusion(PyPass())
def ltranslate_in_app_order(translator, node_list, context):
    '''
    Translate nodes left-to-right preserving application (evaluation) order.

    Returns -
        (success,
         Pre-sequential stmts,
         list of expr )
    '''
    preseq_stmts = []
    result_exprs = []
    success = True
    for node in node_list:
        concl = translator(node, Premise(True), context)
        if concl.error_occurred():
            success = False
            continue
        preseq_stmts.extend(concl.preseq_stmts)
        if concl.has_result() and concl.result_expr.may_have_side_effect():
            # Pin side-effecting results into immediates so their effects
            # happen now, in source order, not at the eventual use site.
            imd_id = context.comp_env.issue_local_immediate()
            preseq_stmts.append(PyAssignmentToName(PyMetaID(imd_id), concl.result_expr))
            result_exprs.append(PyMetaID(imd_id))
        else:
            result_exprs.append(concl.result_expr)
    return (success, preseq_stmts, result_exprs)
def translate_suite(translator, suite, premise, context):
    """Translate a vertical suite by flattening it into a node list first."""
    nodes = suite_to_node_list(suite)
    return xtranslate_seq(translator, nodes, premise, context)
def translate_def(translator, lisn, premise, context):
    """Translate a `def` form into a PyDefun.

    Validates the formal-argument list, checks for duplicated argument
    names, evaluates keyword defaults in the *enclosing* scope, then sets
    up a fresh local frame for the body.  The def form itself evaluates
    to None when used as an expression.
    """
    assert is_def_node(lisn)
    def gather_formal_info():
        # Extract (def_name, positional names, keyword pairs, *name, **name)
        # from the xexpr; reports compile errors and returns None on failure.
        arg_info = lisn["arg_info"]
        parg_strs = []
        kwd_pairs = []
        star_str = None
        dstar_str = None
        argument_test_success = True
        for parg_node in arg_info["pargs"]:
            parg_str = force_name(parg_node)
            if parg_str is None:
                argument_test_success = False
            else:
                parg_strs.append(parg_str)
        for karg_str, karg_val_node in arg_info["kargs"]:
            kwd_pairs.append((karg_str, karg_val_node))
        if arg_info["has_star"]:
            star_str = force_name(arg_info["star"])
            if star_str is None:
                set_comp_error(context,
                               CompileError("FormalArgument",
                                            "Invalid star formal argument",
                                            lisn["head_expr"]["locinfo"]))
                argument_test_success = False
        if arg_info["has_dstar"]:
            dstar_str = force_name(arg_info["dstar"])
            if dstar_str is None:
                set_comp_error(context,
                               CompileError("FormalArgument",
                                            "Invalid double-star formal argument",
                                            lisn["head_expr"]["locinfo"]))
                argument_test_success = False
        if arg_info["has_damp"] or arg_info["has_amp"]:
            set_comp_error(context,
                           CompileError("NotSupported",
                                        "& or && argument is not supported",
                                        lisn["head_expr"]["locinfo"]))
            argument_test_success = False
        def_name = force_name(lisn["head_expr"])
        if def_name is None:
            set_comp_error(context,
                           CompileError("DefName",
                                        "Name of def node is not appropriate",
                                        lisn["head_expr"]["locinfo"]))
            argument_test_success = False
        if argument_test_success:
            return (def_name, parg_strs, kwd_pairs, star_str, dstar_str)
        else:
            return None
    def make_defun(def_xexpr, def_id, parg_ids, karg_id_pairs, star_id, dstar_id):
        '''
        Translate the body suite (inside the already-pushed local frame)
        and wrap everything into a PyDefun.  An empty body becomes `pass`.

        Return -
            PyDefun
        '''
        if def_xexpr["has_vert_suite"]:
            def_stmts = []
            suite = def_xexpr["vert_suite"]
            concl = translator(suite, Premise(True), context)
            def_stmts = concl.preseq_stmts
            # The suite's final value is the function's return value.
            def_stmts.append(PyReturn(concl.result_expr))
        else:
            def_stmts = [PyPass()]
        return PyDefun(def_id,
                       parg_ids,
                       karg_id_pairs,
                       def_stmts,
                       star_id,
                       dstar_id)
    formal_info = gather_formal_info()
    if formal_info is None:
        return error_conclusion()
    # def_name: string
    # parg_strs: string list
    # kwd_pairs: (string, node) list
    def_name, parg_strs, kwd_pairs, star_str, dstar_str = formal_info
    keywords = [k for k, _ in kwd_pairs]
    # Keyword defaults are translated in the enclosing scope, before the
    # new frame is pushed -- matching Python's default-evaluation rules.
    kw_concls = [translator(knode, Premise(True), context)
                 for _, knode in kwd_pairs]
    kw_success, preseq_stmts, karg_default_exprs, _ = integrate_list(context.comp_env, kw_concls)
    if not kw_success:
        return error_conclusion()
    # prebound_id is set by xtranslate_seq when the name was registered
    # ahead of time so sibling nodes could refer to this function.
    prebound_id = premise.prebound_id if hasattr(premise, "prebound_id") else None
    if prebound_id is None:
        function_id = ensure_function_name(context.comp_env, def_name) # function name
    else:
        assert context.comp_env.has_local_name(def_name, recursive=False)
        _id, info = context.comp_env.lookup_name(def_name)
        assert _id == prebound_id
        assert info.is_var()
        function_id = prebound_id # name is already bound to
    # formal arguments duplication check
    name_set = set([])
    no_duplication_found = True
    for name in parg_strs + \
                keywords + \
                ([star_str] if star_str else []) + \
                ([dstar_str] if dstar_str else []):
        if name not in name_set:
            name_set.add(name)
        else:
            no_duplication_found = False
            set_comp_error(context,
                           CompileError("FormalArgument",
                                        "Duplicated name of formal argument: %s"%name,
                                        lisn["locinfo"]))
    if not no_duplication_found:
        return error_conclusion()
    ## NEW ENV
    context.comp_env.setup_local_frame("def")
    # set names of arguments into new local environment
    parg_ids = []
    karg_id_pairs = []
    for name in parg_strs:
        parg_ids.append(PyMetaID(ensure_local_arg_name(context.comp_env, name)))
    for name, kexpr in zip(keywords, karg_default_exprs):
        k_id = ensure_local_arg_name(context.comp_env, name)
        karg_id_pairs.append((PyMetaID(k_id), kexpr))
    star_id = None
    dstar_id = None
    if star_str:
        star_id = PyMetaID(ensure_local_arg_name(context.comp_env, star_str))
    if dstar_str:
        dstar_id = PyMetaID(ensure_local_arg_name(context.comp_env, dstar_str))
    defun = make_defun(lisn,
                       PyMetaID(function_id),
                       parg_ids,
                       karg_id_pairs,
                       star_id,
                       dstar_id)
    ## DEL ENV
    context.comp_env.contract_local_frame()
    if premise.use_return_value:
        # A def used as an expression evaluates to None.
        return stmt_result_conclusion(preseq_stmts + [defun], PyLiteral(None))
    else:
        return stmt_conclusion(preseq_stmts + [defun])
def translate_let(translator, lisn, premise, context):
    """Translate a `let` form: keyword args become frame-local bindings
    visible inside the vertical suite; the suite's value is the result.

    A `let` without a vertical suite evaluates to None.
    """
    if lisn["has_vert_suite"]:
        arg_info = lisn["arg_info"]
        # TODO: syntax checking ( keyword duplication, prohbiting star&dstar argument)
        kargs = arg_info["kargs"]
        keywords = [k for k, _ in kargs]
        # Binding expressions are translated in the enclosing scope,
        # before the new frame is pushed.
        concls = [translator(node, Premise(True), context)
                  for _, node in kargs]
        success, preseq_stmts, let_exprs, _ = integrate_list(context.comp_env, concls)
        context.comp_env.setup_local_frame("let")
        for name, expr in zip(keywords, let_exprs):
            let_id = ensure_local_var_name(context.comp_env, name)
            preseq_stmts += stmtify_expr(expr, True, let_id)
        suite_result = translator(lisn["vert_suite"], premise.copy(), context)
        if suite_result.error_occurred():
            success = False
            result_expr = None
        else:
            preseq_stmts += suite_result.preseq_stmts
            if suite_result.has_result():
                result_expr = suite_result.result_expr
            else:
                result_expr = PyLiteral(None)
        context.comp_env.contract_local_frame()
        if not success:
            return error_conclusion()
        elif premise.use_return_value:
            return stmt_result_conclusion(preseq_stmts, result_expr)
        else:
            # Value unused: keep side-effecting results, drop pure ones.
            if result_expr.may_have_side_effect():
                preseq_stmts += stmtify_expr(result_expr, False, None)
                return stmt_conclusion(preseq_stmts)
            else:
                return expr_conclusion(PyLiteral(None))
    else:
        return expr_conclusion(PyLiteral(None))
def translate_seq(translator, lisn, premise, context):
    """Translate a `seq` form: the value of its vertical suite, or None
    when the form has no body."""
    if not lisn["has_vert_suite"]:
        return expr_conclusion(PyLiteral(None))
    return translator(lisn["vert_suite"], premise.copy(), context)
@LISNPattern
def lambda_pat(case, default):
    '''
    (success, failure_reason, parg, karg, star, dstar, body)
    '''
    # Pattern matcher for lambda forms (currently unused: translate_lambda
    # is still a stub).  Inner docstring is a LISN pattern-DSL spec.
    @case
    def lam(pargs, karg, star, dstar, body):
        '''
        lambda>
            kleene_star(parg): NAME$name
            keyword -> dict(karg)
            *__optional__(star): NAME$name
            **__optional__(dstar): NAME$name
            --
            __kleene_plus__(body): $expr
        '''
        # BUG FIX: the original read the undefined name `parg` here (the
        # bound argument is `pargs`), which raised NameError as soon as
        # this case matched.
        # NOTE(review): the pattern group is spelled `parg` while the
        # parameter is `pargs` -- confirm how @LISNPattern maps group
        # names to parameters.
        parg = [x["name"] for x in pargs]
        karg = karg["__rest__"]
        star = star["name"] if star else None
        dstar = dstar["name"] if dstar else None
        body = [x["expr"] for x in body]
        return (True, "", parg, karg, star, dstar, body)
    @default
    def el():
        return (False, "Bad form", None, None, None, None, [])
def translate_lambda(translator, lisn, premise, context):
    # Unimplemented stub: lambda translation is planned to emit a Python
    # `lambda` when the body is a pure expression, and fall back to a
    # generated `def` when the body needs pre-sequential statements.
    # pure expr -> use function
    # expr with pre-sequential stmts -> def
    # TODO
    pass
@LISNPattern
def for_pat(case, default):
    # Pattern matcher for for/each iteration heads.  Returns
    # (success, failure_reason, elem, index_or_None, iterable, body).
    # Inner docstrings are LISN pattern-DSL specs.
    @case
    def fr(elem, iterable, opt, body, **kwds):
        '''
        NAME$for>
            NAME$elem
            keyword -> seq:
                __optional__(opt):
                    index -> NAME$index_name
                in -> $iterable
            --
            __kleene_plus__(body): $expr
        '''
        index = opt["index_name"] if opt else None
        body = [x["expr"] for x in body]
        return (True, "", elem, index, iterable, body)
    @case
    def fr2(elems, opt, body, iterable, **kwds):
        '''
        NAME$for>
            _>
                __kleene_plus__(elems): $elem
            keyword -> seq:
                __optional__(opt):
                    index -> NAME$index_name
                in -> $iterable
            --
            __kleene_plus__(body): $expr
        '''
        # NOTE(review): this tuple-destructuring case is deliberately
        # disabled -- the unconditional raise below makes the rest of the
        # body unreachable.  If it were enabled, `elem` would be a list,
        # while _translate_iter_head checks isinstance(elem, tuple).
        raise Exception
        elem = [s["elem"] for s in elems]
        index = opt["index_name"] if opt else None
        body = [x["expr"] for x in body]
        return (True, "", elem, index, iterable, body)
    @default
    def el():
        return (False, "Bad form", None, None, None, None)
def _translate_iter_head(translator, lisn, premise, context,
                         body_kont, error_handler):
    # Shared head translation for `for` and `each`: parses the iteration
    # form, binds the element (and optional index via enumerate), then
    # hands off to body_kont(preseq_stmts, elem_obj, iterable_expr, body).
    success, failure_reason, elem, index, iterable, body = for_pat(lisn)
    if not success:
        error_handler(failure_reason)
        return error_conclusion()
    enumerate_id, _ = context.comp_env.lookup_global_name("enumerate")
    iterable_concl = translator(iterable, Premise(True), context)
    preseq_stmts = iterable_concl.preseq_stmts
    iterable_expr = iterable_concl.result_expr
    elem_obj = None
    index_id = None
    if isinstance(elem, tuple):
        # Tuple destructuring of elements (currently unreachable: for_pat's
        # destructuring case is disabled and returns a list, not a tuple).
        elem_ids = [PyMetaID(ensure_local_var_name(context.comp_env, x))
                    for x in elem]
        if index is not None:
            # index requested: wrap the iterable in enumerate().
            index_id = ensure_local_var_name(context.comp_env, index)
            elem_ids.insert(0, PyMetaID(index_id))
            iterable_expr = PyCall(PyMetaID(enumerate_id), [iterable_expr], None)
        elem_obj = PyTupleExpr(elem_ids)
    else:
        elem_id = ensure_local_var_name(context.comp_env, elem)
        if index is not None:
            index_id = ensure_local_var_name(context.comp_env, index)
            elem_obj = PyTupleExpr([PyMetaID(index_id), PyMetaID(elem_id)])
            iterable_expr = PyCall(PyMetaID(enumerate_id), [iterable_expr], None)
        else:
            elem_obj = PyMetaID(elem_id)
    return body_kont(preseq_stmts, elem_obj, iterable_expr, body)
def translate_for(translator, lisn, premise, context):
    """Translate a `for` form into a PyForStmt.

    When a value is required, the loop accumulates each iteration's body
    value by appending into a fresh list immediate.
    """
    use_return_value = premise.use_return_value
    if use_return_value:
        result_id = context.comp_env.issue_local_immediate()
    else:
        result_id = None
    def kont(head_preseq_stmts, elem_obj, iterable_expr, body):
        # Continuation: build the loop body once the head is translated.
        body_concl = xtranslate_seq(translator,
                                    body,
                                    premise.copy(),
                                    context)
        stmts = head_preseq_stmts
        body_stmts = body_concl.preseq_stmts
        body_result_expr = body_concl.result_expr
        if use_return_value:
            # result.append(<body value>) each iteration
            body_stmts.append(PyExprStmt(PyCall(PyAttrAccess(PyMetaID(result_id), "append"),
                                                [body_result_expr],
                                                None)))
        stmts.append(PyForStmt(elem_obj, iterable_expr, body_stmts))
        if use_return_value:
            # result = [] must precede the loop
            stmts.insert(0, PyAssignmentToName(PyMetaID(result_id), PyLiteral([])))
            return stmt_result_conclusion(stmts, PyMetaID(result_id))
        else:
            return stmt_conclusion(stmts)
    def error_handler(reason):
        set_comp_error(context,
                       CompileError("for",
                                    reason,
                                    lisn["locinfo"]))
    return _translate_iter_head(translator, lisn, premise.copy(), context,
                                kont, error_handler)
def translate_each(translator, lisn, premise, context):
    """Translate an `each` form into a PyForStmt.

    Unlike `for`, every expression in the body contributes a value, and
    the per-iteration values are gathered with result.extend([...]).
    """
    use_return_value = premise.use_return_value
    if use_return_value:
        result_id = context.comp_env.issue_local_immediate()
    else:
        result_id = None
    def kont(head_preseq_stmts, elem_obj, iterable_expr, body):
        # Continuation: translate body expressions in application order.
        success, body_stmts, result_exprs = ltranslate_in_app_order(translator,
                                                                    body,
                                                                    context)
        if not success:
            return error_conclusion()
        stmts = head_preseq_stmts
        if use_return_value:
            # result.extend([<body exprs>]) each iteration
            body_stmts.append(PyExprStmt(PyCall(PyAttrAccess(PyMetaID(result_id), "extend"),
                                                [PyListExpr(result_exprs)],
                                                None)))
        stmts.append(PyForStmt(elem_obj,
                               iterable_expr,
                               body_stmts))
        if use_return_value:
            # result = [] must precede the loop
            stmts.insert(0, PyAssignmentToName(PyMetaID(result_id), PyLiteral([])))
            return stmt_result_conclusion(stmts, PyMetaID(result_id))
        else:
            return stmt_conclusion(stmts)
    def error_handler(reason):
        set_comp_error(context,
                       CompileError("each",
                                    reason,
                                    lisn["locinfo"]))
    return _translate_iter_head(translator, lisn, premise.copy(), context,
                                kont, error_handler)
@LISNPattern
def html_node_pat(case, default):
    # Pattern matcher for HTML tag forms.  Returns
    # (success, failure_reason, tag_name, attr_dict, body_exprs,
    #  updater_expr_or_falsy).  Inner docstring is a LISN pattern-DSL spec.
    @case
    def cond(html_node, attr, updater, body):
        '''
        NAME$html_node>
            keyword -> dict(attr)
            **__optional__(updater): $expr
            --
            __kleene_star__(body): $expr
        '''
        tag_name = html_node
        attr = attr["__rest__"]
        body = [x["expr"] for x in body]
        if updater:
            # Unwrap the optional **updater match to its expr node.
            updater = updater["expr"]
        return (True, "", tag_name, attr, body, updater)
    @default
    def el():
        return (False, "Bad Form", None, None, None, None)
# Name under which the HTML tag pool (bound to tempy.tag.TagPool in
# main_translate's extimport setup) is visible in the compile environment.
HTML_TAGPOOL_NAME = "__html__"
def translate_html_node(translator, lisn, premise, context):
    """Translate an HTML tag form into a call on the tag pool.

    Horizontal keyword arguments become the attribute dict; body
    expressions become positional children; a **updater expression is
    merged into the attribute dict via dict.update().
    """
    tagpool_id, _ = context.comp_env.lookup_global_name(HTML_TAGPOOL_NAME)
    success, failure_reason, tag_name, attr, body, updater = html_node_pat(lisn)
    if not success:
        set_comp_error(context,
                       CompileError("HtmlNode",
                                    failure_reason,
                                    lisn["locinfo"]))
        return error_conclusion()
    attr_pairs = attr.items()
    attr_keys = [k for k, _ in attr_pairs]
    # Translate attribute values, body children and the optional updater in
    # one pass so side effects happen in source order.
    success, stmts, param_exprs = \
        ltranslate_in_app_order(translator,
                                [v for _, v in attr_pairs] +
                                body +
                                ([updater] if updater else []),
                                context)
    # BUG FIX: the original ignored this success flag; on a translation
    # error param_exprs can be short and the slicing below misbehaves.
    if not success:
        return error_conclusion()
    attr_exprs = param_exprs[:len(attr_pairs)]
    body_exprs = param_exprs[len(attr_pairs):-1] if updater else param_exprs[len(attr_pairs):]
    updater_expr = param_exprs[-1] if updater else None
    dict_expr = PyDictExpr(dict(zip(map(PyLiteral, attr_keys), attr_exprs)))
    attr_expr = None
    if updater is None:
        attr_expr = dict_expr
    else:
        # Materialize the dict, then merge the updater into it.
        local_imd = context.comp_env.issue_local_immediate()
        stmts.extend(stmtify_expr(dict_expr, True, local_imd))
        # BUG FIX: stmtify_expr takes three arguments at every other call
        # site; the original omitted the trailing None here.
        stmts.extend(stmtify_expr(PyCall(PyAttrAccess(PyMetaID(local_imd), "update"),
                                         [updater_expr], []),
                                  False, None))
        attr_expr = PyMetaID(local_imd)
    caller_pargs = [attr_expr]
    caller_pargs.extend(body_exprs)
    mk = PyCall(PyAttrAccess(PyMetaID(tagpool_id),
                             tag_name),
                caller_pargs,
                None)
    return stmt_result_conclusion(stmts, mk)
def _make_import_accessor(context, names):
    """Build a call to the runtime importer for dotted-name parts *names*."""
    name_literals = [PyLiteral(part) for part in names]
    return PyCall(PyMetaID(context.rt_store.importer_id), name_literals, None)
def translate_import(translator, lisn, premise, context):
    """Translate `import a.b.c`: bind local name `c` to the result of the
    runtime importer called with the dotted-name parts."""
    assert check_multi_xexpr(lisn, "import")
    head_node = lisn["head_expr"]
    names = force_dotted_name(head_node)
    if names is None:
        # Not a dotted name: report and bail out.
        set_comp_error(context,
                       CompileError("IllegalImportName",
                                    "Not appropriate import name",
                                    head_node["locinfo"]))
        return error_conclusion()
    accessor = _make_import_accessor(context, names)
    module_id = ensure_local_var_name(context.comp_env, names[-1])
    assign = PyAssignmentToName(PyMetaID(module_id), accessor)
    return stmt_conclusion([assign])
def set_error(context, lisn, _type, name):
    """Shorthand: record a CompileError of *_type* located at *lisn*."""
    err = CompileError(_type, name, lisn["locinfo"])
    set_comp_error(context, err)
def translate_pyimport(translator, lisn, premise, context):
    """Translate `pyimport a.b.c` into a native Python import statement,
    binding the last dotted component as a local name.

    Reports a compile error on a non-dotted head or a vertical body.
    """
    head_node = lisn["head_expr"]
    names = force_dotted_name(head_node)
    # BUG FIX: the original indexed names[-1] before checking for None,
    # raising TypeError instead of reporting the compile error.
    if names is None:
        set_error(context, head_node, "IllegalPyImportName", "Not appropriate pyimport name")
        return error_conclusion()
    if lisn["has_vert_suite"]:
        set_error(context, head_node, "IllegalPyImportForm", "No vertical body expected for pyimport")
        return error_conclusion()
    importee_id = ensure_local_var_name(context.comp_env, names[-1])
    return stmt_result_conclusion([PyImportStmt(names, PyMetaID(importee_id))],
                                  PyLiteral(None))
@LISNPattern
def pyimport_from_pat(case, default):
    # Matches `pyimport_from a.b.c: name [-> alias] ...`.  Returns
    # (success, failure_reason, module_name_parts,
    #  list of name-or-(src, dest)-pairs).  Inner docstring is a LISN
    # pattern-DSL spec.
    @case
    def c1(_from, imported_names):
        '''
        pyimport_from $_from:
            __kleene_plus__(imported_names):
                __or__(opt):
                    NAME$src_name
                    NAME$src_name -> NAME$dest_name
        '''
        names = force_dotted_name(_from)
        if names is None:
            return (False, "Bad pyimport name", None, None)
        result = []
        for x in imported_names:
            import_obj = x["opt"]
            if "dest_name" in import_obj:
                result.append((import_obj["src_name"], import_obj["dest_name"]))
            else:
                result.append(import_obj["src_name"])
        # BUG FIX: the original returned the raw match objects
        # (imported_names) instead of the normalized `result`, so
        # translate_pyimport_from saw dicts where it expects strings or
        # (src, dest) tuples.
        return (True, "", names, result)
    @default
    def d():
        return (False, "Bad form", None, None)
def translate_pyimport_from(translator, lisn, premise, context):
    """Translate `pyimport_from a.b.c: name [-> alias] ...` into a native
    Python from-import, binding each (possibly aliased) name locally."""
    success, failure_reason, module_names, dest_name_or_pairs = pyimport_from_pat(lisn)
    if not success:
        set_error(context, lisn, "PyImportFrom", failure_reason)
        return error_conclusion()
    new_dnp = []
    for dp in dest_name_or_pairs:
        if isinstance(dp, tuple):
            # (source name in module, local alias)
            src, dest = dp
            dest_id = ensure_local_var_name(context.comp_env, dest)
            new_dnp.append((src, PyMetaID(dest_id)))
        else:
            # plain name: import under the same name
            dest_id = ensure_local_var_name(context.comp_env, dp)
            new_dnp.append((dp, PyMetaID(dest_id)))
    return stmt_result_conclusion([PyImportFromStmt(module_names, new_dnp)],
                                  PyLiteral(None))
def translate_import_from(translator, lisn, premise, context):
    """Translate `import_from a.b.c:` with a vertical list of names
    (optionally `source -> dest` aliased) into runtime-importer calls.

    The module is fetched once into an immediate, then each requested
    name becomes a local bound to an attribute access on it.
    """
    assert check_multi_xexpr(lisn, "import_from")
    def extract_import_names(suite):
        '''
        Returns -
            (success?, NAME_OR_NAME_PAIR list)
            where NAME_OR_NAME_PAIR is (string | (string, string)) list
        '''
        success = True
        result = []
        for obj in suite["exprs"]:
            if obj["param"]["type"] != "name":
                # BUG FIX: the original reported the error location via
                # suite["param"]["locinfo"] -- a suite has no "param" key,
                # so error reporting itself raised KeyError.
                set_comp_error(context,
                               CompileError("IllegalImportName",
                                            "Invalid import name",
                                            obj["param"]["locinfo"]))
                success = False
                continue
            dest_name = obj["param"]["name"]
            if obj["is_arrow"]:
                # `source -> dest` form: import source under name dest.
                source_name = obj["arrow_lstring"]
                result.append((source_name, dest_name))
            else:
                result.append(dest_name)
        return (success, result)
    head_node = lisn["head_expr"]
    import_name_valid, import_names = extract_import_names(lisn["vert_suite"])
    if not import_name_valid:
        return error_conclusion()
    module_names = force_dotted_name(head_node)
    if module_names is None:
        set_comp_error(context,
                       CompileError("IllegalImportName",
                                    "Not appropriate import name",
                                    head_node["locinfo"]))
        return error_conclusion()
    else:
        last_name = module_names[-1]
        accessor = _make_import_accessor(context, module_names)
        module_id = context.comp_env.issue_local_immediate()
        stmts = [PyAssignmentToName(PyMetaID(module_id), accessor)]
        for name_or_name_pair in import_names:
            if isinstance(name_or_name_pair, tuple):
                source_name, dest_name = name_or_name_pair
                dest_id = ensure_local_var_name(context.comp_env, dest_name)
                stmts.append(PyAssignmentToName(PyMetaID(dest_id), PyAttrAccess(PyMetaID(module_id), source_name)))
            else:
                dest_name = name_or_name_pair
                dest_id = ensure_local_var_name(context.comp_env, dest_name)
                stmts.append(PyAssignmentToName(PyMetaID(dest_id), PyAttrAccess(PyMetaID(module_id), dest_name)))
        return stmt_conclusion(stmts)
@LISNPattern
def raise_pat(case, default):
    # Matches `raise(expr)` or bare `raise()`.  Returns the throwee expr
    # node, None for a bare raise, or False on a malformed node.  Inner
    # docstrings are LISN pattern-DSL specs.
    @case
    def c1(expr):
        '''
        raise ($expr)
        '''
        return expr
    @case
    def c2(obj):
        '''
        raise()
        '''
        return None
    @default
    def d():
        return False
def translate_raise(translator, lisn, premise, context):
    """Translate a raise form into a PyRaise statement conclusion."""
    throwee = raise_pat(lisn)
    # `== False` (rather than truthiness) is deliberate: raise_pat returns
    # False for a bad form, None for a bare raise(), or an expr node; only
    # the bad-form case must take this branch (None == False is False).
    if throwee == False:
        set_error(context, lisn, "Raise", "Bad Form")
        return error_conclusion()
    else:
        if throwee:
            concl = translator(throwee,
                               Premise(True),
                               context)
        else:
            # bare raise(): re-raise with no expression
            concl = None
        return xintegrate_conclusion(context.comp_env,
                                     lambda expr: (PyRaise(expr), None),
                                     concl)
@LISNPattern
def try_pat(case, default):
    # Unimplemented: try/except translation.  The case below only sketches
    # the intended surface syntax; it matches and returns None.  Inner
    # docstring is a LISN pattern-DSL spec.
    @case
    def with_fallthrough(_with, body, exc_part):
        '''
        try>
            keyword -> dict(_with)
            --
            __kleene_star__(body): $expr
            __kleene_star__(exc_part):
                NAME$name -> $action
        '''
        pass
    # TODO
@LISNPattern
def class_pat(case, default):
    # Unimplemented: class translation.  The case below only sketches the
    # intended surface syntax; it matches and returns None.  Inner
    # docstring is a LISN pattern-DSL spec.
    @case
    def c1(parent, decls):
        '''
        class x>
            __optional__(parent): NAME$name
            --
            __kleene_star__(decls): $defun
        '''
        pass
    # TODO
@LISNPattern
def list_like_pat(case, default):
    # Pattern matcher for list/tuple-like forms: horizontal elements, an
    # optional *element and &element, then vertical elements.  Returns
    # the element expr nodes in order, or None on a bad form.  Inner
    # docstring is a LISN pattern-DSL spec.
    @case
    def c1(decl_s, decl_star, decl_amp, decl_v, **kwds):
        '''
        NAME$__head__>
            __kleene_star__(decl_s): $expr
            *__optional__(decl_star): $expr
            &__optional__(decl_amp): $expr
            --
            __kleene_star__(decl_v): $expr
        '''
        decls = [x["expr"] for x in decl_s]
        if decl_star:
            decls.append(decl_star["expr"])
        if decl_amp:
            decls.append(decl_amp["expr"])
        decls.extend([x["expr"] for x in decl_v])
        return decls
    @default
    def df():
        return None
def make_list_like_translator(factory, debug_tag):
    """Build a translator for list-like forms.

    *factory* wraps the translated element expressions (e.g. PyTupleExpr
    or PyListExpr); *debug_tag* labels compile errors for this form.
    """
    def translate_list(translator, lisn, premise, context):
        decls = list_like_pat(lisn)
        if decls is None:
            set_error(context, lisn, debug_tag, "Bad form")
            return error_conclusion()
        elem_concls = []
        for decl in decls:
            elem_concls.append(translator(decl, Premise(True), context))
        ok, stmts, elem_exprs, _ = integrate_list(context.comp_env, elem_concls)
        if not ok:
            return error_conclusion()
        return stmt_result_conclusion(stmts, factory(elem_exprs))
    return translate_list
@LISNPattern
def dict_pat(case, default):
    # Pattern matcher for `$$` dict forms.  Returns a flat alternating
    # [key, value, key, value, ...] list, where keys from the keyword form
    # arrive pre-wrapped as expr_conclusion(PyLiteral(...)).  Returns None
    # on a bad form.  Inner docstrings are LISN pattern-DSL specs.
    @case
    def c1(vkw1, vkw2, **kwds):
        '''
        NAME$__double_dollar__>
            keyword -> seq:
                __kleene_star__(vkw1):
                    NAME$key -> $value
            --
            __kleene_star__(vkw2):
                NAME$key -> $value
        '''
        k = []
        for x in vkw1 + vkw2:
            # Keyword keys are plain names: emit them as literal strings.
            k.append(expr_conclusion(PyLiteral(x["key"])))
            k.append(x["value"])
        return k
    @case
    def c2(kvpairs_s, kvpairs_v, **kwds):
        '''
        NAME$__double_dollar__>
            __kleene_star__(kvpairs_s):
                $key
                $value
            --
            __kleene_star__(kvpairs_v):
                $key
                $value
        '''
        kvpairs = []
        for x in kvpairs_s:
            kvpairs.append(x["key"])
            kvpairs.append(x["value"])
        for x in kvpairs_v:
            kvpairs.append(x["key"])
            kvpairs.append(x["value"])
        return kvpairs
    @default
    def df():
        return None
def translate_dict_expr(translator, lisn, premise, context):
    """Translate a `$$` dict form into a PyDictExpr conclusion."""
    kvpairs = dict_pat(lisn)
    if kvpairs is None:
        set_error(context, lisn, "Dict", "Bad Form")
        return error_conclusion()
    kvconcls = []
    for decl in kvpairs:
        # dict_pat may yield pre-built conclusions (literal keys from the
        # keyword form) mixed with raw nodes that still need translation.
        if isinstance(decl, Conclusion):
            kvconcls.append(decl)
        else:
            kvconcls.append(translator(decl, Premise(True), context))
    success, preseq_stmts, results, _ = integrate_list(context.comp_env, kvconcls)
    if not success:
        return error_conclusion()
    # results alternates key, value, key, value, ...
    idx = 0
    _dict = {}
    while idx < len(results):
        _dict[results[idx]] = results[idx + 1]
        idx += 2
    return stmt_result_conclusion(preseq_stmts, PyDictExpr(_dict))
def add_python_native(comp_env):
    """Register every Python builtin name as a global-scope variable.

    "print" is registered explicitly afterwards: under Python 2 `print`
    is a statement and does not appear in the builtins namespace.
    """
    glob = {}
    # exec with a fresh globals dict makes the interpreter populate
    # __builtins__ in it, giving us the full builtin namespace.
    exec("", glob)
    for name in glob["__builtins__"].keys():
        comp_env.add_global(name, GlobalScopeVar(name))
    # BUG FIX: the original re-registered every builtin name as
    # GlobalScopeVar("print") inside the loop; register "print" once here.
    comp_env.add_global("print", GlobalScopeVar("print"))
def setup_base_syntax(comp_env):
    """Install the built-in special forms into the global compile scope.

    Each entry maps a surface name (and its `$`-prefixed twin where one
    exists) to a Converter wrapping the corresponding translator.
    """
    add_python_native(comp_env)
    registrations = [
        ("raise", translate_raise, "raise"),
        ("if", translate_branch, "if"),
        ("$if", translate_branch, "if"),
        ("let", translate_let, "let"),
        ("$let", translate_let, "let"),
        ("seq", translate_seq, "seq"),
        ("$seq", translate_seq, "seq"),
        ("for", translate_for, "for"),
        ("$for", translate_for, "for"),
        ("each", translate_each, "each"),
        ("$each", translate_each, "each"),
        ("_", make_list_like_translator(PyTupleExpr, "Tuple"), "_"),
        ("$", make_list_like_translator(PyListExpr, "List"), "$"),
        ("$$", translate_dict_expr, "$$"),
    ]
    for global_name, translate_fn, tag in registrations:
        comp_env.add_global(global_name, Converter(translate_fn, tag))
def setup_html_runtime(comp_env):
    """Register a Converter for every known HTML tag plus "rawstring"."""
    for tag_name in list(HTML_TAGS) + ["rawstring"]:
        comp_env.add_global(tag_name,
                            Converter(translate_html_node, tag_name))
_PYTHON_RESERVED_WORDS = set([
'and',
'del',
'from',
'not',
'while',
'as',
'elif',
'global',
'or',
'with',
'assert',
'else',
'if',
'pass',
'yield',
'break',
'except',
'import',
'print',
'class',
'exec',
'in',
'raise',
'continue',
'finally',
'is',
'return',
'def',
'for',
'lambda',
'try'])
def is_python_reserved_word(name):
    """Return True when *name* collides with a Python 2 reserved word."""
    reserved = _PYTHON_RESERVED_WORDS
    return name in reserved
def main_translate(suite, filename, config=None, extimport=None):
    """Compile a parsed tempy suite into a list of PyStmt nodes.

    The whole template becomes one generated function, __tempy_main__,
    taking (__runtime__, __importer__, __line__) and returning a dict of
    its public (non-underscore-prefixed) local variables.  extimport maps
    source-visible names to ("module", name) or ("name", (module, attr)).

    Raises TempyCompileError when any compile error was recorded.
    """
    config = config or Config()
    extimport = extimport or {}
    comp_env = CompEnv()
    # html tag pool -> "tempy.tag.TagPool"
    extimport[HTML_TAGPOOL_NAME] = ("name", ("tempy.tag", "TagPool"))
    setup_base_syntax(comp_env)
    setup_html_runtime(comp_env)
    result_stmts = []
    comp_env.setup_local_frame("def")
    # The three arguments of the generated __tempy_main__ function.
    runtime_obj_id = comp_env.add_local("__runtime__",
                                        Var(IDHint("__runtime__",
                                                   "argument",
                                                   "local")))
    importer_id = comp_env.add_local("__importer__",
                                     Var(IDHint("__importer__",
                                                "argument",
                                                "local")))
    line_info_id = comp_env.add_local("__line__",
                                      Var(IDHint("__line__",
                                                 "argument",
                                                 "local")))
    context = Context(comp_env,
                      config,
                      RuntimeStore(runtime_obj_id, importer_id, line_info_id),
                      filename)
    def_stmts = []
    success = True
    error_flooded = False
    # Emit one import per extimport entry at the top of the generated body.
    for name_in_src, mod_obj in extimport.items():
        _type, name_obj = mod_obj
        _id = comp_env.add_global(name_in_src, Var(IDHint(name_in_src, "local", "local")))
        if _type == "module":
            def_stmts.append(PyImportStmt(name_obj, PyMetaID(_id)))
        elif _type == "name":
            mod_name, name_str = name_obj
            def_stmts.append(PyImportFromStmt(mod_name, [(name_str, PyMetaID(_id))]))
        else:
            raise ValueError("%s is not appropriate type tag for dynscope value"%_type)
    try:
        main_concl = node_translator(suite, Premise(False), context)
        if main_concl.error_occurred():
            success = False
        else:
            def_stmts += main_concl.preseq_stmts
    except NoMoreErrorAcceptable:
        # Translator gave up after too many errors.
        error_flooded = True
    if context.errors:
        success = False
    if not success:
        raise TempyCompileError(context.errors, error_flooded)
    dict_sym_id, dict_sym_info = comp_env.lookup_global_name("dict")
    assert dict_sym_info.is_global_scope_var()
    # The generated function returns dict(<public local>=..., ...).
    kw_arguments = []
    for local_name in comp_env.local_names():
        local_id, local_info = comp_env.lookup_name(local_name)
        if not local_info.is_var():
            continue
        elif local_name.startswith("_"):
            # Underscore-prefixed names are private to the template.
            continue
        kw_arguments.append((local_name, PyMetaID(local_id)))
    def_stmts.append(PyReturn(PyCall(PyMetaID(dict_sym_id),
                                     None,
                                     kw_arguments)))
    comp_env.contract_local_frame()
    def_mk_tmp = PyDefun("__tempy_main__",
                         [PyMetaID(runtime_obj_id),
                          PyMetaID(importer_id),
                          PyMetaID(line_info_id)],
                         None,
                         def_stmts)
    result_stmts.append(def_mk_tmp)
    hint_dict = comp_env.get_hint_dict()
    def naive_renaming_driver(_id, local_dict):
        # Map a meta id back to a concrete Python identifier, suffixing
        # with the name-source tag and a counter to avoid collisions.
        # Reserved words are prefixed with an underscore.
        id_hint = hint_dict[_id]
        if isinstance(id_hint, IDHint):
            name_source = id_hint.name_source
            if name_source == "argument":
                suffix = "_arg"
            elif name_source == "local":
                suffix = "_"
            elif name_source == "immediate":
                suffix = "_imd"
            elif name_source == "lambda":
                suffix = "_lam"
            elif name_source == "function":
                suffix = "_f"
            else:
                # NOTE: Python 2 print statement -- this module is py2 code.
                print "INVALID NAME SOURCE:", name_source
                NOT_REACHABLE()
            first_name = id_hint.original_name \
                         if not is_python_reserved_word(id_hint.original_name) \
                         else "_" + id_hint.original_name
            if first_name == "":
                first_name = "_"
            trial_count = 0
            while True:
                trial_name = first_name if trial_count == 0 else (first_name + suffix + str(trial_count))
                if trial_name not in local_dict or local_dict[trial_name] == _id:
                    local_dict[trial_name] = _id
                    return trial_name
                trial_count += 1
            NOT_REACHABLE()
        else:
            # Hint is already a concrete name string.
            return id_hint
    local_dict = {}
    for stmt in result_stmts:
        stmt.convert_meta_id(naive_renaming_driver, local_dict)
    return result_stmts
def _raise_formated_syntax_error(err, filename):
    """Re-raise a parser error as TempySyntaxError with location info.

    When *err* carries a location dictionary as its second argument, the
    message is rewritten to include the file name and the line/column
    span; otherwise the original arguments are passed through.
    """
    args = err.args
    if isinstance(args, basestring) or len(args) <= 1:
        raise TempySyntaxError(args)
    locinfo = args[1]
    formatted = "%s in \"%s\" [line %d-%d, col %d-%d]" % (
        args[0],
        filename or "",
        locinfo["sline"],
        locinfo["eline"],
        locinfo["scol"],
        locinfo["ecol"] - 1,
    )
    raise TempySyntaxError((formatted,))
def translate_string(s, config=None, extimport=None, filename="<string>"):
    """Compile tempy source text into a list of PyStmt nodes.

    *filename* is only used when reporting syntax errors.  When given,
    *extimport* is a dict mapping names in the compiled source to either
    ("module", <name>) or ("name", (<module>, <attr>)).
    """
    try:
        parsed = loads(s)
    except LISNSyntaxException as err:
        _raise_formated_syntax_error(err, filename)
    return main_translate(parsed, filename, config, extimport)
def translate_file(filepath, config=None, extimport=None, filename=None):
    """Compile the tempy file at *filepath* into a list of PyStmt nodes.

    *filename* (defaulting to *filepath*) is the name shown in syntax
    error messages.  When given, *extimport* is a dict mapping names in
    the compiled source to ("module", <name>) or
    ("name", (<module>, <attr>)).
    """
    display_name = filename or filepath
    try:
        tree = loads_file(filepath)
    except LISNSyntaxException as err:
        _raise_formated_syntax_error(err, display_name)
    return main_translate(tree, display_name, config, extimport)
def pystmts_to_string(stmts, indent=4):
    """Render a sequence of PyStmt objects as one Python source string."""
    pieces = [stmt.to_string(indent, 0) for stmt in stmts]
    return "".join(pieces)
| [
"a9413miky@gmail.com"
] | a9413miky@gmail.com |
59cb555e05c2c4d20928eb4e3d6788358e3fe20f | edcd74f8f65119bdbe737360c2ca33b4a6da160a | /python/problem-dynamic-programming/number_of_matching_subsequences.py | 51e26fdfd42fb058995690655c5dc5fa2a7682fe | [] | no_license | hyunjun/practice | 72e83de6a1d5e04ddcd16526f16110ea2dd00373 | 5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67 | refs/heads/master | 2023-08-31T07:00:37.320351 | 2023-08-17T07:29:24 | 2023-08-17T07:29:24 | 2,704,126 | 3 | 2 | null | 2022-12-14T20:25:07 | 2011-11-03T18:28:44 | Python | UTF-8 | Python | false | false | 1,133 | py | # https://leetcode.com/problems/number-of-matching-subsequences
from typing import List
class Solution:
    def numMatchingSubseq(self, S: str, words: List[str]) -> int:
        """Return how many entries of *words* are subsequences of *S*.

        Results are memoized per word, so duplicated words are only
        checked once.
        """
        if not S or not words:
            return 0
        memo = {}

        def is_subsequence(candidate):
            # Classic iterator trick: each `ch in it` advances the
            # iterator past the first match, so the whole word matches
            # iff its characters appear in order in S.
            if candidate not in memo:
                it = iter(S)
                memo[candidate] = all(ch in it for ch in candidate)
            return memo[candidate]

        return sum(1 for word in words if is_subsequence(word))
# Exercise the solver on the two sample cases.
solver = Solution()
cases = [
    ('abcde', ['a', 'bb', 'acd', 'ace'], 3),
    ("qlhxagxdqh", ["qlhxagxdq", "qlhxagxdq", "lhyiftwtut", "yfzwraahab"], 2),
]
for S, words, expected in cases:
    real = solver.numMatchingSubseq(S, words)
    print(f'{S} {words} expected {expected} real {real} result {expected == real}')
| [
"hyunjun.chung@agoda.com"
] | hyunjun.chung@agoda.com |
babd2372b495d88d553898f9c7c5190d72252e43 | 7bf26eb14b9f9e0f4fe6c061cbec8d245a25c50d | /9.面对对象高级编程/使用__slots__.py | f87ffc4ab281600cb0ef6f75a0d02a6ef767dd9f | [] | no_license | Cbbsdtc/Liaoxuefeng | d8a10012b0dabffef6953b6965992643e4851fad | 909cfd63d1269052139caa7694cb7d3f7f2e2301 | refs/heads/master | 2021-05-07T21:01:49.027251 | 2017-10-31T11:45:10 | 2017-10-31T11:45:10 | 108,987,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | #限制实例属性,Student实例只能含有name和age的属性
# Restrict instance attributes: Student instances may only have
# 'name' and 'age'.
class Student(object):
    __slots__ = ('name', 'age')
s = Student()
s.name = "Michael"
s.age = 25
# NOTE(review): this line raises AttributeError at runtime ('score' is
# not in __slots__) -- that is the point of the demo, but it also stops
# the script here, so the subclass demo below never executes.
s.score = 99
# Attributes declared in __slots__ only apply to instances of this
# class; they do NOT constrain instances of subclasses.
class GranduateStudent(Student):
    pass
g = GranduateStudent()
g.score = 9999
# Unless the subclass defines __slots__ as well -- in that case the
# attributes allowed on subclass instances are the subclass's own
# __slots__ plus the parent's __slots__.
| [
"30397464+Cbbsdtc@users.noreply.github.com"
] | 30397464+Cbbsdtc@users.noreply.github.com |
6964898e391447b3d1130c2528211f4e8de6b23d | cdbf40c2de7b22a10e07a1f612cb49059a3fc858 | /main/cache.py | 970f75c7d612d8b1dcad2a10c9422a214c3a4833 | [
"MIT"
] | permissive | riccitensor/gae-init | 2e86a77e5a99a8a0ad4889a275974409945a1940 | d9061cf32a26ebc03873431838ead0b1150374b9 | refs/heads/master | 2020-12-31T03:35:49.650513 | 2014-09-15T23:45:57 | 2014-09-15T23:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # coding: utf-8
from google.appengine.api import memcache
###############################################################################
# Helpers
###############################################################################
def bump_counter(key, time=3600, limit=4):
    """Atomically increment a memcache counter via compare-and-set.

    Creates the counter (expiring after *time* seconds) when missing,
    then retries the CAS increment up to *limit* times under contention.
    Gives up silently if every attempt loses the race (best effort).
    """
    client = memcache.Client()
    attempts = 0
    while attempts < limit:
        current = client.gets(key)
        if current is None:
            client.set(key, 0, time=time)
            current = 0
        if client.cas(key, current + 1):
            break
        attempts += 1
| [
"lipiridis@gmail.com"
] | lipiridis@gmail.com |
ea57a4f571f3a4a2ff78c8d35b94f5c4e1b00d3e | 3924891778857654280709f34a84e7a086257d19 | /mysite/mysite/settings.py | c57ab361fe2b59af7c8cfa02ce05f08757870f31 | [] | no_license | Sandra84-code/my-first-blog | 84e5e8fdd822144fc8b511c320822df7ced68e28 | e013f72913dd2ee3efd0e38ddacbb668e65175cc | refs/heads/master | 2020-09-11T15:13:18.610004 | 2019-11-16T14:48:34 | 2019-11-16T14:48:34 | 222,107,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,216 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.25.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and
# load it from the environment before any real deployment.
SECRET_KEY = 'w0g=4&hwt*778g8t(_o!6!$@jtgv^_0@5_!s#+fd4)6_dss-s='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Local development plus any PythonAnywhere subdomain.
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project-local blog application.
    'blog.apps.BlogConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'es-es'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Directory that `collectstatic` gathers assets into for serving.
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"srochasa7@gmail.com"
] | srochasa7@gmail.com |
88f88994f9933a06b60464c68de100d5c21d76b0 | 24cdf62f41fa7cfa3eed963ea1bf7426b4bcbf82 | /blackjack.py | d6b6d5c2cbdc7e46d1c49449f27df14018dc7ad3 | [] | no_license | slih01/Blackjack | ba12d5bccc99722bfe260d045b404c81cfe48fc7 | 18608e86babe654b27833d3971697a0470c3bb1b | refs/heads/master | 2023-04-15T22:30:40.592675 | 2021-04-30T17:16:56 | 2021-04-30T17:16:56 | 363,210,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,658 | py | import math
import random
try:
import tkinter
except ImportError: # python 2
import Tkinter as tkinter
def load_images(card_images):
    """Append (face_value, PhotoImage) pairs for a full deck to *card_images*.

    Number cards 1-10 keep their value; jack/queen/king are stored with
    value 10.  Uses PNG images when Tk supports them (8.6+), PPM
    otherwise.
    """
    suits = ["heart", "spade", "club", "diamond"]
    face_cards = ["jack", "queen", "king"]
    # Tk 8.6 introduced native PNG support; older builds need PPM files.
    if tkinter.TkVersion >= 8.6:
        extension = "png"
    else:
        extension = "ppm"
    for suit in suits:
        for card in range(1, 11):
            # Bug fix: the original used backslash-only paths
            # ("cards\\..."), which fail on non-Windows systems.  Forward
            # slashes work on every platform, including Windows.
            name = "cards/{}_{}.{}".format(str(card), suit, extension)
            image = tkinter.PhotoImage(file=name)
            card_images.append((card, image))
        for face_card in face_cards:
            name = "cards/{}_{}.{}".format(str(face_card), suit, extension)
            image = tkinter.PhotoImage(file=name)
            card_images.append((10, image))
def deal_card(frame):
    """Take the top card of the deck, cycle it to the bottom, show it.

    The card image is packed into *frame*; the (value, image) tuple is
    returned to the caller.  (NOTE: list.pop(0) is O(n); a deque would
    be cheaper, but `deck` is a module-level list shared with other
    code.)
    """
    card = deck.pop(0)
    deck.append(card)
    label = tkinter.Label(frame, image=card[1], relief="raised")
    label.pack(side="left")
    return card
def score_hand(hand):
    """Return the blackjack value of *hand* (a list of (value, image) pairs).

    A single ace may count as 11; it is demoted back to 1 as soon as the
    total would bust.
    """
    total = 0
    soft_ace = False
    for card in hand:
        value = card[0]
        if value == 1 and not soft_ace:
            # Promote the first ace to its soft value.
            soft_ace = True
            value = 11
        total += value
        if total > 21 and soft_ace:
            # Busting with a soft ace: count that ace as 1 instead.
            soft_ace = False
            total -= 10
    return total
def deal_dealer():
    """Draw for the dealer until 17+ is reached, then settle the round."""
    dealer_total = score_hand(dealer_hand)
    # Dealer hits on anything below 17 (0 means an empty hand).
    while 0 < dealer_total < 17:
        dealer_hand.append(deal_card(dealer_card_frame))
        dealer_total = score_hand(dealer_hand)
    dealer_score_label.set(dealer_total)
    player_total = score_hand(player_hand)
    if dealer_total < 17:
        # Dealer never reached a standing hand; no result to announce.
        return
    if player_total > 21 and dealer_total > 21:
        result_text.set("You both bust!")
    elif player_total > 21:
        result_text.set("You've bust")
    elif dealer_total > 21 or player_total > dealer_total:
        result_text.set("Player Wins")
    elif dealer_total > player_total:
        result_text.set("Dealer Wins")
    else:
        result_text.set("It's a tie")
def deal_player():
    """Deal one card to the player, refresh the score, flag a bust."""
    player_hand.append(deal_card(player_card_frame))
    total = score_hand(player_hand)
    player_score_label.set(total)
    if total > 21:
        result_text.set("You've bust!")
def reset_cards():
    """Start a new round: rebuild both card frames and deal fresh hands."""
    global dealer_card_frame
    global player_card_frame
    global dealer_hand
    global player_hand
    # Destroying the frames removes all packed card Labels at once;
    # fresh frames are re-gridded in the same cells.
    dealer_card_frame.destroy()
    dealer_card_frame = tkinter.Frame(card_frame, background="green")
    dealer_card_frame.grid(row=0, column=1, sticky="ew", rowspan=2)
    player_card_frame.destroy()
    player_card_frame = tkinter.Frame(card_frame, background="green")
    player_card_frame.grid(row=2, column=1, sticky="ew", rowspan=2)
    result_text.set("")
    dealer_hand = []
    player_hand = []
    # Opening deal: two cards to the player, one (face up) to the dealer.
    deal_player()
    dealer_hand.append(deal_card(dealer_card_frame))
    dealer_score_label.set(score_hand(dealer_hand))
    deal_player()
# --- Main window -----------------------------------------------------
mainWindow = tkinter.Tk()
mainWindow.title("Black Jack")
mainWindow.geometry("640x480")
mainWindow.config(background="green")
# Round-result banner across the top of the window.
result_text = tkinter.StringVar()
result = tkinter.Label(mainWindow, textvariable=result_text)
result.grid(row=0, column=0, columnspan=3)
# --- Card table ------------------------------------------------------
card_frame = tkinter.Frame(mainWindow, relief="sunken", borderwidth=1, background="green")
card_frame.grid(row=1, column=0, sticky="ew", columnspan=3, rowspan=2)
dealer_score_label = tkinter.IntVar()
tkinter.Label(card_frame, text="Dealer", fg="white", background="green").grid(row=0, column=0)
tkinter.Label(card_frame, textvariable=dealer_score_label, fg="white", background="green").grid(row=1, column=0)
# embedded frame to hold the card images
dealer_card_frame = tkinter.Frame(card_frame, background="green")
dealer_card_frame.grid(row=0, column=1, sticky="ew", rowspan=2)
player_score_label = tkinter.IntVar()
tkinter.Label(card_frame, text="Player", fg="white", background="green").grid(row=2, column=0)
tkinter.Label(card_frame, textvariable=player_score_label, fg="white", background="green").grid(row=3, column=0)
player_card_frame = tkinter.Frame(card_frame, background="green")
player_card_frame.grid(row=2, column=1, sticky="ew", rowspan=2)
# --- Action buttons --------------------------------------------------
button_frame = tkinter.Frame(mainWindow)
button_frame.grid(row=3, column=0, columnspan=3, sticky="w")
dealer_button = tkinter.Button(button_frame, text="Dealer", relief="raised", borderwidth=1, command=deal_dealer)
dealer_button.grid(row=0, column=0, sticky="ew")
player_button = tkinter.Button(button_frame, text="Player", relief="raised", borderwidth=1, command=deal_player)
player_button.grid(row=0, column=1, sticky="ew")
reset_button = tkinter.Button(mainWindow, text="New Game", command=reset_cards)
reset_button.grid(row=4, column=0)
# load cards
cards = []
load_images(cards)
# create a new deck and shuffle
# could say deck = cards but then cards would be removed from cards. By creating a new list cards remains untouched
# Three copies of the 52-card image list form the shoe.
deck = list(cards)*3
random.shuffle(deck)
# Initial deal, then hand control to the Tk event loop.
dealer_hand = []
player_hand = []
deal_player()
dealer_hand.append(deal_card(dealer_card_frame))
dealer_score_label.set(score_hand(dealer_hand))
deal_player()
mainWindow.mainloop()
| [
"darrenhazan@gmail.com"
] | darrenhazan@gmail.com |
fb35888f8746ccdfb9e117c3112b53d1ccfeff25 | 069d8e24c16f06f703ec76c92b7c99b4407e9502 | /django_project/superStore_website/apps.py | 38e3b1852a495e18850fbb12754eb10ebbb65fc6 | [] | no_license | ogarza98/superStore_CS3773 | 1defab0c29f706f0c5fd7db7b08b61bcc315f13f | c075b3cda883e1ead37bc88b2e5ef131091b54d6 | refs/heads/master | 2020-09-10T01:30:28.984713 | 2019-12-10T05:06:17 | 2019-12-10T05:06:17 | 221,615,203 | 0 | 0 | null | 2019-11-25T01:28:18 | 2019-11-14T04:58:39 | Python | UTF-8 | Python | false | false | 110 | py | from django.apps import AppConfig
class SuperstoreWebsiteConfig(AppConfig):
    """Django application configuration for the superStore_website app."""
    name = 'superStore_website'
| [
"ogarza98@live.com"
] | ogarza98@live.com |
59180aba8f1122c30d204d486e575fd87466e383 | bc601be177de0550bb45a92fd636d1d19246e444 | /python/model_metrics.py | 233188dded771234aa0fe3b91cde18379382a4b8 | [] | no_license | SivoDaskalov/network-regression | 9a8aef5e29d88945490b4bdc41b0aa583a15065c | 5714629fd07c065d277ce96c18d2d31fdc52b83b | refs/heads/master | 2021-08-28T06:14:06.282206 | 2017-12-11T09:58:24 | 2017-12-11T09:58:24 | 79,929,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,924 | py | from __future__ import division
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
from commons import load, dump
from model_fitting import full_method_list as method_order
def evaluate_model(setup, model):
    """Score one fitted model on *setup*'s held-out test split.

    Returns ``[mse, n_predictors, correlation, sensitivity, specificity,
    precision]``.  The last four are ``None`` when the setup has no known
    true coefficients (real data); all six are ``None`` when the fitted
    coefficients contain NaNs.
    """
    y_true = setup.y_test
    y_pred = model.predict(setup.x_test)
    estimated_coef = model.coef_
    if np.isnan(estimated_coef).any():
        # Fit failed (NaN coefficients): nothing meaningful to report.
        return [None, None, None, None, None, None]
    mse = mean_squared_error(y_true=y_true, y_pred=y_pred)
    # Number of predictors the model actually selected.
    n_predictors = np.count_nonzero(estimated_coef)
    if setup.true_coefficients is None:
        correlation = sensitivity = specificity = precision = None
    else:
        true_coef = setup.true_coefficients
        correlation = np.corrcoef(true_coef, estimated_coef)[0, 1]
        # NOTE(review): the selection metrics below assume the truly
        # non-zero coefficients occupy the FIRST `true_predictors`
        # positions of the vector -- confirm against the simulation code
        # that generates `true_coefficients`.
        true_predictors = np.count_nonzero(true_coef)
        false_positive_coef = np.count_nonzero(estimated_coef[true_predictors:])
        sensitivity = np.count_nonzero(estimated_coef[:true_predictors]) / true_predictors
        specificity = (len(true_coef) - true_predictors - false_positive_coef) / (len(true_coef) - true_predictors)
        if np.count_nonzero(estimated_coef) == 0:
            # No predictors selected at all: define precision as 0 to
            # avoid dividing by zero.
            precision = 0.0
        else:
            precision = np.count_nonzero(estimated_coef[:true_predictors]) / np.count_nonzero(estimated_coef)
    return [mse, n_predictors, correlation, sensitivity, specificity, precision]
def batch_evaluate_models(fits, filename=None):
    """Evaluate every (setup, models) pair in *fits* and dump a CSV.

    Returns a DataFrame with one row per fitted model, sorted by model
    name then setup label, and writes it to ``<filename>.csv`` (default
    ``results/p<n_features>``).
    """
    result_fields = ["setup", "model", "mse", "predictors", "correlation", "sens", "spec", "prec", "params"]
    results = []
    for (setup, models) in fits:
        for model_name, model in models.items():
            mse, n_predictors, corr, sens, spec, prec = evaluate_model(setup, model)
            # `.iteritems()` -- Python 2 / old pandas API.
            params = ', '.join(['{}={}'.format(k, v) for k, v in model.params_.iteritems()])
            new_row = [setup.label, model_name, mse, n_predictors, corr, sens, spec, prec, params]
            # NOTE(review): np.append reallocates the whole flat array on
            # every iteration (quadratic) and coerces every value to one
            # common dtype; summarize_results() compensates with a
            # pd.to_numeric pass later.
            results = np.append(results, new_row)
    # Reshape the flat accumulator into (n_rows, n_fields).
    results.shape = (int(results.shape[0] / len(result_fields)), len(result_fields))
    results = pd.DataFrame(data=results, columns=result_fields)
    results = results.sort_values(['model', 'setup'])
    if filename is None:
        # Default file name encodes the feature count of the test data.
        filename = "results/p%d" % fits[0][0].x_test.shape[1]
    results.to_csv("%s.csv" % filename, sep=',')
    return results
def load_results_from_csv(p):
    """Load the saved results table for a run with *p* predictors."""
    path = "results/p%d.csv" % p
    return pd.read_csv(path, sep=',', index_col=0)
def summarize_results(results, filename):
    """Aggregate per-model mean/std of the metric columns and dump a CSV.

    Rows follow the canonical ``method_order``; methods that never
    appear in *results* are dropped.  Returns the summary DataFrame.
    """
    subframe = pd.DataFrame(data=results, columns=['model', 'mse', "correlation", "sens", "spec", "prec"])
    # Values may arrive as strings (see batch_evaluate_models), hence the
    # to_numeric pass before grouping.
    summary = subframe.apply(pd.to_numeric, errors='ignore').groupby('model').aggregate([np.mean, np.std])
    summary = summary.reindex(method_order).dropna(axis=0, how='all')
    # Flatten the (metric, statistic) MultiIndex into "metric stat" names.
    summary.columns = [' '.join(col).strip() for col in summary.columns.values]
    summary.to_csv("%s.csv" % filename, sep=',')
    return summary
| [
"sivodaskalov@gmail.com"
] | sivodaskalov@gmail.com |
443393462df95206d1a8553c7a58452ab0e6ddf6 | 9279ee976a362470f11af45b3daea80707a9c9b0 | /queue_consumer.py | b466bd2264fde67eedeeda72d0f27b335790939d | [] | no_license | TomCzHen/jiandaoyun_push_tool | 78f0a6dde8d62df25fcb6b93a4ad7a5f36f34616 | ee47aac6322b872205d63bb47623c80b98bf65f0 | refs/heads/master | 2022-12-13T21:15:43.981099 | 2020-05-13T17:12:04 | 2020-05-13T17:12:04 | 162,792,140 | 5 | 3 | null | 2022-12-08T09:31:16 | 2018-12-22T07:45:25 | Python | UTF-8 | Python | false | false | 2,976 | py | from api import JianDaoYun, APIException, NetworkError, HTTPError
from time import sleep
from log import logger
from database_queue import Queue, QueueMessage, QueueException
from handlers import QueueMessageHandler
from cache import queue_cache as cache
from handlers.exceptions import InvalidPayload, SafeDataLimitException
from notifications.channels import Notification, Channel
class Consumer:
    """Pulls messages from a database queue and pushes them to JianDaoYun.

    A message is cached under the queue name until it is pushed
    successfully (or rejected as permanently invalid), so a crash or a
    transient API failure retries the same message instead of losing it.
    """
    def __init__(self, queue: Queue, api: JianDaoYun, channel: Channel):
        self._queue = queue
        self._api = api
        self._channel = channel
        self._handler = QueueMessageHandler(self._api)
        self.__cache = cache
    def _get_message(self) -> QueueMessage:
        """Return the cached in-flight message, or dequeue (and cache) a new one."""
        message = self.__cache.get(f'{self._queue.name}')
        if not message:
            message = self._queue.dequeue_message()
            self.__cache.set(f'{self._queue.name}', message)
        return message
    def start(self):
        """Run the consume/handle loop forever."""
        logger.info('Starting Queue Consumer...')
        while True:
            message = None
            try:
                message = self._get_message()
            except QueueException as e:
                # Queue backend error: back off before retrying.
                logger.warning('获取队列消息发送错误,10 秒后重试。')
                sleep(10)
            if message:
                try:
                    logger.debug(f'Message Payload : {message}')
                    self._handler.handle(message)
                except Exception as error:
                    logger.info(f'推送失败:{message}')
                    self._handle_exception(error, message)
                else:
                    # Success: drop the cached message so the next loop
                    # iteration dequeues a fresh one.
                    self.__cache.delete(f'{self._queue.name}')
                    logger.info(f'推送成功:{message}')
            else:
                # Queue empty: poll again shortly.
                sleep(3)
    def _handle_exception(self, error, message=None):
        """Classify a push failure, log it, and notify the channel.

        Re-raising *error* inside try lets the except clauses act as a
        dispatch table.  Permanently-bad messages (invalid payload, safe
        data limit) are dropped from the cache; transient network/API
        errors keep the message cached for retry.  Unknown exceptions
        propagate.
        """
        err_msg = None
        try:
            raise error
        except InvalidPayload:
            err_msg = '无效的消息 Payload。'
            logger.warning(err_msg)
            self.__cache.delete(f'{self._queue.name}')
        except SafeDataLimitException as e:
            err_msg = '匹配表单数据超出安全限制。'
            logger.warning(err_msg)
            self.__cache.delete(f'{self._queue.name}')
        except NetworkError as e:
            err_msg = '请求 API 发生连接错误,请检查日志。'
            logger.warning(err_msg)
        except APIException as e:
            err_msg = '请求 API 返回错误信息,请检查日志。'
            logger.warning(err_msg)
            sleep(1)
        except HTTPError as e:
            err_msg = '请求 API 返回 HTTP 错误,请检查日志。'
            logger.warning(err_msg)
            logger.error(e, exc_info=True)
            sleep(1)
        except Exception as e:
            raise e
        finally:
            if err_msg:
                # NOTE(review): assumes *message* is not None whenever a
                # known error was classified -- true for the current
                # caller in start(), which always passes the message.
                notice = Notification(message=f'{err_msg}', payload=message.payload)
                self._channel.send(notice)
| [
"tom.czhen@gmail.com"
] | tom.czhen@gmail.com |
36a167bf62afc3441dd8e8f603ff6f6f844c695b | 1dc1a700950cfc16eafc7ca47fa8a06fb550b8c8 | /solutions/problem56.py | b8b05a07422db388e6da43fdefee3421db84d871 | [] | no_license | MarkDunne/project-euler | 27df69c022886718e41a19c35a8687d3d375dc82 | 24bb0fa6a7f32d38f506f1636ded9330a0b73422 | refs/heads/master | 2016-09-11T13:15:47.504380 | 2012-10-18T21:11:19 | 2012-10-18T21:11:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | def digitalSum(n):
def digitalSum(n):
    """Return the sum of the decimal digits of *n*."""
    return sum(int(digit) for digit in str(n))


# Project Euler 56: maximum digital sum of a**b for a, b < 100.
# Scanning 90..99 for both base and exponent is a heuristic shortcut:
# large digit sums only arise from large bases and exponents.
m = 0
for a in range(90, 100):
    for b in range(90, 100):
        s = digitalSum(pow(a, b))
        if s > m:
            m = s
# Bug fix: `print m` is Python-2-only syntax; print(m) behaves the same
# on Python 2 and also runs on Python 3.
print(m)
"mark.dunne02@gmail.com"
] | mark.dunne02@gmail.com |
5cb21fd52c38e315be7d0ac581ad00df74e77795 | 748f5e69b7cb9049c96447140dfd4f7ecc5cab28 | /Others/Others_UtilScript_Publish_FirstStep.py | 624c74765bf3a272037ef1446cf1bf2de7fc252b | [] | no_license | xtvjxk123456/scriptLib | 0587f05ae1d3fd3e1d79cacf9fd357e292c5428c | 0be8e9570838a01e192e7cdc62778f919c0a09c1 | refs/heads/master | 2020-03-21T13:43:34.046483 | 2017-06-13T06:44:15 | 2017-06-13T06:44:15 | 138,622,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # coding:utf-8
import maya.cmds as mc
def run():
    """Publish step 1: export anim ABC, playblast, then publish the camera.

    Saves and restores the Maya current-time so the tools below don't
    leave the timeline moved.
    """
    oldcurrent = mc.currentTime(q=True)
    # Export the animation Alembic without showing a UI.
    import Publish_Anim_Export_AnimABC_noUI
    Publish_Anim_Export_AnimABC_noUI.run()
    # Create the review playblast.
    import pixoMaya.shelf.playblast as pb
    pb.run()
    # ------------------------------------------
    # Publish the shot camera.
    import Publish_Cam_CamPublish
    Publish_Cam_CamPublish.run()
    # ------------------------------------------
    mc.currentTime(oldcurrent)
| [
"zhangxuqiang@antsanimation.com"
] | zhangxuqiang@antsanimation.com |
b2cf943b1a1b3a2203b6ff5a2fb39b7d71140a03 | 1d4b57049a266854d9ea289651918e1b3285c561 | /CreditCardFraudDetection/creditcarddetection.py | 23a5e71f914141e22282759b832987011b2724ed | [] | no_license | mahmoudkhodor/credit-card-fraud-detection | 832af7fafc0cecda05563175391ce3c2c9d841fc | f0ac96085856703ad6f3c938a4f154ca8979d3ce | refs/heads/master | 2020-08-29T17:23:19.816259 | 2019-11-13T18:40:15 | 2019-11-13T18:40:15 | 218,109,851 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
from sklearn.metrics import classification_report, accuracy_score
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
# Load the data from the csv file
data = pd.read_csv('Data/creditcard.csv')

# Explore the dataset.  Columns V1, V2, ... are the result of a PCA
# dimensionality reduction applied to protect sensitive information.
print(data.columns)

# Number of transactions and number of columns.
print(data.shape)

# Summary statistics (mean, min, max, ...) for each column.
# Bug fix: `describe` is a method -- the original printed the bound
# method object instead of the statistics.
print(data.describe())

# To save time and computation, work on a 10% sample of the dataset.
data = data.sample(frac=0.1, random_state=1)
print(data.shape)

# Plot histograms of each parameter
data.hist(figsize=(20, 20))
plt.show()

# Determine the number of fraud cases in the dataset
Fraud = data[data['Class'] == 1]
Valid = data[data['Class'] == 0]
outlier_fraction = len(Fraud)/float(len(Valid))

# Correlation matrix: shows whether variables are strongly correlated,
# i.e. whether any could be removed.
corrmat = data.corr()
fig = plt.figure(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True)
plt.show()

# Get all the columns from the dataframe, dropping the label column.
columns = data.columns.tolist()
columns = [c for c in columns if c not in ["Class"]]

# Store the variable we'll be predicting on.
target = "Class"
X = data[columns]
Y = data[target]
print(X.shape)
print(Y.shape)

# Outlier-detection models:
# - Local Outlier Factor: unsupervised; scores each sample by its local
#   density deviation relative to its neighbours.
# - Isolation Forest: isolates observations by randomly selecting a
#   feature and a split value between that feature's min and max.
state = 1
classifiers = {
    "Isolation Forest": IsolationForest(max_samples=len(X),
                                        contamination=outlier_fraction,
                                        random_state=state),
    "Local Outlier Factor": LocalOutlierFactor(n_neighbors=20,
                                               contamination=outlier_fraction)
}

# Fit each model and evaluate it against the labels.
n_outliers = len(Fraud)
for clf_name, clf in classifiers.items():
    # Fit the data and tag the outliers.
    if clf_name == "Local Outlier Factor":
        y_pred = clf.fit_predict(X)
        scores_pred = clf.negative_outlier_factor_
    else:
        clf.fit(X)
        # Bug fix: the original called the misspelled
        # `decision_funciton`, which raises AttributeError at runtime.
        scores_pred = clf.decision_function(X)
        y_pred = clf.predict(X)
    # Reshape the prediction values to 0 for valid, 1 for fraud.
    y_pred[y_pred == 1] = 0
    y_pred[y_pred == -1] = 1
    n_errors = (y_pred != Y).sum()
    # Run classification metrics
    print(f'{clf_name}:{n_errors}')
    print(accuracy_score(Y, y_pred))
    print(classification_report(Y, y_pred))
| [
"mahmoud.khodor@hotmail.com"
] | mahmoud.khodor@hotmail.com |
6eb7a56bf2421d220640bf15f7be20b3ffb9569f | c294437db3b8163244151f7c0283647f743c7ce6 | /ex40.py | 60f0037d45c36d95c105f237079b9185d140b3e6 | [] | no_license | AVillalobos1130/helloworld | 3c511503b582c6f9532432f270f6e92817c67706 | 3a0427f58982d1d4bd1dc16a0740d3d02b6dbf12 | refs/heads/master | 2020-03-30T09:56:09.389950 | 2019-04-08T12:59:12 | 2019-04-08T12:59:12 | 151,098,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | #ex40
class Song(object):
    """A song held as a list of lyric lines."""

    def __init__(self, lyrics):
        # Keep the lines around for later singing.
        self.lyrics = lyrics

    def sing_me_a_song(self):
        """Print every lyric line on its own line."""
        for lyric in self.lyrics:
            print(lyric)
# Build two example songs and print their lyrics.
happy_bday = Song(["Happy birthday to you",
                   "I don't want to get sued",
                   "So I'll stop right there"])
bulls_on_parade = Song(["They rally around tha family",
                        "With pockets full of shells"])
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
"noreply@github.com"
] | AVillalobos1130.noreply@github.com |
13380fc12fde90af704561196abaaaeb2da887fc | c57f50f01fb03eae8356efe5c9f47dc443c8fdd9 | /app/db/select_payment_participants.py | f4314aed6c3efdcf2bf7a7c26455bf7f61d1d9d7 | [
"MIT",
"Apache-2.0"
] | permissive | codelableidenvelux/agestudy | ba57d8a20f7013a2c16adbbe7c8903d2e752306c | 34fecff25683a98cc62b2075f344721dbfe78cd3 | refs/heads/master | 2023-05-13T13:32:55.911228 | 2023-03-26T15:35:01 | 2023-03-26T15:35:01 | 228,604,020 | 1 | 1 | MIT | 2023-05-01T21:19:34 | 2019-12-17T11:42:18 | HTML | UTF-8 | Python | false | false | 2,228 | py | from db.postgresql import Db
db = Db("")
import csv
import numpy as np
from datetime import datetime,timedelta
from application import check_can_collect_payment
# Get participants that participate for payment and are not withdrawn
select = 'SELECT user_id FROM session_info WHERE consent IS NULL AND user_type = 1'
participating_participants = db.execute(select, ('',),1);
participating_participants = set(np.array(participating_participants).flatten())
# get participants that have performed atleast one task
select2 = 'SELECT user_id FROM TASK_COMPLETED WHERE user_id IS NOT NULL'
participants_task_executed = db.execute(select2, ('',),1);
participants_task_executed = set(np.array(participants_task_executed).flatten())
# get date 6 months ago
date_6_months_ago = datetime.now() - timedelta(weeks=26)
# NOTE(review): the two bare len() calls below are no-ops -- presumably
# leftovers from interactive exploration.
len(participating_participants)
len(participants_task_executed)
# Paying participants who have done at least one task.
intersect = participants_task_executed.intersection(participating_participants)
inter = tuple(map(int, intersect))
# Split into active (task within ~6 months) and inactive, keeping only
# those who can still collect payment.
participants_task_executed_active = []
participants_task_executed_inactive = []
for p in inter:
    # Latest task timestamp for this participant.
    select_time = f"SELECT time_exec FROM TASK_COMPLETED WHERE time_exec= (SELECT MAX(time_exec) FROM TASK_COMPLETED WHERE USER_ID = (%s));"
    last_task = db.execute(select_time,(int(p),), 1)
    can_collect = check_can_collect_payment(int(p))[0]
    # you did a task in the last six months
    if last_task[0][0] > date_6_months_ago and can_collect:
        participants_task_executed_active.append(int(p))
    elif can_collect:
        participants_task_executed_inactive.append(int(p))
# select emails and save in csv
# NOTE(review): '<IDs>' below is a literal placeholder -- the intended
# workflow is to paste the collected IDs in manually before running
# (see the "copy ID from ..." comments).
# copy ID from participants_task_executed_active
select_active = 'SELECT email FROM session_info WHERE user_id IN (<IDs>)'
email_adresses_a = db.execute(select_active, ("",),1)
with open("active.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(email_adresses_a)
# copy ID from participants_task_executed_inactive
select_inactive = 'SELECT email FROM session_info WHERE user_id IN (<IDs>)'
email_adresses_i = db.execute(select_inactive, ("",),1)
with open("inactive.csv", "w", newline="") as f:
    writer = csv.writer(f)
| [
"ruchella_kock@hotmail.com"
] | ruchella_kock@hotmail.com |
32b125c421cc16caea121a8692ce403e096ba7ff | 3f4b535e537666b669b4cfbfca05c7529d2fb631 | /Algorithms/generate_palindromes_recursion.py | 4b79f741f374bae1608a3aa0949e2745f2171c29 | [] | no_license | iliachigogidze/Python | ded0a78a1751a536fcdf1fd864fc296ef52f6164 | 6db759b3ee4f4b866421b2cb3a775b7aec32b0c9 | refs/heads/master | 2020-04-09T08:15:07.107069 | 2019-03-11T10:35:23 | 2019-03-11T10:35:23 | 160,187,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | '''
18. Given a list of distinct letters and a positive number N, generate all possible palindromes
having the length not greater than N. Single word can be used multiple times per palindrome. Example: ['a', 'b'], N=3 a aa aaa b bb bbb aba bab
'''
def main(ls, n):
    """Return all palindromes over the letters *ls* with length 1..n.

    Each letter may be used any number of times, matching the example in
    the module docstring: main(['a', 'b'], 3) contains
    a, aa, aaa, b, bb, bbb, aba, bab.
    """
    print('List: ', ls)
    result = []
    for length in range(1, n + 1):
        result.extend(recursion('a' * length, ls))
    return result


def recursion(word, ls):
    """Return every palindrome over *ls* whose length equals len(word).

    A palindrome of length k is any letter c wrapped around a palindrome
    of length k-2, so we recurse on the word stripped of its two ends.

    Bug fix: the original compared letters with `is` (string identity,
    which only "worked" through CPython interning) and nested the
    wrap-append under that check, so mixed-letter palindromes such as
    'aba' were never produced and even lengths came back empty.
    """
    if len(word) == 0:
        return ['']
    if len(word) == 1:
        return [c for c in ls]
    inner_palindromes = recursion(word[1:-1], ls)
    return [c + inner + c for inner in inner_palindromes for c in ls]


print(main(['a', 'b'], 4))
"iliachigogidze@gmail.com"
] | iliachigogidze@gmail.com |
6ad5bd1c9e925b4867fdf55aa7e9a60dc3e8c366 | e75a1edbd1544dad8584dbc18d23d34ae8177a92 | /src/3_examples/Structural/facade.py | 1274460c81c2fb5875ead403917be47ab0c093be | [
"MIT"
] | permissive | andres925922/Python-Design-Patterns | 66b84347d61c11107211fcb32aae4046f1c7b1bd | 2a30e24e062bc623e390f8ced9c228c3ff038e54 | refs/heads/master | 2023-07-05T12:26:42.928351 | 2021-04-14T20:22:30 | 2021-04-14T20:22:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | # ------------------------------------------------------------------------------
# Facade Implementation.
#--------------------------------------------------------------------------------
# Provide a unified interface to a set of interfaces in a subsystem.
# Façade defines a higher-level interface that makes the subsystem easier to use.
# -------------------------------------------------------------------------------
class ResourceManager:
    """Facade: one entry point that drives every subsystem update."""
    def __init__(self):
        print("Update all resources.")
    def update_resources(self):
        """Update each subsystem in a fixed order through its own API."""
        self.reportDb = ReportDb()
        self.reportDb.update()
        self.intranetDb = InntranetDb()
        self.intranetDb.save()
        self.site = Site()
        self.site.update()
        self.Api = Api()
        self.Api.post()
class ReportDb:
    """Subsystem component: the reporting database."""

    def __init__(self):
        print("ReportDb called.")

    def update(self):
        print("ReportDb updated.")
class InntranetDb:
    """Subsystem component: the intranet database."""

    def __init__(self):
        print("InntranetDb called.")

    def save(self):
        print("InntranetDb updated.")
class Site:
    """Subsystem component: the public web site."""

    def __init__(self):
        print("SiteDb called.")

    def update(self):
        print("Site updated.")
class Api:
    """Subsystem component: the external API."""

    def __init__(self):
        print("Api called.")

    def post(self):
        print("Api post called.")
# ------------
# CLient code
# ------------
resource_manager = ResourceManager()
resource_manager.update_resources()
| [
"tomas.ftn.e2@gmail.com"
] | tomas.ftn.e2@gmail.com |
9baf2660cc9fa21d1bb8c3a35013e783982f39b3 | de78652f2d804755b19a8bd9402e4a649d5ff1c1 | /delira/models/segmentation/__init__.py | 1b951f9e63b3e5cdd9e6ad222050d82d02d7299e | [
"BSD-2-Clause"
] | permissive | NKPmedia/delira | a3e63adbfb82e7d4c80571e66f33c5afea43ab9b | a10227e30c14c6507a1790813e53572e0d841c21 | refs/heads/master | 2020-06-05T06:41:57.120344 | 2019-06-17T13:16:32 | 2019-06-17T13:16:32 | 192,347,832 | 0 | 0 | BSD-2-Clause | 2019-06-17T13:16:33 | 2019-06-17T13:00:27 | Python | UTF-8 | Python | false | false | 114 | py | from delira import get_backends
if "TORCH" in get_backends():
from .unet import UNet2dPyTorch, UNet3dPyTorch
| [
"justus.schock@rwth-aachen.de"
] | justus.schock@rwth-aachen.de |
85c201f5f5de17ab3e1365558c6188fbb929728d | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/841/841.keys-and-rooms.py | 0656b2ebe9f552d97e1b6e8129c65305972f48bf | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 1,525 | py | #
# @lc app=leetcode id=841 lang=python3
#
# [841] Keys and Rooms
#
# https://leetcode.com/problems/keys-and-rooms/description/
#
# algorithms
# Medium (58.74%)
# Total Accepted: 24.4K
# Total Submissions: 41.6K
# Testcase Example: '[[1],[2],[3],[]]'
#
# There are N rooms and you start in room 0. Each room has a distinct number
# in 0, 1, 2, ..., N-1, and each room may have some keys to access the next
# room.
#
# Formally, each room i has a list of keys rooms[i], and each key rooms[i][j]
# is an integer in [0, 1, ..., N-1] where N = rooms.length. A key rooms[i][j]
# = v opens the room with number v.
#
# Initially, all the rooms start locked (except for room 0).
#
# You can walk back and forth between rooms freely.
#
# Return true if and only if you can enter every room.
#
#
#
#
# Example 1:
#
#
# Input: [[1],[2],[3],[]]
# Output: true
# Explanation:
# We start in room 0, and pick up key 1.
# We then go to room 1, and pick up key 2.
# We then go to room 2, and pick up key 3.
# We then go to room 3. Since we were able to go to every room, we return
# true.
#
#
# Example 2:
#
#
# Input: [[1,3],[3,0,1],[2],[0]]
# Output: false
# Explanation: We can't enter the room with number 2.
#
#
# Note:
#
#
# 1 <= rooms.length <= 1000
# 0 <= rooms[i].length <= 1000
# The number of keys in all rooms combined is at most 3000.
#
#
#
class Solution:
def canVisitAllRooms(self, rooms):
"""
:type rooms: List[List[int]]
:rtype: bool
"""
| [
"frankie.y.liu@gmail.com"
] | frankie.y.liu@gmail.com |
10ee337d3ea9fb8d713f7d5c3029ba3abc6161dd | 52e8c8adafba3f6f01a84a116de2804d22e2dbfb | /moving_objects.py | 9fbceb963a541e92ee041fc36c47e7a06593a4eb | [] | no_license | sajalasati/Mario-Game | d5bea29a62d31bc5ee5fc50db3ccf436658d6fbc | 89f24c03135cf6cbe457e887fd9107a7666f1e75 | refs/heads/master | 2020-04-16T04:55:49.609943 | 2019-01-11T18:40:30 | 2019-01-11T18:40:30 | 165,286,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py | '''module containing all the moving objects'''
import numpy as np
class Bridge:
'''bridge object in the game'''
def __init__(self, pos):
self.posx = pos[1]
self.posy = pos[0]
self.maxup = 17
self.maxdown = 27
self.length = 14
self._fig = np.empty(14, dtype='str')
self._fig[:] = '@'
self.go_up = True
def move_up(self, game_board):
'''to move the bridge upwards'''
if self.go_up is True:
if self.posy != self.maxup:
game_board.set_range_board(
self.posy, self.posy+1, self.posx, self.posx+self.length, " ")
self.posy -= 1
game_board.set_range_board(
self.posy, self.posy+1, self.posx, self.posx+self.length, "@")
else:
self.go_up = False
self.move_down(game_board)
else:
return
def move_down(self, game_board):
'''to move the bridge downwards'''
if self.go_up is False:
if self.posy != self.maxdown:
game_board.set_range_board(
self.posy, self.posy+1, self.posx, self.posx+self.length, " ")
self.posy += 1
game_board.set_range_board(
self.posy, self.posy+1, self.posx, self.posx+self.length, "@")
else:
self.go_up = True
self.move_up(game_board)
else:
return
def move(self, game_board):
'''board oscillates between initial posy and maxup'''
if self.go_up is True:
self.move_up(game_board)
else:
self.move_down(game_board)
class Bullets:
'''anyone among mario,boss enemy can create bullets from here'''
def __init__(self, shape, posx, posy, vel):
self.posx = posx
self.posy = posy
self.velocity = vel
self.range = [self.posx - 30, self.posx+30]
# bullets deactivate after 30 steps
self.life = 1
self.shape = shape # an ascii or unicode character
def move(self):
self.posx += self.velocity
| [
"sajalasati1998@gmail.com"
] | sajalasati1998@gmail.com |
0ba08eb41cfb2e76bd9b6aa1e05cd05ecd2ebf2b | 9047aec2400933376e71fdc24d087d2ad35b4d45 | /flipAndInvertImage_832.py | 0145883c6e9c652019e80f3433fce0bc3df0edd1 | [] | no_license | sasankyadavalli/leetcode | a8c3a4b63970cfa67a8bbec5d1fb7cca818f7ea9 | 555931bc5a74e0031726070be90c945da9cb3251 | refs/heads/master | 2021-02-07T12:12:06.938562 | 2020-07-28T18:25:10 | 2020-07-28T18:25:10 | 244,024,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | class Solution:
def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:
for i in range(len(A)):
A[i] = A[i][::-1]
for j in range(len(A[i])):
A[i][j] ^= 1
return A | [
"yadavallisasank@gmail.com"
] | yadavallisasank@gmail.com |
e42cdade24c0c2eda91ac53ae694452712ab52a0 | f5e3f3ff8cc870c001b89ec1190739c2cf5f4add | /mayuri_knn_kmeans.py | 283cd20366e388ed673e4587eee710d8761d5e2e | [] | no_license | Mayuri-Wad-012447851/Job-Search-Agent-using-Knn-classifier | b7af41c7d3caa3d6473f57a631da35f52a389ac8 | 03deabee8266d6e80ee6dd7114fa6ed851d302ed | refs/heads/master | 2021-07-23T21:31:49.771286 | 2017-11-03T03:28:23 | 2017-11-03T03:28:23 | 109,347,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,412 | py | from Environment import *
from Agent import *
import datetime
def isvalidKeyword(keyword):
for word in keyword.split():
if not word.isalpha():
return False
return True
def invalidKvalue(Kstr):
if Kstr.isdigit():
return False
else:
return True
def invalidNoOfClusters(num):
if num.isdigit():
return False
else:
return True
def main():
#creating objects for Environment, Agent and WebScrapper class
env = Environment()
agent = Agent()
webscrapper = WebScrapper()
#loop for agent to run persistently until quit signal is received
while(True):
#storing timestamps for lookup table prebuild
#If current date does not match with Lookup table build date, lookuptable will get rebuild
#In this case lookup table will be built once everyday
date = datetime.date.today()
if date != env.lookupTableBuildDate:
print "\nBuilding Lookup Table...This may take a few minutes to complete"
env.lookupTable.clear()
#env.prebuildLookupTable(agent,webscrapper)
env.lookupTableBuildDate = date
print "\nPlease enter 1 to search and 2 to cluster:\t"
print "\n1. Search for jobs:\t"
print "\n2. Cluster jobs\t"
print "\n3. Quit"
choice = raw_input("\nType your option : \t").strip()
if choice == "3":
exit(0)
if choice == "1":
keyword = raw_input("\n\nEnter keywords for the Job:\t").lower().strip()
if (isvalidKeyword(keyword) == False):
print "\nInvalid keyword entered. Please enter english words to search."
continue
Kstr = raw_input("\nEnter value of K for Knn:\t").strip()
if (invalidKvalue(Kstr)):
print "\nInvalid value for K. Please enter positive integer value."
continue
K = int(Kstr)
print "\n\nYou entered: \"" + str(keyword) + "\" and K = " + str(K)
print 'Fetching jobs..'
if K <= 15 and (keyword in env.lookupTable.keys()):
print "\nKeywords exists in lookup table. Fetching from lookup table..."
outputJobs = env.getJobsFromLookup(keyword)
for job in outputJobs[0:K]:
job[0].printDetails()
else:
print "\nKeyword not found in lookup table. Fetching from web..."
outputJobs = env.scrapeWeb(agent,webscrapper, keyword, max(K, 15))
env.lookupTable[keyword] = outputJobs[0:15]
for job in outputJobs[0:K]:
job[0].printDetails()
elif choice == "2":
keyword = raw_input("\n\nEnter keywords for the Job:\t").lower().strip()
if (isvalidKeyword(keyword) == False):
print "\nInvalid keyword entered. Please enter english words to search."
continue
noOfCluster = raw_input('\nEnter number of clusters:\t').strip()
if(invalidNoOfClusters(noOfCluster)):
print "\nInvalid number entered. Please enter positive integer value."
continue
noOfClusters = int(noOfCluster)
if noOfClusters > 15 or noOfClusters <= 0:
print "\nCapping number of clusters to 15."
noOfClusters = 15
print '\nClustering jobs'
# If keyword already exists in lookup table, relevant knn jobs are fetched from lookup table
if (keyword in env.lookupTable.keys()) and (noOfClusters <= 15):
print "\nKeywords exists in lookup table. Fetching from lookup table..."
outputJobs = env.getJobsFromLookup(keyword)
env.cluster(agent,outputJobs, noOfClusters)
else:
print "\nKeyword not found in lookup table. Fetching from web..."
# If keyword not present in lookup table, jobs will be fetched from three websites
outputJobs = env.scrapeWeb(agent,webscrapper, keyword, 15)
env.cluster(agent,outputJobs, noOfClusters)
else:
print '\nYou entered values other than 1, 2 or 3. Please try again.'
continue
print "\nYou chose to Quit. Hopefully, you enjoyed my service"
if __name__ == '__main__':
main() | [
"mayuri-wad-012447851@users.noreply.github.com"
] | mayuri-wad-012447851@users.noreply.github.com |
ef87c46af4b8211114110561f07c8943a438dc8f | 6b477048cc0f665b3e21a4c54f2fe9bd29977cda | /World_geopdas.py | fbf051fa22f224970894bc0987ae355016599e5a | [] | no_license | pirate777333/COVID_19_GEOF | 3e345af8d580db58aef0ac87bec267d46e7aaefb | 84dd66ea4a67dcd7c61cdd1211a88c8a3f1deee4 | refs/heads/main | 2023-04-29T06:35:12.696045 | 2021-05-22T15:44:49 | 2021-05-22T15:44:49 | 332,896,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
df=pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
df=df.groupby("Country/Region").sum()
df=df.drop(columns=["Lat","Long"])
df_t=df.T
world_map=gpd.read_file("C:/Users/Josko/Desktop/GeoViz/esej/World_Map.shp")
world_map.replace("Viet Nam","Vietnam",inplace=True)
world_map.replace("Brunei Darussalam","Brunei",inplace=True)
world_map.replace("Cape Verde","Cabo Verde",inplace=True)
world_map.replace("Democratic Republic of the Congo","Congo (Kinshasa)",inplace=True)
world_map.replace("Congo","Congo (Brazzaville)",inplace=True)
world_map.replace("Czech Republic","Czechia",inplace=True)
world_map.replace("Swaziland","Eswatini",inplace=True)
world_map.replace("Iran (Islamic Republic of)","Iran",inplace=True)
world_map.replace("Korea, Republic of","Korea, South",inplace=True)
world_map.replace("Lao People's Democratic Republic","Laos",inplace=True)
world_map.replace("Libyan Arab Jamahiriya","Libya",inplace=True)
world_map.replace("Republic of Moldova","Moldova",inplace=True)
world_map.replace("The former Yugoslav Republic of Macedonia","North Macedonia",inplace=True)
world_map.replace("Syrian Arab Republic","Syria",inplace=True)
world_map.replace("Taiwan","Taiwan*",inplace=True)
world_map.replace("United Republic of Tanzania","Tanzania",inplace=True)
world_map.replace("United States","US",inplace=True)
world_map.replace("Palestine","West Bank and Gaza",inplace=True)
merge=world_map.join(df,on="NAME", how="right")
ax=merge.plot(column="12/10/20",
cmap="OrRd",
figsize=(15,15),
legend=True,
scheme="user_defined",
classification_kwds={"bins":[500,1000,10000,50000,100000,500000,1000000,5000000]},
edgecolor="black",
linewidth=0.5)
ax.set_title("Total Confirmed COVID-19 Cases", fontdict={'fontsize':20})
ax.set_axis_off()
ax.get_legend().set_bbox_to_anchor((0.18, 0.6))
plt.show()
| [
"noreply@github.com"
] | pirate777333.noreply@github.com |
d6013d41b72bed841984038973a1389b8a8908ec | 3b6b37de4dfd1dbdcdc4cbe6939f3b0857909b76 | /test/test_app.py | 79b306b56cc6bbfd4e153efd3e8cfd68f17f5f52 | [
"Unlicense"
] | permissive | plocandido/codeshow-flask | d49ebd473a9ef37e32f3748d7b64f620b420fc1e | 25f28d157b02dc49b72a9df9ca93f498fbd017f8 | refs/heads/master | 2023-02-09T14:18:44.402610 | 2021-01-07T05:00:30 | 2021-01-07T05:00:30 | 282,460,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | def test_app_is_created(app):
assert app.name == 'delivery.app'
def test_config_is_loaded(config):
assert config['DEBUG'] is False
def test_request_return_404(client):
assert client.get("/url_que_nao_existe").status_code == 404 | [
"pcandido@outlook.com"
] | pcandido@outlook.com |
5cf98feda027d11e03eb834da33abdfd572aba07 | 9af364af59511b0df435b8914b226d4f5eebe4fa | /source/models/ClassTransformerModel.py | 64c24a7224dfe6a93e37c6ae2a2e17ea599d73cc | [] | no_license | sudnya/bert-word-embeddings | e7ed3b09bc4db1a9ff92784cac2856f74a2ef5f7 | 09f2d913f6b32101a43c1da84adfe47205824f6d | refs/heads/master | 2022-12-04T18:19:32.495031 | 2019-08-03T07:09:34 | 2019-08-03T07:09:34 | 170,256,409 | 0 | 0 | null | 2022-09-23T22:21:45 | 2019-02-12T05:14:49 | Python | UTF-8 | Python | false | false | 39,880 | py |
import logging
import numpy
import os
import json
import shutil
import time
import math
import humanize
import tensorflow as tf
from models.Vocab import Vocab
from models.ModelDescriptionCheckpointer import ModelDescriptionCheckpointer
logger = logging.getLogger(__name__)
"""Implements a class based transformer model using Tensorflow"""
class ClassTransformerModel:
    def __init__(self, config, trainingDataSource, validationDataSource):
        """Initializes the model.

        Attributes:
            config: The configuration for the model (dict-like; read under
                the "model" key by other methods).
            trainingDataSource: source of training samples and labels
            validationDataSource: source of validation samples and labels
        """
        self.config = config
        self.trainingDataSource = trainingDataSource
        self.validationDataSource = validationDataSource
        # Each instance owns a private graph and session so multiple models
        # can coexist in one process.
        self.graph = tf.Graph()
        self.session = tf.Session(graph=self.graph)
        self.checkpointer = ModelDescriptionCheckpointer(config, self.__class__.__name__)
        # Set to True once the graph has been created or restored.
        self.isLoaded = False
        # Best validation loss seen so far; None until first checkpoint.
        self.bestValidationLoss = None
def train(self):
"""Trains the model.
Trains the model for epochs specified in the config.
Runs the validation dataset on the model if specified in the config.
"""
with self.graph.as_default():
self.getOrLoadModel()
for epoch in range(self.getEpochs()):
self.runOnTrainingDataset(epoch)
if self.shouldRunValidation():
self.runOnValidationDataset(epoch)
self.validationDataSource.reset()
self.checkpointBestModel()
self.trainingDataSource.reset()
def checkpointBestModel(self):
if self.bestValidationLoss is None:
self.checkpoint("best")
return
if self.totalLoss < self.bestValidationLoss:
logger.info("Updating best model with loss: " + str(self.totalLoss))
self.bestValidationLoss = self.totalLoss
self.checkpoint("best")
else:
self.checkpoint("checkpoint")
def predict(self, inputs, requestedPredictions):
with self.graph.as_default():
self.getOrLoadModel()
assert False, "Not Implemented"
inputs = numpy.array(inputs)
predictions = self.session.run(self.outputProbabilities,
feed_dict={self.inputTokens : inputs})
batchSize = requestedPredictions.shape[0]
length = requestedPredictions.shape[1]
outputPredictions = numpy.zeros(requestedPredictions.shape)
for b in range(batchSize):
for l in range(length):
outputPredictions[b,l,:] = \
predictions[b,l,requestedPredictions[b,l,:]]
return outputPredictions
def getFeatures(self, inputs, secondInputs):
with self.graph.as_default():
self.getOrLoadModel()
inputs = numpy.expand_dims(numpy.array(inputs), axis=2)
secondInputs = numpy.expand_dims(numpy.array(secondInputs), axis=2)
inputs = numpy.concatenate([inputs, secondInputs], axis=2)
# (batch, sequence, 2, embedding-size)
predictions = self.session.run(self.features,
feed_dict={self.inputTokens : inputs})
return predictions[:, :, 0, :]
def getOrLoadModel(self):
"""Returns a linear model.
If specified, create a new model else load an already existing model.
"""
if self.isLoaded:
return
self.vocab = Vocab(self.config)
shouldCreate = not os.path.exists(
self.checkpointer.getModelLoadDirectory()) or self.getShouldCreateModel()
if shouldCreate:
self.createModel()
else:
self.loadModel()
self.logModel()
def logModel(self):
totalParameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variableParameters = 1
for dim in shape:
variableParameters *= dim.value
totalParameters += variableParameters
logger.debug("Variable '" + variable.name + "' " +
str(humanize.naturalsize(variableParameters)) + " (params) " +
str(shape) + " (dims)")
logger.debug("Total #params '" + str(humanize.naturalsize(totalParameters)) + "' ")
    def loadModel(self):
        """Loads an already existing model from the checkpoint directory."""
        self.checkpointer.load()

        directory = self.checkpointer.getModelLoadDirectory()

        logger.debug("Loading checkpoint from: " + str(directory))
        # Restore the SavedModel (graph + weights) into this instance's
        # session under the "serve" tag.
        tf.saved_model.loader.load(
            self.session,
            ["serve"],
            directory
        )

        # Re-bind tensor/op handles by name, since the graph contents were
        # just restored rather than built by createModel().
        self.setOperationsByName()

        self.isLoaded = True
def setOperationsByName(self):
self.inputTokens = self.graph.get_tensor_by_name("input-tokens:0")
self.labels = self.graph.get_tensor_by_name("output-labels:0")
self.features = self.graph.get_tensor_by_name("features:0")
self.vocabLoss = self.graph.get_tensor_by_name("vocab-loss:0")
self.classificationLoss = self.graph.get_tensor_by_name("classification-loss:0")
self.classLoss = self.graph.get_tensor_by_name("class-loss:0")
self.outputProbabilities = self.graph.get_tensor_by_name("output-probabilities:0")
self.outputDocumentClass = self.graph.get_tensor_by_name("output-document-class:0")
self.loss = self.graph.get_tensor_by_name("loss:0")
self.optimizerStep = self.graph.get_operation_by_name("optimizer-step")
    def createModel(self):
        """Build the full TF1 graph: inputs, class model, losses, optimizers.

        Tensors are given explicit names so that setOperationsByName() can
        re-bind them after a SavedModel restore.
        """
        # inputs (batch, sequence-length, 2)
        self.inputTokens = tf.placeholder(tf.int32, shape=(None, None, 2),
            name="input-tokens")

        # labels (batch, sequence-length, 2)
        self.labels = tf.placeholder(tf.int32, shape=(None, None, 2),
            name="output-labels")

        # Builds self.classMappings / self.classWeights constants.
        self.createClassMappings()

        # convert to classes (batch, sequence-length, 2, assignments)
        self.inputClasses = self.convertToClasses(self.inputTokens)
        self.classLabels = self.convertToClasses(self.labels)

        # class logits (batch, sequence-length, 2, assignmets, class-size)
        # NOTE(review): self.features (used below) is presumably set inside
        # runClassModel — not visible in this file chunk; confirm.
        classLogits = self.runClassModel(self.inputClasses)

        # classification logits (batch, sequence-length, 2, assignments, 2)
        classificationLogits = self.runClassificationModel()

        # document classification logits (batch, sequence-length, 2, assignments, 2)
        documentClassificationLogits = self.runDocumentClassificationModel()

        # compute the losses; each is wrapped in tf.identity only to assign
        # a stable tensor name for later lookup.
        self.clusterLoss = tf.identity(self.evaluateClusteringLoss(
            self.features, self.classLabels), name="clustering-loss")
        self.classificationLoss = tf.identity(self.evaluateClassificationLoss(
            classificationLogits, self.classLabels), name="classification-loss")
        self.documentClassificationLoss = tf.identity(self.evaluateDocumentClassificationLoss(
            documentClassificationLogits, self.classLabels), name="document-classification-loss")
        # Language-model style losses skip the first position ([:, 1:, ...]).
        self.classLoss = tf.identity(self.evaluateLoss(classLogits[:, 1:, :, :, :],
            self.classLabels[:, 1:, :, :]), name="class-loss")
        self.vocabLoss = tf.identity(self.evaluateVocabLoss(classLogits[:, 1:, :, :, :],
            self.labels[:, 1:, :]), name="vocab-loss")

        # Combined training objective (document classification loss is
        # optimized separately via documentOptimizerStep).
        self.loss = tf.identity(self.classLoss +
            self.classificationLoss +
            self.clusterLoss +
            self.vocabLoss,
            name="loss")

        # convert to vocab logits (batch, sequence-length, vocab-size)
        vocabLogits = self.expandClassLogitsToVocab(classLogits)

        self.outputProbabilities = tf.nn.softmax(vocabLogits,
            name="output-probabilities")

        self.outputDocumentClass = tf.reduce_max(documentClassificationLogits, axis=3)

        # optimizer
        self.optimizerStep = self.createOptimizerStep(self.loss, "")
        self.documentOptimizerStep = self.createOptimizerStep(self.documentClassificationLoss,
            "document")

        # initializers
        self.globalInitializer = tf.global_variables_initializer()
        self.localInitializer = tf.local_variables_initializer()

        # summaries
        self.setupSummaries()

        # do the initialization
        self.initializeModel()
def createClassMappings(self):
mappings = numpy.zeros([self.getAssignmentCount(), self.vocab.getSize()],
dtype=numpy.int32)
weights = numpy.zeros([self.getAssignmentCount(), self.vocab.getSize()],
dtype=numpy.float32)
for assignment in range(self.getAssignmentCount()):
mappings[assignment, :], weights[assignment, :] = self.createMapping(assignment)
self.classMappingsHost = mappings
self.classMappings = tf.constant(mappings)
self.classWeights = tf.constant(weights)
def logAdd(self, left, right):
if left is None:
return right
if left == float("-inf"):
return right
if right == float("-inf"):
return left
return max(left, right) + math.log1p(math.exp( -math.fabs(left - right)))
def logSumArray(self, array):
from functools import reduce
return reduce(lambda x, y : self.logAdd(x, y), array)
def logSubtract(self, left, right):
if left <= right:
assert False, "log of negative number in subtraction " + str(left) + " - " + str(right)
if right == float("-inf"):
return left
return left + math.log1p(-math.exp(right - left))
def createMapping(self, assignment):
assert self.getNumberOfDirectClasses() <= self.getNumberOfClasses()
assert self.getNumberOfDirectClasses() <= self.vocab.getSize()
vocabSize = self.vocab.getSize() - self.getNumberOfDirectClasses()
numberOfClasses = self.getNumberOfClasses() - self.getNumberOfDirectClasses()
directMapping = numpy.arange(self.getNumberOfDirectClasses(), dtype=numpy.int32)
directWeights = numpy.ones(self.getNumberOfDirectClasses(), dtype=numpy.float32)
mapping, weights = self.createLogMapping(assignment, vocabSize, numberOfClasses)
return (numpy.concatenate([directMapping, self.getNumberOfDirectClasses() + mapping]),
numpy.concatenate([directWeights, weights]))
    def createLogMapping(self, assignment, vocabSize, numberOfClasses):
        """Partition a vocab into classes of roughly equal total frequency.

        Words are given synthetic log-space frequencies from a power law
        (plus per-assignment random jitter so each assignment partitions
        differently), then greedily packed in descending-frequency order
        into numberOfClasses buckets of ~equal total mass.

        Returns:
            (mapping, weights): mapping[word] is its class id; weights[word]
            is 1 / (size of that word's class).
        """
        # Seeding with the assignment index makes each assignment's
        # partition deterministic but distinct.
        generator = numpy.random.RandomState(seed=assignment)

        # Synthetic log-frequencies: word 0 is most frequent.
        wordCounts = reversed([i * self.getWordFrequencyPowerLawExponent()
            for i in range(vocabSize)])
        wordCountsPlusRandom = [i + math.log(generator.uniform(0.0, 1000.0)) for i in wordCounts]

        logTotalCount = self.logSumArray(wordCountsPlusRandom)

        # (word index, log count) pairs, most frequent first.
        sortedWordCounts = sorted(enumerate(wordCountsPlusRandom), key=lambda x: x[1], reverse=True)

        # Target per-class mass: total / numberOfClasses (in log space).
        logClassSize = logTotalCount - math.log(numberOfClasses)

        mapping = numpy.zeros([vocabSize], dtype=numpy.int32)
        weights = numpy.zeros([vocabSize], dtype=numpy.float32)

        # First pass: greedily assign words to classes, advancing to the
        # next class once the accumulated mass reaches the target (the last
        # class absorbs any remainder).
        currentClass = 0
        wordsInCurrentClass = 0
        logCurrentCount = None
        for wordIndex, logWordCount in sortedWordCounts:
            assert currentClass < numberOfClasses
            mapping[wordIndex] = currentClass
            wordsInCurrentClass += 1
            logCurrentCount = self.logAdd(logCurrentCount, logWordCount)

            if logCurrentCount >= logClassSize and currentClass + 1 != numberOfClasses:
                #print(logCurrentCount, logWordCount, currentClass, logClassSize)
                # Carry the overshoot into the next class.
                logCurrentCount = self.logSubtract(logCurrentCount, logClassSize)
                wordsInCurrentClass = 0
                currentClass += 1

        # Second pass: weight each word by 1 / (its class's member count).
        currentClass = 0
        currentClassSize = 0
        currentClassMembers = []

        for i, wordCountAndIndex in enumerate(sortedWordCounts):
            wordIndex, wordCount = wordCountAndIndex

            currentClassMembers.append(wordIndex)
            currentClassSize += 1

            # if end of current class
            if ((1 + i) == len(sortedWordCounts) or
                mapping[sortedWordCounts[1 + i][0]] != currentClass):
                for memberIndex in currentClassMembers:
                    weights[memberIndex] = 1.0 / currentClassSize

                # Only log the first and last classes to avoid log spam.
                if currentClass == 0 or i == (len(sortedWordCounts) - 1):
                    logger.info("current class " + str(currentClass) +
                        " members " + str(len(currentClassMembers)))

                currentClass += 1
                currentClassSize = 0
                currentClassMembers = []

        return mapping, weights
def initializeModel(self):
self.session.run(self.globalInitializer)
self.session.run(self.localInitializer)
    def runOnTrainingDataset(self, epoch):
        """Train the model on the training dataset for one epoch.

        Iterates getStepsPerEpoch() minibatches, printing a running status
        line. Stops early if the data source raises (e.g. exhausted).
        """
        trainStart = time.time()

        totalLoss = 0.0

        # Holds the latest status line (or the data-source error message).
        message = None

        for step in range(self.getStepsPerEpoch()):
            generatorStart = time.time()

            try:
                inputs, labels, secondInputs, secondLabels = self.trainingDataSource.next()
            except Exception as e:
                # Data source exhausted or failed: keep its message and stop.
                if message is None:
                    message = str(e)
                break

            generatorEnd = time.time()

            trainStepStart = time.time()
            loss, gradNorm = self.trainingStep(inputs, labels, secondInputs, secondLabels,
                step, epoch)
            trainStepEnd = time.time()

            totalLoss += loss

            message = ("Epoch (" + str(epoch) + " / " + str(self.getEpochs()) +
                "), Step (" + str(step) + " / " + str(self.getStepsPerEpoch()) +
                "), Generator time: " + ("%.2f" % (generatorEnd - generatorStart)) +
                ", training step time: " + ("%.2f" % (trainStepEnd -
                    trainStepStart) +
                ", loss: " + str("%.2f" % loss) +
                ", grad norm: " + str("%.2f" % gradNorm)) +
                ", avg-loss: " + str("%.2f" % (totalLoss / (step + 1))))

            # Overwrite the same console line each step.
            print(message, end="\r", flush=True)

        trainEnd = time.time()

        print(message)
        logger.debug(" Training took: " + (str(trainEnd - trainStart)) + " seconds...")
def trainingStep(self, inputs, labels, secondInputs, secondLabels, step, epoch):
"""Training step for one minibatch of training data."""
inputs = numpy.expand_dims(numpy.array(inputs), axis=2)
labels = numpy.expand_dims(numpy.array(labels), axis=2)
secondInputs = numpy.expand_dims(numpy.array(secondInputs), axis=2)
secondLabels = numpy.expand_dims(numpy.array(secondLabels), axis=2)
inputs = numpy.concatenate([inputs, secondInputs], axis=2)
labels = numpy.concatenate([labels, secondLabels], axis=2)
if self.getShouldClassifyDocument():
optimizerStep = self.documentOptimizerStep
loss = self.documentClassificationLoss
else:
optimizerStep = self.optimizerStep
loss = self.loss
trainingLoss, gradNorm, summaries, _ = self.session.run([loss,
self.gradientNorm, self.mergedSummary, optimizerStep],
feed_dict={self.inputTokens : inputs, self.labels : labels })
if step % self.getStepsPerTensorboardLog():
self.trainingSummaryWriter.add_summary(summaries,
step + epoch * self.getStepsPerEpoch())
return trainingLoss, gradNorm
    def runOnValidationDataset(self, epoch):
        """Evaluate the model on the validation dataset for one epoch.

        Accumulates self.totalLoss / self.totalVocabLoss (read later by
        checkpointBestModel) and writes per-epoch tensorboard summaries.
        """
        validationStart = time.time()

        self.totalLoss = 0.0
        self.totalVocabLoss = 0.0

        # Holds the latest status line (or the data-source error message).
        message = None

        for step in range(self.getValidationStepsPerEpoch()):
            generatorStart = time.time()

            try:
                inputs, labels, secondInputs, secondLabels = self.validationDataSource.next()
            except Exception as e:
                # Data source exhausted or failed: keep its message and stop.
                if message is None:
                    message = str(e)
                break

            generatorEnd = time.time()

            validationStepStart = time.time()
            loss, vocabLoss = self.validationStep(inputs, labels, secondInputs, secondLabels)
            validationStepEnd = time.time()

            self.totalLoss += loss
            self.totalVocabLoss += vocabLoss

            message = ("Validation Step (" + str(step) + " / " +
                str(self.getValidationStepsPerEpoch()) +
                "), Generator time: " + ("%.2f" % (generatorEnd - generatorStart)) +
                ", validation step time: " + ("%.2f" % (validationStepEnd - validationStepStart)) +
                ", avg-loss: " + ("%.2f" % (self.totalLoss/(step + 1))))

            # Overwrite the same console line each step.
            print(message, end="\r", flush=True)

        validationEnd = time.time()

        print(message)
        logger.debug(" Validation took: " + (str(validationEnd - validationStart)) + " seconds...")

        self.addValidationSummaries(self.totalLoss, self.totalVocabLoss, epoch)
def addValidationSummaries(self, totalLoss, vocabLoss, epoch):
averageLoss = totalLoss / self.getValidationStepsPerEpoch()
summary = tf.Summary(value=[
tf.Summary.Value(tag="validation-loss", simple_value=averageLoss),
])
self.trainingSummaryWriter.add_summary(summary, epoch)
averageVocabLoss = vocabLoss / self.getValidationStepsPerEpoch()
summary = tf.Summary(value=[
tf.Summary.Value(tag="validation-vocab-cross-entropy", simple_value=averageVocabLoss),
])
self.trainingSummaryWriter.add_summary(summary, epoch)
def validationStep(self, inputs, labels, secondInputs, secondLabels):
"""One minibatch of validation data processed by the model."""
inputs = numpy.expand_dims(numpy.array(inputs), axis=2)
labels = numpy.expand_dims(numpy.array(labels), axis=2)
secondInputs = numpy.expand_dims(numpy.array(secondInputs), axis=2)
secondLabels = numpy.expand_dims(numpy.array(secondLabels), axis=2)
inputs = numpy.concatenate([inputs, secondInputs], axis=2)
labels = numpy.concatenate([labels, secondLabels], axis=2)
if self.getShouldClassifyDocument():
loss = self.documentClassificationLoss
else:
loss = self.loss
validationLoss, vocabLoss = self.session.run([loss, self.vocabLoss],
feed_dict={self.inputTokens : inputs,
self.labels : labels})
return validationLoss, vocabLoss
    def createOptimizerStep(self, loss, name):
        """One step of backprop.

        Args:
            loss: scalar loss tensor to minimize.
            name: prefix for the optimizer's op name ("" or "document").

        Returns:
            The apply-gradients op for this loss.
        """
        # beta2=0.98 / epsilon=1e-8: transformer-style Adam settings —
        # presumably intentional; confirm against the training recipe.
        optimizer = tf.train.AdamOptimizer(
            learning_rate=float(self.config["model"]["learning-rate"]),
            beta1=0.9,
            beta2=0.98,
            epsilon=10e-9,
            name=name+"optimizer-step")

        gradients, variables = zip(*optimizer.compute_gradients(loss))
        gradients, _ = tf.clip_by_global_norm(gradients,
            self.config["model"]["gradient-clipping-factor"])
        # NOTE(review): the norm is computed on the *clipped* gradients, and
        # this method is called twice (main + document optimizers), so
        # self.gradientNorm ends up bound to the last-created op — confirm
        # this is the intended tensor for logging.
        self.gradientNorm = tf.global_norm(gradients, name="gradient-norm")

        return optimizer.apply_gradients(zip(gradients, variables))
    def setupSummaries(self):
        """Register scalar summaries for each loss and create the writer."""
        tf.summary.scalar('total-loss', self.loss)
        # Only one of the two classification losses is summarized, matching
        # whichever loss the optimizer actually minimizes in this mode.
        if self.getShouldClassifyDocument():
            tf.summary.scalar('document-class-cross-entropy', self.documentClassificationLoss)
        else:
            tf.summary.scalar('document-match-cross-entropy', self.classificationLoss)
        tf.summary.scalar('vocab-cross-entropy', self.vocabLoss)
        tf.summary.scalar('class-cross-entropy', self.classLoss)
        tf.summary.scalar('cluster-loss', self.clusterLoss)
        tf.summary.scalar('gradient-norm', self.gradientNorm)

        self.mergedSummary = tf.summary.merge_all()

        # Validation summaries are written through this same writer (see
        # addValidationSummaries); the dedicated validation writer below was
        # deliberately disabled.
        self.trainingSummaryWriter = tf.summary.FileWriter(
            os.path.join(self.getExperimentDirectory(), 'training-summaries'),
            self.graph)

        #if self.shouldRunValidation():
        #    self.validationSummaryWriter = tf.summary.FileWriter(
        #        os.path.join(self.getExperimentDirectory(), 'validation-summaries'),
        #        self.graph)
    def evaluateClusteringLoss(self, features, classLabels):
        """Average per-assignment triplet loss over the feature embeddings."""
        # features is [batch, sequence, 2, assignments, feature-dimension]
        # class labels is [batch, sequence, 2, assignments]

        assignmentLosses = []

        batchSize = tf.shape(features)[0]
        sequenceLength = tf.shape(features)[1]

        # NOTE(review): this reshapes self.features rather than the
        # `features` argument (which is self.features at the only call site
        # in createModel) — confirm the parameter shadowing is intentional.
        features = tf.reshape(self.features, (batchSize, sequenceLength,
            2, self.getAssignmentCount(), self.getEmbeddingSize()))

        # One triplet loss per class assignment.
        for i in range(self.getAssignmentCount()):
            assignmentLosses.append(self.evaluatePerAssignmentClusterLoss(
                features[:, :, :, i, :], classLabels[:, :, :, i]))

        # Normalize by batch size and the 2 * assignments loss terms.
        return sum(assignmentLosses) / (tf.multiply(tf.cast(batchSize, dtype=tf.float32),
            2.0 * self.getAssignmentCount()))
def evaluatePerAssignmentClusterLoss(self, features, labels):
# features is [batch, sequence, 2, feature-dim]
# labels is [batch, sequence, 2]
wordFeatures = tf.reshape(features[:, 0, :, :], (-1, self.getEmbeddingSize()))
tripletLabels = tf.reshape(labels[:, 0, :], (-1, ))
return self.tripletLoss(wordFeatures, tripletLabels)
    def tripletLoss(self, features, labels):
        # Semi-hard triplet loss from tf.contrib metric learning:
        # features are embedding vectors, labels are integer class ids.
        return tf.contrib.losses.metric_learning.triplet_semihard_loss(labels, features)
    def evaluateClassificationLoss(self, batchOutputs, labels):
        """Cross entropy for predicting whether the paired sequences match."""
        # batch outputs is [batch, assignments, 2]
        # labels is [batch, sequence, 2, assignments, 1]
        # Binary target per assignment: 1 when both sequences' first-position
        # class ids agree, else 0.
        labels = tf.cast(tf.equal(labels[:, 0, 0, :, 0], labels[:, 0, 1, :, 0]), tf.int32)
        return tf.losses.sparse_softmax_cross_entropy(
            labels=labels,
            logits=batchOutputs)
    def evaluateDocumentClassificationLoss(self, batchOutputs, labels):
        """Cross entropy for per-document class prediction."""
        # batch outputs is [batch, 2, assignments, 2]
        # labels is [batch, sequence, 2, assignments, 1]
        # Use the first-position class ids of both sequences as targets.
        labels = labels[:,0,:,:,0]
        return tf.losses.sparse_softmax_cross_entropy(
            labels=labels,
            logits=batchOutputs)
    def evaluateLoss(self, batchOutputs, labels):
        """Sparse softmax cross entropy of logits against integer labels."""
        return tf.losses.sparse_softmax_cross_entropy(
            labels=labels,
            logits=batchOutputs)
    def klDivergence(self, a, b):
        """Mean KL(a || softmax(b)) between categorical distributions.

        `b` holds logits and is softmaxed here; `a` is used as probabilities
        directly — assumes `a` is already normalized, confirm at call sites.
        The eps keeps both strictly positive so allow_nan_stats=False holds.
        """
        a = tf.distributions.Categorical(probs=a + numpy.finfo(float).eps)
        b = tf.distributions.Categorical(probs=tf.nn.softmax(b) + numpy.finfo(float).eps)
        return tf.reduce_mean(tf.distributions.kl_divergence(a, b, allow_nan_stats=False))
    def convertToClasses(self, inputs):
        """Map vocab ids to class ids for every assignment.

        inputs is (batch, sequence, 2);
        self.classMappings is (assignments, vocab size);
        outputs is (batch, sequence, 2, assignments, 1).
        """
        batchSize = tf.shape(inputs)[0]
        sequenceLength = tf.shape(inputs)[1]
        # One gather per assignment, concatenated along a new assignment axis.
        classes = tf.concat([tf.reshape(tf.gather(self.classMappings[i, :], inputs),
            (batchSize, sequenceLength, 2, 1))
                for i in range(self.getAssignmentCount())], axis=3)
        return tf.reshape(classes, (batchSize, sequenceLength, 2, self.getAssignmentCount(), 1))
    def expandClassLogitsToVocab(self, classLogits):
        """Expand class-level logits to vocabulary-level logits.

        class logits is (batch size, sequence-length, 2, assignments, class-size);
        class mappings is (class-assignments, vocab-size);
        class weights is (class-assignments, vocab-size);
        output is (batch-size, sequence-length, 2, vocab-size) — the
        weighted mean over the assignment axis.
        """
        batchSize = tf.shape(classLogits)[0]
        sequenceLength = tf.shape(classLogits)[1]
        # For each assignment, index each vocab entry's class logit.
        gatheredLogits = tf.concat([tf.reshape(tf.gather(classLogits[:,:,:,i,:], self.classMappings[i, :], axis=3),
            (batchSize, sequenceLength, 2, 1, self.vocab.getSize()))
            for i in range(self.getAssignmentCount())], axis=3)
        return tf.reduce_mean(tf.multiply(gatheredLogits, self.classWeights), axis=3)
    def evaluateVocabLoss(self, classLogits, vocabLabels):
        """Sampled-softmax style vocabulary loss.

        classLogits is (batch size, sequence-length, 2, assignments, class-size);
        vocabLabels (labels) is (batch size, sequence-length, 2).
        """
        batchSize = tf.shape(classLogits)[0]
        sequenceLength = tf.shape(classLogits)[1]
        sampleCount = self.getSoftmaxSampleCount()
        samples = self.generateSamples(sampleCount)
        # All-zero labels are correct here: extendLogits/extendWeights place
        # the true-label logit/weight at index 0 of the sampled axis below.
        sampledLabels = tf.zeros((batchSize, sequenceLength, 2), dtype=tf.int32)
        # sampled mappings is (assignment count, sample count)
        sampledMappings = self.sample(self.classMappings, samples, sampleCount)
        # sampled weights is (assignment count, sample count)
        sampledWeights = self.sample(self.classWeights, samples, sampleCount)
        # gathered logits is (batch size, sequence length, assignment count, sample count)
        gatheredLogits = tf.concat([tf.reshape(tf.gather(classLogits[:,:,:,i,:], sampledMappings[i,:], axis=3),
            (batchSize, sequenceLength, 2, 1, sampleCount))
            for i in range(self.getAssignmentCount())], axis=3)
        # gathered weights is (batch size, sequence length, 2, assignment count, sample count)
        gatheredWeights = self.broadcastToExpandedDimension(sampledWeights, batchSize, sequenceLength)
        # gathered logits and weights is (batch size, sequence length, 2, assignment count, sample count + 1)
        gatheredLogits = self.extendLogits(gatheredLogits, classLogits, vocabLabels)
        gatheredWeights = self.extendWeights(gatheredWeights, vocabLabels)
        # weighted logits is (batch size, sequence length, 2, assignments, sample count + 1)
        weightedLogits = tf.multiply(gatheredLogits, gatheredWeights)
        # vocab logits is (batch size, sequence length, 2, sample count + 1)
        vocabLogits = tf.reduce_mean(weightedLogits, axis=3)
        # The first sequence position is excluded from the loss.
        return self.evaluateLoss(vocabLogits[:, 1:, :, :], sampledLabels[:, 1:, :])
    def generateSamples(self, sampleCount):
        """Draw `sampleCount` unique uniform vocab ids per assignment.

        Returns an int64 tensor of shape (assignment count, sample count).
        """
        samplesPerAssignment = []
        # TODO: BUG: Dont sample the label
        # (a drawn sample can collide with the true label id, duplicating
        # the label's logit in the sampled softmax).
        for assignment in range(self.getAssignmentCount()):
            samples, _, _ = tf.random.uniform_candidate_sampler(
                true_classes=tf.broadcast_to(tf.range(self.vocab.getSize(), dtype=tf.int64),
                    (1, self.vocab.getSize())),
                num_true=self.vocab.getSize(),
                num_sampled=sampleCount,
                range_max=self.vocab.getSize(),
                unique=True)
            samplesPerAssignment.append(tf.reshape(samples, (1, -1)))
        return tf.concat(samplesPerAssignment, axis=0)
    def extendLogits(self, vocabLogits, classLogits, labels):
        """Prepend the true-label logit to the sampled logits (axis 4, index 0).

        class logits is (batch size, sequence length, 2, assignment count, sample count);
        self.classMappings is (assignment count, vocab size);
        labels is (batch size, sequence length, 2).
        """
        batchSize = tf.shape(classLogits)[0]
        sequenceLength = tf.shape(classLogits)[1]
        # labelClasses is (batch size, sequence length, 2, assignment count, 1):
        # each label's class id under every assignment.
        labelClasses = tf.concat(
            [tf.reshape(tf.gather(self.classMappings[i, :], labels),
                (batchSize, sequenceLength, 2, 1, 1)) for i in range(self.getAssignmentCount())],
            axis=3)
        # gathered logits is (batch size, sequence length, 2, assignment count, 1)
        gatheredLogits = tf.batch_gather(classLogits, labelClasses)
        return tf.concat([gatheredLogits, vocabLogits], axis=4)
    def extendWeights(self, vocabWeights, labels):
        """Prepend the true-label weight to the sampled weights (axis 4, index 0).

        vocab weights is (batch size, sequence length, 2, assignment count, sample count);
        labels is (batch size, sequence length, 2).
        """
        batchSize = tf.shape(vocabWeights)[0]
        sequenceLength = tf.shape(vocabWeights)[1]
        # labelWeights is (batch size, sequence length, 2, assignment count, 1)
        labelWeights = tf.concat(
            [tf.reshape(tf.gather(self.classWeights[i, :], labels),
                (batchSize, sequenceLength, 2, 1, 1)) for i in range(self.getAssignmentCount())],
            axis=3)
        return tf.concat([labelWeights, vocabWeights], axis=4)
def sample(self, mappings, samples, sampleCount):
assignments = []
for i in range(self.getAssignmentCount()):
assignments.append(tf.reshape(tf.gather(mappings[i, :], samples[i,:]), (1, sampleCount)))
return tf.concat(assignments, axis=0)
def broadcastToExpandedDimension(self, tensor, batchSize, sequenceLength):
classAssignments = tensor.shape[0]
vocabSize = tensor.shape[1]
newShape = (batchSize, sequenceLength, 2, classAssignments, vocabSize)
expandedTensor = tf.broadcast_to(tensor, newShape)
#print(expandedTensor.shape)
reshapedTensor = tf.reshape(expandedTensor, newShape)
#print(reshapedTensor.shape)
return reshapedTensor
def runClassModel(self, inputs):
#print("inputs", inputs.shape)
inputEmbeddings = self.convertToEmbeddings(inputs)
#print("inputEmbeddings", inputEmbeddings.shape)
# run encoder (logits is (batch-size, sequence-length, assignments, class-count))
encodedEmbeddings = self.runEncoder(inputEmbeddings)
logits = self.runDecoder(encodedEmbeddings)
#print("logits", logits.shape)
return logits
    def runClassificationModel(self):
        """Per-assignment pair classifier head over self.features.

        Returns logits of shape (batch * 1, assignments, 2): the feature
        sequence is max-pooled over time, the pair axis is folded into the
        feature dimension, and a dense layer produces two-way logits.
        """
        batchSize = tf.shape(self.features)[0]
        sequenceLength = tf.shape(self.features)[1]
        features = tf.reshape(self.features, (batchSize, sequenceLength, 2,
            self.getAssignmentCount(), self.getEmbeddingSize()))
        features = self.multiheadedAttention(features)
        # features is (batch-size, sequence-length, 2, assignments, embedding-size)
        reducedFeatures = tf.reduce_max(features, axis=1)
        # reducedFeatures is (batch size, 2, assignments, embedding-size)
        transposedFeatures = tf.transpose(reducedFeatures, [0,2,1,3])
        # transposedFeatures is (batch size, assignments, 2, embedding-size)
        reshapedFeatures = tf.reshape(transposedFeatures, (-1, self.getAssignmentCount(),
            2 * self.getEmbeddingSize()))
        return tf.layers.dense(reshapedFeatures, units=2)
    def runDocumentClassificationModel(self):
        """Document classifier head over self.features.

        Unlike runClassificationModel, the pair axis is kept separate:
        output logits are shaped (batch, 2, assignments, 2).
        """
        batchSize = tf.shape(self.features)[0]
        sequenceLength = tf.shape(self.features)[1]
        features = tf.reshape(self.features, (batchSize, sequenceLength, 2,
            self.getAssignmentCount(), self.getEmbeddingSize()))
        features = self.multiheadedAttention(features)
        # features is (batch-size, sequence-length, 2, assignments, embedding-size)
        reducedFeatures = tf.reduce_max(features, axis=1)
        # reducedFeatures is (batch size, 2, assignments, embedding-size)
        reshapedFeatures = tf.reshape(reducedFeatures, (-1, 2, self.getAssignmentCount(),
            self.getEmbeddingSize()))
        return tf.layers.dense(reshapedFeatures, units=2)
def convertToEmbeddings(self, sequenceIds):
assignments = []
for assignment in range(self.getAssignmentCount()):
assignments.append(self.convertToClassEmbeddings(sequenceIds, assignment))
return tf.concat(assignments, axis = 3)
    def convertToClassEmbeddings(self, ids, assignment):
        """Embedding lookup for one class assignment.

        Uses a per-assignment embedding table created (or reused via
        AUTO_REUSE) under the shared "linear-embeddings" variable scope.
        """
        with tf.variable_scope("linear-embeddings", reuse=tf.AUTO_REUSE):
            wordEmbeddingsGlobal = tf.get_variable('class-embeddings-' + str(assignment), \
                [self.getNumberOfClasses(), self.getEmbeddingSize()])
            wordEmbeddings = tf.nn.embedding_lookup(wordEmbeddingsGlobal, ids[:, :, :, assignment, :])
        return wordEmbeddings
    def runEncoder(self, embeddings):
        """Encode embeddings with the multi-headed attention stack."""
        return self.multiheadedAttentionStack(embeddings)
    def runDecoder(self, embeddings):
        """Project encoded embeddings to per-assignment class logits.

        One dense layer per assignment; the outputs are concatenated along
        a new assignment axis, giving
        (batch size, sequence length, 2, assignments, classes).
        """
        batchSize = tf.shape(embeddings)[0]
        sequenceLength = tf.shape(embeddings)[1]
        # embeddings is (batch size, sequence length, 2, assignments, classes)
        return tf.concat([tf.reshape(tf.layers.dense(embeddings[:,:,:,i,:], units=self.getNumberOfClasses()),
            (batchSize, sequenceLength, 2, 1, self.getNumberOfClasses()))
            for i in range(self.getAssignmentCount())], axis=3)
    def multiheadedAttentionStack(self, embeddings):
        """Apply positional encodings then N attention layers.

        At the "middle" layer (see isMiddleLayer) the activations are
        flattened and exported via tf.identity as the named "features"
        tensor, stored on self.features for the classifier heads.
        """
        embeddings = self.addPositions(embeddings)
        # embeddings (batch-size, sequence-length, 2, assignments, hidden-dimension)
        for layer in range(self.getNumberOfLayers()):
            embeddings = self.multiheadedAttention(embeddings)
            if self.isMiddleLayer(layer):
                batchSize = tf.shape(embeddings)[0]
                sequenceLength = tf.shape(embeddings)[1]
                self.features = tf.identity(tf.reshape(embeddings, (batchSize, sequenceLength, 2,
                    self.getAssignmentCount() * self.getEmbeddingSize())), name="features")
        return embeddings
    def addPositions(self, embeddings):
        """Add sinusoidal positional encodings to the embeddings.

        sin/cos encodings are computed for half the sequence length, then
        interleaved via the axis-2 concat + reshape below so consecutive
        positions alternate between the two, and finally truncated to the
        actual sequence length before broadcasting onto the embeddings.
        """
        batchSize = tf.shape(embeddings)[0]
        sequenceLength = tf.shape(embeddings)[1]
        halfSequenceLength = (sequenceLength + 1) // 2
        positions = tf.cast(tf.reshape(tf.range(halfSequenceLength),
            (1, halfSequenceLength, 1, 1, 1)), dtype=tf.float32)
        dimensions = tf.cast(tf.reshape(tf.range(self.getEmbeddingSize()),
            (1, 1, 1, 1, self.getEmbeddingSize())), dtype=tf.float32)
        angles = positions / tf.pow(2.0 * tf.cast(halfSequenceLength, dtype=tf.float32),
            2.0 * dimensions / self.getEmbeddingSize())
        evenPositionEmbeddings = tf.reshape(tf.sin(angles),
            (1, halfSequenceLength, 1, 1, 1, self.getEmbeddingSize()))
        oddPositionEmbeddings = tf.reshape(tf.cos(angles),
            (1, halfSequenceLength, 1, 1, 1, self.getEmbeddingSize()))
        # merge them: interleave sin (even positions) and cos (odd positions)
        positionEmbeddings = tf.concat([evenPositionEmbeddings, oddPositionEmbeddings], axis=2)
        positionEmbeddings = tf.reshape(positionEmbeddings,
            (1, 2 * halfSequenceLength, 1, 1, self.getEmbeddingSize()))
        # Drop the padding position when the sequence length is odd.
        positionEmbeddings = positionEmbeddings[:, 0:sequenceLength, :, :, :]
        return embeddings + positionEmbeddings
def isMiddleLayer(self, layer):
if self.getNumberOfLayers() > 1:
return layer == (self.getNumberOfLayers() - 2)
return layer == (self.getNumberOfLayers() - 1)
    def multiheadedAttention(self, embeddings):
        """One transformer-style block: attention + residual norm + FFN.

        embeddings (batch-size, sequence-length, 2, assignments, hidden-dimension).
        """
        projectedEmbeddings = self.projectEmbeddings(embeddings)
        # proj-embeddings (batch-size, sequence-length, 2, assignments, QKV, attention-heads, hidden-dimension)
        attentionOutput = self.runAttention(projectedEmbeddings)
        # project back to the input embedding size
        outputEmbeddings = self.projectBackEmbeddings(attentionOutput)
        # add and norm (residual connection around the attention sublayer)
        embeddings = self.addAndNorm(outputEmbeddings, embeddings)
        # position-wise dense layer
        denseOutput = tf.layers.dense(embeddings,
            self.getEmbeddingSize(), activation="relu")
        # add and norm (residual connection around the dense sublayer)
        denseOutput = self.addAndNorm(denseOutput, embeddings)
        return denseOutput
    def projectEmbeddings(self, embeddings):
        """Jointly project to Q, K and V for every attention head.

        A single dense layer produces 3 * heads * dim outputs which are
        reshaped to (batch, sequence, 2, assignments, QKV=3, heads, dim).
        """
        output = tf.layers.dense(embeddings,
            embeddings.shape[-1] * 3 * self.getNumberOfAttentionHeads())
        batchSize = tf.shape(embeddings)[0]
        sequenceLength = tf.shape(embeddings)[1]
        assignments = embeddings.shape[3]
        return tf.reshape(output,
            (batchSize, sequenceLength, 2, assignments, 3,
            self.getNumberOfAttentionHeads(), embeddings.shape[-1]))
    def projectBackEmbeddings(self, embeddings):
        """Merge attention heads and project back to the embedding size.

        embeddings are (batch-size, sequence-length, 2, assignments, attention-heads, embedding-size);
        result is (batch-size, sequence-length, 2, assignments, embedding-size).
        """
        batchSize = tf.shape(embeddings)[0]
        sequenceLength = tf.shape(embeddings)[1]
        assignments = embeddings.shape[3]
        # Flatten heads x embedding into one axis before the dense layer.
        reshapedEmbeddings = tf.reshape(embeddings, (batchSize, sequenceLength, 2, assignments,
            embeddings.shape[-1] * embeddings.shape[-2]))
        projectedEmbeddings = tf.layers.dense(reshapedEmbeddings, self.getEmbeddingSize())
        return projectedEmbeddings
    def addAndNorm(self, left, right):
        """Residual add of the two inputs followed by layer normalization."""
        return tf.contrib.layers.layer_norm(tf.add(left, right))
    def runAttention(self, embeddings):
        """Scaled dot-product attention over the projected Q/K/V slices.

        Q,K,V (batch-size, sequence-length, 2, assignments, attention-heads, hidden-dimension).
        """
        Q = embeddings[:,:,:,:,0,:,:]
        K = embeddings[:,:,:,:,1,:,:]
        V = embeddings[:,:,:,:,2,:,:]
        # matmul batches over all leading axes, so scores are formed between
        # the last two axes (heads x heads), not across sequence positions.
        readOn = tf.matmul(Q, K, transpose_b=True)
        scale = math.sqrt(self.getEmbeddingSize())
        scaledReadOn = readOn / scale
        # NOTE(review): softmax over axis=1 (the sequence axis) rather than
        # the last axis is unusual for attention — confirm it is intentional.
        contribution = tf.nn.softmax(scaledReadOn, axis=1)
        result = tf.matmul(contribution, V)
        return result
    def checkpoint(self, prefix):
        """Creates a checkpoint of the current model and saves to model
        directory.

        The checkpointer manages the destination directory (named with
        `prefix`); the model itself is exported with the TF SavedModel
        simple_save API, mapping the input-token placeholder to the
        document-class output.
        """
        self.checkpointer.setPrefix(prefix)
        directory = self.checkpointer.getModelSaveDirectory()
        logger.debug("Saving checkpoint to: " + str(directory))
        self.checkpointer.checkpoint()
        with self.graph.as_default():
            tf.saved_model.simple_save(self.session,
                directory,
                inputs={"input_text" : self.inputTokens},
                outputs={"outputs" : self.outputDocumentClass})
        # Remove stale checkpoints per the checkpointer's retention policy.
        self.checkpointer.cleanup()
"""Functions to load configuration parameters."""
def getEmbeddingSize(self):
return int(self.config["model"]["embedding-size"])
def getAssignmentCount(self):
return int(self.config["model"]["assignment-count"])
def getSoftmaxSampleCount(self):
return int(self.config["model"]["softmax-sample-count"])
def getNumberOfClasses(self):
return int(self.config["model"]["number-of-classes"])
def getNumberOfDirectClasses(self):
return int(self.config["model"]["number-of-direct-classes"])
def getNumberOfLayers(self):
return int(self.config["model"]["number-of-layers"])
def getNumberOfAttentionHeads(self):
return int(self.config["model"]["number-of-attention-heads"])
def getWordFrequencyPowerLawExponent(self):
return float(self.config["model"]["word-frequency-power-law-exponent"])
def shouldRunValidation(self):
return self.config["model"]["run-validation"]
def getEpochs(self):
return int(self.config["model"]["epochs"])
def getShouldCreateModel(self):
if not "create-new-model" in self.config["model"]:
return False
return bool(self.config["model"]["create-new-model"])
def getShouldClassifyDocument(self):
if not "classify-document" in self.config["model"]:
return False
return bool(self.config["model"]["classify-document"])
def getStepsPerEpoch(self):
return int(self.config["model"]["steps-per-epoch"])
def getStepsPerTensorboardLog(self):
return int(self.config["model"]["steps-per-tensorboard-log"])
def getValidationStepsPerEpoch(self):
return int(self.config["model"]["validation-steps-per-epoch"])
def getExperimentDirectory(self):
return self.config["model"]["directory"]
| [
"solusstultus@gmail.com"
] | solusstultus@gmail.com |
43c1a91b4e3f8f288ce744bcc1e36236880f0919 | 9268c07cd58f68fd20daf5e38880ef452eeca331 | /10219-.py | 7e62d546d53bceb63e73315a6a81a245e42f14d1 | [] | no_license | rkdr055/BOJ | bbdbdcf8a9df399c3655eea3d9f337275b1ae504 | d90465cfa80d124aba8f9572b9bd5e4bb2a62031 | refs/heads/master | 2021-04-09T14:26:04.811468 | 2018-03-18T10:05:20 | 2018-03-18T10:05:20 | 125,712,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | case=int(input())
# BOJ 10219: print each h x w board mirrored horizontally.
# `case` (the number of test cases) is read on the preceding line.
# Fix: the original accumulated rows across test cases in module-level
# lists (shadowing the `list` builtin), re-printing earlier boards and —
# from the second case on — mirroring rows of the wrong case; each test
# case is now handled independently.
for z in range(case):
    h, w = map(int, input().split())  # board height and width (width unused)
    for _ in range(h):
        print(input()[::-1])
| [
"rkdr955@naver.com"
] | rkdr955@naver.com |
b109faeccfded125ae603f0cd2ccb693475c2cdf | 31f9333012fd7dad7b8b12c1568f59f33420b0a5 | /Alessandria/env/lib/python3.8/site-packages/django/template/utils.py | 41921a09a2c9c6ce444863547c4c915b4f03271c | [] | no_license | jcmloiacono/Django | 0c69131fae569ef8cb72b135ab81c8e957d2a640 | 20b9a4a1b655ae4b8ff2a66d50314ed9732b5110 | refs/heads/master | 2022-11-15T22:18:57.610642 | 2020-07-14T14:43:16 | 2020-07-14T14:43:16 | 255,125,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,566 | py | import functools
from collections import Counter
from pathlib import Path
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
class InvalidTemplateEngineError(ImproperlyConfigured):
    """Raised when a requested template engine alias has no configuration."""
    pass
class EngineHandler:
    """Lazily instantiates and caches template engines from TEMPLATES config."""
    def __init__(self, templates=None):
        """
        templates is an optional list of template engine definitions
        (structured like settings.TEMPLATES).
        """
        self._templates = templates
        # Cache of alias -> instantiated engine backend.
        self._engines = {}
    @cached_property
    def templates(self):
        """Normalized mapping of engine NAME -> full engine definition."""
        if self._templates is None:
            self._templates = settings.TEMPLATES
        templates = {}
        backend_names = []
        for tpl in self._templates:
            try:
                # This will raise an exception if 'BACKEND' doesn't exist or
                # isn't a string containing at least one dot.
                default_name = tpl['BACKEND'].rsplit('.', 2)[-2]
            except Exception:
                invalid_backend = tpl.get('BACKEND', '<not defined>')
                raise ImproperlyConfigured(
                    "Invalid BACKEND for a template engine: {}. Check "
                    "your TEMPLATES setting.".format(invalid_backend))
            # Fill in defaults; explicit keys in the config win via **tpl.
            tpl = {
                'NAME': default_name,
                'DIRS': [],
                'APP_DIRS': False,
                'OPTIONS': {},
                **tpl,
            }
            templates[tpl['NAME']] = tpl
            backend_names.append(tpl['NAME'])
        # Engine aliases must be unique across the whole setting.
        counts = Counter(backend_names)
        duplicates = [alias for alias, count in counts.most_common() if count > 1]
        if duplicates:
            raise ImproperlyConfigured(
                "Template engine aliases aren't unique, duplicates: {}. "
                "Set a unique NAME for each engine in settings.TEMPLATES."
                .format(", ".join(duplicates)))
        return templates
    def __getitem__(self, alias):
        """Return the engine for `alias`, instantiating it on first access."""
        try:
            return self._engines[alias]
        except KeyError:
            try:
                params = self.templates[alias]
            except KeyError:
                raise InvalidTemplateEngineError(
                    "Could not find config for '{}' "
                    "in settings.TEMPLATES".format(alias))
            # If importing or initializing the backend raises an exception,
            # self._engines[alias] isn't set and this code may get executed
            # again, so we must preserve the original params. See #24265.
            params = params.copy()
            backend = params.pop('BACKEND')
            engine_cls = import_string(backend)
            engine = engine_cls(params)
            self._engines[alias] = engine
            return engine
    def __iter__(self):
        """Iterate over configured engine aliases."""
        return iter(self.templates)
    def all(self):
        """Return every configured engine, instantiating as needed."""
        return [self[alias] for alias in self]
@functools.lru_cache()
def get_app_template_dirs(dirname):
    """
    Return an iterable of paths of directories to load app templates from.
    dirname is the name of the subdirectory containing templates inside
    installed applications.
    """
    template_dirs = [
        str(Path(app_config.path) / dirname)
        for app_config in apps.get_app_configs()
        if app_config.path and (Path(app_config.path) / dirname).is_dir()
    ]
    # Immutable return value because it will be cached and shared by callers.
    return tuple(template_dirs)
| [
"jcmloiacono@gmail.com"
] | jcmloiacono@gmail.com |
fc78e95af6cf331f45e09bec63f4ec5467a62134 | df088d4581535b1f536c533797ec1220bb1b1cae | /greedy-coin-change.py | 296116ef0567544ff8c345b8d9fc6876ac3bc13f | [] | no_license | 20kroots/algorithm-study | f25868ad627b43207b5d92207692ba9803e7a2fa | d44618fa5e3a368274c38c90bda6b5755c0a3946 | refs/heads/main | 2023-06-03T00:36:23.721171 | 2021-06-28T20:56:02 | 2021-06-28T20:56:02 | 379,821,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | n = 1260
count = 0
coin_types = [500, 100, 50, 10]
for coin in coin_types:
count += n // coin
n %= coin
print(count)
# O(K)
| [
"86282254+20kroots@users.noreply.github.com"
] | 86282254+20kroots@users.noreply.github.com |
64b3160aa692224fc730910648c06f459dd7b7b6 | 28bf7793cde66074ac6cbe2c76df92bd4803dab9 | /answers/VanshBaijal/Day3/Question1.py | 2c1ff6a179fd4d3ee0ee7cf7dbb9dfe3fd33f32c | [
"MIT"
] | permissive | Codechef-SRM-NCR-Chapter/30-DaysOfCode-March-2021 | 2dee33e057ba22092795a6ecc6686a9d31607c9d | 66c7d85025481074c93cfda7853b145c88a30da4 | refs/heads/main | 2023-05-29T10:33:31.795738 | 2021-06-10T14:57:30 | 2021-06-10T14:57:30 | 348,153,476 | 22 | 135 | MIT | 2021-06-10T14:57:31 | 2021-03-15T23:37:26 | Java | UTF-8 | Python | false | false | 108 | py | n=int(input("Enter the number of terms:"))
s=0
x=0
# Sum the series 1 + 12 + 123 + ... over the n terms read above:
# each term appends the next value i to the previous term (x = x*10 + i).
# NOTE(review): for i > 9 the "append a digit" reading no longer holds,
# since x*10 + i carries — confirm the intended range of n.
for i in range(1,n+1):
    x=(x*10+i)
    s=s+x
print(s)
| [
"noreply@github.com"
] | Codechef-SRM-NCR-Chapter.noreply@github.com |
1caa66b3d3bedd437129cecf9005eee727b5e8c0 | b3faea51efc4528b9f171b44bafeeefa0fa9677a | /create_dirs.py | 34ebc9d0c67d15791aab225923875ddfd0c3c87f | [] | no_license | panchicore/lts | 3259ad3ec01920065e289cd166380c8dfc815cb0 | 174f5f253454f2e3cee4975c8f563aa50c3539f5 | refs/heads/master | 2020-04-09T07:00:09.693460 | 2016-06-21T22:32:57 | 2016-06-21T22:32:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # python create_dirs "2013-01-01 TO 2017-01-01"
import sys, os
from datetime import datetime
from dateutil.rrule import rrule, DAILY
dates = sys.argv[1]
_from = datetime.strptime(dates.split(" TO ")[0], "%Y-%m-%d").date()
_to = datetime.strptime(dates.split(" TO ")[1], "%Y-%m-%d").date()
dirs = list(rrule(freq=DAILY, dtstart=_from, until=_to))
for i, dir in enumerate(dirs):
path_dirs = os.path.join("fs", dir.strftime("%Y/%m/%d"))
print path_dirs
if not os.path.exists(path_dirs):
os.makedirs(path_dirs) | [
"thepanchi@gmail.com"
] | thepanchi@gmail.com |
39f0075dd86054fbc5cff10c3b27eef59cd6da30 | 99d6dc1d8591445cab2d7b4202e6b236c9a2160a | /V48/content/plot1.py | 06db18bef624062623b562f2730a874bffebcfab | [] | no_license | Mampfzwerg/FP | a80953237c24e24e6e6177941668eb68b70335b8 | bbc0bbf26aa58f47516729c68a0c5e49882387f0 | refs/heads/master | 2023-03-06T16:27:43.211945 | 2021-02-22T15:33:50 | 2021-02-22T15:33:50 | 267,815,264 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from scipy.optimize import curve_fit
from uncertainties import ufloat
import uncertainties.unumpy as unp
T, I = np.genfromtxt('mess1.txt', unpack=True)
T = T + 273.15
I = np.abs(I)
Tb,Ib = np.ones(7), np.ones(7)
Tb[0:6], Ib[0:6] = T[1:7], I[1:7]
Tb[6], Ib[6] = T[42], I[42]
def b(y, c, d):
return c * np.exp(d*y)
params, cov = curve_fit(b, Tb, Ib, p0=[1e-3, 1e-3])
err = np.sqrt(np.diag(cov))
z = np.linspace(T[0], T[45], 500)
plt.plot(T, I, 'x', color='#18c8fc', label='Messwerte')
plt.plot(Tb, Ib, '.', color='b', label='Untergrund-Stützwerte')
plt.plot(z, b(z, *params), 'b-', label='Untergrund')
Tx = T[20:33]
Ix = I[20:33] - b(T[20:33], *params)
print(T[20], T[33])
def f(x, a, b):
return np.exp(a/x) * b
params, cov = curve_fit(f, Tx, Ix) #, p0=[-6000, 1])
err = np.sqrt(np.diag(cov))
# Parameter
a = ufloat(params[0], err[0])
b = ufloat(params[1], err[1])
print(a, b)
kB = 8.617333262e-5
W = -a * kB
print(W)
Tmax = [T[27], T[28]]
Tmax = ufloat(np.mean(Tmax), np.std(Tmax))
#print(Tmax)
diff = np.zeros(T.size - 1)
for i in range(T.size - 1):
diff[i] = abs(T[i+1] - T[i])
H = ufloat(np.mean(diff), np.std(diff))
tau = H * a / (60 * Tmax)
tau0 = tau * unp.exp(-W/(kB * Tmax))
print(tau, tau0)
z = np.linspace(np.min(Tx), np.max(Tx), 500)
plt.plot(Tx, Ix, 'x', color='#1891fc', label='Bereinigte und approximierte Messwerte')
plt.plot(z, f(z, *params), '#1891fc', label='Exponentielle Approximation')
#plt.ylim(1e-2, 10)
#plt.yscale('log')
plt.grid(linestyle='dotted', which="both")
plt.xlabel(r'$T$/K')
plt.ylabel(r'$I$/pA')
plt.legend(loc='lower right')
plt.ylim(0, 1.5)
plt.tight_layout()
plt.savefig('plot1.pdf') | [
"karzel.marek@udo.edu"
] | karzel.marek@udo.edu |
c6b48070aef1ffc94264a24d1e8749e0afded51d | b03c84d4cc5221c4f793983aff21941e97ed437c | /5/5/5-i3.py | a402eecff2f0dc60298dc06071e43de4002ee000 | [] | no_license | shunf4-assignment/algorithm-design-assignments | 5068f57ae0fb8f2e5d733b7412a39ecd16cc579c | 1bab40df450aa55d0d4e4bf09859c4d0e0e50923 | refs/heads/master | 2020-03-12T12:26:02.888012 | 2019-01-07T17:58:41 | 2019-01-07T17:58:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,219 | py | import copy
import random
INFINITY = 9999999
class MinWeightMachine(object):
def __init__(self, costs, weights, costsLimit):
assert(len(costs) > 0)
self.componentsNum = len(costs) # 部件总数
assert(isinstance(costs[0], list))
assert(len(costs[0]) > 0)
for x in costs:
assert(len(x) == len(costs[0]))
self.providersNum = len(costs[0]) # 供应商总数
self.costs = costs # costs[c - 1][p - 1] 表示第 c 个部件从第 p 个供应商处获得所付出的价格
self.costsWhenSelectedCheapest = [min(x) for x in self.costs] # 每一个部件的最小价格
assert(len(weights) == self.componentsNum)
for x in weights:
assert(len(x) == self.providersNum)
self.weights = weights
self.weightsWhenSelectedLightest = [min(x) for x in self.weights] # 每一个部件的最轻重量
self.costsLimit = costsLimit
def startBacktrack(self):
# 回溯时使用的数组,select[c - 1] == p - 1 表示对于第 c 件部件,选择第 p 个供应商
self.select = [0] * self.componentsNum
self.currWeight = 0
self.currCosts = 0
self.currMinWeight = INFINITY
self.result = None
self._backtrack(0)
if self.result == None:
raise ValueError("Can not find solution.")
return self.result
def _backtrack(self, depth):
if depth >= self.componentsNum:
if self.currMinWeight > self.currWeight:
self.currMinWeight = self.currWeight
selectPlusOne = [x + 1 for x in self.select]
self.result = (self.currWeight, self.currCosts, selectPlusOne)
else:
# 扩展该节点,第 depth - 1 个部件可以从 providersNum 个供应商中选择
for p in range(self.providersNum):
self.select[depth] = p
self.currWeight += self.weights[depth][p]
self.currCosts += self.costs[depth][p]
# 如果此后的部件全选最便宜的,还是没法在限制之内
if self.currCosts + sum(self.costsWhenSelectedCheapest[depth + 1:]) > self.costsLimit:
pass
# 如果此后的部件全选最轻的,还是没法和当前的最小值比
elif self.currWeight + sum(self.weightsWhenSelectedLightest[depth + 1:]) > self.currMinWeight:
pass
else:
# 否则,可以开始这个子树的扩展
self._backtrack(depth + 1)
self.currWeight -= self.weights[depth][p]
self.currCosts -= self.costs[depth][p]
def main():
    """Randomized self-test harness for MinWeightMachine.

    Runs 1000 cases: case 0 is a fixed known instance (printed with its
    answer); the rest draw random sizes, weights, costs and a feasible
    cost limit, asserting the returned selection respects the limit.
    """
    testTimeNo = 1000
    listMaxLength = 10
    maxWeight = 20
    maxCost = 40
    correctNo = 0  # NOTE(review): never updated/used — dead variable
    for k in range(testTimeNo):
        if(k == 0):
            # Fixed smoke-test instance with a known answer.
            currLen = 10  # NOTE(review): unused in this branch (3 components below)
            costs = [[1,2,3],[3,2,1],[2,2,2]]
            weights = [[1,2,3],[3,2,1],[2,2,2]]
            costsLimit = 4
        else:
            # Random instance; the lower bound on costsLimit guarantees a
            # feasible selection exists (cheapest-per-component sum fits).
            currLen = random.randint(0, listMaxLength) + 1
            currProvider = random.randint(0, listMaxLength) + 1
            weights = [[random.randint(1, maxWeight) for j in range(currProvider)] for i in range(currLen)]
            costs = [[random.randint(1, maxCost) for j in range(currProvider)] for i in range(currLen)]
            costsLimit = random.randint(max([min(x) for x in costs]) * currLen, maxCost * currLen)
        resultWeight, resultCosts, resultSelect = MinWeightMachine(costs, weights, costsLimit).startBacktrack()
        # Recompute the chosen selection's cost and check the limit holds.
        costsSum = 0
        for i, p in enumerate(resultSelect):
            costsSum += costs[i][p-1]
        assert(costsSum <= costsLimit)
        if(k == 0):
            print("第一次测试题面:", "\nweights:", weights, "\ncosts:", costs, "\ncostsLimit:", costsLimit)
            print("答案的总重量:", resultWeight)
            print("答案的总花费:", resultCosts)
            print("答案的供应商选择:", resultSelect)
        print("第 {} 次测试。".format(k+1), end='\r')
    print("\n运行完成。")
main() | [
"shun1048576@zoho.com"
] | shun1048576@zoho.com |
4ff0b85c92365170b0e6cbea77e7b6e628c359fa | 476682b97cdb95751651af63d00ca3f7dba8ccaa | /python/lib/sklearn/hparam-search/parallel.py | bb5f396c7294e37d9c36d430db4b403e926f1057 | [] | no_license | sundongxu/machine-learning | 6249c8f7e652157333bc43e881b3485553de528e | 44c1ef307554ae6dcfd1b7d7288564bba8c8f4a8 | refs/heads/master | 2021-01-23T22:19:21.839521 | 2017-10-14T05:23:30 | 2017-10-14T05:23:30 | 102,926,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: skip-file
from sklearn.datasets import fetch_20newsgroups
import numpy as np
news = fetch_20newsgroups(subset='all')
from sklearn.cross_validation import train_test_split
# 对前3000条新闻文本进行数据分割,25%文本用作未来测试
X_train, X_test, y_train, y_test = train_test_split(
news.data[:3000], news.target[:3000], random_state=33, test_size=0.25)
from sklearn.svm import SVC
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
# 使用Pipeline简化系统搭建流程,将文本抽取与分类器模型串联起来
clf = Pipeline([('vect', TfidfVectorizer(
stop_words='english', analyzer='word')), ('svc', SVC())])
# 这里需要试验的2个超参数的个数分别是4、3,svc_gamma的参数共有10^-2,10^-1...
# 这样一共有3*4=12种超参数的组合,12个不同参数的模型
parameters = {
'svc__gamma': np.logspace(-2, 1, 4),
'svc__C': np.logspace(-1, 1, 3)
}
# 导入网格搜索模块
from sklearn.grid_search import GridSearchCV
# 将12组参数组合以及初始化的Pipeline包括3折交叉验证要求全部告知GridSearchCV,务必注意refit = True
# 设置refit = True,那么程序将会以交叉验证训练集得到的最佳超参数,重新对所有可用的训练集与验证集进行,
# 作为最终用于评估性能参数的最佳模型的参数
# n_jobs表示使用几个CPU核执行程序,-1表示利用本机的全部CPU核心资源
gs = GridSearchCV(clf, parameters, verbose=2, refit=True, cv=3, n_jobs=-1)
# 执行单线程网格搜索
%time _ = gs.fit(X_train, y_train)
gs.best_params_, gs.best_score_
# 输出最佳模型在测试集上的准确性
print gs.score(X_test, y_test)
| [
"371211947@qq.com"
] | 371211947@qq.com |
04c660e7be6743f04c11f95012ea9978e2aaff8e | 9cb2b47243abe31fad466c8728d7d84760bcc1d2 | /settings.py | 388175c6d585abc51eca36a5e416ed37094a5087 | [
"Apache-2.0"
] | permissive | Quantum-Platinum-Cloud/appengine-guestbook-python-cloudsql | 9179b155975e66cd041fae2f2713c54b9075415d | c8dcc70f75bd6dbc37c779a8f5441fcd02bec859 | refs/heads/master | 2023-03-16T14:28:45.035571 | 2013-01-19T05:53:14 | 2013-01-19T05:53:14 | 583,510,324 | 1 | 0 | Apache-2.0 | 2022-12-30T01:52:27 | 2022-12-30T01:52:26 | null | UTF-8 | Python | false | false | 218 | py |
"""Setting file for the Cloud SQL guestbook"""
CLOUDSQL_INSTANCE = 'ReplaceWithYourInstanceName'
DATABASE_NAME = 'guestbook'
USER_NAME = 'ReplaceWithYourDatabaseUserName'
PASSWORD = 'ReplaceWithYourDatabasePassword'
| [
"tmatsuo@google.com"
] | tmatsuo@google.com |
9f5ad57c630a7a56d24f3c074fac31a76d8859e2 | c1b91dd52510b76ca85da417fbda442466e7df1f | /settings.py | 41ae640b1f5cf40170c9a3947051a293433004e3 | [] | no_license | janaya/django_smob_dummy | 4a0d8f7bfe6aa679e85cf41d9911cbd75345611d | 264500f760698c5d69454bf2abe6f5a8a2e8c1d4 | refs/heads/master | 2020-12-24T15:57:55.728360 | 2012-01-18T23:34:30 | 2012-01-18T23:34:30 | 3,146,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,169 | py | # Django settings for django_smob_dummy project.
import os.path
import logging
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'dev.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = 'static'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'kabn-8h75c!))t_(jp-zp49(d!g1jj#-t4w)+gw$w5cdlcqvex'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'django_smob_dummy.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
# FIXME: hackish
os.path.join(PROJECT_ROOT, 'djsmobdummy/templates/djsmobdummy'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'djsmobdummy',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"julia.anaya@gmail.com"
] | julia.anaya@gmail.com |
b408eb1c942cd29c1efe13d3c57a2c4785cf6995 | 95cdf7753fc4022be239666a902df217a93f7125 | /dangdang/test_case/test_login_2.py | 5af74b59fe9373ec7b2a931f7cda17f213c6b17c | [] | no_license | he9mei/python_appium | ffe1b872d3732b9e8510da0dd24f7a791c534be0 | 9fc5dcb67769d2d103756b9fca82d2cfeae40e72 | refs/heads/master | 2022-06-01T03:24:36.821931 | 2022-05-22T07:55:58 | 2022-05-22T07:55:58 | 223,714,095 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,002 | py |
import os
import pytest
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from dangdang.base.base_try_2py import BasePre #导入包时应该from到py文件
class TestLogin(BasePre):
    """Appium UI tests for the DangDang reader app's login flow.

    ``BasePre`` (from dangdang.base.base_try_2py) is expected to provide a
    configured Appium driver as ``self.driver`` -- TODO confirm against the
    base class.
    """

    def test_01(self):
        # Navigate to the login page: tap the "personal" tab, then the
        # nickname field, which opens the login screen.
        driver=self.driver
        # driver.find_element_by_id("com.dangdang.reader:id/tab_personal_iv").click()
        # Experiment: separate the element locator from the driver call.
        # by_el = By.ID("com.dangdang.reader:id/tab_personal_iv")  -- wrong usage; that is the Java client's API style
        el="com.dangdang.reader:id/tab_personal_iv"
        driver.find_element(By.ID,el).click()
        driver.find_element_by_id("com.dangdang.reader:id/nickname_tv").click()

    @pytest.mark.skip()
    def test_02(self):
        driver = self.driver
        # If the app opened the SMS-verification-code login screen by
        # default, tap the account/password login button first.
        try:
            el_pw_login = driver.find_element_by_id("com.dangdang.reader:id/custom_login_tv")
            if el_pw_login .is_displayed():
                print("默认进入短信验证码登录,找到了账号登录按钮!")
                el_pw_login .click()
        # except Exception as e:  # catching bare Exception is flagged as too broad by the linter
        except NoSuchElementException:
            # Button not present: presumably the account/password screen
            # was already shown by default.
            print("可能默认就是账号密码登录!")
            # print(e)
        # Switch the device's input method to Appium's Unicode IME so that
        # send_keys delivers text reliably.
        os.system("adb shell ime set io.appium.android.ime/.UnicodeIME")
        el_name_input=driver.find_element_by_id("com.dangdang.reader:id/name_edit")
        el_name_input.clear()
        el_name_input.send_keys("18500000005")
        el_pw_input = driver.find_element_by_id("com.dangdang.reader:id/password_et")
        el_pw_input.clear()
        el_pw_input.send_keys("111111")
        # Problem observed: the soft keyboard stays open after typing and
        # covers the login button, so press BACK (keycode 4) to close it.
        driver.press_keycode(4)
        driver.find_element_by_id("com.dangdang.reader:id/login_tv").click()
| [
"396167189@qq.com"
] | 396167189@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.