blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
badbe852403432f331c43df348034c334437b5f2 | 470bb48b6b51ff8a1bc58e71138bbddf7613b80e | /homework_12/django_hw_12/app_hw_12/migrations/0002_auto_20210201_2348.py | b92b909e27e179d0263dfd3e673b8661aab5e184 | [] | no_license | ParamonovED/Epam_HW | 2cfbce640606b94b9b01452873771352736becaf | d0236953fd3f9ef6331b296f6e957e3ae2682a3a | refs/heads/main | 2023-02-24T02:42:09.864851 | 2021-02-01T20:51:40 | 2021-02-01T20:51:40 | 308,440,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # Generated by Django 3.1.6 on 2021-02-01 20:48
from django.db import migrations
def add_data(apps, schema_editor):
# We can't import the Person model directly as it may be a newer
# version than this migration expects. We use the historical version.
Teacher = apps.get_model("app_hw_12", "Teacher")
for teacher in Teacher.objects.all():
teacher.first_name = "%s %s" % (teacher.first_name, teacher.last_name)
teacher.save()
Student = apps.get_model("app_hw_12", "Student")
for student in Student.objects.all():
student.first_name = "%s %s" % (student.first_name, student.last_name)
student.save()
class Migration(migrations.Migration):
dependencies = [
("app_hw_12", "0001_initial"),
]
operations = [
migrations.RunPython(add_data),
]
| [
"paramon.j.e.k@gmail.com"
] | paramon.j.e.k@gmail.com |
4bb74522ced3120c01f2d2ea523ce811c9d17da1 | e285da2905116d3624f13bd703baabcf22cce12c | /scripts/pyvmomi_to_ruby/tests/pyVmomi/test_create_enum_type.py | c71b59ac7e1919fd1c1d63e8391e74f28b092eff | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | cloudfoundry/bosh-vsphere-cpi-release | 4b3bd5f1f5ea0ebb8fb63a65035e8f24a86fcdfc | 229ea7656e7677ef799c7f8b2c41de4c5c8ed7e6 | refs/heads/master | 2023-08-20T00:48:07.923152 | 2023-08-19T10:03:00 | 2023-08-19T10:04:34 | 33,261,686 | 17 | 27 | Apache-2.0 | 2023-09-05T18:11:42 | 2015-04-01T17:21:21 | Ruby | UTF-8 | Python | false | false | 373 | py | from io import StringIO
import unittest
from pyVmomi.VmomiSupport import CreateEnumType
class TestCreateEnumType(unittest.TestCase):
def setUp(self):
self.out = StringIO()
def test_mixed(self):
CreateEnumType('a', 'b', 'c', None, self.out)
self.assertEqual(self.out.getvalue(),
' create_enum_type("a", "b", "c", nil)\n')
| [
"cdutra@pivotal.io"
] | cdutra@pivotal.io |
de8bd8f62c5129121078e34a97c016890096165b | b3ba0797b8f291bc4c220cbe2ab368bdab29fab7 | /tylerslist/models/posts.py | 2b4c0af07266161612f7bfd33c1febf8beb37c03 | [] | no_license | skyClutch/internets | a4afe561bef72dc4a390c5c5190b755c45dab824 | 7dcbde45f19d5e46b39917fd7f0bbd36c7a23330 | refs/heads/master | 2021-05-30T15:15:44.286566 | 2015-09-23T03:37:52 | 2015-09-23T03:37:52 | 41,172,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | from db import db
class Post(db.Model):
__tablename__='posts'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255), index=True, nullable=False)
body = db.Column(db.Text(length=10000), index=True, nullable=False)
price = db.Column(db.Integer, nullable=False)
category_id = db.Column(db.Integer, db.ForeignKey('categories.id'))
email = db.Column(db.String(255), nullable=False)
token = db.Column(db.String(255), nullable=False)
filename = db.Column(db.String(255))
def update(self, *args, **kwargs):
try:
self.title = kwargs['title']
self.body = kwargs['body']
self.price = kwargs['price']
self.email = kwargs['email']
db.session.add(self)
db.session.commit()
except:
db.session.rollback()
@classmethod
def create(cls, *args, **kwargs):
try:
# title = kwargs['title']
# body = kwargs['body']
# category_id = kwargs['category_id']
# email = kwargs['email']
# price = kwargs['price']
# token = kwargs['token']
# post = Post(title=title, body=body, category_id=category_id, email=email, price=price, token=token)
post = Post(**kwargs)
db.session.add(post)
db.session.commit()
except:
db.session.rollback()
return post
| [
"TylerProbst@Tylers-MacBook-Pro.local"
] | TylerProbst@Tylers-MacBook-Pro.local |
6de76f68d1ad4c8a182a4c4c1c804d1000e205de | 20158a42bfe7006760d2bcf51bec38457652a813 | /main.py | 03ff8dbe1d3e75e727d9c286294d7bdbcabb11a4 | [] | no_license | CaoKaiK/home_automation | cd6dc8bd43e9835d0730229e1d8cb3286e8cfd09 | fe2628c9515b81824bef0952aad3de81d1c89b70 | refs/heads/master | 2022-12-15T05:48:30.985401 | 2021-04-22T18:53:16 | 2021-04-22T18:53:16 | 254,191,646 | 0 | 0 | null | 2022-12-08T04:00:56 | 2020-04-08T20:18:58 | CSS | UTF-8 | Python | false | false | 193 | py | from app import create_app, db
from app.models import Room, Thing
app = create_app()
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'Room': Room, 'Thing': Thing} | [
"niklas.kueppers@rub.de"
] | niklas.kueppers@rub.de |
be23e2ced130d86cf9ab578d7d7559a29ad7b2ea | a2c7a28a852f34677961504b2b7ee99974e5768d | /tracking/bspline.py | 347173329b136e5d9eb3ddf95bb7fffa46226525 | [] | no_license | sud1312/eye-control | f6e3e953149af57e943b6906bb975510e0052809 | 0f9d2edd521a9a1196ade66d9214473924691c58 | refs/heads/master | 2021-01-19T05:03:29.142170 | 2015-07-09T11:49:31 | 2015-07-09T11:49:31 | 38,813,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | # adapted from:
# http://stackoverflow.com/questions/12643079/bezier-curve-fitting-with-scipy/14125828
import numpy as np
from scipy.misc import comb
class Bezier(object):
def __init__(self):
super(Bezier, self).__init__()
pass
# The Bernstein polynomial of n, i as a function of t
def bernstein_poly(self, i, n, t):
return comb(n, i) * ( t**(n-i) ) * (1 - t)**i
# Given a set of control points, return the bezier curve defined by the control points.
# points should be a flat list
# such as [1,1, 2,2, 3,3, ...Xn,Yn]
# nTimes is the number of time steps, defaults to 1000
def bezier_curve(self, points, nTimes=1000):
nPoints = len(points) / 2
t = np.linspace(0.0, 1.0, nTimes)
polynomial_array = np.array([self.bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints)])
nPoints *= 2
l1 = []
l2 = []
for i in range(0, nPoints, 2):
l1.append(points[i])
l2.append(points[i+1])
xvals = np.dot(np.array(l1), polynomial_array)
yvals = np.dot(np.array(l2), polynomial_array)
l1 = []
for i in range(0, nTimes, 2):
l1.append(xvals[i])
l1.append(yvals[i+1])
return l1
def split_pairs(self,l):
l1 = []
l2 = []
for i in range(0, len(l), 2):
l1.append(l[i])
l2.append(l[i+1])
return l1, l2
if __name__ == '__main__':
from matplotlib import pyplot as plt
points = [155.95185909, 4.64042794, 57.74101074, 138.1875438, 25.93409495, 81.44692581,21.16373321, 184.4609643]
b = Bezier()
bezier = b.bezier_curve(points, nTimes=1000)
# split_pairs is a utility to convert to plt.plot's funky format
xpoints, ypoints = b.split_pairs(points)
xvals, yvals = b.split_pairs(bezier)
plt.plot(xvals, yvals)
plt.plot(xpoints, ypoints, "ro")
for nr in range(len(xpoints)):
plt.text(xpoints[nr], ypoints[nr], nr)
plt.show()
| [
"smouldering.dog@b4fd473d-8a18-f549-4102-3209dafef2b4"
] | smouldering.dog@b4fd473d-8a18-f549-4102-3209dafef2b4 |
cce4b96a715d43f53534c19733cad518beb38e8e | 0d5c77661f9d1e6783b1c047d2c9cdd0160699d1 | /python/paddle/fluid/tests/test_lod_tensor.py | f7a9dd4129027417a06a6c25ff9a801fff259c5e | [
"Apache-2.0"
] | permissive | xiaoyichao/anyq_paddle | ae68fabf1f1b02ffbc287a37eb6c0bcfbf738e7f | 6f48b8f06f722e3bc5e81f4a439968c0296027fb | refs/heads/master | 2022-10-05T16:52:28.768335 | 2020-03-03T03:28:50 | 2020-03-03T03:28:50 | 244,155,581 | 1 | 0 | Apache-2.0 | 2022-09-23T22:37:13 | 2020-03-01T13:36:58 | C++ | UTF-8 | Python | false | false | 4,649 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor
import numpy as np
import unittest
class TestLoDTensor(unittest.TestCase):
def test_pybind_recursive_seq_lens(self):
tensor = fluid.LoDTensor()
recursive_seq_lens = []
tensor.set_recursive_sequence_lengths(recursive_seq_lens)
recursive_seq_lens = [[], [1], [3]]
self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
recursive_seq_lens)
recursive_seq_lens = [[0], [2], [3]]
self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
recursive_seq_lens)
recursive_seq_lens = [[1, 2, 3]]
tensor.set_recursive_sequence_lengths(recursive_seq_lens)
self.assertEqual(tensor.recursive_sequence_lengths(),
recursive_seq_lens)
tensor.set(np.random.random([6, 1]), fluid.CPUPlace())
self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
# Each level's sum should be equal to the number of items in the next level
# Moreover, last level's sum should be equal to the tensor height
recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 2]]
tensor.set_recursive_sequence_lengths(recursive_seq_lens)
self.assertEqual(tensor.recursive_sequence_lengths(),
recursive_seq_lens)
tensor.set(np.random.random([8, 1]), fluid.CPUPlace())
self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 1]]
tensor.set_recursive_sequence_lengths(recursive_seq_lens)
self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
def test_create_lod_tensor(self):
# Create LoDTensor from a list
data = [[1, 2, 3], [3, 4]]
wrong_recursive_seq_lens = [[2, 2]]
correct_recursive_seq_lens = [[3, 2]]
self.assertRaises(AssertionError, create_lod_tensor, data,
wrong_recursive_seq_lens, fluid.CPUPlace())
tensor = create_lod_tensor(data, correct_recursive_seq_lens,
fluid.CPUPlace())
self.assertEqual(tensor.recursive_sequence_lengths(),
correct_recursive_seq_lens)
# Create LoDTensor from numpy array
data = np.random.random([10, 1])
recursive_seq_lens = [[2, 1], [3, 3, 4]]
tensor = create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace())
self.assertEqual(tensor.recursive_sequence_lengths(),
recursive_seq_lens)
# Create LoDTensor from another LoDTensor, they are differnt instances
new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]]
new_tensor = create_lod_tensor(tensor, new_recursive_seq_lens,
fluid.CPUPlace())
self.assertEqual(tensor.recursive_sequence_lengths(),
recursive_seq_lens)
self.assertEqual(new_tensor.recursive_sequence_lengths(),
new_recursive_seq_lens)
def test_create_random_int_lodtensor(self):
# The shape of a word, commonly used in speech and NLP problem, is [1]
shape = [1]
recursive_seq_lens = [[2, 3, 5]]
dict_size = 10000
low = 0
high = dict_size - 1
tensor = create_random_int_lodtensor(recursive_seq_lens, shape,
fluid.CPUPlace(), low, high)
self.assertEqual(tensor.recursive_sequence_lengths(),
recursive_seq_lens)
self.assertEqual(tensor.shape(), [10, 1])
if __name__ == '__main__':
unittest.main()
| [
"xiaoyichao@haohaozhu.com"
] | xiaoyichao@haohaozhu.com |
298765993ed5182b47c72e02ed6cfac278fe1189 | 7d88a4787246e41adba39b4313380ea4f2f08f5a | /test/regression/features/dictionaries/dict_setitem_new.py | 68e8c36e6b05c25cf8aaef18b48058ee8405915f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | powergun/berp | cd66bdc6e2ad084df119d6d55742fc1e1efdfc06 | 30925288376a6464695341445688be64ac6b2600 | refs/heads/master | 2020-06-24T23:34:07.519644 | 2014-06-11T03:36:35 | 2014-06-11T03:36:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | d = {'itchy' : 'knee'}
d['scratchy'] = 'elbow'
print(d)
| [
"florbitous@gmail.com"
] | florbitous@gmail.com |
bac612e62bec9e76a649f5be726257ddc8ce1646 | 1395576291c1e8b34981dbcbfcd0fdda020083b8 | /dist_cts/dist_fleet/thirdparty/simnet_bow/dataset_generator.py | 4e3b7a3565639c5154043bbc8179ef3b0a6d635f | [] | no_license | gentelyang/scripts | a8eb8a3cc5cc5bac753f1bb12033afaf89f03404 | e3562ab40b574f06bba68df6895a055fa31a085d | refs/heads/master | 2023-06-06T12:38:37.002332 | 2021-06-15T05:09:06 | 2021-06-15T05:09:06 | 262,957,519 | 0 | 4 | null | 2021-01-10T08:28:11 | 2020-05-11T06:28:08 | Python | UTF-8 | Python | false | false | 2,500 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# @Time : 2019-09-26 17:30
# @Author : liyang109
from __future__ import print_function
import paddle.fluid.incubate.data_generator as dg
import random
class PairwiseReader(dg.MultiSlotDataGenerator):
def init_reader(self, max_len, sampling_rate):
# np.random.seed(1)
self.max_len = max_len
self.sampling_rate = sampling_rate
self.query_buffer = None
self.pos_title_buffer = None
self.neg_title_buffer = None
def infer_reader(self, infer_filelist, batch, buf_size):
def local_iter():
for fname in infer_filelist:
with open(fname, "r") as fin:
for line in fin:
items = line.strip("\t\n").split(";")
pos_num, neg_num = [int(i) for i in items[1].split(" ")]
query = [int(j) for j in items[2].split(" ")]
for i in range(pos_num):
for j in range(neg_num):
pos_title_int = [int(x) for x in items[3 + i].split(" ")]
neg_title_int = [int(x) for x in items[3 + pos_num + j].split(" ")]
yield query, pos_title_int, neg_title_int
import paddle
batch_iter = paddle.batch(
paddle.reader.shuffle(local_iter, buf_size=buf_size),
batch_size=batch)
return batch_iter
def generate_sample(self, line):
def get_rand(low=0.0, high=1.0):
return random.random()
def pairwise_iterator():
items = line.strip("\t\n").split(";")
pos_num, neg_num = [int(i) for i in items[1].split(" ")]
query = [int(j) for j in items[2].split(" ")]
for i in range(pos_num):
for j in range(neg_num):
prob = get_rand()
if prob < self.sampling_rate:
pos_title_int = [int(x) for x in items[3 + i].split(" ")]
neg_title_int = [int(x) for x in items[3 + pos_num + j].split(" ")]
yield ("query", query), \
("pos_title", pos_title_int), \
("neg_title", neg_title_int)
return pairwise_iterator
if __name__ == "__main__":
pairwise_reader = PairwiseReader()
pairwise_reader.init_reader(10000, 0.02)
pairwise_reader.run_from_stdin() | [
"liyang109@baidu.com"
] | liyang109@baidu.com |
b8620b100b32bf9b598d2c20ff934d8f061afa39 | 7812b9ccb1d4499ec93f989b155f1a623c52dcec | /beginner_level/project_3_making_right_decision_to_get_the_treasure.py | fa795e9ec3fe6fc2497d84284bd565756bceb15a | [] | no_license | DSajaykashyap/my_python_learning | caac1bc8776551fe66366ec514d7bc46ed7e45b8 | b6090e77b4ff305ff53b145def815d0fc582d884 | refs/heads/main | 2023-02-21T15:57:25.842631 | 2021-01-28T17:49:20 | 2021-01-28T17:49:20 | 332,668,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,589 | py | print('''
*******************************************************************************
| | | |
_________|________________.=""_;=.______________|_____________________|_______
| | ,-"_,="" `"=.| |
|___________________|__"=._o`"-._ `"=.______________|___________________
| `"=._o`"=._ _`"=._ |
_________|_____________________:=._o "=._."_.-="'"=.__________________|_______
| | __.--" , ; `"=._o." ,-"""-._ ". |
|___________________|_._" ,. .` ` `` , `"-._"-._ ". '__|___________________
| |o`"=._` , "` `; .". , "-._"-._; ; |
_________|___________| ;`-.o`"=._; ." ` '`."\` . "-._ /_______________|_______
| | |o; `"-.o`"=._`` '` " ,__.--o; |
|___________________|_| ; (#) `-.o `"=.`_.--"_o.-; ;___|___________________
____/______/______/___|o;._ " `".o|o_.--" ;o;____/______/______/____
/______/______/______/_"=._o--._ ; | ; ; ;/______/______/______/_
____/______/______/______/__"=._o--._ ;o|o; _._;o;____/______/______/____
/______/______/______/______/____"=._o._; | ;_.--"o.--"_/______/______/______/_
____/______/______/______/______/_____"=.o|o_.--""___/______/______/______/____
/______/______/______/______/______/______/______/______/______/______/_____ /
*******************************************************************************
''')
print("Welcome to Treasure Island.")
print("Your mission is to find the treasure.")
choice1 = input('You\'re at a cross road. Where do you want to go? Type "left" or "right" \n').lower() #lower_casing the user input
if choice1 == "left":
choice2 = input('You\'ve come to a lake. There is an island in the middle of the lake. Type "wait" to wait for a boat. Type "swim" to swim across. \n').lower()
if choice2 == "wait":
choice3 = input("You arrive at the island unharmed. There is a house with 3 doors. One red, one yellow and one blue. Which colour do you choose? \n").lower()
if choice3 == "red":
print("It's a room full of fire. Game Over.")
elif choice3 == "yellow":
print("You found the treasure! You Win!")
elif choice3 == "blue":
print("You enter a room of beasts. Game Over.")
else:
print("You chose a door that doesn't exist. Game Over.")
else:
print("You get attacked by an angry trout. Game Over.")
else:
print("You fell into a hole. Game Over.") | [
"noreply@github.com"
] | noreply@github.com |
abcb6982c6f5dd12b149025215077f5e7fde1359 | 1959350ca45f43806e925907c298cfae2f3f355f | /test/programytest/parser/pattern/nodes_tests/test_root.py | eca847aa29bdf9535f4c7d605bb69293e405357c | [
"MIT"
] | permissive | tomliau33/program-y | 8df17ff4078a0aa292b775ef869930d71843682a | 30a3715c8501b4c2f1b4de698b679cb4bac168b1 | refs/heads/master | 2021-09-06T01:56:08.053131 | 2018-01-31T15:10:10 | 2018-01-31T15:10:10 | 114,656,850 | 0 | 0 | null | 2018-02-01T13:27:30 | 2017-12-18T15:27:54 | Python | UTF-8 | Python | false | false | 3,716 | py | from programytest.parser.pattern.base import PatternTestBaseClass
from programy.parser.exceptions import ParserException
from programy.parser.pattern.nodes.word import PatternWordNode
from programy.parser.pattern.nodes.base import PatternNode
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.pattern.nodes.template import PatternTemplateNode
from programy.parser.pattern.nodes.root import PatternRootNode
from programy.parser.pattern.nodes.topic import PatternTopicNode
from programy.parser.pattern.nodes.that import PatternThatNode
class PatternRootNodeTests(PatternTestBaseClass):
def test_init(self):
node = PatternRootNode()
self.assertIsNotNone(node)
self.assertTrue(node.is_root())
self.assertFalse(node.is_priority())
self.assertFalse(node.is_wildcard())
self.assertFalse(node.is_zero_or_more())
self.assertFalse(node.is_one_or_more())
self.assertFalse(node.is_set())
self.assertFalse(node.is_bot())
self.assertFalse(node.is_template())
self.assertFalse(node.is_that())
self.assertFalse(node.is_topic())
self.assertFalse(node.is_wildcard())
self.assertIsNotNone(node.children)
self.assertFalse(node.has_children())
self.assertTrue(node.equivalent(PatternRootNode()))
self.assertEqual(node.to_string(), "ROOT [P(0)^(0)#(0)C(0)_(0)*(0)To(0)Th(0)Te(0)]")
node.add_child(PatternNode())
self.assertEqual(len(node.children), 1)
self.assertEqual(node.to_string(), "ROOT [P(0)^(0)#(0)C(1)_(0)*(0)To(0)Th(0)Te(0)]")
def test_multiple_roots(self):
node1 = PatternRootNode()
node2 = PatternRootNode()
with self.assertRaises(ParserException) as raised:
node1.can_add(node2)
self.assertTrue(str(raised.exception).startswith("Cannot add root node to existing root node"))
def test_root_added_to_child(self):
node1 = PatternWordNode("test")
node2 = PatternRootNode()
with self.assertRaises(ParserException) as raised:
node1.can_add(node2)
self.assertTrue(str(raised.exception).startswith("Cannot add root node to child node"))
def test_root_to_root(self):
node1 = PatternRootNode()
node2 = PatternRootNode()
with self.assertRaises(ParserException) as raised:
node1.can_add(node2)
self.assertEqual(str(raised.exception), "Cannot add root node to existing root node")
def test_template_to_root(self):
node1 = PatternRootNode()
node2 = PatternTemplateNode(TemplateNode())
with self.assertRaises(ParserException) as raised:
node1.can_add(node2)
self.assertEqual(str(raised.exception), "Cannot add template node to root node")
def test_topic_to_root(self):
node1 = PatternRootNode()
node2 = PatternTopicNode()
with self.assertRaises(ParserException) as raised:
node1.can_add(node2)
self.assertEqual(str(raised.exception), "Cannot add topic node to root node")
def test_that_to_root(self):
node1 = PatternRootNode()
node2 = PatternThatNode()
with self.assertRaises(ParserException) as raised:
node1.can_add(node2)
self.assertEqual(str(raised.exception), "Cannot add that node to root node")
def test_multiple_templates(self):
node1 = PatternTemplateNode(TemplateNode())
node2 = PatternTemplateNode(TemplateNode())
with self.assertRaises(ParserException) as raised:
node1.can_add(node2)
self.assertEqual(str(raised.exception), "Cannot add template node to template node")
| [
"keith@keithsterling.com"
] | keith@keithsterling.com |
2dcf69f7efbe7801f01716a225dd543da5afeafb | 246fc123d539e150ae9e82720f843b1ab5890532 | /frolia.py | b1f30602040e81720beadcf2c613fc84dc37889c | [] | no_license | Raeshmithaa/guvi | 3a5edd0b33d7971eae1a73aa4d543eb8f0a0aec2 | cd20cda104b409aba23dcb15210ab9285fc946fc | refs/heads/master | 2020-05-31T06:39:44.349687 | 2019-07-26T10:30:46 | 2019-07-26T10:30:46 | 190,146,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | a=(input("enter the value"))
if(a>='a' and a<='z')or(a>='A' and a<='Z'):
print("Alphapet")
else:
print(" not Alphapet")
| [
"noreply@github.com"
] | noreply@github.com |
d31e51d2b11c4e543c1be1663b64cb279e600790 | f4dcb14111539e9a22300256fd6f8fefc61f2d50 | /src/flua/Compiler/ExpressionParser/ExpressionParser.py | 91fe2cd96730b30e01ee0ff76e858514d3fe80a2 | [] | no_license | GWRon/flua | 276c3ea4ce1cfcf68a1000fb44512460b5161c4e | 1cf051f1d5aec3ba4da48442a0d7257d399e5b36 | refs/heads/master | 2021-01-15T17:37:03.914965 | 2012-10-24T12:57:27 | 2012-10-24T12:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,234 | py | ####################################################################
# Header
####################################################################
# Expression parser
####################################################################
# License
####################################################################
# (C) 2008 Eduard Urbach
#
# This file is part of Flua.
#
# Flua is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Flua is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Flua. If not, see <http://www.gnu.org/licenses/>.
####################################################################
# Imports
####################################################################
from flua.Compiler.Utils import *
####################################################################
# Classes
####################################################################
class Operator:
UNARY = 1
BINARY = 2
TERNARY = 3
def __init__(self, text, name, dataType):
self.text = text
self.textLen = len(text)
self.name = name
self.type = dataType
class OperatorLevel:
def __init__(self):
self.operators = []
def addOperator(self, op):
self.operators.append(op)
class ExpressionParser:
def __init__(self):
self.operatorLevels = []
#self.recursionLevel = 0
self.doc = parseString("<expr></expr>")
def compileError(self, error):
raise CompilerException(error)
def addOperatorLevel(self, opLevel):
self.operatorLevels.append(opLevel)
def getOperatorName(self, opSign, opType):
# For every operator level
for opLevel in self.operatorLevels:
# For every operator in the current level
for op in opLevel.operators:
if op.text == opSign and op.type == opType:
return op.name
return ""
def similarOperatorExists(self, op2):
# For every operator level
for opLevel in self.operatorLevels:
# For every operator in the current level
for op in opLevel.operators:
if op != op2 and op.text.find(op2.text) != -1:
return True
return False
#def getDebugPrefix(self):
# return " " * self.recursionLevel
def buildCleanExpr(self, expr):
#self.recursionLevel += 1
l = len
#print(expr)
#expr = expr.replace(" ", "")
# Identifier + Space + Identifier = Invalid instruction
exprLen = l(expr)
for i in range(exprLen):
if expr[i] == ' ':
# TODO: Handle '([{' and ')]}' correctly
if i + 1 < exprLen and i >= 1 and isVarChar(expr[i - 1]) and isVarChar(expr[i + 1]): #and expr[i + 1:i + 11] != "flua_string_":
raise CompilerException("Operator missing: %s" % (expr[:i].strip() + " ↓ " + expr[i+1:].strip()))
expr = expr.replace(" ", "")
exprLen = l(expr)
#if exprLen == 1 and not isVarChar(expr):
# raise CompilerException("Invalid expression: „%s“" % (expr))
i = 0
lastOccurence = 0
start = 0
end = 0
bracketCounter = 0
operators = None
operandLeft = ""
operandRight = ""
char = ''
#print(" * buildCleanExpr: " + expr)
# For every operator level
for opLevel in self.operatorLevels:
i = 0
while i < exprLen:
operators = opLevel.operators
# For every operator in the current level
for op in operators:
if i < exprLen - op.textLen and expr[i:i+op.textLen] == op.text:
lastOccurence = i
else:
lastOccurence = -1
if lastOccurence is not -1:
if lastOccurence == exprLen - 1:
raise CompilerException("Missing operand")
if op.text == "§":
if lastOccurence == 0:
raise CompilerException("Can't start a template expression at the beginning of an expression in „%s“" % (expr))
if not isVarChar(expr[lastOccurence - 1]):
raise CompilerException("You can't use a template expression without specifying an actual class in „%s“" % (expr))
if isVarChar(expr[lastOccurence + op.textLen]) or expr[lastOccurence + op.textLen] == '(' or op.text == '(' or expr[lastOccurence + op.textLen] == '[' or op.text == '[':
if op.type == Operator.BINARY:
# Left operand
start = lastOccurence - 1
while start >= 0 and (isVarChar(expr[start]) or ((expr[start] == ')' or expr[start] == ']') and start == lastOccurence - 1)):
if expr[start] == ')' or expr[start] == ']':
bracketCounter = 1
else:
bracketCounter = 0
# Move to last part of the bracket
while bracketCounter > 0 and start > 0:
start -= 1
if expr[start] == ')' or expr[start] == ']':
bracketCounter += 1
elif expr[start] == '(' or expr[start] == '[':
bracketCounter -= 1
start -= 1
operandLeft = expr[start+1:lastOccurence]
# Right operand
end = lastOccurence + op.textLen
if op.text == '[' or op.text == '(' or (expr[end] == '(' and end == lastOccurence + op.textLen) or (expr[end] == '[' and end == lastOccurence + op.textLen):
bracketCounter = 1
else:
bracketCounter = 0
while end < exprLen and (bracketCounter > 0 or isVarChar(expr[end]) or (end == lastOccurence + op.textLen and (expr[end] == '(' or expr[end] == '['))):
# Move to last part of the bracket
while bracketCounter > 0 and end < exprLen:
if expr[end] == '(' or expr[end] == '[':
bracketCounter += 1
elif expr[end] == ')' or expr[end] == ']':
bracketCounter -= 1
if bracketCounter == 1 and op.text != '[' and op.text != '(':
end -= 1
bracketCounter = 0
elif bracketCounter == 0: # and expr[lastOccurence + op.textLen] != '(' and expr[lastOccurence + op.textLen] != '[':
end -= 2
end += 1
end += 1
operandRight = expr[lastOccurence + op.textLen:end]
#if (not operandLeft) or (not operandRight):
# raise CompilerException("Invalid expression: „%s“" % (expr))
#if exprLen == 1 and not isVarChar(expr):
# raise CompilerException("Invalid expression: „%s“" % (expr))
# Perform "no digits at the start of an identifier" check for the left operator
if operandLeft:
operandLeftStartsWithDigit = operandLeft[0].isdigit()
if operandLeftStartsWithDigit:
for c in operandLeft:
if not c.isdigit():
if isVarChar(c):
if operandLeft[0] != '0' or operandLeft[1] != 'x':
raise CompilerException("Identifiers must not begin with a digit: „%s“" % (operandLeft))
else:
break
# Array slicing?
else:
#print("OP LEFT MISSING:")
#print(operandLeft)
#print(operandRight)
#print(op.text)
#print(expr)
#print("----------")
if op.text == ':':
operandLeft = "_flua_slice_start"
expr = "%s%s%s" % (expr[:lastOccurence], operandLeft, expr[lastOccurence:])
exprLen = l(expr)
opLeftLen = l(operandLeft)
lastOccurence += opLeftLen
start = lastOccurence - opLeftLen
end += opLeftLen
# Perform "no digits at the start of an identifier" check for the right operator
if operandRight:
operandRightStartsWithDigit = operandRight[0].isdigit()
if operandRightStartsWithDigit:
for c in operandRight:
if not c.isdigit():
if isVarChar(c):
if operandRight[0] != '0' or operandRight[1] != 'x':
raise CompilerException("Identifiers must not begin with a digit: „%s“" % (operandRight))
else:
break
if op.text != "#" and operandRight == "()":
raise CompilerException("Invalid right operand in „%s“" % (expr))
# TODO: Allow lists
if operandRight == "[]":
raise CompilerException("Invalid right operand in „%s“" % (expr))
#if op.text != "(":
# if (operandRight and operandRight[0].isdigit() and not operandRight.isdigit()):
# raise CompilerException("ERZA RIGHT %s %s %s" % (operandLeft, op.text, operandRight))
# if op.text == "&&":
# print(">> " + operandLeft + " AND " + operandRight)
# print(expr)
# print(lastOccurence)
# print(end)
# print(expr[lastOccurence:end])
# print(bracketCounter)
#print(self.getDebugPrefix() + " * buildCleanExpr.operators: " + operandLeft + " [" + op.text + "] " + operandRight)
# Bind
#===================================================
# #=======================================================
# print(expr)
# if start >= 0:
# print("START[" + str(start) + "]: " + expr[start])
# else:
# print("START: " + "OUT OF STRING")
#
# if end < exprLen:
# print("END[" + str(end) + "]: " + expr[end])
# else:
# print("END: " + "OUT OF STRING")
# #=======================================================
#===================================================
if operandLeft and (operandRight and ((start < 0 or expr[start] != '(') or (end >= exprLen or expr[end] != ')')) or op.text == "("):
if op.text == "(":
newOpText = "#"
expr = "%s(%s)%s(%s)%s" % (expr[:lastOccurence - l(operandLeft)], operandLeft, newOpText, operandRight, expr[lastOccurence + op.textLen + l(operandRight) + 1:])
elif op.text == "[":
newOpText = "@"
expr = "%s(%s)%s(%s)%s" % (expr[:lastOccurence - l(operandLeft)], operandLeft, newOpText, operandRight, expr[lastOccurence + op.textLen + l(operandRight) + 1:])
else:
expr = "%s(%s%s%s)%s" % (expr[:lastOccurence - l(operandLeft)], operandLeft, op.text, operandRight, expr[lastOccurence + op.textLen + l(operandRight):])
exprLen = l(expr)
#print(self.getDebugPrefix() + " * Expression changed: " + expr)
#else:
# pass
#print(self.getDebugPrefix() + " * Expression change denied for operator: [" + op.text + "]")
# Unary operators
elif op.type == Operator.UNARY and (lastOccurence <= 0 or (isVarChar(expr[lastOccurence - 1]) == False and expr[lastOccurence - 1] != ')')):
#print("Unary check for operator [" + op.text + "]")
#print(" Unary.lastOccurence: " + str(lastOccurence))
#print(" Unary.expr[lastOccurence - 1]: " + expr[lastOccurence - 1])
#print(" Unary.isVarChar(expr[lastOccurence - 1]): " + str(isVarChar(expr[lastOccurence - 1])))
# Right operand
end = lastOccurence + op.textLen
while end < exprLen and (isVarChar(expr[end]) or ((expr[end] == '(' or expr[end] == '[') and end == lastOccurence + 1)):
if (expr[end] == '(' or expr[end] == '[') and end == lastOccurence + 1:
bracketCounter = 1
else:
bracketCounter = 0
# Move to last part of the bracket
while bracketCounter > 0 and end < exprLen-1:
end += 1
if expr[end] == '(' or expr[end] == '[':
bracketCounter += 1
elif expr[end] == ')' or expr[end] == ']':
bracketCounter -= 1
end += 1
operandRight = expr[lastOccurence+op.textLen:end]
#print("[" + op.text + "] " + operandRight)
start = lastOccurence - 1
if (start < 0 or expr[start] != '(') or (end >= exprLen or expr[end] != ')'):
expr = "%s(%s%s)%s" % (expr[:lastOccurence], op.text, operandRight, expr[lastOccurence + op.textLen + l(operandRight):])
exprLen = l(expr)
lastOccurence += 1
#print("EX.UNARY: " + expr)
#else:
# pass
#print("EX.UNARY expression change denied: [" + op.text + "]")
else:
# If a binary version does not exist it means the operator has been used incorrectly
if not self.similarOperatorExists(op):
raise CompilerException("Syntax error concerning the unary operator [" + op.text + "]")
elif expr[lastOccurence + op.textLen] != '(' and expr[lastOccurence + op.textLen] != '[':
if not self.similarOperatorExists(op):
#print(expr)
#print(expr[lastOccurence + op.textLen])
#print(op.text)
# Array slicing for the right operator?
if expr[lastOccurence + op.textLen] == ')' and expr[lastOccurence - len(operandRight) + 1] == '@':
#print(operandLeft)
expr = "%s_flua_slice_end%s" % (expr[:lastOccurence + op.textLen], expr[lastOccurence + op.textLen:])
exprLen = l(expr)
else:
raise CompilerException("Operator [" + op.text + "] expects a valid expression (encountered '" + expr[lastOccurence + op.textLen] + "')")
#lastOccurence = expr.find(op.text, lastOccurence + op.textLen)
i += op.textLen
else:
i += 1
#self.recursionLevel -= 1
return expr
def buildOperation(self, expr):
    """Recursively convert a canonical, bracketed expression string into a DOM node.

    `expr` is expected in the form produced by buildCleanExpr, where every
    binary operation has been wrapped in brackets.  Returns a plain text
    node for a bare operand, or an element named after the operator with
    up to two <value> children (left value omitted for unary operators).
    Raises CompilerException when a required right operand is missing.
    """
    #self.recursionLevel += 1
    # Local vars for faster lookup
    l = len
    getOperatorName = self.getOperatorName
    buildOperation = self.buildOperation
    createElement = self.doc.createElement
    createTextNode = self.doc.createTextNode
    # Debug info
    #print(self.getDebugPrefix() + " * buildOperation.dirty: " + expr)
    # Remove unnecessary brackets
    # Strip one pair of redundant outer brackets per pass, but only when a
    # scan confirms the leading '(' really matches the trailing ')'.
    bracketCounter = 0
    i = l(expr)
    while expr and expr[0] == '(' and expr[l(expr)-1] == ')' and bracketCounter == 0 and i == l(expr):
        bracketCounter = 1
        i = 1
        while i < l(expr) and (bracketCounter > 0 or expr[i] == ')'):
            if expr[i] == '(':
                bracketCounter += 1
            elif expr[i] == ')':
                bracketCounter -= 1
            i += 1
        if bracketCounter == 0 and i == l(expr):
            expr = expr[1:l(expr)-1]
            #print("NEW EXPR: " + expr)
            # In order to continue the loop: Adjust i
            i = l(expr)
    #print(" * buildOperation.clean: " + expr)
    # Left operand
    # Scan identifier characters and bracketed groups from the start; if the
    # whole string is consumed there is no operator and expr is a leaf.
    bracketCounter = 0
    i = 0
    while i < l(expr) and (isVarChar(expr[i]) or expr[i] == '('):
        while i < l(expr) and (bracketCounter > 0 or expr[i] == '('):
            if expr[i] == '(' or expr[i] == '[':
                bracketCounter += 1
            elif expr[i] == ')' or expr[i] == ']':
                bracketCounter -= 1
                if bracketCounter == 0:
                    break
            i += 1
        i += 1
    if i == l(expr):
        #self.recursionLevel -= 1
        return createTextNode(expr)
    leftOperand = expr[:i]
    opIndex = i
    # Operator
    # The operator is the run of non-identifier, non-'(' characters that follows.
    opIndexEnd = opIndex
    while opIndexEnd < l(expr) and not isVarChar(expr[opIndexEnd]) and not expr[opIndexEnd] == '(':
        opIndexEnd += 1
    operator = expr[opIndex:opIndexEnd]
    # An empty left operand means the operator must be unary.
    if leftOperand:
        opName = getOperatorName(operator, Operator.BINARY)
    else:
        opName = getOperatorName(operator, Operator.UNARY)
    if not opName:
        return self.doc.createTextNode(leftOperand)
    # Right operand
    bracketCounter = 0
    i = opIndex + l(operator)
    while i < l(expr) and (isVarChar(expr[i]) or expr[i] == '('):
        while bracketCounter > 0 or (i < l(expr) and expr[i] == '('):
            if expr[i] == '(':
                bracketCounter += 1
            elif expr[i] == ')':
                bracketCounter -= 1
                if bracketCounter == 0:
                    break
            i += 1
        i += 1
    rightOperand = expr[opIndex+l(operator):i]
    # Bracketed operands are parsed recursively (without their outer brackets);
    # everything else becomes a plain text node.
    leftOperandNode = None
    rightOperandNode = None
    if leftOperand and leftOperand[0] == '(':
        leftOperandNode = buildOperation(leftOperand[1:l(leftOperand)-1])
    else:
        leftOperandNode = createTextNode(leftOperand)
    if rightOperand and rightOperand[0] == '(':
        rightOperandNode = buildOperation(rightOperand[1:l(rightOperand)-1])
    else:
        rightOperandNode = createTextNode(rightOperand)
    #print("---")
    #print("OP: " + operator)
    #print(leftOperand)
    #print(rightOperand)
    #print("---")
    node = createElement(opName)
    lNode = createElement("value")
    rNode = createElement("value")
    # Unary operator
    # Only attach the left <value> when a left operand actually exists.
    if leftOperand:
        node.appendChild(lNode)
    node.appendChild(rNode)
    lNode.appendChild(leftOperandNode)
    rNode.appendChild(rightOperandNode)
    # if operator == "=" and leftOperandNode.nodeType == Node.TEXT_NODE:
    # if self.getCurrentScope().containsVariable(leftOperand):
    # pass
    # else:
    # #print("Variable declaration: " + leftOperand)
    # self.getCurrentScope().addVariable(GenericVariable(leftOperand, "Unknown"))
    # Right operand missing
    if not rightOperand:
        if operator == "=":
            raise CompilerException("You need to assign a valid value to „%s“" % leftOperand)
        raise CompilerException("Operator [" + operator + "] expects a second operator")
    #self.recursionLevel -= 1
    return node
def buildXMLTree(self, expr):
    """Translate a raw source expression into an XML operation tree.

    Normalises word operators (is not/and/or/is/in/not) to symbolic
    forms, canonicalises the string with buildCleanExpr, builds the DOM
    via buildOperation and post-processes it with adjustXMLTree.
    Raises CompilerException on an empty or malformed expression.
    """
    #print(" * buildXMLTree: " + expr)
    if not expr:
        raise CompilerException("Expression missing")
    if expr[0] != '~' and isDefinitelyOperatorSign(expr[0]) and not expr[0] == "-":
        raise CompilerException("Invalid expression: „%s“" % expr)
    #node = self.doc.createElement("expr")
    expr = expr.replace("\t", " ")
    # TODO: Remove double whitespaces
    # TODO: Check this:
    # " is not " must be rewritten before " is " below would eat its prefix.
    expr = expr.replace(" is not ", " != ")
    # Whitespaces are required!
    expr = expr.replace(" and ", " && ")
    expr = expr.replace(" or ", " || ")
    expr = expr.replace(" is ", " == ")
    expr = expr.replace(" in ", " }= ")
    # Copy
    expr = expr.replace("[:]", "[_flua_slice_start:_flua_slice_end]")
    #if expr.startswith("-"):
    #print("------------- MINUS -----------")
    # expr = "-(%s)" % expr[1:]
    # TODO: Optimize and correct this
    # The leading space lets the " not " patterns below also match a `not`
    # at the very beginning of the expression.
    expr = " " + expr
    expr = expr.replace(" not ", "!")
    expr = expr.replace(" not(", "!(")
    expr = expr.replace("(not", "(!")
    expr = expr.replace("[not", "[!")
    #if expr.startswith(" not") and len(expr) > 4 and not isVarChar(expr[4]):
    # expr = "!" + expr[4:]
    #print("buildXMLTree: " + expr)
    expr = self.buildCleanExpr(expr)
    #print(expr)
    opNode = self.buildOperation(expr)
    self.adjustXMLTree(opNode)
    #node.appendChild(opNode)
    return opNode#node.firstChild
def adjustXMLTree(self, node):
    """Recursively normalise the raw tree produced by buildOperation.

    Renames parameter containers, rewires call nodes, undoes float
    literals that were mis-parsed as member access, validates type
    declarations and converts index-with-range nodes into slice nodes.
    Mutates the tree in place.
    """
    # Adjust node
    if node.nodeType == Node.ELEMENT_NODE:
        if node.tagName == "separate":
            node.tagName = "parameters"
            # 'parameters' sub nodes
            child = node.firstChild
            while child is not None:
                child.tagName = "parameter"
                # TODO: Optimize this
                # Put all nested "separates" on the same level
                if child.hasChildNodes() and child.firstChild.nodeType == Node.ELEMENT_NODE and child.firstChild.tagName == "separate":
                    for param in child.firstChild.childNodes:
                        node.insertBefore(param.cloneNode(True), child)
                    node.removeChild(child)
                    # Restart from the first child since the list was rewritten.
                    child = node.firstChild
                    # 2
                    #oldChild = child
                    #child = node.firstChild
                    #node.removeChild(oldChild)
                    continue
                child = child.nextSibling
            # 'parameter' tag name
            #for child in node.childNodes:
            # child.tagName = "parameter"
        elif node.tagName == "call":
            # Object based method calls will be ignored for this test
            node.firstChild.tagName = "function"
            node.replaceChild(self.getParametersNode(node.childNodes[1].firstChild), node.childNodes[1])
            # Clean up whitespaces
            #for child in node.childNodes:
            # if child.nodeType == Node.TEXT_NODE:
            # node.removeChild(child)
        elif node.tagName == "access":
            # Correct float values being interpreted as access calls
            value1 = node.childNodes[0].childNodes[0]
            value2 = node.childNodes[1].childNodes[0]
            if value1.nodeType == Node.TEXT_NODE and value2.nodeType == Node.TEXT_NODE and value1.nodeValue.isdigit() and value2.nodeValue.isdigit():
                # Both sides are digit runs, so "a.b" was really a float literal.
                parent = node.parentNode
                parent.insertBefore(self.doc.createTextNode("%s.%s" % (value1.nodeValue, value2.nodeValue)), node)
                parent.removeChild(node)
        # Slice operator
        elif node.tagName == "declare-type":
            value1 = node.childNodes[0].childNodes[0]
            value2 = node.childNodes[1].childNodes[0]
            if value1.nodeType != Node.TEXT_NODE and not value1.tagName in {"access"}:
                raise CompilerException("Invalid type declaration")
            if value2.nodeType != Node.TEXT_NODE and not value2.tagName in {"template-call", "unmanaged"}:
                raise CompilerException("You can't call a function in a type defintion")
        # Slice operator
        elif node.tagName == "index":
            value1 = node.childNodes[0].childNodes[0]
            value2 = node.childNodes[1].childNodes[0]
            if value2.nodeType != Node.TEXT_NODE and value2.tagName == "declare-type":
                node.tagName = "slice"
                value2.tagName = "range"
                value2.childNodes[0].tagName = "from"
                value2.childNodes[1].tagName = "to"
        # Object-oriented call
        # elif node.tagName == "access":
        # try:
        # if node.childNodes[1].firstChild.tagName == "call":
        # node.tagName = "call"
        # node.firstChild.tagName = "object"
        # secondValue = node.childNodes[1]
        # callNode = secondValue.firstChild
        #
        # for child in callNode.childNodes:
        # node.appendChild(child.cloneNode(True))
        # node.removeChild(node.childNodes[1])
        # node.childNodes[1].tagName = "function"
        # node.childNodes[2].tagName = "parameters"
        #
        # params = node.childNodes[2].firstChild.cloneNode(True)
        # node.appendChild(self.getParametersNode(params))
        # node.removeChild(node.childNodes[2])
        # except AttributeError:
        # pass
        # except:
        # raise
    # Recursive
    adjXMLTree = self.adjustXMLTree
    for child in node.childNodes:
        adjXMLTree(child)
# Helper methods
def getParametersNode(self, params):
    """Wrap `params` in a <parameters> element.

    A node already tagged "separate" or "parameters" is returned
    unchanged.  Any other element is cloned into a single <parameter>
    child; a non-empty text node is adopted (not cloned) the same way.
    Everything else yields an empty <parameters> element.
    """
    make = self.doc.createElement
    if params.nodeType == Node.ELEMENT_NODE:
        # Already a multi-parameter container: hand it back as-is.
        if params.tagName in ("separate", "parameters"):
            return params
        # Single element parameter: clone it into the wrapper.
        wrapper = make("parameters")
        single = make("parameter")
        single.appendChild(params.cloneNode(True))
        wrapper.appendChild(single)
        return wrapper
    if params.nodeType == Node.TEXT_NODE and params.nodeValue:
        # Plain text parameter: reparent the text node itself.
        wrapper = make("parameters")
        single = make("parameter")
        single.appendChild(params)
        wrapper.appendChild(single)
        return wrapper
    # Empty text node or anything unexpected: no parameters at all.
    return make("parameters")
# Helper functions
def getParameterFuncName(node):
    """Return the parameter name stored in `node`.

    A plain text child is the name itself; an <assign> child carries the
    name as its first grandchild text node.  Any other shape raises
    CompilerException.
    """
    inner = node.firstChild
    if inner.nodeType == Node.TEXT_NODE:
        return inner.nodeValue
    if inner.tagName == "assign":
        return inner.firstChild.firstChild.nodeValue
    raise CompilerException("Invalid parameter initialization")
def getParameterDefaultValueNode(node):
    """Return the node holding the parameter's default value, or None.

    A bare text child means the parameter has no default; an <assign>
    child stores the default inside its second <value> element.  Any
    other shape raises CompilerException.
    """
    inner = node.firstChild
    if inner.nodeType == Node.TEXT_NODE:
        return None
    if inner.tagName == "assign":
        return inner.childNodes[1].firstChild
    raise CompilerException("Invalid parameter default value")
####################################################################
# Main
####################################################################
# Smoke test: build a two-level operator table (* and / binding tighter
# than + and -) and pretty-print the XML tree for a sample expression.
if __name__ == '__main__':
    try:
        parser = ExpressionParser()
        # Mul, Div
        operators = OperatorLevel()
        operators.addOperator(Operator("*", "multiply", Operator.BINARY))
        operators.addOperator(Operator("/", "divide", Operator.BINARY))
        parser.addOperatorLevel(operators)
        # Add, Sub
        operators = OperatorLevel()
        operators.addOperator(Operator("+", "add", Operator.BINARY))
        # NOTE(review): "substract" looks like a typo for "subtract", but it is
        # the XML tag name downstream stages may match on -- confirm before renaming.
        operators.addOperator(Operator("-", "substract", Operator.BINARY))
        parser.addOperatorLevel(operators)
        tree = parser.buildXMLTree("(2 + 5) * 3")
        print(tree.toprettyxml())
    except:
        printTraceback()
| [
"e.urbach@gmail.com"
] | e.urbach@gmail.com |
da5ba41ad19e69fa3ba193faf7fad8392b6a888c | 9cc3135d5fcd781c0542a905c61dc19b0ceeffef | /catch_game.py | e1e3f92b96569c8dbd527503650ace634f77e014 | [] | no_license | bkalcho/python-crash-course | 411d8af223fb6974d4f890c0f82c9e56b062359c | 8425649a2ecd5abeeb438e816400f270d937758e | refs/heads/master | 2022-09-11T13:47:56.837256 | 2022-08-23T10:04:35 | 2022-08-23T10:04:35 | 69,810,386 | 14 | 8 | null | 2022-08-23T10:04:36 | 2016-10-02T17:14:41 | Python | UTF-8 | Python | false | false | 1,471 | py | # Author: Bojan G. Kalicanin
# Date: 15-Dec-2016
# 13-5. Catch: Create a game that places a character that you can move
# left and right at the bottom of the screen. Make a ball appear at a
# random position at the top of the screen and fall down the screen at a
# steady rate. If your character "catches" the ball by colliding with
# it, make the ball disappear. Make a new ball each time your character
# catches the ball or whenever the ball disappears off the bottom of the
# screen.
import pygame
import sys
from catch_settings import Settings
import catch_game_functions as gf
from catcher import Catcher
from catcher_ball import Ball
from pygame.sprite import Group
from catcher_stats import GameStats
def run_game():
    """Main game program."""
    # Initialise pygame, the settings object and the main window.
    pygame.init()
    ai_settings = Settings()
    screen = pygame.display.set_mode((ai_settings.screen_width,
        ai_settings.screen_height))
    pygame.display.set_caption("Catch Game")
    # Game stats.
    stats = GameStats(ai_settings)
    # Catcher object.
    catcher = Group()
    # Ball object.
    #ball = Ball(ai_settings, screen)
    ball = Group()
    # Main game loop.
    while True:
        # Handle input events for each catcher sprite.
        for c in catcher.sprites():
            gf.catch_events(c)
        # Only advance the simulation while the game is active.
        if stats.game_active:
            gf.update_catcher(ai_settings, screen, catcher)
            gf.update_ball(ai_settings, stats, screen, catcher, ball)
        gf.update_screen(ai_settings, screen, catcher, ball)

run_game()
| [
"kalcho@zoho.com"
] | kalcho@zoho.com |
def puissance(a, n):
    """Return a**n for a non-negative integer exponent n, by repeated multiplication.

    n <= 0 yields 1 (the empty product).
    """
    produit = 1
    for _ in range(n):
        produit *= a
    return produit
# Quick manual check: print the powers of two from 2^10 through 2^19.
# (Python 2 print-statement syntax.)
for i in range(10,20):
    print "2^%d = %d\n" % (i,puissance(2,i))
def log2(p):
    """Approximate ln(2) via the alternating harmonic series.

    Sums 9999 terms of sum_{i>=1} (-1)**(i+1) / i, which converges to
    the natural logarithm of 2, and returns the result rounded to `p`
    decimal places.
    """
    res = 0.0
    for i in range(1, 10000):
        # (-1.0) ** (i + 1) replaces the former puissance(-1, i + 1) call,
        # which looped i + 1 times just to compute a sign.
        res += (-1.0) ** (i + 1) / i
    # The original also computed puissance(10, p) into an unused local
    # (`puiss`); that dead code has been removed.
    return round(res, p)
def rp(x, p):
    """Truncate x toward zero to p decimal places (p a non-negative integer).

    Computes the scale factor once with the built-in power operator
    instead of calling the puissance() helper twice per invocation.
    """
    facteur = 10 ** p
    return float(int(x * facteur)) / facteur
| [
"Lucien.casteres@gmail.com"
] | Lucien.casteres@gmail.com |
f7fe4d6a04dfbfa4abc2c316b83f7cfbcaa30e5e | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/wikipedia/testcase/interestallcases/testcase6_022_1.py | 64a6c615586113f38ef615001e76c1bd64bd8f23 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,077 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium desired capabilities: drive the Wikipedia app on an Android 4.4
# emulator, keeping app state between runs (noReset) and enabling Jacoco
# instrumentation so coverage can be collected per test case.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'org.wikipedia',
    'appActivity' : 'org.wikipedia.main.MainActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'org.wikipedia/org.wikipedia.JacocoInstrumentation',
    'noReset' : True
}
def command(cmd, timeout=5):
    """Run a shell command in the background, give it `timeout` seconds, then terminate it.

    Output is captured (stderr folded into stdout) but never read; the
    function always returns None.
    """
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    time.sleep(timeout)
    proc.terminate()
def getElememt(driver, str) :
    """Find a UI element by UiAutomator selector, retrying for up to ~5 seconds.

    If all retries fail, taps (50, 50) via adb to dismiss a possible
    overlay, then performs one final unguarded lookup (which may raise).
    NOTE(review): `str` shadows the builtin; kept for interface stability.
    """
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2) :
    """Find an element by primary selector `str1` (2 tries), falling back to `str2` (5 tries).

    Mirrors getElememt: if both selectors keep failing, taps the screen
    via adb and retries `str2` once more without a guard.
    """
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Swipe between two points given as fractions (0..1) of the screen size.

    Retries once after a 1 second pause if the first attempt raises
    WebDriverException.
    """
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
            end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
            end_y=int(height * endyper), duration=2000)
    return
def scrollToFindElement(driver, str) :
    """Scroll down (up to 5 screens) until the selector `str` matches.

    Returns the element, or None if it never appears.
    """
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            # Not visible yet: swipe upward (content scrolls down) and retry.
            swipe(driver, 0.5, 0.6, 0.5, 0.2)
        else:
            return element
    return
def clickoncheckable(driver, str, value = "true") :
    """Set the checkable preference row matched by `str` to `value`.

    Scans LinearLayout rows for the one containing the selector; when its
    checkbox state differs from `value`, clicks to toggle it, then stops.
    NOTE(review): `value` is compared against get_attribute("checked"),
    which yields the strings "true"/"false" -- hence the string default.
    """
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try :
            # Raises NoSuchElementException when this row doesn't hold the target.
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if (len(lists) == 1) :
                innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
                nowvalue = innere.get_attribute("checked")
                if (nowvalue != value) :
                    innere.click()
                break
        except NoSuchElementException:
            continue
# preference setting and exit
# Enable mobile data, start the app's developer/app settings screens and
# force them into the state the test expects, then dump coverage under
# the name "6_022_pre" and quit the Appium session.  (Python 2 syntax.)
try :
    os.popen("adb shell svc data enable")
    time.sleep(5)
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.DeveloperSettingsActivity")
    scrollToFindElement(driver, "new UiSelector().text(\"useRestbase_setManually\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"useRestbase_setManually\")", "false")
    scrollToFindElement(driver, "new UiSelector().text(\"suppressNotificationPolling\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"suppressNotificationPolling\")", "false")
    scrollToFindElement(driver, "new UiSelector().text(\"memoryLeakTest\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"memoryLeakTest\")", "false")
    scrollToFindElement(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")", "false")
    # Keycode 4 == BACK.
    driver.press_keycode(4)
    time.sleep(2)
    os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.SettingsActivity")
    scrollToFindElement(driver, "new UiSelector().text(\"Download only over Wi-Fi\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"Download only over Wi-Fi\")", "false")
    scrollToFindElement(driver, "new UiSelector().text(\"Show images\")").click()
    clickoncheckable(driver, "new UiSelector().text(\"Show images\")", "true")
    driver.press_keycode(4)
    time.sleep(2)
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
finally :
    # Always broadcast the coverage dump and close the session.
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_022_pre\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
# testcase022
# Generated UI walk: exercises the onboarding dialog, search icon, voice
# search, find-in-page and recent-search flows, then dumps coverage as
# "6_022" and restores device state.  (Python 2 syntax.)
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    # Dismiss the onboarding dialog.
    element = getElememtBack(driver, "new UiSelector().text(\"Got it\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/voice_search_button\").className(\"android.widget.ImageView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/voice_search_button\").className(\"android.widget.ImageView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/voice_search_button\").className(\"android.widget.ImageView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/view_static_card_icon\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/page_toolbar_button_search\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"Find in page\")")
    TouchAction(driver).long_press(element).release().perform()
    # Type a query into the find-in-page box.
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/search_src_text\").className(\"android.widget.EditText\")")
    element.clear()
    element.send_keys("Search");
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Navigate up\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"View main page\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/page_toolbar_button_search\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"Find in page\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"en\")", "new UiSelector().className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Recent searches:\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # Always dump coverage, close the session, force-stop whatever
    # foreground app the walk ended on, and re-enable mobile data.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_022\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'org.wikipedia'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
    os.popen("adb shell svc data enable")
| [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
46554effd6f26071610c069b65a4eb3525944d2a | 015f3e72a0b8573e084e19224f9b08e24a82c270 | /geant_image.py | 787ed98fd5f97ef16d32f2a9e9adfe5694fa5a9a | [] | no_license | cshimmin/crayfis-sim | 56ff18e89efee089137fa3f72e2a635108e31066 | 37bc3c7e074fee2709ccca6c3f5f38a189f70ada | refs/heads/master | 2020-12-24T16:31:58.925857 | 2018-04-11T16:38:58 | 2018-04-11T16:38:58 | 41,048,746 | 4 | 2 | null | 2018-03-02T16:23:59 | 2015-08-19T17:19:11 | C++ | UTF-8 | Python | false | false | 4,519 | py | #!/usr/bin/env python
from PIL import Image
import numpy as np
def trans_linear(v,pmin=50e-6,pmax=0.01):
    ''' Map a pixel energy linearly onto a 0-255 brightness value.
    pmin: energy corresponding to brightness 0 (y-intercept)
    pmax: energy span corresponding to full saturation (255) '''
    level = int(round((v - pmin) * 255. / pmax))
    if level < 0:
        return 0
    if level > 255:
        return 255
    return level
def trans_quant(v, e_electron=3.6e-6, adc_lsb=25, adc_shift=4, pedestal=15):
    ''' Quantizing transformation of energy->brightness.
    e_electron: avg. energy per electron
    adc_lsb: # of electrons per LSB on the ADC
    adc_shift: how many bits the ADC output is shifted down
    (e.g. 10bit->8bit = shift of 2)
    pedestal: electron count subtracted before the ADC stage '''
    # Convert deposited energy to a whole number of electrons, minus pedestal.
    electrons = int(v / e_electron) - pedestal
    # Quantize to ADC counts.
    counts = int(electrons / adc_lsb)
    # Downsample the ADC resolution when a bit shift is configured.
    if adc_shift > 0:
        counts = int(counts / (2 ** adc_shift))
    return min(counts, 255)
def trans_gamma(v):
    ''' Ad-hoc square-root (gamma-like) energy->brightness mapping, kept for comparison. '''
    level = int(round((np.sqrt(v) - 3e-3) * 255. / 0.1))
    return max(0, min(255, level))
def get_bayer_color(x,y):
    ''' Color of the Bayer mosaic at pixel (x, y):
    0=green, 1=red, 2=blue
    '''
    if (x + y) % 2 == 0:
        return 1
    return 0 if x % 2 == 0 else 2
def pixwrite_bw(img, x, y, v):
    ''' Write a grayscale pixel value to the image, silently ignoring
    coordinates outside the 500x500 canvas. '''
    if 0 <= x <= 499 and 0 <= y <= 499:
        img.putpixel((x, y), v)
def pixwrite_bayer(img, x, y, v):
    ''' Write value v into the single RGB channel selected by the Bayer
    color at (x, y); the other two channels are set to zero. '''
    channel = get_bayer_color(x, y)
    pixel = [0, 0, 0]
    pixel[channel] = v
    img.putpixel((x, y), tuple(pixel))
def mkimg(t, entry, trans, mode="L", offset=None):
    ''' Make an image from a single event in the ROOT tree.
    t: the input ROOT tree
    entry: the entry number to write out
    trans: the transformation function to use (energy -> brightness)
    mode: The image mode; "L" for grayscale, "bayer" for bayer-mosaic
    offset: optional (x,y) coordinate to shift the original image by
    '''
    # Choose the canvas type and pixel writer for the requested mode.
    if mode == "L":
        img = Image.new("L", (500,500))
        pixwrite = pixwrite_bw
    elif mode == "bayer":
        img = Image.new("RGB", (500,500))
        pixwrite = pixwrite_bayer
    else:
        raise RuntimeError("Unknown mode: %s" % mode)
    # Load the requested event, then paint each hit pixel together with its
    # 8 neighbours (n1..n8, row by row from top-left to bottom-right).
    t.GetEntry(entry)
    for x,y,v,n1,n2,n3,n4,n5,n6,n7,n8 in zip(t.pix_x,t.pix_y,t.pix_val,t.pix_n1,t.pix_n2,t.pix_n3,t.pix_n4,t.pix_n5,t.pix_n6,t.pix_n7,t.pix_n8):
        if not offset is None:
            x += offset[0]
            y += offset[1]
        pixwrite(img, x, y, trans(v))
        pixwrite(img, x-1, y-1, trans(n1))
        pixwrite(img, x, y-1, trans(n2))
        pixwrite(img, x+1, y-1, trans(n3))
        pixwrite(img, x-1, y, trans(n4))
        pixwrite(img, x+1, y, trans(n5))
        pixwrite(img, x-1, y+1, trans(n6))
        pixwrite(img, x, y+1, trans(n7))
        pixwrite(img, x+1, y+1, trans(n8))
    return img
def random_composite(t, ntrack=20, trans=trans_linear):
    ''' Randomly select events from the given ROOT tree and overlay them onto a single image.
    t: The input ROOT tree
    ntrack: The number of events to overlay
    trans: The energy transformation to apply
    '''
    entries = np.random.randint(0,t.GetEntries()-1, ntrack)
    # Accumulator with the same shape/dtype as a blank 500x500 grayscale image.
    # NOTE(review): that dtype is uint8, so overlapping tracks wrap around at
    # 255 instead of saturating -- confirm this is intended.
    img_array = np.zeros_like(np.asarray(Image.new("L",(500,500))))
    for i in entries:
        # Shift each track by a random offset so the overlays spread out.
        offset = np.random.randint(-250,250,2)
        img_array += np.asarray(mkimg(t, i, trans, offset=offset))
    img = Image.fromarray(img_array)
    return img
# Command-line entry point: overlay N random simulated tracks into one JPEG.
if __name__ == "__main__":
    from argparse import ArgumentParser
    parser = ArgumentParser(description="Generate images from GEANT simluation")
    parser.add_argument("-N", type=int, default=20, help="The number of events to overlay.")
    parser.add_argument("input_file", help="The input ROOT file from GEANT simulation.")
    parser.add_argument("output_file", nargs="?", default="tracks.jpg", help="The output image file")
    args = parser.parse_args()
    # ROOT is imported lazily so the transformation helpers above remain
    # usable without a ROOT installation.
    import ROOT as r
    t = r.TChain("pixels")
    t.Add(args.input_file)
    img = random_composite(t, ntrack=args.N, trans=trans_quant)
    img.save(args.output_file, format='jpeg', quality=98)
    print "Saved to", args.output_file
| [
"cshimmin@uci.edu"
] | cshimmin@uci.edu |
b2830436f10dd100a76995d67b0f77827b8fa308 | c19bcbc98555ef06276f9f0dcffc9ac35942a7c4 | /tests/test_proc_pid_maps.py | 295254aa85fb9c2904a4fc24b52c440ba608763e | [
"MIT"
] | permissive | kellyjonbrazil/jc | 4e81a5421cd20be5965baf375f4a5671c2ef0410 | 4cd721be8595db52b620cc26cd455d95bf56b85b | refs/heads/master | 2023-08-30T09:53:18.284296 | 2023-07-30T17:08:39 | 2023-07-30T17:08:39 | 215,404,927 | 6,278 | 185 | MIT | 2023-09-08T14:52:22 | 2019-10-15T22:04:52 | Python | UTF-8 | Python | false | false | 1,270 | py | import os
import unittest
import json
from typing import Dict
import jc.parsers.proc_pid_maps
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
    """Tests for the jc proc_pid_maps parser."""

    f_in: Dict = {}
    f_json: Dict = {}

    @classmethod
    def setUpClass(cls):
        """Read each raw fixture and its expected JSON once for all tests."""
        fixtures = {
            'proc_pid_maps': (
                'fixtures/linux-proc/pid_maps',
                'fixtures/linux-proc/pid_maps.json')
        }
        for name, (raw_path, json_path) in fixtures.items():
            raw_file = os.path.join(THIS_DIR, raw_path)
            expected_file = os.path.join(THIS_DIR, json_path)
            with open(raw_file, 'r', encoding='utf-8') as a, \
                 open(expected_file, 'r', encoding='utf-8') as b:
                cls.f_in[name] = a.read()
                cls.f_json[name] = json.loads(b.read())

    def test_proc_pid_maps_nodata(self):
        """
        Test 'proc_pid_maps' with no data
        """
        self.assertEqual(jc.parsers.proc_pid_maps.parse('', quiet=True), [])

    def test_proc_pid_maps(self):
        """
        Test '/proc/<pid>/maps'
        """
        actual = jc.parsers.proc_pid_maps.parse(self.f_in['proc_pid_maps'], quiet=True)
        self.assertEqual(actual, self.f_json['proc_pid_maps'])
# Allow running this test module directly with `python test_proc_pid_maps.py`.
if __name__ == '__main__':
    unittest.main()
| [
"kellyjonbrazil@gmail.com"
] | kellyjonbrazil@gmail.com |
f0e2a710d4f5bd33ef7d98cf615e13fc4b30f6ea | 1cff1958e026d697fd999d652ce9ecc7e19455f9 | /data/augment_data.py | 6769af5ff41ad5578e67698338c4afd3c54a24d7 | [] | no_license | antoniojkim/TheresWaldo_Revisted | 9bb03798f90097c9bb482aacaad8ac6504678941 | 20407e14b1874580e883f6e4673773173593a691 | refs/heads/master | 2022-06-09T16:01:22.929437 | 2020-05-08T18:59:35 | 2020-05-08T18:59:35 | 257,645,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,358 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import json
import os
import pathlib
import numpy as np
from PIL import Image
# Module-level logger (currently unused by the code below).
log = logging.getLogger(__name__)
# Directory containing this file; all data paths are resolved relative to it.
file_dir = pathlib.Path(__file__).parent.absolute()
# Ground-truth bounding boxes keyed by image filename; each value unpacks as
# [x, y, width, height] (see augment_data below).
with open(os.path.join(file_dir, "boxes.json")) as file:
    boxes = json.loads(file.read())
# Original puzzle scans loaded as numpy arrays, restricted to the .jpg images
# that actually have an annotated box.
originals = {
    waldo: np.array(
        Image.open(os.path.join(file_dir, "data", "original-images", waldo))
    )
    for waldo in os.listdir(os.path.join(file_dir, "data", "original-images"))
    if waldo.endswith(".jpg") and waldo in boxes
}
def augment_data(augment_times=100):
    """Produce randomly augmented samples from every original image.

    For each of ``augment_times`` passes, the images are visited in a
    freshly shuffled order; every image receives a random crop covering
    75%-95% of each dimension plus a random horizontal flip.  When the
    Waldo bounding box survives the crop, the sample also carries a
    crop-relative ``"box"`` entry (flip-adjusted when needed).
    """
    samples = []
    names = list(originals.keys())
    for _ in range(augment_times):
        np.random.shuffle(names)
        for name in names:
            image = originals[name]
            img_h, img_w = image.shape[0], image.shape[1]
            # Draw the crop geometry and the flip flag.  The order of the
            # random calls is kept stable for RNG reproducibility.
            crop_w = int(img_w * np.random.uniform(0.75, 0.95))
            crop_h = int(img_h * np.random.uniform(0.75, 0.95))
            crop_x = np.random.randint(img_w - crop_w)
            crop_y = np.random.randint(img_h - crop_h)
            flip = bool(np.random.randint(2))
            sample = {
                "name": name,
                "image": image,
                "crop": {
                    "x": crop_x,
                    "y": crop_y,
                    "w": crop_w,
                    "h": crop_h,
                    "hflip": flip,
                },
            }
            samples.append(sample)
            # Intersect the Waldo box with the crop window.
            box_x, box_y, box_w, box_h = boxes[name]
            new_x = max(box_x - crop_x, 0)
            new_y = max(box_y - crop_y, 0)
            new_w = min(box_w, crop_x + crop_w - box_x, box_x + box_w - crop_x)
            new_h = min(box_h, crop_y + crop_h - box_y, box_y + box_h - crop_y)
            if new_w > 0 and new_h > 0:
                # Mirror the box horizontally when the crop is flipped.
                if flip:
                    new_x = crop_w - new_x - new_w
                sample["box"] = {
                    "x": new_x,
                    "y": new_y,
                    "w": new_w,
                    "h": new_h,
                }
    return samples
| [
"antoniok@antoniojkim.com"
] | antoniok@antoniojkim.com |
aa103ece577f2eb0c662c45e3595e467122c3b44 | e8fa31dbea95c578153d079526b33cac1f17d546 | /config/settings/production.py | ed4f388d52f2ae972db765616b85f905789f8c0f | [] | no_license | boldasalion/qa_project | 5ab2af77992ccdd5d982a14843c671a34d67fe6a | 444beb40fdf8e65bd13496fdc2112ec0229dd9bf | refs/heads/master | 2020-03-30T11:46:18.467431 | 2018-10-02T02:23:25 | 2018-10-02T02:23:25 | 151,191,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,366 | py | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['goulter.net'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3Boto3Storage'
STATIC_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/'
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = 'static'
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = 'media'
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3Boto3Storage'
MEDIA_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='qa_project <noreply@goulter.net>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[qa_project]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your stuff...
# ------------------------------------------------------------------------------
| [
"brian.goulter@gmail.com"
] | brian.goulter@gmail.com |
d258f13052292397d6011d79c8b92e7984c7b748 | 2d9adafb29f8137b8b4dc58c76791115e09f4355 | /mysite/settings.py | 52f8992e96958590981424bc951656645b237ff3 | [] | no_license | yanwenming/mysite | a4f1a81aeb94f1ca28986092423589200e585424 | 077ad334f34e0eab687b00ba6bfd2d2cb94be28f | refs/heads/master | 2021-04-08T00:44:27.597052 | 2020-04-17T04:04:05 | 2020-04-17T04:04:05 | 248,718,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,526 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #表示根目录
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '54ni+88+7*hd7(p39_%h#ch24sb@0t6f8cw6b2=7ewyf9u3$@s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog', #添加博客应用
'account', #添加账号应用
'article',#文章应用
'image',#美图应用
'sorl.thumbnail',#缩略图,引发第三方的
'course',#在线学习应用
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates'),], #BASE_DIR表示根目录
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
#配置MySql数据库信息
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME':'django_bbs',
'USER': 'root',
'PASSWORD': 'test123456',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh-hans'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
#设置静态文件存在目录的位置,即在本项目根目录中的./static下
STATICFILES_DIRS = (
os.path.join(BASE_DIR,"static"),
)
#设置登录后重定向到home页面,即http://127.0.0.1:8000/home/
LOGIN_REDIRECT_URL = '/home/'
LOGIN_URL = '/account/login/'
EMAIL_HOST = 'smtp.163.com'
EMAIL_HOST_USER = "yanwenming_sz@163.com"
EMAIL_HOST_PASSWORD ="TWMXAVMGFQEJPSHW"
EMAIL_PORT =25
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = "yanwenming_sz@163.com"
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#添加Redis数据库配置信息
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
THUMBNAIL_DEBUG = True | [
"ywm3685@qq.com"
] | ywm3685@qq.com |
94a798f5d7308fcbe123a455033b48e8309745e5 | 824b582c2e0236e987a29b233308917fbdfc57a7 | /sdk/python/pulumi_google_native/gameservices/v1/_enums.py | 80a4901a6859335feb5cffa4619a9f4fff2a8000 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | 24601/pulumi-google-native | ce8faf8455609a9572a8cbe0638c66427bf0ae7f | b219a14201c6c58eaa10caaeacbdaab528931adf | refs/heads/master | 2023-08-23T05:48:31.819709 | 2021-10-08T18:50:44 | 2021-10-08T18:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,794 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AuditLogConfigLogType',
'AuthorizationLoggingOptionsPermissionType',
'CloudAuditOptionsLogName',
'ConditionIam',
'ConditionOp',
'ConditionSys',
'DataAccessOptionsLogMode',
'RuleAction',
]
# Machine-generated enum: ``str``-valued members compare equal to their
# literal string values.
class AuditLogConfigLogType(str, Enum):
    """
    The log type that this config enables.
    """
    LOG_TYPE_UNSPECIFIED = "LOG_TYPE_UNSPECIFIED"
    """
    Default case. Should never be this.
    """
    ADMIN_READ = "ADMIN_READ"
    """
    Admin reads. Example: CloudIAM getIamPolicy
    """
    DATA_WRITE = "DATA_WRITE"
    """
    Data writes. Example: CloudSQL Users create
    """
    DATA_READ = "DATA_READ"
    """
    Data reads. Example: CloudSQL Users list
    """
# Machine-generated enum: ``str``-valued members compare equal to their
# literal string values.
class AuthorizationLoggingOptionsPermissionType(str, Enum):
    """
    The type of the permission that was checked.
    """
    PERMISSION_TYPE_UNSPECIFIED = "PERMISSION_TYPE_UNSPECIFIED"
    """
    Default. Should not be used.
    """
    ADMIN_READ = "ADMIN_READ"
    """
    A read of admin (meta) data.
    """
    ADMIN_WRITE = "ADMIN_WRITE"
    """
    A write of admin (meta) data.
    """
    DATA_READ = "DATA_READ"
    """
    A read of standard data.
    """
    DATA_WRITE = "DATA_WRITE"
    """
    A write of standard data.
    """
# Machine-generated enum: ``str``-valued members compare equal to their
# literal string values.
class CloudAuditOptionsLogName(str, Enum):
    """
    The log_name to populate in the Cloud Audit Record.
    """
    UNSPECIFIED_LOG_NAME = "UNSPECIFIED_LOG_NAME"
    """
    Default. Should not be used.
    """
    ADMIN_ACTIVITY = "ADMIN_ACTIVITY"
    """
    Corresponds to "cloudaudit.googleapis.com/activity"
    """
    DATA_ACCESS = "DATA_ACCESS"
    """
    Corresponds to "cloudaudit.googleapis.com/data_access"
    """
# Machine-generated enum: ``str``-valued members compare equal to their
# literal string values.
class ConditionIam(str, Enum):
    """
    Trusted attributes supplied by the IAM system.
    """
    NO_ATTR = "NO_ATTR"
    """
    Default non-attribute.
    """
    AUTHORITY = "AUTHORITY"
    """
    Either principal or (if present) authority selector.
    """
    ATTRIBUTION = "ATTRIBUTION"
    """
    The principal (even if an authority selector is present), which must only be used for attribution, not authorization.
    """
    SECURITY_REALM = "SECURITY_REALM"
    """
    Any of the security realms in the IAMContext (go/security-realms). When used with IN, the condition indicates "any of the request's realms match one of the given values; with NOT_IN, "none of the realms match any of the given values". Note that a value can be: - 'self' (i.e., allow connections from clients that are in the same security realm, which is currently but not guaranteed to be campus-sized) - 'self:metro' (i.e., clients that are in the same metro) - 'self:cloud-region' (i.e., allow connections from clients that are in the same cloud region) - 'guardians' (i.e., allow connections from its guardian realms. See go/security-realms-glossary#guardian for more information.) - a realm (e.g., 'campus-abc') - a realm group (e.g., 'realms-for-borg-cell-xx', see: go/realm-groups) A match is determined by a realm group membership check performed by a RealmAclRep object (go/realm-acl-howto). It is not permitted to grant access based on the *absence* of a realm, so realm conditions can only be used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN).
    """
    APPROVER = "APPROVER"
    """
    An approver (distinct from the requester) that has authorized this request. When used with IN, the condition indicates that one of the approvers associated with the request matches the specified principal, or is a member of the specified group. Approvers can only grant additional access, and are thus only used in a strictly positive context (e.g. ALLOW/IN or DENY/NOT_IN).
    """
    JUSTIFICATION_TYPE = "JUSTIFICATION_TYPE"
    """
    What types of justifications have been supplied with this request. String values should match enum names from security.credentials.JustificationType, e.g. "MANUAL_STRING". It is not permitted to grant access based on the *absence* of a justification, so justification conditions can only be used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN). Multiple justifications, e.g., a Buganizer ID and a manually-entered reason, are normal and supported.
    """
    CREDENTIALS_TYPE = "CREDENTIALS_TYPE"
    """
    What type of credentials have been supplied with this request. String values should match enum names from security_loas_l2.CredentialsType - currently, only CREDS_TYPE_EMERGENCY is supported. It is not permitted to grant access based on the *absence* of a credentials type, so the conditions can only be used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN).
    """
    CREDS_ASSERTION = "CREDS_ASSERTION"
    """
    EXPERIMENTAL -- DO NOT USE. The conditions can only be used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN).
    """
# Machine-generated enum: ``str``-valued members compare equal to their
# literal string values.  ``IN_`` carries a trailing underscore because
# ``in`` is a Python keyword; its wire value is still "IN".
class ConditionOp(str, Enum):
    """
    An operator to apply the subject with.
    """
    NO_OP = "NO_OP"
    """
    Default no-op.
    """
    EQUALS = "EQUALS"
    """
    DEPRECATED. Use IN instead.
    """
    NOT_EQUALS = "NOT_EQUALS"
    """
    DEPRECATED. Use NOT_IN instead.
    """
    IN_ = "IN"
    """
    The condition is true if the subject (or any element of it if it is a set) matches any of the supplied values.
    """
    NOT_IN = "NOT_IN"
    """
    The condition is true if the subject (or every element of it if it is a set) matches none of the supplied values.
    """
    DISCHARGED = "DISCHARGED"
    """
    Subject is discharged
    """
# Machine-generated enum: ``str``-valued members compare equal to their
# literal string values.
class ConditionSys(str, Enum):
    """
    Trusted attributes supplied by any service that owns resources and uses the IAM system for access control.
    """
    NO_ATTR = "NO_ATTR"
    """
    Default non-attribute type
    """
    REGION = "REGION"
    """
    Region of the resource
    """
    SERVICE = "SERVICE"
    """
    Service name
    """
    NAME = "NAME"
    """
    Resource name
    """
    IP = "IP"
    """
    IP address of the caller
    """
# Machine-generated enum: ``str``-valued members compare equal to their
# literal string values.
class DataAccessOptionsLogMode(str, Enum):
    LOG_MODE_UNSPECIFIED = "LOG_MODE_UNSPECIFIED"
    """
    Client is not required to write a partial Gin log immediately after the authorization check. If client chooses to write one and it fails, client may either fail open (allow the operation to continue) or fail closed (handle as a DENY outcome).
    """
    LOG_FAIL_CLOSED = "LOG_FAIL_CLOSED"
    """
    The application's operation in the context of which this authorization check is being made may only be performed if it is successfully logged to Gin. For instance, the authorization library may satisfy this obligation by emitting a partial log entry at authorization check time and only returning ALLOW to the application if it succeeds. If a matching Rule has this directive, but the client has not indicated that it will honor such requirements, then the IAM check will result in authorization failure by setting CheckPolicyResponse.success=false.
    """
# Machine-generated enum: ``str``-valued members compare equal to their
# literal string values.
class RuleAction(str, Enum):
    """
    Required
    """
    NO_ACTION = "NO_ACTION"
    """
    Default no action.
    """
    ALLOW = "ALLOW"
    """
    Matching 'Entries' grant access.
    """
    ALLOW_WITH_LOG = "ALLOW_WITH_LOG"
    """
    Matching 'Entries' grant access and the caller promises to log the request per the returned log_configs.
    """
    DENY = "DENY"
    """
    Matching 'Entries' deny access.
    """
    DENY_WITH_LOG = "DENY_WITH_LOG"
    """
    Matching 'Entries' deny access and the caller promises to log the request per the returned log_configs.
    """
    LOG = "LOG"
    """
    Matching 'Entries' tell IAM.Check callers to generate logs.
    """
| [
"noreply@github.com"
] | noreply@github.com |
5df8ddad80786293c77eeb3cdbc342e6a41af0a2 | b793302e56ab40b82892191a3b3bbd916c9c0f47 | /pyjfuzz/core/pjf_decoretors.py | d566455c401f436a3728074d59ece0937b85936f | [] | no_license | yuanfeisiyuetian/5gc-fuzz | 5b06f053d64b5504a1667a5beb695936ce1a1381 | 13f1943a0d9e6cc79e2a2e8089e56ca31c1e5edb | refs/heads/master | 2023-08-11T23:08:38.530392 | 2021-10-06T07:19:59 | 2021-10-06T07:19:59 | 411,973,402 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | """
The MIT License (MIT)
Copyright (c) 2016 Daniele Linguaglossa <d.linguaglossa@mseclab.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .pjf_mutators import PJFMutators
class PJFDecorators(object):
    """Collection of decorators used to fuzz objects based on their type."""

    def __init__(self, configuration):
        # Mutator factory shared by every decorated callable.
        self.Mutators = PJFMutators(configuration)

    def mutate_object_decorate(self, func):
        """Wrap ``func`` so that its result is mutated according to its runtime type."""
        def wrapped():
            # self.Mutators is looked up at call time, matching the
            # original late-binding behaviour.
            result = func()
            return self.Mutators.get_mutator(result, type(result))
        return wrapped
| [
"13051593033@163.com"
] | 13051593033@163.com |
cf09aa744e34193b2f88204ad63a6d701c99a4b4 | 5455f66346259f710a46c9b227944987552eb17e | /daily_temperature.py | 963aa9523e4f43d507d8d47ad0bbe7d9834c9e24 | [] | no_license | LawrenceGao0224/LeetCode | 0dc9ead99f48660390c050c0fce0ca9d8e9fd108 | 5c3f4bc56868233fb9d693cf515528c566b7267d | refs/heads/main | 2023-04-19T09:02:30.082783 | 2021-04-28T01:46:40 | 2021-04-28T01:46:40 | 335,158,769 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | '''
how many days you would have to wait until a warmer temperature
T = [73, 74, 75, 71, 69, 72, 76, 73], your output should be [1, 1, 4, 2, 1, 1, 0, 0].
'''
class Solution:
    def dailyTemperatures(self, T: list[int]) -> list[int]:
        """Return, for each day, how many days until a warmer temperature.

        Fix: the original annotated with ``List[int]`` but the file never
        imports ``typing.List``, so defining the class raised ``NameError``;
        the builtin generic ``list[int]`` needs no import.

        Uses a monotonic stack of indices whose answer is still pending;
        O(n) time and O(n) extra space. Days with no warmer future day
        keep the default answer 0.
        """
        answer = [0] * len(T)
        pending = []  # indices of days with strictly decreasing temperatures
        for today, temp in enumerate(T):
            # Resolve every pending day that is colder than today.
            while pending and T[pending[-1]] < temp:
                day = pending.pop()
                answer[day] = today - day
            pending.append(today)
        return answer
return res | [
"bightt53523@gmail.com"
] | bightt53523@gmail.com |
2031c56a44240eab92abe738a0079300ea5defc9 | e4b6830f1a52862ac16a92240ed2917dbfe0cbfe | /freshworks_assignment.py | d32d7a2ec2827d42462c8fbf45b35cf8e4f44edd | [] | no_license | Alphaleader007/Key-Value-Store | 006fe00bc22c60ff73fc4086e22cc01678d69209 | cda5f9a0ccf69851512bd28568a7ed8b85786c1c | refs/heads/main | 2023-03-24T07:16:54.092146 | 2021-03-22T06:06:48 | 2021-03-22T06:06:48 | 317,653,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | import json
ct=1
while ct:
def create():
data={}
x=input("Enter the Path of the JSON File,if not specified, data.json under the current dir will be considered\n")
if x=="":
x="data.json"
with open(x, 'w') as fp:
json.dump(data, fp)
n=input("no of input:") #no.of data entry
print("Enter the data in the following format\nKey<space>Value\n")
for j in range(int(n)):
k=input()
y=k.split(' ')
if len(y[0])<32:
if y[0] in data.keys():
print("\nKey already exists,try a new key")
continue
data[y[0]] = y[1:]
print("\nThe data you entered is\n")
print(data)
with open(x, 'w') as fp:
json.dump(data, fp)
def rd():
x=input("Enter the Path of the JSON File,if not specified, data.json under the current dir will be considered\n")
if x=="":
x="data.json"
f=open(x,"r") #opening a json file in read mode
data=json.load(f) #parsing a json file
f.close()
y=input("\nEnter the Key to be read: ")
print(data[y])
def delete():
x=input("Enter the Path of the JSON File,if not specified, data.json under the current dir will be considered\n")
if x=="":
x="data.json"
f=open(x,"r") #opening a json file in read mode
data=json.load(f)
print(data)
y=input("Enter the Key of the element to be deleted")
del data[y]
print(data)
f.close()
i=input("Enter the operation to be performed :\n1.Create\n2.Read\n3.Delete\n4.exit\n")
if i=="1":
create()
elif i=="2":
rd()
elif i=="3":
delete()
elif i=="4":
break
| [
"noreply@github.com"
] | noreply@github.com |
1f3b81a2dc445f1de658e69b2c57ae1862a4d74d | 00334fc7098f139950024a186bdbf31bae9a309c | /meiduo_mall/meiduo_mall/apps/contents/views.py | 3ca6786bc51599fd263b90ab68a06bb2d3572a12 | [] | no_license | AllenCheungOfficial/projects | d0782e6ff327072fcb0f72ee1742050b06f5df23 | 82974879d4e96734369708a4c27d15490cedb78a | refs/heads/master | 2020-05-20T00:54:31.292590 | 2019-05-24T13:54:31 | 2019-05-24T13:54:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | from django.shortcuts import render
from django.views import View
from .models import ContentCategory
from goods.utils import get_categories
class IndexView(View):
    """Home-page view: renders category navigation plus advertisement content."""

    def get(self, request):
        """Build the home-page context and render ``index.html``.

        Improvements over the original: the accumulator no longer shadows
        the builtin ``dict``, and the explicit loop is replaced by a dict
        comprehension with identical behaviour.
        """
        # Three-level goods category tree for the navigation menu.
        categories = get_categories()
        # Map each advertisement category key to its enabled content items,
        # ordered by their configured display sequence.
        contents = {
            cat.key: cat.content_set.filter(status=True).order_by('sequence')
            for cat in ContentCategory.objects.all()
        }
        context = {
            # Category data needed by the home page.
            'categories': categories,
            # Displayable advertisement content for the home page.
            'contents': contents,
        }
        # Render the home page with the assembled context.
        return render(request, 'index.html', context=context)
"zhangyanyy@163.com"
] | zhangyanyy@163.com |
5d50336e63c121fe6f46877547009aa9b9bae971 | 5908dc4ea216ff324685f454f03c197e3d867093 | /MinAvgTwoSlice.py | 77e4b03572bd8c1fa201d427ef880aa992f912f3 | [] | no_license | thegamingmadao/codibility-solutions | 3d253de30d49854a31e0e10a0ac81f7e5b41240d | a27adad199a7f8f1024fdd3977dfc71026f62454 | refs/heads/master | 2020-04-05T16:22:02.502421 | 2018-11-10T17:16:38 | 2018-11-10T17:16:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | def solution(A):
# write your code in Python 3.6
n = len(A)
p = [0] * (n+1)
min_averages = []
for i in range(1, n+1):
p[i] = p[i-1] + A[i-1]
print('p'+str(i) + ' '+ str(p[i]))
for i in range(n-1):
averages = [(p[j] - p[i])/(j-i+1) for j in range(i+1,n+1)]
print(averages)
min_averages.append(min(averages))
print(min_averages)
print()
return min_averages.index(min(min_averages)) | [
"madao@localhost.localdomain"
] | madao@localhost.localdomain |
2a1b73db74544b5ea8a04f83b4bcafe43d880d89 | 6292b5c988f1bfffec3754336da7544554f9d137 | /lab04/src/.ipynb_checkpoints/consts-checkpoint.py | acc0ddffa168210db4a0fdb4a83b4f9c226b73a9 | [] | no_license | 2ndbleu/20f-computer-architecture | a6a0e1662a9fa21bd53b87c8e701711fd5476192 | b9e737659e250c17b4f2d84bd7c9b1966026df2c | refs/heads/master | 2023-03-16T04:38:18.729963 | 2020-12-19T16:38:11 | 2020-12-19T16:38:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,438 | py | #----------------------------------------------------------------
#
# 4190.308 Computer Architecture (Fall 2020)
#
# Project #4: A 6-Stage Pipelined RISC-V Simulator
#
# November 25, 2020
#
# Jin-Soo Kim (jinsoo.kim@snu.ac.kr)
# Systems Software & Architecture Laboratory
# Dept. of Computer Science and Engineering
# Seoul National University
#
#----------------------------------------------------------------
import numpy as np
#--------------------------------------------------------------------------
# Data types & basic constants
#--------------------------------------------------------------------------
WORD = np.uint32
SWORD = np.int32
Y = True
N = False
#--------------------------------------------------------------------------
# RISC-V constants
#--------------------------------------------------------------------------
WORD_SIZE = 4
NUM_REGS = 32
BUBBLE = WORD(0x00004033) # Machine-generated NOP: xor x0, x0, x0
NOP = WORD(0x00000013) # Software-generated NOP: addi zero, zero, 0
ILLEGAL = WORD(0xffffffff)
OP_MASK = WORD(0x0000007f)
OP_SHIFT = 0
RD_MASK = WORD(0x00000f80)
RD_SHIFT = 7
FUNCT3_MASK = WORD(0x00007000)
FUNCT3_SHIFT = 12
RS1_MASK = WORD(0x000f8000)
RS1_SHIFT = 15
RS2_MASK = WORD(0x01f00000)
RS2_SHIFT = 20
FUNCT7_MASK = WORD(0xfe000000)
FUNCT7_SHIFT = 25
#--------------------------------------------------------------------------
# ISA table index
#--------------------------------------------------------------------------
IN_NAME = 0
IN_MASK = 1
IN_TYPE = 2
IN_CLASS = 3
#--------------------------------------------------------------------------
# ISA table[IN_TYPE]: Instruction types for disassembling
#--------------------------------------------------------------------------
R_TYPE = 0
I_TYPE = 1
IL_TYPE = 2 # I_TYPE, but load instruction
IJ_TYPE = 3 # I_TYPE, but jalr instruction
IS_TYPE = 4 # I_TYPE, but shift instructions
U_TYPE = 5
S_TYPE = 6
B_TYPE = 7
J_TYPE = 8
X_TYPE = 9
#--------------------------------------------------------------------------
# ISA table[IN_CLASS]: Instruction classes for collecting stats
#--------------------------------------------------------------------------
CL_ALU = 0
CL_MEM = 1
CL_CTRL = 2
#--------------------------------------------------------------------------
# PC select signal
#--------------------------------------------------------------------------
PC_4 = 0 # PC + 4
PC_BRJMP = 1 # branch or jump target
PC_JALR = 2 # jump register target
#--------------------------------------------------------------------------
# Control signal (csignal) table index
#--------------------------------------------------------------------------
CS_VAL_INST = 0
CS_BR_TYPE = 1
CS_OP1_SEL = 2
CS_OP2_SEL = 3
CS_RS1_OEN = 4
CS_RS2_OEN = 5
CS_ALU_FUN = 6
CS_WB_SEL = 7
CS_RF_WEN = 8
CS_MEM_EN = 9
CS_MEM_FCN = 10
CS_MSK_SEL = 11
#--------------------------------------------------------------------------
# csignal[CS_BR_TYPE]: Branch type signal
#--------------------------------------------------------------------------
BR_N = 0 # Next
BR_NE = 1 # Branch on NotEqual
BR_EQ = 2 # Branch on Equal
BR_GE = 3 # Branch on Greater/Equal
BR_GEU = 4 # Branch on Greater/Equal Unsigned
BR_LT = 5 # Branch on Less Than
BR_LTU = 6 # Branch on Less Than Unsigned
BR_J = 7 # Jump
BR_JR = 8 # Jump Register
#--------------------------------------------------------------------------
# csignal[CS_OP1_SEL]: RS1 operand select signal
#--------------------------------------------------------------------------
OP1_RS1 = 0 # Register source #1 (rs1)
OP1_PC = 2
OP1_X = 0
#--------------------------------------------------------------------------
# csignal[CS_OP2_SEL]: RS2 operand select signal
#--------------------------------------------------------------------------
OP2_RS2 = 0 # Register source #2 (rs2)
OP2_IMI = 1 # Immediate, I-type
OP2_IMS = 2 # Immediate, S-type
OP2_IMU = 3 # Immediate, U-type
OP2_IMJ = 4 # Immediate, UJ-type
OP2_IMB = 5 # Immediate, SB-type
OP2_X = 0
#--------------------------------------------------------------------------
# csignal[CS_RS1_OEN, CS_RS2_OEN]: Operand enable signal
#--------------------------------------------------------------------------
OEN_0 = 0
OEN_1 = 1
#--------------------------------------------------------------------------
# csignal[CS_ALU_FUN]: ALU operation signal
#--------------------------------------------------------------------------
ALU_ADD = 1
ALU_SUB = 2
ALU_SLL = 3
ALU_SRL = 4
ALU_SRA = 5
ALU_AND = 6
ALU_OR = 7
ALU_XOR = 8
ALU_SLT = 9
ALU_SLTU = 10
ALU_COPY1 = 11
ALU_COPY2 = 12
ALU_SEQ = 13 # Set if equal
ALU_X = 0
#--------------------------------------------------------------------------
# csignal[CS_WB_SEL]: Writeback select signal
#--------------------------------------------------------------------------
WB_ALU = 0 # ALU output
WB_MEM = 1 # memory output
WB_PC4 = 2 # PC + 4
WB_X = 0
#--------------------------------------------------------------------------
# csignal[CS_RF_WEN]: Register file write enable signal
#--------------------------------------------------------------------------
REN_0 = False
REN_1 = True
REN_X = False
#--------------------------------------------------------------------------
# csignal[CS_MEM_EN]: Memory enable signal
#--------------------------------------------------------------------------
MEN_0 = False
MEN_1 = True
MEN_X = False
#--------------------------------------------------------------------------
# csignal[CS_MEM_FCN]: Memory function type signal
#--------------------------------------------------------------------------
M_XRD = 0 # load
M_XWR = 1 # store
M_X = 0
#--------------------------------------------------------------------------
# csignal[CS_MSK_SEL]: Memory mask type select signal
#--------------------------------------------------------------------------
MT_X = 0
MT_B = 1 # byte
MT_H = 2 # halfword
MT_W = 3 # word
MT_D = 4 # doubleword
MT_BU = 5 # byte (unsigned)
MT_HU = 6 # halfword (unsigned)
MT_WU = 7 # word (unsigned)
#--------------------------------------------------------------------------
# Exceptions
#--------------------------------------------------------------------------
# Multiple exceptions can occur. So they should be a bit vector.
EXC_NONE = 0 # EXC_NONE should be zero
EXC_IMEM_ERROR = 1
EXC_DMEM_ERROR = 2
EXC_ILLEGAL_INST = 4
EXC_EBREAK = 8
EXC_MSG = { EXC_IMEM_ERROR: "imem access error",
EXC_DMEM_ERROR: "dmem access error",
EXC_ILLEGAL_INST: "illegal instruction",
EXC_EBREAK: "ebreak",
}
# Forwarding source
FWD_NONE = 0
FWD_EX = 1
FWD_MM = 2
FWD_WB = 3
| [
"lucetre@naver.com"
] | lucetre@naver.com |
67d7445176b628d391bca470696e3b4247bc6228 | aabfe137db175f0e070bd9342e6346ae65e2be32 | /RecoEcal/EgammaClusterProducers/python/islandClusteringSequence_cff.py | 57c2add9f6396084fa60b771d75bfcb922cb8181 | [] | no_license | matteosan1/cmssw | e67b77be5d03e826afd36a9ec5a6dc1b3ee57deb | 74f7c9a4cf24913e2a9f4e6805bb2e8e25ab7d52 | refs/heads/CMSSW_7_0_X | 2021-01-15T18:35:33.405650 | 2013-07-30T14:59:30 | 2013-07-30T14:59:30 | 11,789,054 | 1 | 1 | null | 2016-04-03T13:48:46 | 2013-07-31T11:06:26 | C++ | UTF-8 | Python | false | false | 797 | py | import FWCore.ParameterSet.Config as cms
#
# $Id: islandClusteringSequence.cff,v 1.7 2007/03/13 17:21:44 futyand Exp $
#
#------------------
#Island clustering:
#------------------
# Island BasicCluster producer
from RecoEcal.EgammaClusterProducers.islandBasicClusters_cfi import *
# Island SuperCluster producer
from RecoEcal.EgammaClusterProducers.islandSuperClusters_cfi import *
# Energy scale correction for Island SuperClusters
from RecoEcal.EgammaClusterProducers.correctedIslandBarrelSuperClusters_cfi import *
from RecoEcal.EgammaClusterProducers.correctedIslandEndcapSuperClusters_cfi import *
# create sequence for island clustering
islandClusteringSequence = cms.Sequence(islandBasicClusters*islandSuperClusters*correctedIslandBarrelSuperClusters*correctedIslandEndcapSuperClusters)
| [
"sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch"
] | sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch |
32fe1679d2feadc46297b9e8dcfb7629e168103a | 469215340934818223c7290b49f204230f3867d2 | /demoapi/manage.py | b8b09ec7e4820ffc2e961559476930a9d75a3f78 | [] | no_license | eph6666/appmesh_apigw | 1952bd0ec2d607ce3674a3aee0652f898ce1537e | 26842b0e564957478fd3ed502a304a53d2c447c4 | refs/heads/main | 2023-08-05T12:32:50.989317 | 2021-09-28T12:41:23 | 2021-09-28T12:41:23 | 395,435,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demoapi.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"ec2-user@ip-10-100-0-198.ec2.internal"
] | ec2-user@ip-10-100-0-198.ec2.internal |
a5e7a3af6377f3d034fb9d3a2eca1be0272c3745 | 4136e5496ae85c987470496b55dc577460aed7cd | /list/forms.py | c5cf0b6d5b61b2bb6332c8171a7c198d00bcf608 | [] | no_license | likass/Auto-Parts | b1ef9743b4375fbfd9ea908b7ac2c840f5269738 | 6910f4952f83620f64f7e8deae1d37b5cccb53d8 | refs/heads/master | 2022-12-22T06:34:10.858959 | 2020-09-23T19:23:47 | 2020-09-23T19:23:47 | 298,070,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | from django import forms
class ContactFormEmail(forms.Form):
name = forms.CharField(required=True, max_length=15, label='Name:')
email = forms.EmailField(required=True, label='Email:')
number = forms.CharField(required=True, max_length=20, label='Phone Number:')
message = forms.CharField(required=True, max_length=100, widget=forms.Textarea, label='Message:')
| [
"noreply@github.com"
] | noreply@github.com |
c8fa4f266de42cc4b9e1d733fe14138c7a67ed74 | 0d71626fe89d1a77e1d6bd535535c2577fa62e15 | /src/functions.py | 53b88d4f56dbf22a13fb3e295189c4ea2e77c854 | [] | no_license | ChanceDurr/Intro-Python-II | 1c311293dd08485a2f4689ded8cd18ffabfc9334 | 74082cfe1707977a2328e3f1c62ea0e9b6ec1d63 | refs/heads/master | 2020-09-23T16:24:26.184847 | 2019-12-04T22:12:48 | 2019-12-04T22:12:48 | 225,540,250 | 0 | 0 | null | 2019-12-03T05:43:05 | 2019-12-03T05:43:04 | null | UTF-8 | Python | false | false | 189 | py | import time, sys, random
# function to print string like typing
def print_slow(s):
for letter in s:
sys.stdout.write(letter)
sys.stdout.flush()
time.sleep(0.05) | [
"chancedurr@gmail.com"
] | chancedurr@gmail.com |
150b4690eaa142413fe75e5d6ef20cccde218311 | 26c54c424dbe79fcd1962bddcbbb218d090eb6fc | /roteiro7/SumFat.py | dc8ede066369033aa05e71a172e8dba32d540bac | [] | no_license | Anap123/python-repo | dbfa4cad586ba5b3bffdc26e70c6170048e59260 | 2532a7cebe88b9987109a93fb5e6c486a5152b22 | refs/heads/master | 2020-08-17T12:52:49.437324 | 2019-05-21T20:55:57 | 2019-05-21T20:55:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | def fat(a):
if(a==1):
return 1
elif(a == 0):
return 0
else:
return (fat(a-1)*a)
soma = 0
for i in range(5):
a = int(input())
if(a%3 == 0):
soma += fat(a)
print(soma)
| [
"punisher077@github.com"
] | punisher077@github.com |
5ce264684ee8cc4bdf3fe7fa5259b05e6e179cd9 | 381b75fe68a4da258e2e60a97105b66ac47214e4 | /qa/rpc-tests/rawtransactions.py | 0ac80a49539a555d1b868ed4d8ea36442667ff7f | [
"MIT"
] | permissive | lipcoin/lipcoin | 3a5997dfc9193ee7dee6f9fa0adc1cb5fb8c92a3 | 7afc0a02d63620e5a5601474cca131cb0cf3bbe4 | refs/heads/master | 2021-01-24T07:57:56.248620 | 2018-03-17T19:04:38 | 2018-03-17T19:04:38 | 112,155,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,666 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The LipCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""rawtranscation RPCs QA test.
# Tests the following RPCs:
# - createrawtransaction
# - signrawtransaction
# - sendrawtransaction
# - decoderawtransaction
# - getrawtransaction
"""
from test_framework.test_framework import LipCoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(LipCoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
#connect to a local machine for debugging
#url = "http://lipcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
#proxy = AuthServiceProxy(url)
#proxy.url = url # store URL on proxy for info
#self.nodes.append(proxy)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
#########################################
# sendrawtransaction with missing input #
#########################################
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransaction(rawtx)
try:
rawtx = self.nodes[2].sendrawtransaction(rawtx['hex'])
except JSONRPCException as e:
assert("Missing inputs" in e.error['message'])
else:
assert(False)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 LIPC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
addr3Obj = self.nodes[2].validateaddress(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
sPK = rawTx['vout'][0]['scriptPubKey']['hex']
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS A INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
| [
"support@lipcoins.org"
] | support@lipcoins.org |
c27074644766ba4228e511a9a1c884d8ec0e431b | ea262de505a1dd5ae1c7b546b85184309c3fdd35 | /src/models/modules/scales.py | 78234592ac2641e1791aa6240573f86204bde16e | [
"MIT"
] | permissive | Runki2018/CvPytorch | 306ff578c5f8d3d196d0834e5cad5adba7a89676 | 1e1c468e5971c1c2b037334f7911ae0a5087050f | refs/heads/master | 2023-08-25T09:48:48.764117 | 2021-10-15T05:11:21 | 2021-10-15T05:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # !/usr/bin/env python
# -- coding: utf-8 --
# @Time : 2021/3/29 9:22
# @Author : liumin
# @File : scales.py
import torch
import torch.nn as nn
class Scale(nn.Module):
"""
A learnable scale parameter
"""
def __init__(self, scale=1.0):
super(Scale, self).__init__()
self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
def forward(self, x):
return x * self.scale
| [
"569793357@qq.com"
] | 569793357@qq.com |
db894d65369524787dfeec3853c0b7a67182ff8b | ed5ad5fbced00d612bf6aeaac8638d449740729a | /phr/phr/doctype/provider/provider.py | a1864f4ea6a7bb347306d01ca8d0f5518d0af3c8 | [] | no_license | pawaranand/phr_5_1 | 36d75ba02076a7aee299e747a58e41bb4481438f | c2b4da71a390fa2fcc77acfc799f79fdb0685335 | refs/heads/master | 2020-12-24T14:10:55.352872 | 2015-02-05T05:21:57 | 2015-02-05T05:21:57 | 30,339,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | # Copyright (c) 2013, indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Provider(Document):
pass
| [
"tejal.s@indictranstech.com"
] | tejal.s@indictranstech.com |
9bbeedd25c985555fadeeb00f94907941765f1b2 | f9f59b407a082b6268199a82a77bb47fa1f55e75 | /oauth_flask/03_full_backend/main.py | 3230cfbaa60e882d5dd67a6afdb22720d30648f2 | [] | no_license | gronka/code_share | ce68ecab374f458659407c95141049b38919306e | e9586519caf9cd38aec4a341e30b9ec93686258d | refs/heads/master | 2022-12-13T03:21:23.033481 | 2020-09-11T01:53:32 | 2020-09-11T01:53:32 | 274,448,155 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from app import app, socketio
import routes
import sroutes
if __name__ == '__main__':
# TODO: Enable encryption by running server on HTTPS
socketio.run(app, debug=True)
| [
"tgronka@cisco.com"
] | tgronka@cisco.com |
12089dfc4e9a18d8b01d45f741d2a07429107f15 | b440967316e2cc67de01f13e29f8070d1220c376 | /db4.py | 79795519a8a55fbfb3427dabd407cc4ada103d70 | [] | no_license | kishorc1999/Kishor-Chintanpalli | ab55f9aacfc51ea340fc63f94fefcd946d1a3c43 | eaabde2e659ade33ba74e23e58c0a7e48aca9f03 | refs/heads/master | 2020-12-09T18:12:45.289778 | 2020-01-15T10:15:29 | 2020-01-15T10:15:29 | 233,380,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | '''
4]Select:
select colname1,.....,colnamen
from <tablename>
where condition
Example:
select fname,lname
from student
where rollno=1
'''
import mysql.connector
c=mysql.connector connect(host='localhost',user='root',passwd='',database='demo')
cur=c.cursor()
x=cur.rxecute("select * from student")
p=cur.fetchall() #fetchone
fot i in p:
print i
c.close() | [
"kisharc1999@gmail.com"
] | kisharc1999@gmail.com |
d457f176565b80c978bfb00733dec4d02f4861d8 | 256644d14bd15f8e1a3e92c95b1655fd36681399 | /pure_python/ga+ppm/main/utilities.py | a07447f3183438919021284b04c4c34a872f020c | [] | no_license | mfbx9da4/neuron-astrocyte-networks | 9d1c0ff45951e45ce1f8297ec62b69ee4159305a | bcf933491bdb70031f8d9c859fc17e0622e5b126 | refs/heads/master | 2021-01-01T10:13:59.099090 | 2018-06-03T12:32:13 | 2018-06-03T12:32:13 | 12,457,305 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,262 | py | import random
import math
from pylab import zeros, where, array, empty_like
def crossover(m1, m2, NN):
# Maybe could be sped up using flatten/reshape output?
net = NN()
r = random.randint(0, net.wi.size + net.wo.size)
output1 = [empty_like(net.wi), empty_like(net.wo)]
output2 = [empty_like(net.wi), empty_like(net.wo)]
for i in xrange(len(m1)):
for j in xrange(len(m1[i])):
for k in xrange(len(m1[i][j])):
if r >= 0:
output1[i][j][k][:] = m1[i][j][k]
output2[i][j][k][:] = m2[i][j][k]
elif r < 0:
output1[i][j][k][:] = m2[i][j][k]
output2[i][j][k][:] = m1[i][j][k]
r -= 1
return output1, output2
def mutate(m, mutation_rate):
# Variation: could include a constant to control
# how much the weight is mutated by
for i in xrange(len(m)):
for j in xrange(len(m[i])):
for k in xrange(len(m[i][j])):
if random.random() < mutation_rate:
m[i][j][k] = random.uniform(-2.0,2.0)
def percentAcc(all_aos, targets):
correct = 0
for i, trg in enumerate(targets):
sample_res = where(trg == array(all_aos[i]), True, False)
if sample_res.all():
correct += 1
total = len(all_aos)
return float(correct) / total
def sigmoid(x):
return math.tanh(x)
def randomizeMatrix(matrix, a, b):
for i in range(len(matrix)):
for j in range(len(matrix[0])):
matrix[i][j] = random.uniform(a, b)
def roulette(fitnessScores):
cumalativeFitness = 0.0
r = random.random()
for i in range(len(fitnessScores)):
cumalativeFitness += fitnessScores[i]
if cumalativeFitness > r:
return i
def calcFit(numbers):
"""each fitness is a fraction of the total error"""
# POTENTIAL IMPROVEMENTS:
# maybe give the better scores much higher weighting?
# maybe use the range to calculate the fitness?
# maybe do ind / range of accuracies?
total, fitnesses = sum(numbers), []
for i in range(len(numbers)):
try:
fitness = numbers[i] / total
except ZeroDivisionError:
print 'individual outputted zero correct responses'
fitness = 0
fitnesses.append(fitness)
return fitnesses
| [
"dalberto.adler@gmail.com"
] | dalberto.adler@gmail.com |
3835bd462d27894a5442d6a412b2dd67de3d593d | 675cdd4d9d2d5b6f8e1383d1e60c9f758322981f | /supervised_learning/0x03-optimization/2-shuffle_data.py | 1fc0ce20d6f011ea71c6f64624e3d65b15d7e653 | [] | no_license | AndresSern/holbertonschool-machine_learning-1 | 5c4a8db28438d818b6b37725ff95681c4757fd9f | 7dafc37d306fcf2ea0f5af5bd97dfd78d388100c | refs/heads/main | 2023-07-11T04:47:01.565852 | 2021-08-03T04:22:38 | 2021-08-03T04:22:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | #!/usr/bin/env python3
"""
Shuffles the data points in two matrices the same way
"""
import numpy as np
def shuffle_data(X, Y):
"""
Shuffles the data points in two matrices the same way
"""
i = np.random.permutation(np.arange(X.shape[0]))
return X[i], Y[i]
| [
"bouzouitina.hamdi@gmail.com"
] | bouzouitina.hamdi@gmail.com |
0e11d74b63a525a47ac0423bcedf37d6db871a31 | 6e47be4e22ab76a8ddd7e18c89f5dc4f18539744 | /venv/openshift/lib/python3.6/site-packages/kubernetes/client/models/v1_security_context.py | 4be4451ac2cdd63dad12dfb4d7fa91b4d3b660a7 | [] | no_license | georgi-mobi/redhat_ocp4.5_training | 21236bb19d04a469c95a8f135188d3d1ae473764 | 2ccaa90e40dbbf8a18f668a5a7b0d5bfaa1db225 | refs/heads/main | 2023-03-30T10:47:08.687074 | 2021-04-01T05:25:49 | 2021-04-01T05:25:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,971 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1SecurityContext(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allow_privilege_escalation': 'bool',
'capabilities': 'V1Capabilities',
'privileged': 'bool',
'proc_mount': 'str',
'read_only_root_filesystem': 'bool',
'run_as_group': 'int',
'run_as_non_root': 'bool',
'run_as_user': 'int',
'se_linux_options': 'V1SELinuxOptions'
}
attribute_map = {
'allow_privilege_escalation': 'allowPrivilegeEscalation',
'capabilities': 'capabilities',
'privileged': 'privileged',
'proc_mount': 'procMount',
'read_only_root_filesystem': 'readOnlyRootFilesystem',
'run_as_group': 'runAsGroup',
'run_as_non_root': 'runAsNonRoot',
'run_as_user': 'runAsUser',
'se_linux_options': 'seLinuxOptions'
}
def __init__(self, allow_privilege_escalation=None, capabilities=None, privileged=None, proc_mount=None, read_only_root_filesystem=None, run_as_group=None, run_as_non_root=None, run_as_user=None, se_linux_options=None):
"""
V1SecurityContext - a model defined in Swagger
"""
self._allow_privilege_escalation = None
self._capabilities = None
self._privileged = None
self._proc_mount = None
self._read_only_root_filesystem = None
self._run_as_group = None
self._run_as_non_root = None
self._run_as_user = None
self._se_linux_options = None
self.discriminator = None
if allow_privilege_escalation is not None:
self.allow_privilege_escalation = allow_privilege_escalation
if capabilities is not None:
self.capabilities = capabilities
if privileged is not None:
self.privileged = privileged
if proc_mount is not None:
self.proc_mount = proc_mount
if read_only_root_filesystem is not None:
self.read_only_root_filesystem = read_only_root_filesystem
if run_as_group is not None:
self.run_as_group = run_as_group
if run_as_non_root is not None:
self.run_as_non_root = run_as_non_root
if run_as_user is not None:
self.run_as_user = run_as_user
if se_linux_options is not None:
self.se_linux_options = se_linux_options
@property
def allow_privilege_escalation(self):
"""
Gets the allow_privilege_escalation of this V1SecurityContext.
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
:return: The allow_privilege_escalation of this V1SecurityContext.
:rtype: bool
"""
return self._allow_privilege_escalation
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, allow_privilege_escalation):
"""
Sets the allow_privilege_escalation of this V1SecurityContext.
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
:param allow_privilege_escalation: The allow_privilege_escalation of this V1SecurityContext.
:type: bool
"""
self._allow_privilege_escalation = allow_privilege_escalation
@property
def capabilities(self):
"""
Gets the capabilities of this V1SecurityContext.
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:return: The capabilities of this V1SecurityContext.
:rtype: V1Capabilities
"""
return self._capabilities
@capabilities.setter
def capabilities(self, capabilities):
"""
Sets the capabilities of this V1SecurityContext.
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param capabilities: The capabilities of this V1SecurityContext.
:type: V1Capabilities
"""
self._capabilities = capabilities
@property
def privileged(self):
"""
Gets the privileged of this V1SecurityContext.
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:return: The privileged of this V1SecurityContext.
:rtype: bool
"""
return self._privileged
@privileged.setter
def privileged(self, privileged):
"""
Sets the privileged of this V1SecurityContext.
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param privileged: The privileged of this V1SecurityContext.
:type: bool
"""
self._privileged = privileged
@property
def proc_mount(self):
"""
Gets the proc_mount of this V1SecurityContext.
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:return: The proc_mount of this V1SecurityContext.
:rtype: str
"""
return self._proc_mount
@proc_mount.setter
def proc_mount(self, proc_mount):
"""
Sets the proc_mount of this V1SecurityContext.
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:param proc_mount: The proc_mount of this V1SecurityContext.
:type: str
"""
self._proc_mount = proc_mount
@property
def read_only_root_filesystem(self):
    """Whether this container has a read-only root filesystem.

    Default is false.

    :rtype: bool
    """
    return self._read_only_root_filesystem


@read_only_root_filesystem.setter
def read_only_root_filesystem(self, value):
    """Set the read_only_root_filesystem flag of this V1SecurityContext.

    :param value: bool — see the property docstring.
    """
    self._read_only_root_filesystem = value
@property
def run_as_group(self):
    """GID used to run the entrypoint of the container process.

    Uses the runtime default if unset. May also be set in
    PodSecurityContext; a value set here takes precedence.

    :rtype: int
    """
    return self._run_as_group


@run_as_group.setter
def run_as_group(self, value):
    """Set the run_as_group of this V1SecurityContext.

    :param value: int GID — see the property docstring.
    """
    self._run_as_group = value
@property
def run_as_non_root(self):
    """Whether the container must run as a non-root user.

    If true, the Kubelet validates the image at runtime and refuses to
    start the container if it would run as UID 0 (root). If unset or
    false, no such validation is performed. May also be set in
    PodSecurityContext; a value set here takes precedence.

    :rtype: bool
    """
    return self._run_as_non_root


@run_as_non_root.setter
def run_as_non_root(self, value):
    """Set the run_as_non_root flag of this V1SecurityContext.

    :param value: bool — see the property docstring.
    """
    self._run_as_non_root = value
@property
def run_as_user(self):
    """UID used to run the entrypoint of the container process.

    Defaults to the user specified in image metadata if unspecified.
    May also be set in PodSecurityContext; a value set here takes
    precedence.

    :rtype: int
    """
    return self._run_as_user


@run_as_user.setter
def run_as_user(self, value):
    """Set the run_as_user of this V1SecurityContext.

    :param value: int UID — see the property docstring.
    """
    self._run_as_user = value
@property
def se_linux_options(self):
    """SELinux context to be applied to the container.

    If unspecified, the container runtime allocates a random SELinux
    context for each container. May also be set in PodSecurityContext;
    a value set here takes precedence.

    :rtype: V1SELinuxOptions
    """
    return self._se_linux_options


@se_linux_options.setter
def se_linux_options(self, value):
    """Set the se_linux_options of this V1SecurityContext.

    :param value: V1SELinuxOptions — see the property docstring.
    """
    self._se_linux_options = value
def to_dict(self):
    """Return the model properties as a plain dict.

    Values exposing ``to_dict`` are converted; inside lists and dict
    values the conversion is applied one level deep only, mirroring the
    swagger-codegen behaviour.
    """
    result = {}
    for attr, _ in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                            for item in value]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                            for k, v in value.items()}
        else:
            result[attr] = value
    return result
def to_str(self):
    """
    Returns the pretty-printed string representation of the model,
    built from :meth:`to_dict`.
    """
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint` — delegates to :meth:`to_str`.
    """
    return self.to_str()
def __eq__(self, other):
    """Two V1SecurityContext objects are equal iff all attributes match.

    Non-V1SecurityContext operands always compare unequal.
    """
    return isinstance(other, V1SecurityContext) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """
    Inverse of ``__eq__``: returns true if both objects are not equal.
    """
    return not self == other
| [
"student@workstation.lab.example.com"
] | student@workstation.lab.example.com |
e443ede8da949150803c68a4b70b2b62737d5908 | 3983bcdc11716c59a58bc0d14e23e1042297679d | /classes.py | e63789b8c286f77524e36dd17affaf6b0f219a43 | [] | no_license | Suman-1/learn-git- | 5534aa67961afd8cfb7101457933f2fd303e8f6a | a2942b2cd6b6339f9dae6e17a0d8870fa2f1575d | refs/heads/master | 2021-06-29T00:31:39.311634 | 2017-09-20T04:47:37 | 2017-09-20T04:47:37 | 104,163,896 | 0 | 0 | null | 2017-09-20T04:47:37 | 2017-09-20T04:05:02 | Python | UTF-8 | Python | false | false | 790 | py | students = []
class Student:
    """A student enrolled at New Summit College.

    Instantiating a Student registers it in the module-level
    ``students`` list as a side effect.
    """

    school_name = "New Summit College"

    def __init__(self, name, student_id=332):
        self.name = name
        self.student_id = student_id
        students.append(self)  # register in the module-level roster

    def __str__(self):
        # No separator between the prefix and the name, matching the
        # original output format.
        return "Student" + self.name

    def get_name_capitalize(self):
        """Return the student's name with its first letter capitalized."""
        return self.name.capitalize()

    def get_school_name(self):
        """Return the (class-level) school name."""
        return self.school_name
class HighSchoolStudent(Student):
    """A high-school student; specializes Student with its own school name."""

    Highschool_name = "New summit High School"

    def get_Highschool_name(self):
        """Return a fixed description of this student type."""
        return 'This is high school student'

    def get_name_capitalize(self):
        # FIX: this override was declared without a body in the original
        # source (its statements were lost), which left the file invalid.
        # Delegate to the parent so the method is well-defined.
        return super().get_name_capitalize()


# Demo: create one high-school student and print the description.
hari = HighSchoolStudent('hari')
print(hari.get_Highschool_name())
| [
"suman33.sb@gmail.com"
] | suman33.sb@gmail.com |
5bcc2f9b22961570a666d8d96e07b0b6074ec6fe | e9f9a0a10fe5da02613f8662e2eaf82ee75ac2ef | /VerificadorLexer.py | 6638b0e23064583cdc9c1229074afa23295e92da | [] | no_license | SantiJimenez/ExpresionesRegularesLex | dbaaff82ef3e7da0f8690c77400352b45d9a6d5f | 79a55cae575c01377303d62f27b5e6e4cee3da9b | refs/heads/master | 2021-07-13T01:28:59.976876 | 2017-10-16T23:50:38 | 2017-10-16T23:50:38 | 106,971,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import sys
import ply.lex as lex
# Token names required by PLY; every t_* rule below must correspond
# to one of these.
tokens = [ 'NAME','NUMBER','PLUS','MINUS','TIMES','DIVIDE', 'EQUALS' ]

# Characters skipped between tokens (spaces and tabs).
t_ignore = ' \t'

# Single-character operator tokens (regexes; special chars escaped).
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
# Identifiers: a letter or underscore, then letters/digits/underscores.
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
    r'\d+'
    # PLY uses this docstring as the token's regular expression.
    # The matched text is kept as a string — the original contained a
    # no-op self-assignment (t.value = (t.value)) which has been removed;
    # no int() conversion is performed, so downstream code sees strings.
    return t
# Error handling rule: report the offending character and abort.
def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
    # NOTE: exits the whole process on the first illegal character, so
    # the skip(1) above never has a visible effect.
    sys.exit(1)

lex.lex()  # Build the lexer from the rules defined above
def verificarCadena(cadena):
    """Tokenize *cadena* and return the list of token values.

    Each token is echoed to stdout as "<value> - <type>".  On an
    illegal character, t_error prints a message and exits the process.
    """
    listaCaracteres = []
    lex.input(cadena)
    while True:
        tok = lex.token()
        # FIX: the original compared with `!= None` and used a Python-2
        # `print` statement (a SyntaxError under Python 3).
        if tok is None:
            break
        listaCaracteres.append(tok.value)
        print(str(tok.value) + " - " + str(tok.type))
    return listaCaracteres
"santijimenezbonilla@gmail.com"
] | santijimenezbonilla@gmail.com |
4a2ad827b35462efaa2c4028162422314a8ffc82 | f6ad34f1eed97340f796ea083a71e6e2d38a3d26 | /src/libs/lwip/SConstruct | 8b06b31aca08f93f63e7ee4ace2da74fba04e072 | [] | no_license | gz/aos10 | 606abb223563c4f6df6f163c07b0290ab2d95795 | b204e8fc29860ce03155a08f7e8d8748180a4f14 | refs/heads/master | 2020-03-26T02:38:11.918982 | 2011-02-10T12:39:06 | 2011-02-10T12:39:06 | 1,464,801 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 899 | Import("env")
# SCons build description for the lwIP TCP/IP stack library.

# Header directories exported to dependents of this library.
public_headers = ["#libs/lwip/include", "#libs/lwip/include/ipv4"]

# lwIP core sources (IPv4, TCP, UDP, memory/stats infrastructure).
# NOTE(review): core/stats.c is listed twice — presumably harmless
# duplication; confirm before relying on it.
srccorelist = """core/inet.c core/ipv4/icmp.c core/ipv4/ip.c core/ipv4/ip_addr.c
core/mem.c core/memp.c core/netif.c core/pbuf.c core/stats.c
core/stats.c core/sys.c core/tcp.c core/tcp_input.c
core/tcp_output.c core/tcp_pcb.c core/udp.c"""
# Network-interface and OS-glue sources.
srcotherlist = "netif/etharp.c sos/sosif.c"
# Libraries this library links against.
liblist = "c ixp_osal ixp400_xscale_sw"

# Build flags extend the environment's defaults.
cppdefines = env["CPPDEFINES"] + ["LWIP_DEBUG", "l4aos"]
cpppath = env["CPPPATH"] + ["#sos"] # Grab sos headers
cc_warnings = env["CC_WARNINGS"] + ["no-redundant-decls", "no-format"]

lib = env.MyLibrary("lwip",
                    source = Split(srccorelist) + Split(srcotherlist),
                    public_headers = public_headers,
                    LIBS = Split(liblist),
                    CPPDEFINES = cppdefines,
                    CPPPATH = cpppath,
                    CC_WARNINGS = cc_warnings)

Return("lib")
| [
"mail@gerdzellweger.com"
] | mail@gerdzellweger.com | |
61fe97d3889316ac2e1c1db3d2950d8f741a2f9b | 9f3a6caf1f80f48c8a06850348da44ff3666ad06 | /Python programs/lab13.py | 8f016f7ffcf6e392513389c47c412d503d7fcbad | [] | no_license | Coadiey/Python-Projects | f507a9d07c83638ad779ee36c1139c4f7ce860ea | f519531ee874d58c1fd7742257074a24fc42140a | refs/heads/master | 2020-03-20T23:29:39.256613 | 2018-09-23T13:49:32 | 2018-09-23T13:49:32 | 137,848,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | # Lab #13
# Coadiey Bryan/C00039405
# Lecture Section #3
#
# Grade 5 student answer sheets against a 20-question answer key.
# The key is read from "key.py" (one answer per line) and the answers
# from "studentAnswers.py" (one 20-character line per student).

key = []

print()
print("Grading Key: ")
# Read the 20-line answer key; 'with' guarantees the file is closed
# even if a read fails (the original leaked the handle on error).
with open("key.py", "r") as infile:
    for _ in range(20):
        key.append(infile.readline().strip())

# Print the key list, all on one line.
print(key)
print()

print("Student Grades: ")
with open("studentAnswers.py", "r") as infile:
    # Read 5 student answer sets.
    for _ in range(5):
        answer = infile.readline().strip()
        # Count how many of the 20 answers match the key.
        # (The original shadowed the outer loop variable with the inner
        # one and had a no-op `else: count += 0` branch.)
        count = 0
        for i in range(20):
            if key[i] == answer[i]:
                count += 1
        print("Grade =", count, "/20")
"noreply@github.com"
] | noreply@github.com |
a57be1ccd70d379bb3f5a52585486e4ae5826738 | a141feed9b22cfe4f10b3008aed8311a64129113 | /ShapNet/utils/named_tensor_utils/private_methods.py | cdee94c75f6f3841fbc74e95c7ad793864e83b03 | [
"MIT"
] | permissive | Tzq2doc/ShapleyExplanationNetworks | 3f8dd34cee380701cc8bf74fbedd43cc7ff54c8c | cab6644677894f0ac88610d2f9cfca239068f403 | refs/heads/main | 2023-02-18T03:22:45.309584 | 2021-01-16T09:15:36 | 2021-01-16T09:15:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : Rui Wang
# =============================================================================
"""
Basic methods to composite later
"""
# =============================================================================
# Imports
# =============================================================================
from typing import List, Tuple
import torch
# =============================================================================
# constants
# =============================================================================
TensorName = str # Just used to not confuse anyone
# =============================================================================
# functions
# =============================================================================
def dummy(any, *args, **kwargs):
    """
    A do-nothing placeholder callable: returns its first argument and
    ignores all other positional and keyword arguments.

    Args:
        any (): the value to pass through (note: the parameter shadows
            the builtin ``any``; kept for interface compatibility)
    Returns:
        the first argument, unchanged
    """
    return any
def name_insert(names: Tuple[TensorName], index: int, name: TensorName,
                *args, **kwargs) -> List[TensorName]:
    """Insert *name* into *names* at *index*, returning a new list.

    ``index == -1`` appends at the end; any other negative index is
    shifted by one, so e.g. -2 inserts just before the last entry.
    Extra positional/keyword arguments are accepted and ignored.
    """
    result = list(names)
    if index == -1:
        index = len(result)
    elif index < 0:
        index += 1
    result.insert(index, name)
    return result
def add_dim(tensor: torch.Tensor, index: int,
            *args, **kwargs) -> torch.Tensor:
    """Return *tensor* with a new size-1 dimension inserted at *index*.

    Thin wrapper around ``torch.unsqueeze``; extra positional and
    keyword arguments are accepted and ignored.
    """
    return torch.unsqueeze(tensor, index)
| [
"ruiwang1998@outlook.com"
] | ruiwang1998@outlook.com |
226c865167e07676ff6c7ae34795ab2942ead5d5 | 96ed412ec58a110e765ec6a61cd81daded26aa5c | /main.py | 4f98ecdd78772bd4d362ffc1a9989875b0a55c40 | [] | no_license | abrohit/PythonAnywhere-Automation | 260591a8052499c46b820230aeb20ef7bd429860 | 3c67ebbeaa38e41c59e640f4c6f3f3eca47b8310 | refs/heads/main | 2023-04-25T19:40:56.578169 | 2021-06-09T07:20:05 | 2021-06-09T07:20:05 | 375,238,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | from dotenv import dotenv_values
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
class Automate():
    """Keeps a PythonAnywhere web app and scheduled task alive.

    Logs in with the given credentials using a headless Chrome driver,
    then clicks the renewal buttons on the web-app and tasks pages.
    """

    def __init__(self, email: str, password: str):
        # BASEURL is filled in by login() with the post-login URL.
        self.BASEURL = ''
        self.options = Options()
        self.options.add_argument("--start-maximized")  # Maximizes the browser.
        self.options.add_argument('disable-notifications')  # Disables all notifcations.
        self.options.add_argument('--disable-infobars')  # Removes 'Controlled by bot' on Chrome.
        self.options.add_argument('--headless')  # Hides the browser.
        # Driver binary is expected next to this script.
        self.driver = webdriver.Chrome('chromedriver.exe', options = self.options)
        self.EMAIL = email
        self.PASS = password

    def login(self):
        """Log the user in and remember the post-login URL."""
        self.driver.get('https://www.pythonanywhere.com/login/')  # Gets login page.
        self.driver.find_element_by_name('auth-username').send_keys(self.EMAIL)  # Enters Email.
        time.sleep(0.5)  # short pauses let the page settle between actions
        self.driver.find_element_by_name('auth-password').send_keys(self.PASS)  # Enters Password.
        self.driver.find_element_by_id('id_next').click()  # Clicks on login button.
        time.sleep(1)
        self.BASEURL = self.driver.current_url  # Gets URL once logged in.

    def webapp(self):
        """Renew the web app ('Run for 3 months' button)."""
        self.driver.get(self.BASEURL + 'webapps/')
        time.sleep(0.5)
        # Hard-coded XPath of the 'Run for 3 months' button — brittle if
        # the page layout changes.
        self.driver.find_element_by_xpath("/html/body/div[1]/div[2]/div/div[2]/div/div/div[6]/div/div/div/form/input[2]").click()

    def tasks(self):
        """Renew the scheduled task ('Extend Expiry' button)."""
        self.driver.get(self.BASEURL + 'tasks_tab/')
        time.sleep(0.5)
        # Hard-coded XPath of the 'Extend Expiry' button.
        self.driver.find_element_by_xpath("/html/body/div[1]/div[2]/div[2]/div[3]/div/div/table/tbody/tr/td[5]/button[4]").click()

    def run(self):
        """Log in, then refresh both the web app and the scheduled task."""
        self.login()
        self.webapp()
        self.tasks()

    def quit(self):
        """Shut the browser down."""
        self.driver.quit()
if __name__ == '__main__':
    # Credentials come from a local .env file with "email" and "pass" keys.
    config = dotenv_values(".env")
    webapp = Automate(config['email'], config['pass'])
    webapp.run()
    webapp.quit()
| [
"abrohit05@gmail.com"
] | abrohit05@gmail.com |
7186624e5d8aa50538719f321aff330cca21aa6a | ef43b82725b15afac2e3d815035b25a0641659eb | /data_prep/06_channel_extract_piano.py | 9bbf3e65372b55df0afb6b82649e35e53ad79540 | [] | no_license | nikolasborrel/dlmusic | 89d36ad817792372672f12e0904e452c4034351a | 6041eff54d51fcea198fe2a1c244f3cbc5bfa086 | refs/heads/main | 2023-02-16T06:28:59.037437 | 2021-01-14T09:57:49 | 2021-01-14T09:57:49 | 313,302,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,171 | py | """
Extract piano MIDI files. Some piano tracks are split into multiple separate
piano instruments, in which case we keep them split and merge them into
multiple MIDI files.
From "Music Generation with Magenta", filename chapter_06_example_06.py
"""
import argparse
import copy
import os
import random
import shutil
import timeit
from itertools import cycle
from multiprocessing import Manager
from multiprocessing.pool import Pool
from typing import List
from typing import Optional
import matplotlib.pyplot as plt
import tables
from pretty_midi import Instrument
from pretty_midi import PrettyMIDI
from lakh_utils import get_matched_midi_md5
from lakh_utils import get_midi_path
from lakh_utils import get_msd_score_matches
from lakh_utils import msd_id_to_h5
from multiprocessing_utils import AtomicCounter
# Command-line arguments: dataset location, parallelism, sample size.
parser = argparse.ArgumentParser()
parser.add_argument("--sample_size", type=int, default=1000)
parser.add_argument("--pool_size", type=int, default=4)
parser.add_argument("--path_dataset_dir", type=str, required=True)
parser.add_argument("--path_match_scores_file", type=str, required=True)
parser.add_argument("--path_output_dir", type=str, required=True)
args = parser.parse_args()

# The list of all MSD ids (we might process only a sample)
MSD_SCORE_MATCHES = get_msd_score_matches(args.path_match_scores_file)
# MIDI programs 0-7 are the piano family in the General MIDI spec.
PIANO_PROGRAMS = list(range(0, 8))
def extract_pianos(msd_id: str) -> List[PrettyMIDI]:
    """Extract the separate piano tracks of an MSD id as PrettyMIDI objects.

    Non-piano and drum instruments are discarded.  When several piano
    instruments remain, each is split into its own single-instrument
    PrettyMIDI copy.

    :param msd_id: the MSD id
    :return: list of single-instrument PrettyMIDI objects
    :raises Exception: if a split track does not contain exactly one
        instrument, or a piano track is longer than 1000 seconds
    """
    os.makedirs(args.path_output_dir, exist_ok=True)
    midi_md5 = get_matched_midi_md5(msd_id, MSD_SCORE_MATCHES)
    midi_path = get_midi_path(msd_id, midi_md5, args.path_dataset_dir)
    pm = PrettyMIDI(midi_path)
    # Keep only (non-drum) piano-family instruments.
    pm.instruments = [inst for inst in pm.instruments
                      if inst.program in PIANO_PROGRAMS and not inst.is_drum]
    if len(pm.instruments) > 1:
        pm_pianos = []
        for piano_instrument in pm.instruments:
            # Clone the whole file, then replace its instruments with a
            # fresh single-piano instrument holding this track's notes.
            clone = copy.deepcopy(pm)
            solo = Instrument(program=piano_instrument.program)
            solo.notes.extend(piano_instrument.notes)
            clone.instruments = [solo]
            pm_pianos.append(clone)
    else:
        pm_pianos = [pm]
    # Validate every resulting track before returning.
    for pm_piano in pm_pianos:
        if len(pm_piano.instruments) != 1:
            raise Exception(f"Invalid number of piano {msd_id}: "
                            f"{len(pm_piano.instruments)}")
        if pm_piano.get_end_time() > 1000:
            raise Exception(f"Piano track too long {msd_id}: "
                            f"{pm_piano.get_end_time()}")
    return pm_pianos
def process(msd_id: str, counter: AtomicCounter) -> Optional[dict]:
    """Extract and write the piano MIDI files for one MSD id.

    Always increments *counter*.  On failure the exception is printed
    and None is returned, so a failed track does not stop the pool.

    :param msd_id: the MSD id to process
    :param counter: the counter to increment
    :return: dict with the MSD id and the PrettyMIDI pianos, or None
    """
    try:
        h5_path = msd_id_to_h5(msd_id, args.path_dataset_dir)
        # The h5 handle itself is unused — presumably opened to verify
        # that the matched entry exists; confirm before removing.
        with tables.open_file(h5_path) as h5:
            pm_pianos = extract_pianos(msd_id)
            for index, pm_piano in enumerate(pm_pianos):
                destination = os.path.join(args.path_output_dir,
                                           f"{msd_id}_{index}.mid")
                pm_piano.write(destination)
            return {"msd_id": msd_id, "pm_pianos": pm_pianos}
    except Exception as e:
        print(f"Exception during processing of {msd_id}: {e}")
    finally:
        counter.increment()
def app(msd_ids: List[str]):
    """Extract pianos for every given MSD id in parallel and summarize.

    Wipes the output directory, fans the work out over a process pool,
    prints a success summary and shows a histogram of piano lengths.
    """
    start = timeit.default_timer()

    # Cleanup the output directory
    shutil.rmtree(args.path_output_dir, ignore_errors=True)

    # Starts the threads
    with Pool(args.pool_size) as pool:
        manager = Manager()
        counter = AtomicCounter(manager, len(msd_ids))
        print("START")
        results = pool.starmap(process, zip(msd_ids, cycle([counter])))
        # Drop the None entries returned for failed tracks.
        results = [result for result in results if result]
        print("END")
        results_percentage = len(results) / len(msd_ids) * 100
        print(f"Number of tracks: {len(MSD_SCORE_MATCHES)}, "
              f"number of tracks in sample: {len(msd_ids)}, "
              f"number of results: {len(results)} "
              f"({results_percentage:.2f}%)")

    # Creates an histogram for the piano lengths
    pm_pianos_list = [result["pm_pianos"] for result in results]
    pm_piano_lengths = [pm_piano.get_end_time()
                        for pm_pianos in pm_pianos_list
                        for pm_piano in pm_pianos]
    #plt.figure(num=None, figsize=(10, 8), dpi=500)
    plt.hist(pm_piano_lengths, bins=100, color="darkmagenta")
    plt.title('Piano lengths')
    plt.ylabel('length (sec)')
    plt.show()

    stop = timeit.default_timer()
    print("Time: ", stop - start)
if __name__ == "__main__":
    if args.sample_size:
        # Process a random sample of the matched dataset.
        MSD_IDS = random.sample(list(MSD_SCORE_MATCHES), args.sample_size)
    else:
        # Process all the dataset
        MSD_IDS = list(MSD_SCORE_MATCHES)
    app(MSD_IDS)
| [
"nibor@elektro.dtu.dk"
] | nibor@elektro.dtu.dk |
a954d3cf5bcbd5f4955cd6046b9d99f627473f01 | 27719c23049f2b53571f781c5b8e385243e2f752 | /pokemonStrengthChart.py | 93989230abd3a0819ebcd37403b7d3a333e08fd7 | [
"MIT"
] | permissive | importhuman/cmd_pokemon | 249f43c3e46e366913e2ac3910281e348b5e5c99 | 184da8b3bad67f07032baaac3f79bcb34aaa0eda | refs/heads/master | 2022-12-25T08:34:57.284178 | 2020-10-07T14:33:03 | 2020-10-07T14:33:03 | 300,285,672 | 0 | 0 | MIT | 2020-10-07T14:33:04 | 2020-10-01T13:16:18 | Python | UTF-8 | Python | false | false | 1,336 | py | pokemon_types = ['fire', 'water', 'grass', 'electric', 'flying', 'normal', 'ice', 'ground',
'poison', 'bug', 'rock', 'steel', 'ghost', 'dark', 'psychic']
# Offensive matchups: attacking type -> defending types it is strong
# against.  [None] marks an empty entry.
# NOTE(review): the table only covers the 15 types in pokemon_types and
# has not been verified against the official chart (e.g. Psychic vs
# Poison is missing from the advantages); confirm before relying on it.
typeAdantages = {
    'fire': ['grass', 'steel', 'bug', 'ice'],
    'water': ['fire', 'rock', 'ground'],
    'grass': ['water', 'rock', 'ground'],
    'electric': ['water', 'flying'],
    'ice': ['grass', 'flying'],
    'psychic': [None],
    'ghost': ['psychic'],
    'dark': ['psychic', 'ghost'],
    'flying': ['bug', 'grass'],
    'normal': [None],
    'ground': ['fire', 'steel', 'rock', 'electric'],
    'poison': ['grass'],
    'steel': ['rock', 'ice'],
    'bug': ['psychic', 'dark'],
    'rock': ['ice', 'flying', 'fire', 'electric']
}

# Defensive matchups: type -> attacking types it is weak against.
typeDisadantages = {
    'fire': ['water', 'rock', 'ground'],
    'water': ['grass', 'electric'],
    'grass': ['fire', 'bug', 'flying', 'poison'],
    'electric': ['rock', 'ground', 'steel'],
    'ice': ['steel', 'rock', 'fire'],
    'psychic': ['ghost', 'dark', 'bug'],
    'ghost': ['dark'],
    'dark': ['bug'],
    'flying': ['electric', 'ice', 'rock'],
    'normal': [None],
    'ground': ['water', 'grass', 'bug', 'ice'],
    'poison': ['ground', 'psychic'],
    'steel': ['fire', 'ground'],
    'bug': ['fire', 'flying', 'rock'],
    'rock': ['water', 'grass', 'steel']
}

# Correctly-spelled aliases; the misspelled names above are kept because
# existing code refers to them.
typeAdvantages = typeAdantages
typeDisadvantages = typeDisadantages
"noreply@github.com"
] | noreply@github.com |
0a6018e764277307ff25e5c7a6a56f1c5fdf6781 | 59b12d062154cf81d745a88defb8d5c77636a343 | /venv/Lib/site-packages/sqlalchemy/__init__.py | ddcf8e1add95a7313dbcac165e2bd5313f16b1d6 | [] | no_license | PyGrameWu/LocalNewsForTour | 4208877d1f914793648d9c59130336a446dd7235 | 38580f806b9dd0c85e928f485597bafb556ff529 | refs/heads/master | 2023-07-11T16:36:45.809100 | 2021-08-14T13:59:14 | 2021-08-14T13:59:14 | 395,945,435 | 23 | 10 | null | null | null | null | UTF-8 | Python | false | false | 4,085 | py | # sqlalchemy/__init__.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from . import util as _util
from .engine import create_engine
from .engine import create_mock_engine
from .engine import engine_from_config
from .inspection import inspect
from .schema import BLANK_SCHEMA
from .schema import CheckConstraint
from .schema import Column
from .schema import ColumnDefault
from .schema import Computed
from .schema import Constraint
from .schema import DDL
from .schema import DefaultClause
from .schema import FetchedValue
from .schema import ForeignKey
from .schema import ForeignKeyConstraint
from .schema import Identity
from .schema import Index
from .schema import MetaData
from .schema import PrimaryKeyConstraint
from .schema import Sequence
from .schema import Table
from .schema import ThreadLocalMetaData
from .schema import UniqueConstraint
from .sql import alias
from .sql import all_
from .sql import and_
from .sql import any_
from .sql import asc
from .sql import between
from .sql import bindparam
from .sql import case
from .sql import cast
from .sql import collate
from .sql import column
from .sql import delete
from .sql import desc
from .sql import distinct
from .sql import except_
from .sql import except_all
from .sql import exists
from .sql import extract
from .sql import false
from .sql import func
from .sql import funcfilter
from .sql import insert
from .sql import intersect
from .sql import intersect_all
from .sql import join
from .sql import LABEL_STYLE_DEFAULT
from .sql import LABEL_STYLE_DISAMBIGUATE_ONLY
from .sql import LABEL_STYLE_NONE
from .sql import LABEL_STYLE_TABLENAME_PLUS_COL
from .sql import lambda_stmt
from .sql import lateral
from .sql import literal
from .sql import literal_column
from .sql import modifier
from .sql import not_
from .sql import null
from .sql import nulls_first
from .sql import nulls_last
from .sql import nullsfirst
from .sql import nullslast
from .sql import or_
from .sql import outerjoin
from .sql import outparam
from .sql import over
from .sql import select
from .sql import subquery
from .sql import table
from .sql import tablesample
from .sql import text
from .sql import true
from .sql import tuple_
from .sql import type_coerce
from .sql import union
from .sql import union_all
from .sql import update
from .sql import values
from .sql import within_group
from .types import ARRAY
from .types import BIGINT
from .types import BigInteger
from .types import BINARY
from .types import BLOB
from .types import BOOLEAN
from .types import Boolean
from .types import CHAR
from .types import CLOB
from .types import DATE
from .types import Date
from .types import DATETIME
from .types import DateTime
from .types import DECIMAL
from .types import Enum
from .types import FLOAT
from .types import Float
from .types import INT
from .types import INTEGER
from .types import Integer
from .types import Interval
from .types import JSON
from .types import LargeBinary
from .types import NCHAR
from .types import NUMERIC
from .types import Numeric
from .types import NVARCHAR
from .types import PickleType
from .types import REAL
from .types import SMALLINT
from .types import SmallInteger
from .types import String
from .types import TEXT
from .types import Text
from .types import TIME
from .types import Time
from .types import TIMESTAMP
from .types import TypeDecorator
from .types import Unicode
from .types import UnicodeText
from .types import VARBINARY
from .types import VARCHAR
__version__ = "1.4.22"
def __go(lcls):
    """Finalize the package namespace at import time.

    Builds ``__all__`` from the given namespace (skipping private names
    and module objects), preloads the registered sqlalchemy submodules,
    and stamps ``exc`` with a major+minor version token.
    """
    global __all__

    # Imported for their import-time side effects; names unused here.
    from . import events
    from . import util as _sa_util

    # Aliased so it is not confused with the package-level ``inspect``
    # imported above from .inspection.
    import inspect as _inspect

    __all__ = sorted(
        name
        for name, obj in lcls.items()
        if not (name.startswith("_") or _inspect.ismodule(obj))
    )

    _sa_util.preloaded.import_prefix("sqlalchemy")

    from . import exc

    # e.g. "1.4.22" -> "14" (major and minor digits concatenated).
    exc._version_token = "".join(__version__.split(".")[0:2])


__go(locals())
| [
"549073688@qq.com"
] | 549073688@qq.com |
a231312ca77fdeac22c60185fe25bd5d22a51476 | 57cc185c3f08d6f76390f6e444e47ff1e9e630a8 | /pic.py | a32b15c1bbcd9fddd7c5e971090f67df26f3e823 | [] | no_license | VDashenka/hole-boring | 81a23cac203a4635c9c3d7ba36c338258c028245 | 121bc262f43f720f28355e71cfc5d3f0dbcb095d | refs/heads/master | 2022-12-25T17:22:58.323971 | 2020-10-06T09:53:03 | 2020-10-06T09:53:03 | 298,557,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
from scipy.integrate import solve_ivp
from os import listdir
from os.path import isfile, join
# In[2]:
def full_path(data_name, n,
              base_dir='/home/dasha/picador/1.0rc1/bin/m=1/n5a25/BasicOutput/data'):
    """Build the path of simulation output frame *n* for dataset *data_name*.

    :param data_name: output subdirectory, e.g. 'Ey' or 'electronphase'
    :param n: time-step index, zero-padded to six digits
    :param base_dir: root of the BasicOutput data tree; the default keeps
        the originally hard-coded location, so existing calls behave the same
    """
    return '{:s}/{:s}/{:06d}.bin'.format(base_dir, data_name, n)
def load2DfromFile(dataname, f, ny):
    """Load frame *f* of dataset *dataname* as a 2-D float32 array.

    The file is a raw dump of single-precision floats; the array is
    reshaped to *ny* rows with the column count inferred.
    """
    # 'with' guarantees the handle is closed even if the read fails
    # (the original closed it manually and leaked on error).
    with open(full_path(dataname, f), 'rb') as fd:
        a = np.frombuffer(fd.read(), dtype=np.single)
    return np.reshape(a, (ny, -1))
def files_in_dir(mypath):
    """Return the names of the regular files directly inside *mypath*."""
    return [entry for entry in listdir(mypath) if isfile(join(mypath, entry))]
t = 20  # time-step index of the output files to plot

# Physical constants (SI): electron mass, elementary charge, speed of
# light, and the ion mass as 1836 electron masses.
m_e = 9.1/10**31
e = 1.6/10**19
c = 3*10**8
m_i = m_e*1836

x = np.linspace(-2,10,600)  # spatial axis
# ,data.shape[1]) #cm
p = np.linspace(-3.40581e-14,3.40581e-14,200)  # g*cm/s (momentum axis)
# NOTE(review): the electronphase load is immediately overwritten by the
# ionphase load — only the ion data is actually plotted.
data3 = load2DfromFile('electronphase',t,200)
data3 = load2DfromFile('ionphase',t,200)
# Momentum is rescaled to p_x/(m_i c) for the vertical axis.
x,y = np.meshgrid(x,p/m_i/c*1e-5)
ax = plt.contourf(x,y,data3,
                  cmap = 'magma',
                  levels = np.arange(0.005e16,2.5e16,0.1e16))
plt.colorbar(ax)
# plt.xlim(-1,10)
plt.ylim(-0.3,0.5)
plt.xlabel("$x$, $\mu$m")
plt.ylabel("$p_x/m_ic$")
# plt.savefig('fp_t20n5a25.pdf')
# In[9]:
def load2DfromFile(dataname, f, ny):
    # NOTE(review): notebook-export artifact — this redefines the earlier
    # load2DfromFile.  Despite the name and the unused *ny* parameter it
    # returns the raw 1-D float32 array without reshaping.
    fd = open(full_path(dataname, f), 'rb')
    a = np.frombuffer(fd.read(), dtype=np.single)
    fd.close()
    return(a)
t = 35  # later time step for the field plots
data = load2DfromFile('Ey',t,200)
x = np.linspace(-2,10,600)
p = np.linspace(-3.40581e-14,3.40581e-14,200)  # unused below
data = np.array(data)
plt.plot(x,data,linewidth=0.7)
data1 = load2DfromFile('Ex',t,200)
plt.plot(x,data1,linewidth=0.7)

# Density normalization: counts per cell -> n / (n_cr * n0).
n0 = 20
matrixsize_x = 600
x_max = 10/10**4
x_min = -2/10**4
Delta_x = (x_max-x_min)/matrixsize_x  # cell size
n_cr = 1.1e21  # presumably the critical plasma density — confirm units
ne = load2DfromFile('Electron1D',t,200)
n = ne/Delta_x/n_cr/n0
# plt.plot(x,n,linewidth=0.5,c='magenta')
# plt.ylim(0,55)
ni = load2DfromFile('Ion1D',t,200)
ni = ni/Delta_x/n_cr/n0
# plt.plot(x,ni,linewidth=0.5,c='b')
plt.xlim(-1,10)
plt.xlabel("$x$, $\mu$m")
plt.legend(("$E_y$","$E_x$"))
plt.savefig('pic_t35n5a25_.pdf')
# print(len(data))
| [
"darya_voytovich@mail.ru"
] | darya_voytovich@mail.ru |
17fc350a8bce1e34248cf2065c2b4990e338e9be | f6c7910213d97bd7a24be399f8475c11648732c4 | /build/lib/rqalpha/data/dtsk_python_interface/tsk.py | 5e7b90273994e6d9a6606f4388c509ce028186d8 | [
"Apache-2.0"
] | permissive | kinglogxzl/rqalpha | 8c25a2c5ecb04639e0a6bd3765d376759bb98889 | 6203803e0fb130fbb5a280ee8e1b902a8c0fd731 | refs/heads/master | 2021-01-11T14:17:00.459069 | 2017-02-22T07:21:24 | 2017-02-22T07:21:24 | 81,298,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,144 | py | import utility.ascii
import tdtsk
import h5py
import numpy as np
import xarray as xr
def get_key_string_list(h5_file_content, dimension_name):
    """Decode the ASCII dataset *dimension_name* into a list of strings."""
    ascii_rows = h5_file_content[dimension_name][()]
    return [utility.ascii.ascii_to_string(row) for row in ascii_rows.tolist()]
def get_key_ascii_array_list(tsk, dimension_name):
    """Encode the labels of *dimension_name* as fixed-width ASCII arrays.

    Fixed widths per dimension: KEY=128, SYMBOL=20, TIME=8.  Any other
    dimension name falls through with a NaN width — preserved from the
    original if-chain; presumably an unsupported case, confirm before
    relying on it.
    """
    widths = {"KEY": 128, "SYMBOL": 20, "TIME": 8}
    ascii_solid_len = widths.get(dimension_name, np.nan)
    key_string_list = tsk.coords[dimension_name].values
    return utility.ascii.parse_ascii_array_list_from_string_list(
        key_string_list, ascii_solid_len)
def load(h5_file_path, kline_type, local_symbol_list):
    '''
    Load a TIME x SYMBOL x KEY kline cube from an h5 file and re-index its
    symbol axis to *local_symbol_list*.

    kline_type is formatted as {int}_{time_unit}.
    The time/symbol/key axis labels are read from the h5 file itself;
    local_symbol_list comes from the caller (read from 2 local files).
    Symbols missing from the file are filled with NaN columns, and the
    on-disk missing-value sentinel (-2e10) is converted to NaN.
    Returns an xarray.DataArray with dims ['TIME', 'SYMBOL', 'KEY'].
    '''
    # Load h5 file
    h5_file_content = h5py.File(h5_file_path, 'r')
    # Load dimension names from h5_file content
    time_list = get_key_string_list(h5_file_content, \
        tdtsk.get_kline_type_labal(kline_type))
    symbol_list = get_key_string_list(h5_file_content, "SYMBOL")
    key_list = get_key_string_list(h5_file_content, "KEY")
    # Load raw data from h5_file content; the flat dataset is reshaped to
    # the (time, symbol, key) cube implied by the label lists.
    labal = tdtsk.get_kline_type_labal(kline_type) + '_DTSK'
    dataset = h5_file_content[labal]
    tsk_ndarray = np.asarray(dataset).reshape(len(time_list), len(symbol_list), len(key_list))
    tsk_xarray = xr.DataArray(tsk_ndarray, \
        coords=[time_list, symbol_list, key_list],\
        dims=['TIME', 'SYMBOL', 'KEY'])
    # Convert raw data to real dtsk that fit local_symbol_list.
    # NOTE(review): np.ndarray(...) * np.nan relies on multiplying
    # uninitialised memory by NaN to get an all-NaN filler slab.
    symbol_set = set(symbol_list)
    tsk_ndarray_symbol_nan = np.ndarray(shape=(len(time_list), 1, len(key_list))) * np.nan
    tsk_select_ndarray = np.ndarray(shape=\
        (len(time_list), len(local_symbol_list), len(key_list)))
    for index, sym in enumerate(local_symbol_list):
        if sym in symbol_set:
            # Copy this symbol's (time, key) slab into the output cube.
            tsk_select_ndarray[:, index:index + 1, :] = \
                tsk_xarray.loc[:, sym, :].values.reshape(len(time_list), 1, len(key_list))
        else:
            # Symbol absent from the file: fill its column with NaN.
            tsk_select_ndarray[:, index:index + 1, :] = tsk_ndarray_symbol_nan
    # Map the on-disk missing-value sentinel (-2e10, within eps) to NaN.
    eps = 1e-5
    tsk_select_ndarray[abs(tsk_select_ndarray + 2e10) <= eps] = np.nan
    tsk_select_xarray = xr.DataArray(tsk_select_ndarray, \
        coords=[time_list, local_symbol_list, key_list], \
        dims=['TIME', 'SYMBOL', 'KEY'])
    return tsk_select_xarray
def save(h5_file_path, tsk, kline_type):
    ''' Write a TIME x SYMBOL x KEY DataArray back to an h5 file.

    Axis labels are re-encoded to fixed-width ASCII via
    get_key_ascii_array_list before being stored alongside the cube.
    NOTE(review): original author was not sure the written data is
    correct; this path appears untested.
    '''
    time_list = get_key_ascii_array_list(tsk, 'TIME')
    symbol_list = get_key_ascii_array_list(tsk, "SYMBOL")
    key_list = get_key_ascii_array_list(tsk, "KEY")
    # 'w' truncates any existing file at h5_file_path.
    file = h5py.File(h5_file_path, 'w')
    file[tdtsk.get_kline_type_labal(kline_type)] = time_list
    file["SYMBOL"] = symbol_list
    file["KEY"] = key_list
    # Store the data cube under the '<kline-label>_DTSK' dataset name,
    # mirroring what load() reads back.
    file[tdtsk.get_kline_type_labal(kline_type)+'_DTSK'] = tsk
    ''' Not sure whether the tsk is correct '''
    file.close()
| [
"391275002@qq.com"
] | 391275002@qq.com |
f8fccfa10aaf61b927be76184af448a1b5c565f6 | 75fa11b13ddab8fd987428376f5d9c42dff0ba44 | /metadata-ingestion/tests/integration/snowflake/common.py | 43f5e04fbc89fcd2cb4b24d5cc32c9cf6600679d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | RyanHolstien/datahub | 163d0ff6b4636919ed223ee63a27cba6db2d0156 | 8cf299aeb43fa95afb22fefbc7728117c727f0b3 | refs/heads/master | 2023-09-04T10:59:12.931758 | 2023-08-21T18:33:10 | 2023-08-21T18:33:10 | 246,685,891 | 0 | 0 | Apache-2.0 | 2021-02-16T23:48:05 | 2020-03-11T21:43:58 | TypeScript | UTF-8 | Python | false | false | 22,993 | py | import json
from datetime import datetime, timezone
from datahub.configuration.time_window_config import BucketDuration
from datahub.ingestion.source.snowflake import snowflake_query
from datahub.ingestion.source.snowflake.snowflake_query import SnowflakeQuery
NUM_TABLES = 10
NUM_VIEWS = 2
NUM_COLS = 10
NUM_OPS = 10
FROZEN_TIME = "2022-06-07 17:00:00"
def default_query_results( # noqa: C901
query,
num_tables=NUM_TABLES,
num_views=NUM_VIEWS,
num_cols=NUM_COLS,
num_ops=NUM_OPS,
):
if query == SnowflakeQuery.current_account():
return [{"CURRENT_ACCOUNT()": "ABC12345"}]
if query == SnowflakeQuery.current_region():
return [{"CURRENT_REGION()": "AWS_AP_SOUTH_1"}]
if query == SnowflakeQuery.show_tags():
return []
if query == SnowflakeQuery.current_role():
return [{"CURRENT_ROLE()": "TEST_ROLE"}]
elif query == SnowflakeQuery.current_version():
return [{"CURRENT_VERSION()": "X.Y.Z"}]
elif query == SnowflakeQuery.current_database():
return [{"CURRENT_DATABASE()": "TEST_DB"}]
elif query == SnowflakeQuery.current_schema():
return [{"CURRENT_SCHEMA()": "TEST_SCHEMA"}]
elif query == SnowflakeQuery.current_warehouse():
return [{"CURRENT_WAREHOUSE()": "TEST_WAREHOUSE"}]
elif query == SnowflakeQuery.show_databases():
return [
{
"name": "TEST_DB",
"created_on": datetime(2021, 6, 8, 0, 0, 0, 0),
"comment": "Comment for TEST_DB",
}
]
elif query == SnowflakeQuery.get_databases("TEST_DB"):
return [
{
"DATABASE_NAME": "TEST_DB",
"CREATED": datetime(2021, 6, 8, 0, 0, 0, 0),
"LAST_ALTERED": datetime(2021, 6, 8, 0, 0, 0, 0),
"COMMENT": "Comment for TEST_DB",
}
]
elif query == SnowflakeQuery.schemas_for_database("TEST_DB"):
return [
{
"SCHEMA_NAME": "TEST_SCHEMA",
"CREATED": datetime(2021, 6, 8, 0, 0, 0, 0),
"LAST_ALTERED": datetime(2021, 6, 8, 0, 0, 0, 0),
"COMMENT": "comment for TEST_DB.TEST_SCHEMA",
},
{
"SCHEMA_NAME": "TEST2_SCHEMA",
"CREATED": datetime(2021, 6, 8, 0, 0, 0, 0),
"LAST_ALTERED": datetime(2021, 6, 8, 0, 0, 0, 0),
"COMMENT": "comment for TEST_DB.TEST_SCHEMA",
},
]
elif query == SnowflakeQuery.tables_for_database("TEST_DB"):
raise Exception("Information schema query returned too much data")
elif query == SnowflakeQuery.show_views_for_database("TEST_DB"):
raise Exception("Information schema query returned too much data")
elif query == SnowflakeQuery.tables_for_schema("TEST_SCHEMA", "TEST_DB"):
return [
{
"TABLE_SCHEMA": "TEST_SCHEMA",
"TABLE_NAME": "TABLE_{}".format(tbl_idx),
"CREATED": datetime(2021, 6, 8, 0, 0, 0, 0),
"LAST_ALTERED": datetime(2021, 6, 8, 0, 0, 0, 0),
"BYTES": 1024,
"ROW_COUNT": 10000,
"COMMENT": "Comment for Table",
"CLUSTERING_KEY": None,
}
for tbl_idx in range(1, num_tables + 1)
]
elif query == SnowflakeQuery.show_views_for_schema("TEST_SCHEMA", "TEST_DB"):
return [
{
"schema_name": "TEST_SCHEMA",
"name": "VIEW_{}".format(view_idx),
"created_on": datetime(2021, 6, 8, 0, 0, 0, 0),
"comment": "Comment for View",
"text": None,
}
for view_idx in range(1, num_views + 1)
]
elif query == SnowflakeQuery.columns_for_schema("TEST_SCHEMA", "TEST_DB"):
raise Exception("Information schema query returned too much data")
elif query in [
*[
SnowflakeQuery.columns_for_table(
"TABLE_{}".format(tbl_idx), "TEST_SCHEMA", "TEST_DB"
)
for tbl_idx in range(1, num_tables + 1)
],
*[
SnowflakeQuery.columns_for_table(
"VIEW_{}".format(view_idx), "TEST_SCHEMA", "TEST_DB"
)
for view_idx in range(1, num_views + 1)
],
]:
return [
{
# "TABLE_CATALOG": "TEST_DB",
# "TABLE_SCHEMA": "TEST_SCHEMA",
# "TABLE_NAME": "TABLE_{}".format(tbl_idx),
"COLUMN_NAME": "COL_{}".format(col_idx),
"ORDINAL_POSITION": col_idx,
"IS_NULLABLE": "NO",
"DATA_TYPE": "TEXT" if col_idx > 1 else "NUMBER",
"COMMENT": "Comment for column",
"CHARACTER_MAXIMUM_LENGTH": 255 if col_idx > 1 else None,
"NUMERIC_PRECISION": None if col_idx > 1 else 38,
"NUMERIC_SCALE": None if col_idx > 1 else 0,
}
for col_idx in range(1, num_cols + 1)
]
elif query in (
SnowflakeQuery.use_database("TEST_DB"),
SnowflakeQuery.show_primary_keys_for_schema("TEST_SCHEMA", "TEST_DB"),
SnowflakeQuery.show_foreign_keys_for_schema("TEST_SCHEMA", "TEST_DB"),
):
return []
elif query == SnowflakeQuery.get_access_history_date_range():
return [
{
"MIN_TIME": datetime(2021, 6, 8, 0, 0, 0, 0),
"MAX_TIME": datetime(2022, 6, 7, 7, 17, 0, 0),
}
]
elif query == snowflake_query.SnowflakeQuery.operational_data_for_time_window(
1654473600000,
1654586220000,
):
return [
{
"QUERY_START_TIME": datetime(2022, 6, 2, 4, 41, 1, 367000).replace(
tzinfo=timezone.utc
),
"QUERY_TEXT": "create or replace table TABLE_{} as select * from TABLE_2 left join TABLE_3 using COL_1 left join TABLE 4 using COL2".format(
op_idx
),
"QUERY_TYPE": "CREATE_TABLE_AS_SELECT",
"ROWS_INSERTED": 0,
"ROWS_UPDATED": 0,
"ROWS_DELETED": 0,
"BASE_OBJECTS_ACCESSED": json.dumps(
[
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
},
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_3",
},
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_4",
},
]
),
"DIRECT_OBJECTS_ACCESSED": json.dumps(
[
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
},
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_3",
},
{
"columns": [
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_4",
},
]
),
"OBJECTS_MODIFIED": json.dumps(
[
{
"columns": [
{
"columnId": 0,
"columnName": "COL_{}".format(col_idx),
"directSources": [
{
"columnName": "COL_{}".format(col_idx),
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
}
],
}
for col_idx in range(1, num_cols + 1)
],
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_{}".format(op_idx),
}
]
),
"USER_NAME": "SERVICE_ACCOUNT_TESTS_ADMIN",
"FIRST_NAME": None,
"LAST_NAME": None,
"DISPLAY_NAME": "SERVICE_ACCOUNT_TESTS_ADMIN",
"EMAIL": "abc@xyz.com",
"ROLE_NAME": "ACCOUNTADMIN",
}
for op_idx in range(1, num_ops + 1)
]
elif (
query
== snowflake_query.SnowflakeQuery.usage_per_object_per_time_bucket_for_time_window(
1654473600000,
1654586220000,
use_base_objects=False,
top_n_queries=10,
include_top_n_queries=True,
time_bucket_size=BucketDuration.DAY,
)
):
return []
elif query in (
snowflake_query.SnowflakeQuery.table_to_table_lineage_history(
1654473600000,
1654586220000,
),
snowflake_query.SnowflakeQuery.table_to_table_lineage_history(
1654473600000, 1654586220000, False
),
):
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_{}".format(op_idx),
"UPSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_2",
"UPSTREAM_TABLE_COLUMNS": json.dumps(
[
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
]
),
"DOWNSTREAM_TABLE_COLUMNS": json.dumps(
[
{
"columnId": 0,
"columnName": "COL_{}".format(col_idx),
"directSources": [
{
"columnName": "COL_{}".format(col_idx),
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
}
],
}
for col_idx in range(1, num_cols + 1)
]
),
}
for op_idx in range(1, num_ops + 1)
] + [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_1",
"UPSTREAM_TABLE_NAME": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
"UPSTREAM_TABLE_COLUMNS": json.dumps(
[{"columnId": 0, "columnName": "COL_1"}]
),
"DOWNSTREAM_TABLE_COLUMNS": json.dumps(
[
{
"columnId": 0,
"columnName": "COL_1",
"directSources": [
{
"columnName": "COL_1",
"objectDomain": "Table",
"objectId": 0,
"objectName": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
}
],
}
]
),
}
]
elif query in (
snowflake_query.SnowflakeQuery.table_to_table_lineage_history_v2(
start_time_millis=1654473600000,
end_time_millis=1654586220000,
include_view_lineage=True,
include_column_lineage=True,
),
):
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_{}".format(op_idx),
"DOWNSTREAM_TABLE_DOMAIN": "TABLE",
"UPSTREAM_TABLES": json.dumps(
[
{
"upstream_object_name": "TEST_DB.TEST_SCHEMA.TABLE_2",
"upstream_object_domain": "TABLE",
}
]
+ ( # This additional upstream is only for TABLE_1
[
{
"upstream_object_name": "TEST_DB.TEST_SCHEMA.VIEW_1",
"upstream_object_domain": "VIEW",
},
{
"upstream_object_name": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
"upstream_object_domain": "TABLE",
},
]
if op_idx == 1
else []
)
),
"UPSTREAM_COLUMNS": json.dumps(
[
{
"column_name": "COL_{}".format(col_idx),
"upstreams": [
[
{
"object_name": "TEST_DB.TEST_SCHEMA.TABLE_2",
"object_domain": "Table",
"column_name": "COL_{}".format(col_idx),
}
]
],
}
for col_idx in range(1, num_cols + 1)
]
+ ( # This additional upstream is only for TABLE_1
[
{
"column_name": "COL_1",
"upstreams": [
[
{
"object_name": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
"object_domain": "Table",
"column_name": "COL_1",
}
]
],
}
]
if op_idx == 1
else []
)
),
}
for op_idx in range(1, num_ops + 1)
]
elif query in (
snowflake_query.SnowflakeQuery.table_to_table_lineage_history_v2(
start_time_millis=1654473600000,
end_time_millis=1654586220000,
include_view_lineage=False,
include_column_lineage=False,
),
):
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_{}".format(op_idx),
"DOWNSTREAM_TABLE_DOMAIN": "TABLE",
"UPSTREAM_TABLES": json.dumps(
[
{
"upstream_object_name": "TEST_DB.TEST_SCHEMA.TABLE_2",
"upstream_object_domain": "TABLE",
},
]
+ ( # This additional upstream is only for TABLE_1
[
{
"upstream_object_name": "OTHER_DB.OTHER_SCHEMA.TABLE_1",
"upstream_object_domain": "TABLE",
},
]
if op_idx == 1
else []
)
),
}
for op_idx in range(1, num_ops + 1)
]
elif query == snowflake_query.SnowflakeQuery.external_table_lineage_history(
1654473600000,
1654586220000,
):
return []
elif query in [
snowflake_query.SnowflakeQuery.view_dependencies(),
]:
return [
{
"REFERENCED_OBJECT_DOMAIN": "table",
"REFERENCING_OBJECT_DOMAIN": "view",
"DOWNSTREAM_VIEW": "TEST_DB.TEST_SCHEMA.VIEW_2",
"VIEW_UPSTREAM": "TEST_DB.TEST_SCHEMA.TABLE_2",
}
]
elif query in [
snowflake_query.SnowflakeQuery.view_dependencies_v2(),
]:
# VIEW_2 has dependency on TABLE_2
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.VIEW_2",
"DOWNSTREAM_TABLE_DOMAIN": "view",
"UPSTREAM_TABLES": json.dumps(
[
{
"upstream_object_name": "TEST_DB.TEST_SCHEMA.TABLE_2",
"upstream_object_domain": "table",
}
]
),
}
]
elif query in [
snowflake_query.SnowflakeQuery.view_lineage_history(
1654473600000,
1654586220000,
),
snowflake_query.SnowflakeQuery.view_lineage_history(
1654473600000, 1654586220000, False
),
]:
return [
{
"DOWNSTREAM_TABLE_NAME": "TEST_DB.TEST_SCHEMA.TABLE_1",
"VIEW_NAME": "TEST_DB.TEST_SCHEMA.VIEW_1",
"VIEW_DOMAIN": "VIEW",
"VIEW_COLUMNS": json.dumps(
[
{"columnId": 0, "columnName": "COL_{}".format(col_idx)}
for col_idx in range(1, num_cols + 1)
]
),
"DOWNSTREAM_TABLE_DOMAIN": "TABLE",
"DOWNSTREAM_TABLE_COLUMNS": json.dumps(
[
{
"columnId": 0,
"columnName": "COL_{}".format(col_idx),
"directSources": [
{
"columnName": "COL_{}".format(col_idx),
"objectDomain": "Table",
"objectId": 0,
"objectName": "TEST_DB.TEST_SCHEMA.TABLE_2",
}
],
}
for col_idx in range(1, num_cols + 1)
]
),
}
]
elif query in [
snowflake_query.SnowflakeQuery.external_table_lineage_history(
1654473600000,
1654586220000,
),
snowflake_query.SnowflakeQuery.view_dependencies_v2(),
snowflake_query.SnowflakeQuery.view_dependencies(),
snowflake_query.SnowflakeQuery.show_external_tables(),
]:
return []
elif (
query
== snowflake_query.SnowflakeQuery.get_all_tags_in_database_without_propagation(
"TEST_DB"
)
):
return [
*[
{
"TAG_DATABASE": "TEST_DB",
"TAG_SCHEMA": "TEST_SCHEMA",
"TAG_NAME": f"my_tag_{ix}",
"TAG_VALUE": f"my_value_{ix}",
"OBJECT_DATABASE": "TEST_DB",
"OBJECT_SCHEMA": "TEST_SCHEMA",
"OBJECT_NAME": "VIEW_2",
"COLUMN_NAME": None,
"DOMAIN": "TABLE",
}
for ix in range(3)
],
{
"TAG_DATABASE": "TEST_DB",
"TAG_SCHEMA": "TEST_SCHEMA",
"TAG_NAME": "security",
"TAG_VALUE": "pii",
"OBJECT_DATABASE": "TEST_DB",
"OBJECT_SCHEMA": "TEST_SCHEMA",
"OBJECT_NAME": "VIEW_1",
"COLUMN_NAME": "COL_1",
"DOMAIN": "COLUMN",
},
{
"TAG_DATABASE": "OTHER_DB",
"TAG_SCHEMA": "OTHER_SCHEMA",
"TAG_NAME": "my_other_tag",
"TAG_VALUE": "other",
"OBJECT_DATABASE": "TEST_DB",
"OBJECT_SCHEMA": None,
"OBJECT_NAME": "TEST_SCHEMA",
"COLUMN_NAME": None,
"DOMAIN": "SCHEMA",
},
{
"TAG_DATABASE": "OTHER_DB",
"TAG_SCHEMA": "OTHER_SCHEMA",
"TAG_NAME": "my_other_tag",
"TAG_VALUE": "other",
"OBJECT_DATABASE": None,
"OBJECT_SCHEMA": None,
"OBJECT_NAME": "TEST_DB",
"COLUMN_NAME": None,
"DOMAIN": "DATABASE",
},
]
# Unreachable code
raise Exception(f"Unknown query {query}")
| [
"noreply@github.com"
] | noreply@github.com |
c36baf9647e0ce044c5d388d9bed6008c9b426f3 | 2c64663fb368230d0deaa2bfb72ec33e6af8b12b | /models/notify_type_model.py | 84e01fa7f58af279ea392e57279222472256bc62 | [] | no_license | cncg51/post_bar | 8da4cb76dc6f03d11645d3270a6f6c2264fc77fe | 8981f9721af5ca5ecda2c4d5723123833e284b85 | refs/heads/master | 2020-04-01T04:20:42.347831 | 2018-10-13T16:01:36 | 2018-10-13T16:01:36 | 152,859,344 | 0 | 0 | null | 2018-10-13T10:08:37 | 2018-10-13T10:08:37 | null | UTF-8 | Python | false | false | 186 | py | # coding: utf8
__metaclass__ = type
from models.model import *
class notify_type_model(model):
    # Thin table-specific model: binds the generic `model` base class to
    # the "notify_type" database table.  All query behaviour is inherited.
    def __init__(self):
        super(notify_type_model, self).__init__('notify_type')
"rabbitzhang52@gmail.com"
] | rabbitzhang52@gmail.com |
0b90df7dbd721ecc641998896bff6d7087d4c28c | ac0a583e4765f2b5b97e898f30d6df0fc71ea8f6 | /pyros_msgs/opt_as_nested/__init__.py | 4beab2c01e9a05fe2344eb3a0f0e64941a108eae | [
"MIT"
] | permissive | pyros-dev/pyros-msgs | 5ce9efaa246ffa94396552fd6034c0eeacddeb76 | 28d9d6aa3cfbb42d154360f16eea1900be518f74 | refs/heads/master | 2022-07-06T15:53:16.764600 | 2018-02-17T15:03:36 | 2018-02-17T15:03:36 | 67,676,303 | 1 | 3 | MIT | 2022-06-21T21:19:34 | 2016-09-08T06:45:37 | Python | UTF-8 | Python | false | false | 347 | py | from __future__ import absolute_import
from __future__ import print_function
"""
pyros_msgs.opt_as_nested is a module that declares optional fields as a specific message type.
This is useful if you want to express an optional field in a message without any ambiguity.
"""
from .opt_as_nested import duck_punch
__all__ = [
'duck_punch',
] | [
"asmodehn@gmail.com"
] | asmodehn@gmail.com |
d0dade868cb00ef5e103594ae46c0d072fcbd126 | e94d22cdb7c73b8a55262d5a6c2c7b0d75f3b63e | /snussum/analytics/management/commands/createanalytics.py | 6aae31665b387f93092ff96a50482be7c680c3e8 | [] | no_license | dobestan/snussum | 594d1169cc6a0a799c8104135dc028d65a3967d0 | 4f1f092a4c5cebd913a64c5a0d7f12b3e061552f | refs/heads/master | 2021-01-18T18:25:00.237448 | 2015-06-01T06:03:29 | 2015-06-01T06:03:29 | 34,576,643 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from django.core.management.base import BaseCommand, CommandError
from analytics.models.demographic import Demographic
class Command(BaseCommand):
help = "Create Analytics Data"
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
demograhic = Demographic.objects.create_analytics()
self.stdout.write('Successfully created demograhic analytics data ...')
| [
"dobestan@gmail.com"
] | dobestan@gmail.com |
d3783b55fc2833796d674fbf66cad05b5c93c4cf | f3ff4033692811e0c90af7c7bbbc7542d747fc3c | /python_analysis/mulit_stock_df.py | 52a5428c94d32e9b4d2235ff4b474692efe7721b | [] | no_license | barrard/alpaca_data | de72f2d68bb4c5cd2652fe51360c9798546c4394 | 6c1177b40ac08e1de434c231efc2919457eb9520 | refs/heads/master | 2022-12-20T12:55:35.368171 | 2019-09-06T07:28:33 | 2019-09-06T07:28:33 | 193,910,711 | 1 | 0 | null | 2022-12-09T07:49:31 | 2019-06-26T13:33:17 | JavaScript | UTF-8 | Python | false | false | 2,288 | py | import pandas as pd
import matplotlib.pyplot as plt
symbols = [ 'BAC', 'IBM', 'MSFT', 'AAPL', 'F', 'GE', 'INTC']
# def adjust_dates():#No need any more
# for symbol in symbols:
# df = load_data(symbol)
# print(df.head())
# time = df['t']*1000
# df.drop('t', axis=1, inplace=True)
# df['t'] = time
# print(df.head())
# save_csv(df, symbol)
def save_csv(data, symbol):
    """Write *data* to the symbol's 15-minute CSV file and report success.

    The file is written without the index column; the target directory
    ../data/<symbol>/ must already exist.
    """
    csv_path = '../data/{}/15MIN_{}.csv'.format(symbol, symbol)
    data.to_csv(csv_path, index=False)
    return True
def load_data_col(symbol, col):
    """Load close price and volume for *symbol* from its 15-minute CSV.

    Returns a DataFrame indexed by timestamp 't' with columns renamed to
    '<symbol>' (close) and '<symbol>_vol' (volume).  Note: *col* is only
    echoed in the progress message; the columns read are fixed.
    """
    print(f'Loading {symbol} column {col}')
    csv_path = '../data/{}/15MIN_{}.csv'.format(symbol, symbol)
    frame = pd.read_csv(csv_path,
                        usecols=['t', 'c', 'v'],
                        index_col='t')
    frame = frame.rename(columns={'c': symbol, 'v': '{}_vol'.format(symbol)})
    return frame
def load_data(symbol):
    """Load the full 15-minute bar CSV for *symbol*, indexed by timestamp 't'."""
    csv_path = '../data/{}/15MIN_{}.csv'.format(symbol, symbol)
    return pd.read_csv(csv_path, index_col='t')
# Get reference via SPY
def get_SPY_refrence():
    """Return the SPY close/volume frame used as the join baseline."""
    # SPY acts as the market reference the other symbols are joined onto.
    return load_data_col('SPY', 'c')
def create_main_df(symbols):
    """Join each symbol's close/volume columns onto the SPY baseline frame.

    Rows align on the shared timestamp index; timestamps missing for a
    symbol come through as NaN.
    """
    merged = get_SPY_refrence()
    for sym in symbols:
        merged = merged.join(load_data_col(sym, 'c'))
    return merged
def plot_close_symbol(symbol):
    # Plot the close ('c') column of the symbol's 15-minute CSV and block
    # until the matplotlib window is closed.
    df = pd.read_csv('../data/{}/15MIN_{}.csv'.format(symbol, symbol))
    df['c'].plot()
    plt.show()
def plot_open_close_symbol(symbol):
    # NOTE(review): despite the name, this plots columns 'h' and 'l'
    # (presumably high/low, not open/close) — confirm intent.
    df = pd.read_csv('../data/{}/15MIN_{}.csv'.format(symbol, symbol))
    # Only the first 100 bars are shown.
    df[['h','l']][:100].plot()
    plt.show()
# USe SPY as a refrence
# df = create_main_df(symbols)
# print(len(df))
# df = df.loc[~df.index.duplicated(keep='first')]
# print(len(df))
# print(df[:15])
# why not just write this file..
# df.to_csv('./main_df.csv')
def plot_data(title="Stock Prices"):
    # Plot all symbols from the pre-built main_df.csv, normalised so every
    # series starts at 1.0 (relative performance since the first bar).
    df = pd.read_csv('main_df.csv', index_col='t')
    # Index is stored as epoch milliseconds; convert for a readable axis.
    df.index = pd.to_datetime(df.index,unit='ms')
    # Normalise each column by its first value.
    df = df/df.iloc[0, :]
    ax = df[symbols].plot(title=title, fontsize=12)
    ax.set_xlabel('Date')
    ax.set_ylabel('Price')
    plt.show()
plot_data()
| [
"barrard@gmail.com"
] | barrard@gmail.com |
b63fdac6fd07e00fa254f5ef1bc16fd767aecde5 | 593d1deeb8483c71fdb7a157ed0584a90ffc2b3c | /setup.py | 8142df92999635e7793934373f7aff9f27f31ed7 | [] | no_license | ValvePython/dota2-wrappers | 7c2851543a9b219c393e3adf69bf3fcb932c41ca | 249bc22a4bb0a308d50cc5745a55e84a1cd15b64 | refs/heads/master | 2021-01-18T08:09:26.775764 | 2015-06-01T20:48:56 | 2015-06-01T20:48:56 | 36,687,968 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | #!/usr/bin/env python
from setuptools import setup
from codecs import open
from os import path
from dota2 import __version__
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='dota2',
version=__version__,
description='Provies wrapper objects for working with Dota2',
long_description=long_description,
url='https://github.com/ValvePython/dota2',
author='Rossen Georgiev',
author_email='hello@rgp.io',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing ',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
],
keywords='valve dota dota2 stats statistics',
packages=['dota2'],
install_requires=[
"vpk",
"vdf",
],
zip_safe=True,
)
| [
"zoorty@gmail.com"
] | zoorty@gmail.com |
7a2d4ef049bce7d2db3ab5b46eec04be0b136a9e | 63d52137a68d500efffd45d0256cd480da05f8e0 | /draw2.py | 08fbfcbe4c07f5fec38dcced3861c1325e6cfbc5 | [] | no_license | TheArle/pong | fdcf6a73179ca74185169f0e56ddc4dc54107a3f | 1474be4cd5f020964494f460bce0f9b79e9bcd47 | refs/heads/master | 2020-08-24T17:26:19.482906 | 2019-11-19T18:27:23 | 2019-11-19T18:27:23 | 216,871,283 | 0 | 0 | null | 2019-10-30T17:39:23 | 2019-10-22T17:27:22 | Python | UTF-8 | Python | false | false | 1,556 | py | import arcade, random
"""
Practice basic arcade drawing commands and some practice using the
arcade api (application programing interface)
This is a list of all the various arcade functions, methods, classes
and all the detail you need to program with arcade and Python
http://arcade.academy/index.html
http://arcade.academy/quick_index.html def on_mouse_press(self, x, y, button, modifiers):
vertical(10,random.randint(10),x,y)
http://arcade.academy/arcade.color.html
"""
######## setup stuff ##########################
screen_width = 1280
screen_height = 960
def drawA(xPos, yPos):
    """Draw a capital letter 'A' with its lower-left corner at (xPos, yPos)."""
    stroke_color = arcade.color.BLUE
    stroke_width = 5
    # Left stroke, right stroke, then the crossbar.
    arcade.draw_line(xPos, yPos, xPos + 15, yPos + 30, stroke_color, stroke_width)
    arcade.draw_line(xPos + 15, yPos + 30, xPos + 30, yPos, stroke_color, stroke_width)
    arcade.draw_line(xPos + 5, yPos + 15, xPos + 25, yPos + 15, stroke_color, stroke_width)
def vertical(amount, times, xPos, yPos):
    """Stack *times* copies of the letter A downward from (xPos, yPos),
    each *amount* pixels below the previous one."""
    for step in range(times):
        drawA(xPos, yPos - step * amount)
#################################################
class Drawd(arcade.Window):
    # Arcade window that draws a random-length column of letter 'A's
    # wherever the user clicks.
    def setup(self):
        # One-time setup: solid background colour.
        arcade.set_background_color(arcade.color.AMAZON)
    def on_draw(self):
        # Called every frame; only clears the screen here.  Click-drawn
        # letters are rendered directly in on_mouse_press, so they are not
        # persisted across redraws.
        arcade.start_render()
    def on_mouse_press(self, x, y, button, modifiers):
        # Log the click, then draw 2-10 'A's stacked 10 px apart at (x, y).
        print("x=", x, "y=", y, "button=", button)
        vertical(10,random.randint(2,10),x,y)
game = Drawd(screen_width, screen_height, "Agd;lka")
game.setup()
arcade.run() # game loop
| [
"noreply@github.com"
] | noreply@github.com |
cef569ea71a6c217a0106324003024ccbd957271 | fd837c9d6577770a275a4de07dc5f025aa242c73 | /days09/demo03_问题.py | 1698ce5c6953e51ba7836fd70f8332f3ba278926 | [] | no_license | liuhu0514/py1901_0114WORK | 4228ea56a84237b7e5fee57d8884a53d107e5fd3 | c783e94af9ecd09e33efbc287a79c8633e15d1a8 | refs/heads/master | 2020-05-06T12:20:04.550244 | 2019-04-08T09:40:18 | 2019-04-08T09:40:18 | 180,117,323 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | '''
全局变量
局部变量
冲突问题【如果出现:只能说明~软件设计有问题~你有问题】
'''
# Global variable
name = "tom"
# Local variable inside the function (shadows the global of the same name)
def test():
    name = "jerry"
    print("函数中访问name:", name)
test()
# Outside the function the global is untouched by the local assignment.
print("函数外访问name:", name)
'''
正常情况下,变量数据的访问
函数中如果出现了和全局变量重名的变量
1. 函数外部访问:
优先访问 全局变量
然后访问 局部变量【X】
2. 函数内部访问:
就近原则:谁离访问代码近~优先访问谁
''' | [
"1466947051@qq.com"
] | 1466947051@qq.com |
23b61174641553a6cf1a0222f5fc0fa3b551f0b8 | 445dfee46685e37519377d79bf62880eab224727 | /Programming Assg/1_Dy_Kim_assg1.py | 9f011bf03b53bd1a1d205a0dae648ad43573c65b | [] | no_license | kdy618/CS300 | 06781527d2c5223d8497731324eeaabba94d576a | a106e8ea72aa8bd5ed1dcc6d30209e1e6f2712b5 | refs/heads/master | 2020-04-05T06:25:24.049701 | 2018-11-09T20:41:37 | 2018-11-09T20:41:37 | 156,637,392 | 0 | 0 | null | 2020-12-08T03:27:29 | 2018-11-08T02:16:47 | Python | UTF-8 | Python | false | false | 345 | py | '''
Intent: Begin to provide options for the form of people to be addressed.
Postcondition: The following is on the console (i.e., preceded by a blank line):
Greetings from a beginning Python programmer.
Do you want to be addressed as ...
'''
x = '\nGreetings from a beginning Python programmer.\nDo you want to be address as ...'
print(x)
| [
"Kim@DESKTOP-G3P05GK.localdomain"
] | Kim@DESKTOP-G3P05GK.localdomain |
d4190b5bde288c39f6b1972246762766070ebb16 | e41a35bb6b57aef401363341d7fbcc4ed04f7a0d | /motorapp/migrations/0004_auto_20151211_1822.py | 7613d5c6326e50510ffd395a69760de14f273c78 | [] | no_license | CamilleMariniSonos/ML21 | 20a82f3bd78aff8752f014675074e61c12f82c85 | 08758f8b8d2707ee4277b30058b010c1e001db88 | refs/heads/master | 2022-12-15T18:21:13.844497 | 2016-03-22T16:29:52 | 2016-03-22T16:29:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: relax Dataset.raw_data so the file
    # upload becomes optional (null/blank allowed).
    dependencies = [
        ('motorapp', '0003_auto_20151122_1725'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dataset',
            name='raw_data',
            field=models.FileField(null=True, upload_to=b'', blank=True),
        ),
    ]
| [
"camille.marini@feetme.fr"
] | camille.marini@feetme.fr |
a775e4b0f818ac2bdd927c36c645d58aea22d114 | 389d95ee1f8d4ba992114e36c5fc427d02ba2a6c | /flexmessage_project/settings.py | ef5e0ea9c78442b5078ec03584e4b733d9fc65ac | [
"MIT"
] | permissive | adepeter/sleekmessage | d7a6b4279f6a60659cf8a98897136ca22c1b830a | 64621842cb9b0d707523e87f8bd6549d4e2d8433 | refs/heads/master | 2022-11-16T23:58:18.477628 | 2020-07-15T15:50:16 | 2020-07-15T15:50:16 | 265,276,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | """
Django settings for flexmessage_project project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SETTINGS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@k0lop(x*yo$2jm03k)@2c3$ch0@4l=)0)0ab+(10)5sn#llx@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'messages.apps.MessagesConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'flexmessage_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'string_if_invalid': '%s is not a valid template variable',
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'flexmessage_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')] | [
"adepeter26@gmail.com"
] | adepeter26@gmail.com |
946900ca782f0415d723eaabba961b981d2c9cd4 | 0e7a4a6926dc83b0ae415a2a9430c3e5274c8602 | /officialdemo/Chapter01/callRadioButton2.pyw | 425d3e8de7aa91404398f4646ecf9ae408049e07 | [] | no_license | madmadcat/QtCookBook_Demo | 9b0b3fb36c0786b186e42ad83a78ccdb31e2f002 | 8287964874fd9cafe5024118a2611af226e936d3 | refs/heads/master | 2023-02-27T11:59:44.736598 | 2021-02-05T13:03:56 | 2021-02-05T13:03:56 | 325,999,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | pyw | import sys
from PyQt5.QtWidgets import QDialog, QApplication
from demoRadioButton2 import *
class MyForm(QDialog):
    """Dialog that mirrors the user's shirt-size and payment-method radio
    selections into the summary label."""

    def __init__(self):
        super().__init__()
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # Refresh the summary whenever any radio button changes state.
        all_radios = (self.ui.radioButtonMedium,
                      self.ui.radioButtonLarge,
                      self.ui.radioButtonXL,
                      self.ui.radioButtonXXL,
                      self.ui.radioButtonDebitCard,
                      self.ui.radioButtonNetBanking,
                      self.ui.radioButtonCashOnDelivery)
        for radio in all_radios:
            radio.toggled.connect(self.dispSelected)
        self.show()

    def dispSelected(self):
        """Rebuild the label text from whichever buttons are checked."""
        selected1 = ""
        selected2 = ""
        size_choices = ((self.ui.radioButtonMedium, "Medium"),
                        (self.ui.radioButtonLarge, "Large"),
                        (self.ui.radioButtonXL, "Extra Large"),
                        (self.ui.radioButtonXXL, "Extra Extra Large"))
        payment_choices = ((self.ui.radioButtonDebitCard, "Debit/Credit Card"),
                           (self.ui.radioButtonNetBanking, "NetBanking"),
                           (self.ui.radioButtonCashOnDelivery, "Cash On Delivery"))
        for radio, caption in size_choices:
            if radio.isChecked():
                selected1 = caption
        for radio, caption in payment_choices:
            if radio.isChecked():
                selected2 = caption
        self.ui.labelSelected.setText("Chosen shirt size is " + selected1 + " and payment method as " + selected2)
if __name__=="__main__":
app = QApplication(sys.argv)
w = MyForm()
w.show()
sys.exit(app.exec_())
| [
"xdong@DONGXIN-MBP.local"
] | xdong@DONGXIN-MBP.local |
55840b4ca854f21374583d35ef6fc38dd63093cf | 5668e921a45a70269e6eef4901e66cc6108454f1 | /DiscordNitroGen.py | f193af256affdbf619edbe8d078c78bdd2d606e1 | [] | no_license | Baneee0011/Discord-Nitro-Gen-Python- | 8d5b54e1c08a664f773b20c90fb63de8e076a89c | ad6536f3d1a84cb0a0648ab6ea5377910490d799 | refs/heads/master | 2021-03-24T20:24:35.825208 | 2020-03-24T21:45:22 | 2020-03-24T21:45:22 | 247,562,507 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | import random
import os
import string
def random_char(y: int):
return ''.join(random.choice(string.ascii_letters) for x in range(y))
random_string = random_char(16)
link = "discord.gift/"
print(link + random_string)
os.system('pause >NUL')
| [
"noreply@github.com"
] | noreply@github.com |
81e5c1962464f46f6731194743921ded34f9046e | 028f022419360d6657745042d71d650eeec86510 | /first tki.py | 7153421503721749deb25b8079888ca1493f293c | [] | no_license | Divyanshu050303/gui | 21b6eca7724dc00fa7beb3c70c8dcef78827b65d | f76a5529111275abb0e46c6a8174166c46ba787a | refs/heads/main | 2023-06-02T05:12:29.421795 | 2021-06-19T15:41:28 | 2021-06-19T15:41:28 | 368,836,758 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | from tkinter import *
root=Tk()
root.geometry("300x300")
root.title("My first gui")
root.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
4d6f2d80ffc442f7fe963c45fb36deb10b4fed3c | f967778ce740aa64e069aa9fdb6fe5c09cfaedad | /curso/serializers.py | 90942607f739773930c3058f0afbadc36a20904b | [] | no_license | Marcos-VNC/django_rest_api | 08c956c5f80c49e136f136de9c988b0427bf7593 | deb0aa62f415978dd12ae0e0ed90bd80f1a1cea4 | refs/heads/master | 2023-08-22T14:47:40.277194 | 2021-10-06T13:15:49 | 2021-10-06T13:15:49 | 414,229,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from rest_framework import serializers
from .models import Curso, Avaliacao
class AvaliacaoSerializer(serializers.ModelSerializer):
class Meta:
extra_kwargs = {
'email': {'write_only': True}
}
model = Avaliacao
fields = (
'id',
'curso',
'nome'
'email',
'comentarios',
'nota',
'create',
'active',
)
class CursoSerializer(serializers.ModelSerializer):
class Meta:
model = Curso
fields = (
'id',
'titulo',
'url',
'create',
'active',
)
| [
"marcosvn.cardoso@outlook.com"
] | marcosvn.cardoso@outlook.com |
51a48253563501adbd6d8ae5e02a65be6ca83012 | c3aa0741480c70f8b0345eaa4e90111979995ebe | /Weather/Actual-Weather-to-MQTT.py | c4af9f8a5737f683922cb8d8822ac849999ed9f3 | [] | no_license | CubicrootXYZ/Helpful-MQTT-Python-Scripts | 6397c5ae9bbe44b490f03c8143e32b7374224ce9 | e904b38fdd996d76789dd6dead9cea68eb154026 | refs/heads/master | 2020-06-11T23:08:34.811730 | 2019-08-07T17:00:15 | 2019-08-07T17:00:15 | 194,116,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,374 | py | # coding: utf-8
# just run this script via a cronjob every X Minutes
# this script pushes the actual weather from your choosen city to MQTT Channels (weather provided from openweathermap.org, you need a free account)
import json, requests, logging, traceback, datetime, time
import paho.mqtt.client as mqtt
#Settings
mqttip = "<IP FROM THE MQTT SERVER>"
cityID = '<OPENWEATHERMAP CITY ID>' #this is the ID from the City we want the weather from, just check out their list: http://bulk.openweathermap.org/sample/
appID = '<OPENWEATHERMAP APP ID - CREATE AN FREE ACCOUNT AND SETUP AN APP AT OPENWEATHERMAP.ORG>'
mqttchannel = "information/actualweather/"
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
client = mqtt.Client()
client.on_connect = on_connect
client.connect(mqttip, 1883, 60) #here you can change the Port of your MQTT Server, not needed for default settings
url = 'http://api.openweathermap.org/data/2.5/weather?id=' + cityID + '&appid=' + appID
#get data and parse it
resp = requests.get(url)
data = resp.json()
temp = data['main']['temp'] - 273.15
hum = data['main']['humidity']
pressure = data['main']['pressure']
rain = 'no rain'
snow = 'no snow'
thunderstorm = 'no thunderstorm'
clouds = data['clouds']['all']
windspeed = data['wind']['speed']
# I am converting the openweathermap weather-codes into small strings: (light/heavy) rain, etc. because I want to put them on a small display.
if data['weather'][0]['id'] >= 300 and data['weather'][0]['id'] < 400:
rain = 'l. rain'
elif data['weather'][0]['id'] == 500:
rain = 'l. rain'
elif data['weather'][0]['id'] >= 501 and data['weather'][0]['id'] <= 503:
rain = 'rain'
elif data['weather'][0]['id'] >= 504 and data['weather'][0]['id'] < 600:
rain = 'h. rain'
if data['weather'][0]['id'] >= 600 and data['weather'][0]['id'] < 700:
snow = 'snow'
if data['weather'][0]['id'] >= 200 and data['weather'][0]['id'] < 211:
thunderstorm = 'thunderstorm'
elif data['weather'][0]['id'] >= 211 and data['weather'][0]['id'] < 300:
thunderstorm = 'h. thunderstorm'
try:
weatherHourlyRain = data["rain"]["3h"]/3
except:
weatherHourlyRain = 0
try:
weatherHourlySnow = data["snow"]["3h"]/3
except:
weatherHourlySnow = 0
client.publish(mqttchannel+"Temperature1", payload=temp, retain=True, qos=1) # °C
time.sleep(1)
client.publish(mqttchannel+"Humidity1", payload=hum, retain=True, qos=1) # %
time.sleep(1)
client.publish(mqttchannel+"Pressure", payload=pressure, retain=True, qos=1) # hPa
time.sleep(1)
client.publish(mqttchannel+"Rain", payload=rain, retain=True, qos=1) #only "l. rain", "rain", "h. rain", "no rain"
time.sleep(1)
client.publish(mqttchannel+"Snow", payload=snow, retain=True, qos=1) #only "snow" or "no snow"
time.sleep(1)
client.publish(mqttchannel+"Thunderstorm", payload=thunderstorm, retain=True, qos=1) #only "no thunderstorm", "thunderstorm", "h. thunderstorm"
time.sleep(1)
client.publish(mqttchannel+"Clouds", payload=clouds, retain=True, qos=1) # %
time.sleep(1)
client.publish(mqttchannel+"Windspeed", payload=windspeed, retain=True, qos=1) # m/s
time.sleep(1)
client.publish(mqttchannel+"Rainamount", payload=weatherHourlyRain, retain=True, qos=1) # mm/h
time.sleep(1)
client.publish(mqttchannel+"Snowamount", payload=weatherHourlySnow, retain=True, qos=1) # mm/h
| [
"noreply@github.com"
] | noreply@github.com |
b14ca4e7607d03104004ff629ef74d9863f957c0 | f34fb848436ff38abd385928d49e5ba88392b71d | /mysite/polls/models.py | 369ce658e201a6460bb336f19387947b7263f54a | [] | no_license | adityag3/Polling_Website-Django | 333e20b1f46b2b3425dfd8948ff3e59bccf0210c | 352aa9ab474b5e3e238a6b746473194b57f037c5 | refs/heads/master | 2021-01-10T14:50:14.336173 | 2016-03-11T19:15:00 | 2016-03-11T19:15:00 | 53,206,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
# Create your models here.
#@python_2_unicode_compatible
class Question(models.Model):
question_text = models.CharField( max_length = 200 )
pub_date = models.DateTimeField('date published')
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta( days = 1 )
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
def __str__(self):
return self.question_text
#@python_2_unicode_compatible
class Choice(models.Model):
question = models.ForeignKey( Question, on_delete = models.CASCADE )
choice_text = models.CharField( max_length = 200 )
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| [
"adityagovil3@gmail.com"
] | adityagovil3@gmail.com |
105fa8d4cdfed72290458e5a57041240c95372ae | be6baafa884d5422d611587ca31495cbb7660a14 | /www/models.py | 99e14534f251ec3b4bffb6160bf7d003f5c806d8 | [] | no_license | protec21/www | 90405827d2c1985baeb1814e553c56b055986ad0 | 1cab267d034b5d27c3acf7fef60aa961f281af8d | refs/heads/master | 2023-07-18T00:04:20.580993 | 2021-08-17T01:30:32 | 2021-08-17T01:30:32 | 336,940,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.db import models
class Notice(models.Model):
title = models.CharField(max_length=200)
content = models.TextField()
date = models.DateTimeField()
class NoticeAttach(models.Model):
notice = models.ForeignKey(Notice, on_delete=models.CASCADE, null=True)
file = models.FileField(null=True, blank=True, upload_to="")
| [
"leekh529@gmail.com"
] | leekh529@gmail.com |
c5a506c058d8d1a23c61352ae6ad017163387afd | d2e2f05f8b894e32f43dac8a45819da54888109f | /0x01-python-if_else_loops_functions/9-print_last_digit.py~ | 726544fbcc4abd272721ae61b076e57545212c2f | [] | no_license | wolf-coder/holbertonschool-higher_level_programming | 1b249a63c77156fcb25dda616497dd1abc272e75 | a81ac6c7b7e59210b19f413bd413f999ed599d2c | refs/heads/master | 2023-05-31T20:47:54.785063 | 2021-07-09T14:21:47 | 2021-07-09T14:21:47 | 259,362,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | #!/usr/bin/python3
def print_last_digit(number):
return (5)
| [
"cuore.fakhri@gmail.com"
] | cuore.fakhri@gmail.com | |
bb81dcf53023df1f57952ce33ee109ad1f6587dd | d4bd60e24d9d40335ce403c5ee53401d1aaa1b9f | /django_project/customers/models.py | 96849f946efbdad9825c3f0774773b2b13901a41 | [] | no_license | Odeke12/clothing | d46d756c9083f0e84cf0121124c0de92b0b84ec8 | e85424d4a36090eddec8bdde552fdf70f9931ddb | refs/heads/master | 2022-12-18T15:22:49.260957 | 2020-09-29T10:47:14 | 2020-09-29T10:47:14 | 299,573,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from django.db import models
from django.contrib.auth.models import User
class User_profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(default = 'default.jpg', upload_to='shop_profile')
def __str__(self):
return f'{self.user.username} Customer-Profile'
| [
"trevodex@gmail.com"
] | trevodex@gmail.com |
ac5983589d1903d1d80a2ae9a87438dd66d95c9a | 7398b8196c769af2bb84f0c1e3e079c7c9bf0c22 | /Radar Eletronico.py | 3a139138c0e0ab1d58b207e31dadeb3fbde33c5e | [] | no_license | Inglaciela/PastaPython | e15576da7d1f9fe45e361b7c6ab054982801160a | cc31d9f20c128c2ac5361e30ec6eb64bbdfa02d5 | refs/heads/main | 2023-08-27T10:18:53.959821 | 2021-10-25T16:09:08 | 2021-10-25T16:09:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | velocidade = float(input('Qual é a velocidade atual do carro?'))
if velocidade > 80:
print('MULTADO! Voce execedeu o limite permitido que é 80km/h')
multa = (velocidade-80) * 7
print('Voce deve pagar uma multa de R${:.2f}!'.format(multa))
print('Tenha um bom dia! Dirija com segurança!')
#calculo conforme a velocidade "80" e multa por km "7" ex: | [
"noreply@github.com"
] | noreply@github.com |
a27b81a4aff77764f13ae4cbae0e51204cd02540 | ee9381b8452183901ce30cd878c18cb23991027a | /Bank App/YazilimBakimi/settings.py | 1f0cb0210337205cbd030ad3507c1cbdd880a688 | [] | no_license | oguzhandeveloper/Bank-App | cce0cb82d55b2caca616c57b3d7d564e39ea60ff | e32ca7a28085d730a0590c053979de8f9366b7ec | refs/heads/master | 2020-09-16T01:49:22.404782 | 2019-11-23T15:55:00 | 2019-11-23T15:55:00 | 223,613,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,468 | py | """
Django settings for YazilimBakimi project.
Generated by 'django-admin startproject' using Django 2.1.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xg8nuzomcxg&we*i4*bz4hp9q@#ze&vzbe(fsh!z&tswkithcv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'BankApp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'YazilimBakimi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ["templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'YazilimBakimi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
"""DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}"""
DATABASES = {
'default': {
'ENGINE': 'sql_server.pyodbc',
'NAME': 'yazlimbakimibanka',
'USER': '',
'PASSWORD': '',
'HOST': 'DESKTOP-3A61024\SQLEXPRESS',
'PORT': '',
'OPTIONS': {
'driver': 'ODBC Driver 13 for SQL Server',
},
},
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Istanbul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"30536489+oguzhandeveloper@users.noreply.github.com"
] | 30536489+oguzhandeveloper@users.noreply.github.com |
c51a92f4e41a3f3a5813610cdf58b8ed40d4d0bb | 08270ff4b2782493fbfed3727c3181589d1cf7e4 | /GUI/login_form.py | ccab67634ab7c0986a6b0dbd3cc6a4743de389f9 | [] | no_license | Valemos/hurryup_calendar | 6a60d0886f51c816bf464f15e7d6f569629cc295 | cbea7dead29b7e593a8e1660f027c15cb32cdfbb | refs/heads/master | 2023-02-04T21:23:39.336169 | 2020-12-24T14:34:26 | 2020-12-24T14:34:26 | 294,373,201 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import tkinter as tk
from GUI.main_window import MainWindow as mainWindow
class FormLogin(tk.Frame):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.init_ui()
def init_ui(self):
usernameLabel = tk.Label(self, text="Name").grid(row=0, column=0)
self.var_username = tk.StringVar()
usernameEntry = tk.Entry(self, textvariable=self.var_username).grid(row=0, column=1)
passwordLabel = tk.Label(self, text="Password").grid(row=3, column=0)
self.var_password = tk.StringVar()
passwordEntry = tk.Entry(self, textvariable=self.var_username, show='*').grid(row=3, column=1)
loginButton = tk.Button(self, text="Login", command=self.validateLogin).grid(row=4, column=0)
def start_main_window(self):
if mainWindow.counter == 0:
newWindow = tk.Toplevel(self)
window = mainWindow(newWindow)
def validateLogin(self):
print("username entered :", self.var_username.get())
print("password entered :", self.var_username.get())
self.start_main_window()
| [
"bsoloxa@gmail.com"
] | bsoloxa@gmail.com |
19edd54c98166d538eaa97782b65ec8c08b4783a | 999d05ac46cca3751040854f90a517ecdf0fec27 | /wiki_login_headless_chrome.py | 454397004e091fafea387f7511b57ba9b4e74259 | [] | no_license | adamreiser/selenium | 051c7e21f08bfa8f8aa2b8c60976debb46307f1a | 8069c36bd6840e20b3c540edb1425b6365f996fb | refs/heads/master | 2021-08-23T10:12:19.618798 | 2017-11-12T05:35:07 | 2017-11-12T05:35:07 | 107,916,413 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | #!/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import atexit
import tabcomplete
import credentials
options = webdriver.chrome.options.Options()
options.add_argument("--headless")
wd = webdriver.Chrome(chrome_options=options)
(u, p) = credentials.load("/root/credentials/mediawiki.txt")
atexit.register(wd.quit)
wd.implicitly_wait(30)
wd.get("http://localhost:8000")
wd.maximize_window()
wd.find_element_by_link_text("Log in").click()
wd.find_element_by_id("wpName1").send_keys(u)
wd.find_element_by_id('wpPassword1').send_keys(p)
wd.find_element_by_id('wpLoginAttempt').click()
| [
"reiser@defensivecomputing.io"
] | reiser@defensivecomputing.io |
d470117b87c20044939b34206f9e9d67c89cc690 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.0_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=76/params.py | 29fdaca73a9d96a41ddea9479708049d1a27dfc2 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
'duration': 30,
'final_util': '2.010214',
'max_util': '2.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 76,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
3ed03f450ecd93b825fa1583fb79154b40c83ff4 | 70d4ef0863906b3ca64f986075cd35b8412b871e | /packages/blueking/component/apis/sops.py | 9446f23f6bd7a4629b842b45ea8ea69b7a4e32f0 | [
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | selinagyan/bk-sops | 72db0ac33d9c307f51769e4baa181ceb8e1b279e | 39e63e66416f688e6a3641ea8e975d414ece6b04 | refs/heads/master | 2020-05-07T16:44:33.312442 | 2019-04-11T02:09:25 | 2019-04-11T02:09:25 | 180,696,241 | 0 | 0 | null | 2019-04-11T02:07:11 | 2019-04-11T02:07:10 | null | UTF-8 | Python | false | false | 2,426 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from ..base import ComponentAPI
class CollectionsSOPS(object):
"""Collections of SOPS APIS"""
def __init__(self, client):
self.client = client
self.create_task = ComponentAPI(
client=self.client, method='POST',
path='/api/c/compapi{bk_api_ver}/sops/create_task/',
description=u'创建任务'
)
self.get_task_status = ComponentAPI(
client=self.client, method='GET',
path='/api/c/compapi{bk_api_ver}/sops/get_task_status/',
description=u'查询任务或节点状态'
)
self.get_template_info = ComponentAPI(
client=self.client, method='GET',
path='/api/c/compapi{bk_api_ver}/sops/get_template_info/',
description=u'查询单个模板详情'
)
self.get_template_list = ComponentAPI(
client=self.client, method='GET',
path='/api/c/compapi{bk_api_ver}/sops/get_template_list/',
description=u'查询模板列表'
)
self.operate_task = ComponentAPI(
client=self.client, method='POST',
path='/api/c/compapi{bk_api_ver}/sops/operate_task/',
description=u'操作任务'
)
self.query_task_count = ComponentAPI(
client=self.client, method='POST',
path='/api/c/compapi{bk_api_ver}/sops/query_task_count/',
description=u'查询任务分类统计'
)
self.start_task = ComponentAPI(
client=self.client, method='POST',
path='/api/c/compapi{bk_api_ver}/sops/start_task/',
description=u'开始任务'
)
| [
"pagezhou@tencent.com"
] | pagezhou@tencent.com |
a6768d8a47950a39edd1f9814281fb0aa0803376 | 016281cc4878eef95130d646462e284b18d72a8e | /detection/models/detectors/faster_rcnn.py | a12651c61d53bcde392dc607b671daa5241b557d | [
"MIT"
] | permissive | ZhouLiyan111111/tf-eager-fasterrcnn | dbe6f46ff0f1c3391e1e43596b6d33efb3b25272 | 52817572ff29c0c207f4e12e6c020f6876cd9212 | refs/heads/master | 2020-04-06T22:21:21.645383 | 2018-11-15T09:20:07 | 2018-11-15T09:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,545 | py | import tensorflow as tf
from detection.models.backbones import resnet
from detection.models.necks import fpn
from detection.models.rpn_heads import rpn_head
from detection.models.bbox_heads import bbox_head
from detection.models.roi_extractors import roi_align
from detection.core.anchor import anchor_generator, anchor_target
from detection.core.loss import losses
from detection.core.bbox import bbox_target, transforms
class FasterRCNN(tf.keras.Model):
def __init__(self, num_classes, **kwags):
super(FasterRCNN, self).__init__(**kwags)
self.NUM_CLASSES = num_classes
# Anchor attributes
self.ANCHOR_SCALES = (32, 64, 128, 256, 512)
self.ANCHOR_RATIOS = (0.5, 1, 2)
# The strides of each layer of the FPN Pyramid.
self.FEATURE_STRIDES = (4, 8, 16, 32, 64)
# Bounding box refinement mean and standard deviation
self.RPN_TARGET_MEANS = (0., 0., 0., 0.)
self.RPN_TARGET_STDS = (0.1, 0.1, 0.2, 0.2)
self.PRN_PROPOSAL_COUNT = 2000
self.PRN_NMS_THRESHOLD = 0.7
self.ROI_BATCH_SIZE = 512
# Bounding box refinement mean and standard deviation
self.RCNN_TARGET_MEANS = (0., 0., 0., 0.)
self.RCNN_TARGET_STDS = (0.1, 0.1, 0.2, 0.2)
self.POOL_SIZE = (7, 7)
self.backbone = resnet.ResNet(depth=101, name='res_net')
self.neck = fpn.FPN(name='fpn')
self.rpn_head = rpn_head.RPNHead(anchors_per_location=len(self.ANCHOR_RATIOS),
proposal_count=self.PRN_PROPOSAL_COUNT,
nms_threshold=self.PRN_NMS_THRESHOLD,
target_means=self.RPN_TARGET_MEANS,
target_stds=self.RPN_TARGET_STDS,
name='rpn_head')
self.roi_align = roi_align.PyramidROIAlign(pool_shape=self.POOL_SIZE,
name='pyramid_roi_align')
self.bbox_head = bbox_head.BBoxHead(num_classes=self.NUM_CLASSES,
pool_size=self.POOL_SIZE,
name='b_box_head')
self.generator = anchor_generator.AnchorGenerator(
scales=self.ANCHOR_SCALES,
ratios=self.ANCHOR_RATIOS,
feature_strides=self.FEATURE_STRIDES)
self.anchor_target = anchor_target.AnchorTarget(
target_means=self.RPN_TARGET_MEANS,
target_stds=self.RPN_TARGET_STDS)
self.bbox_target = bbox_target.ProposalTarget(
target_means=self.RCNN_TARGET_MEANS,
target_stds=self.RPN_TARGET_STDS,
num_rcnn_deltas=self.ROI_BATCH_SIZE)
self.rpn_class_loss = losses.rpn_class_loss
self.rpn_bbox_loss = losses.rpn_bbox_loss
self.rcnn_class_loss = losses.rcnn_class_loss
self.rcnn_bbox_loss = losses.rcnn_bbox_loss
def __call__(self, inputs, training=True):
if training: # training
imgs, img_metas, gt_boxes, gt_class_ids = inputs
else: # inference
imgs, img_metas = inputs
C2, C3, C4, C5 = self.backbone(imgs,
training=training)
P2, P3, P4, P5, P6 = self.neck([C2, C3, C4, C5],
training=training)
rpn_feature_maps = [P2, P3, P4, P5, P6]
rcnn_feature_maps = [P2, P3, P4, P5]
layer_outputs = []
for p in rpn_feature_maps:
layer_outputs.append(self.rpn_head(p, training=training))
outputs = list(zip(*layer_outputs))
outputs = [tf.concat(list(o), axis=1) for o in outputs]
rpn_class_logits, rpn_probs, rpn_deltas = outputs
anchors, valid_flags = self.generator.generate_pyramid_anchors(img_metas)
proposals_list = self.rpn_head.get_proposals(
rpn_probs, rpn_deltas, anchors, valid_flags, img_metas)
if training:
rois_list, rcnn_target_matchs_list, rcnn_target_deltas_list = \
self.bbox_target.build_targets(
proposals_list, gt_boxes, gt_class_ids, img_metas)
else:
rois_list = proposals_list
pooled_regions_list = self.roi_align(
(rois_list, rcnn_feature_maps, img_metas), training=training)
rcnn_class_logits_list, rcnn_probs_list, rcnn_deltas_list = \
self.bbox_head(pooled_regions_list, training=training)
if training:
rpn_target_matchs, rpn_target_deltas = self.anchor_target.build_targets(
anchors, valid_flags, gt_boxes, gt_class_ids)
rpn_class_loss = self.rpn_class_loss(
rpn_target_matchs, rpn_class_logits)
rpn_bbox_loss = self.rpn_bbox_loss(
rpn_target_deltas, rpn_target_matchs, rpn_deltas)
rcnn_class_loss = self.rcnn_class_loss(
rcnn_target_matchs_list, rcnn_class_logits_list)
rcnn_bbox_loss = self.rcnn_bbox_loss(
rcnn_target_deltas_list, rcnn_target_matchs_list, rcnn_deltas_list)
return [rpn_class_loss, rpn_bbox_loss,
rcnn_class_loss, rcnn_bbox_loss]
else:
detections_list = self.bbox_head.get_bboxes(
rcnn_probs_list, rcnn_deltas_list, rois_list, img_metas)
return self.unmold_detections(detections_list, img_metas)
def unmold_detections(self, detections_list, img_metas):
return [
self._unmold_single_detection(detections_list[i], img_metas[i])
for i in range(img_metas.shape[0])
]
def _unmold_single_detection(self, detections, img_meta):
zero_ix = tf.where(tf.not_equal(detections[:, 4], 0))
detections = tf.gather_nd(detections, zero_ix)
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:, :4]
class_ids = tf.cast(detections[:, 4], tf.int32)
scores = detections[:, 5]
boxes = transforms.bbox_mapping_back(boxes, img_meta)
return {'rois': boxes.numpy(),
'class_ids': class_ids.numpy(),
'scores': scores.numpy()}
| [
"yiran_ding@outlook.com"
] | yiran_ding@outlook.com |
935174241eb1b2f79cf360d50e02ae02d92e4bad | cd3beb489210453beb41e681ca3c43a20e5bdf97 | /Python/Basic/get_count.py | f115a14a53c2dd495a4630d070d336b677d139ec | [] | no_license | pmlave/Codewars | ab09da97e8636d51528372b44db6d9717caec544 | 04f6c888b6297f04167d6429390956c64ac1de97 | refs/heads/master | 2020-09-16T15:42:50.624968 | 2019-12-02T04:05:23 | 2019-12-02T04:05:23 | 223,817,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | # Simple function takes an input string and returns a count of how many vowels are in the string.
def getCount(inputStr):
vowel_str = "aeiou"
return len([each for each in inputStr if each in vowel_str]) | [
"paullave@Pauls-MacBook-Pro.local"
] | paullave@Pauls-MacBook-Pro.local |
17c830370f9e553bbc15ff41060cc6c1ed6892e5 | fd02d742d3bb647e00e56709b18e723fdd75fe77 | /champions/boites/mathieu.py | a79855877d2dc6b437b20420cf7f63c3f886e372 | [] | no_license | yous29/stage-python | 7b778245ffd9ebd557b113dee7fd1083fc220988 | 2aeee762f82f9b12df41dcf0d1c15b3a9a484c80 | refs/heads/master | 2022-04-06T13:24:55.726799 | 2018-06-28T20:13:39 | 2018-06-28T20:13:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | def ia(boites):
if len(boites) == 1:
return "D"
if boites[0]-max(boites[1],boites[-1])>boites[-1]-max(boites[0],boites[-2]) :
return "G"
else :
return "D"
| [
"vie@jill-jenn.net"
] | vie@jill-jenn.net |
889a29dd98a7786a22e8d2fbde68e5a1ce2d4137 | a6ed0c42659f54f88024a9171c353e7cbe51328e | /Python/flask_MySQL/emailval/server.py | 1d9d6e7aa490bb6f47f766d7b83b106c0677f317 | [] | no_license | tomama1/Practice | c4a44a044fe67b3f4eb34dca0a0dd9ea38f4c766 | 8adecd0ee985db06497578a11d067ac16502da7b | refs/heads/master | 2021-09-05T04:32:42.020673 | 2018-01-24T05:51:16 | 2018-01-24T05:51:16 | 104,159,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | from flask import Flask, request, redirect, render_template, flash
from mysqlconnection import MySQLConnector
import re
app = Flask(__name__)
app.secret_key = ("CodingDojo")
mysql = MySQLConnector(app,'listserv')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/process', methods=['POST'])
def create():
# grabbing user input from form
email = request.form['emailcheck']
# query for checking the database
query = "SELECT email from listserv WHERE email = :echeck"
data = {
'echeck':email
}
emailcheck = mysql.query_db(query, data)
# if email exists in database
if emailcheck:
flash("Valid Email")
return redirect('/success')
else:
# regex check for valid email string
if re.match(r"[^@]+@[^@]+\.[^@]+",email):
# insert query into database
query = "INSERT INTO listserv (email, created_at, updated_at) VALUES (:emailtobeinserted, NOW(), NOW())"
# mysql.query_db("INSERT INTO listserv(email, created_at, updated_at) VALUES (:emailtobeinserted, NOW(), NOW())",{'emailtobeinserted':email})"
data = {
'emailtobeinserted': request.form['emailcheck']
}
mysql.query_db(query, data)
flash("Email has been Inserted!")
else:
# not a valid email string ( no @ sign)
flash("Not a valid email")
return redirect('/')
@app.route('/success')
def success():
# display all rows in the listserv table
emails = mysql.query_db("SELECT * FROM listserv")
return render_template('success.html', all_emails = emails)
@app.route('/goback')
def goback():
return redirect('/')
app.run(debug=True) | [
"matthewtoma123@gmail.com"
] | matthewtoma123@gmail.com |
308f9a29f07580702789f145882e743863ede567 | cfd096753a1c1074b645b75bf759ee5778c2d133 | /source/conf.py | 8c320df1d2d61ba1d70328c0579e8904e534e17d | [] | no_license | jmiranda1997/IA_Reconocimiento_Tomate | c5be2f4246a48d430560dfb883a7ef6a5211814b | 746a095ef62e590dbe318ee471eca6122436a04f | refs/heads/master | 2020-03-15T15:30:37.294092 | 2018-05-06T03:44:34 | 2018-05-06T03:44:34 | 132,213,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,106 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------

# Core metadata Sphinx shows in page titles, footers, and copyright notices.
project = "Inteligencia Artificial"
copyright = "2018, Juan Pablo Monroy, Jonathan Miranda"
author = "Juan Pablo Monroy, Jonathan Miranda"

# Short X.Y version string (intentionally left empty for this project).
version = ""
# Full version string, including any alpha/beta/rc tag.
release = "0.1B"
# -- General configuration ---------------------------------------------------

# Minimum required Sphinx version (none enforced here):
#
# needs_sphinx = '1.0'

# Sphinx extension modules to load. All of these are first-party extensions
# that ship with Sphinx itself ('sphinx.ext.*').
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "sphinx.ext.githubpages",
]

# Directories, relative to this file, searched for page templates.
templates_path = ["_templates"]

# Filename suffix(es) treated as source documents. A list of strings is
# also accepted, e.g. ['.rst', '.md'].
source_suffix = ".rst"

# Document that serves as the root of the toctree.
master_doc = "index"

# Language for content autogenerated by Sphinx; also used when doing
# content translation via gettext catalogs.
language = "es"

# Glob patterns, relative to the source directory, that are excluded when
# looking for source files. Also affects html_static_path and html_extra_path.
exclude_patterns = []

# Pygments style used for syntax highlighting.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'InteligenciaArtificialdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'InteligenciaArtificial.tex', 'Inteligencia Artificial Documentation',
'Juan Pablo Monroy, Jonathan Miranda', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'inteligenciaartificial', 'Inteligencia Artificial Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'InteligenciaArtificial', 'Inteligencia Artificial Documentation',
author, 'InteligenciaArtificial', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True | [
"j.miranda1997@gmail.com"
] | j.miranda1997@gmail.com |
5976844a6cca57ff3a82d77f0e94999f90db0980 | d51b2e9657d0ea1d8bcb8ed8da11a3f9dac72192 | /test/core/parser/grammar/__init__.py | 54ee5571517bcfd27feaef685e7e665a69802d81 | [
"MIT"
] | permissive | sqlfluff/sqlfluff | dae8294814471165582e12ea75ab8142e75f8e62 | a66da908907ee1eaf09d88a731025da29e7fca07 | refs/heads/main | 2023-08-28T20:07:59.624519 | 2023-08-27T22:17:24 | 2023-08-27T22:17:24 | 155,790,228 | 5,931 | 545 | MIT | 2023-09-14T18:05:19 | 2018-11-01T23:56:04 | Python | UTF-8 | Python | false | false | 57 | py | """Tests for the sqlfluff.core.parser.grammar module."""
| [
"noreply@github.com"
] | noreply@github.com |
f1ef84d165e5410c8ce81b3709c6af10a3bbda49 | 3c430bd14a54becf0a2e31758f30adb6857ddc9d | /venv/Scripts/pip3-script.py | e60f31232e248c211d271de03324d70e68e0adcc | [] | no_license | revanth12/LSTM-TENSORFLOW | ef5638fd8d653c1af1bb926c489d5c56cc82fbca | f8fe3ef8f600ac965731aba5edda42bd5f5e4e3b | refs/heads/master | 2020-03-28T21:28:59.344772 | 2018-09-17T17:22:01 | 2018-09-17T17:22:01 | 149,159,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #!C:\Users\Rreddy\PycharmProjects\LSTM-TENSORFLOW\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"revanthreddy.katanguri@oubt.org"
] | revanthreddy.katanguri@oubt.org |
f53f414f7ee5fbc8b13847a32418970ec312c561 | 4af454bced0f99e4ed8269d71e97284f0ef13afb | /loginserver/keys/rsa.py | 02c4c9811d4e423b0a531fa48b9e687d9ba12cbd | [] | no_license | L2jBrasil/L2py | c46db78238b4caf272a2399f4e4910fc256b3cca | d1c2e7bddb54d222f9a3d04262c09ad70329a226 | refs/heads/master | 2022-11-19T01:39:02.019777 | 2020-07-24T20:07:15 | 2020-07-24T20:07:15 | 292,115,581 | 1 | 1 | null | 2020-09-01T21:53:54 | 2020-09-01T21:53:54 | null | UTF-8 | Python | false | false | 2,315 | py | from Cryptodome.PublicKey import RSA
from M2Crypto import BIO
from M2Crypto import RSA as M2RSA
from common.helpers.bytearray import ByteArray
class L2RsaKey(RSA.RsaKey):
def scramble_mod(self) -> bytes:
n = ByteArray(self.n_bytes)
# step 1: 0x4d - 0x50 <-> 0x00 - 0x04
for i in range(4):
n[i], n[0x4d + i] = n[0x4d + i], n[i]
# step 2 : xor first 0x40 bytes with last 0x40 bytes
for i in range(0x40):
n[i] = n[i] ^ n[0x40 + i]
# step 3 : xor bytes 0x0d-0x10 with bytes 0x34-0x38
for i in range(4):
n[0x0d + i] = n[0x0d + i] ^ n[0x34 + i]
# step 4 : xor last 0x40 bytes with first 0x40 bytes
for i in range(0x40):
n[0x40 + i] = n[0x40 + i] ^ n[i]
return bytes(n)
@classmethod
def unscramble_mod(cls, n: bytes) -> int:
n = ByteArray(n)
for i in range(0x40):
n[0x40 + i] = n[0x40 + i] ^ n[i]
for i in range(4):
n[0x0d + i] = n[0x0d + i] ^ n[0x34 + i]
for i in range(0x40):
n[i] = n[i] ^ n[0x40 + i]
for i in range(4):
temp = n[0x00 + i]
n[0x00 + i] = n[0x4d + i]
n[0x4d + i] = temp
return int.from_bytes(bytes(n), "big")
@property
def n_bytes(self):
return self.n.to_bytes(128, "big")
@classmethod
def from_scrambled(cls, data) -> "L2RsaKey":
modulus = cls.unscramble_mod(data)
key = RSA.construct((modulus, 65537))
key.__class__ = L2RsaKey
return key
@classmethod
def generate(cls, bits=1024, randfunc=None, e=65537) -> "L2RsaKey":
key = RSA.generate(bits, randfunc, e)
key.__class__ = cls
return key
def __repr__(self):
return "L2" + super().__repr__()
@property
def m2crypto_key(self):
key_bio = BIO.MemoryBuffer(self.export_key())
if self.has_private():
return M2RSA.load_key_bio(key_bio)
else:
return M2RSA.load_pub_key_bio(key_bio)
@property
def scrambled_key(self):
scrambled_key = RSA.construct((int.from_bytes(self.scramble_mod(), "big"), self.e))
key_bio = BIO.MemoryBuffer(scrambled_key.export_key())
return M2RSA.load_key_bio(key_bio)
| [
"yurzs@icloud.com"
] | yurzs@icloud.com |
902a56826d2fec12ea7260dca767c2f42f2eeff1 | aac3dfcd0b7abbfb63d093d49617850571fdbed0 | /code/find_station_info.py | 295bb41d081b2c40c1b4a4ad39d1f4b5028b50ff | [] | no_license | ChenYu0723/SCD_System_2.0 | 62c9f51cfac75293268324a9a0264ca4d282b870 | ad5fad2ad961b590105a18b7d69cf846df4c1de2 | refs/heads/master | 2022-07-17T13:39:51.040931 | 2020-05-21T04:39:23 | 2020-05-21T04:39:23 | 265,559,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # -*- coding: utf-8 -*-
# @Time : 2019/12/14 15:49
# @Author : Chen Yu
import os
import pickle
import pandas as pd
os.chdir('..')
infile2 = 'data/raw_data/transferStations.pkl'
transferStation_info = pickle.load(open(infile2, 'rb'))
infile3 = 'data/raw_data/metroStations.csv'
station_id_df = pd.read_csv(infile3)
# print(transferStation_info[0][2040])
# ==== 输出整合后站点信息
transferStation_info[0].keys()
df = pd.DataFrame(transferStation_info[0]).T
df.columns = ['包含站点', '站点名称']
df.to_csv('result/transferStation_info.csv', encoding='utf-8') | [
"yuchen.723@bytedance.com"
] | yuchen.723@bytedance.com |
dcd2ab18421a62f877b727aca74ec29effb78b6d | 8663f48499aa4b9d6676df4bd29a7de8e87a803a | /chef-repo/cookbooks/Cabmate/files/default/cwebs.1.0.7/app/vehicle.py | 79f9ee04bb4792da7b19d2be6cfbdc804498032c | [] | no_license | ScottNeedham/chef | 34ed6accd24ec7efbcfd83ef5dd502d8edc2db9c | 0d8b293bb5ce58779f0ef67049dabe5b3137e16d | refs/heads/master | 2020-09-23T06:53:07.659832 | 2016-11-08T19:35:14 | 2016-11-08T19:35:14 | 67,830,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,557 | py | import time
import struct
import cabmsg
import os
import msgconf
import sysv_ipc
class VehicleParams(object):
def __init__(self, vehicle_dic=None):
self.taxi_no = -1
try:
#return self.init_object()
if vehicle_dic is not None and isinstance(vehicle_dic, dict):
try:
self.taxi_no = int(vehicle_dic["vehicle_number"])
except Exception as e:
pass
self.sys_ftypes = 0
self.usr_ftypes = 0
self.dtypes = 0
self.vtypes = 0
try:
self.fleet = int(vehicle_dic["fleet"])
except Exception as e:
self.fleet = 0
self.vehicle_type = 0
try:
self.baseunit = int(vehicle_dic["baseunit"])
except Exception as e:
self.baseunit = 0
self.from_zone = 32*[0]
if vehicle_dic.has_key("from_zone"):
for k in range(len(vehicle_dic["from_zone"])):
self.from_zone[k] = vehicle_dic["from_zone"][k]
#print 'from_zone ' , self.from_zone
# default
self.to_zone = 32*[0]
if vehicle_dic.has_key("to_zone"):
for k in range(len(vehicle_dic["to_zone"])):
self.to_zone[k] = vehicle_dic["to_zone"][k]
#print 'to_zone ' , self.to_zone
try:
self.training = int(vehicle_dic["training"])
except Exception as e:
self.training = 0
try:
self.max_passengers = vehicle_dic["maximum_passengers"]
except Exception as e:
self.max_passengers = 0
self.sspare = 0
# term type?
try:
self.fleet_name = vehicle_dic["fleet_name"].encode('utf8') if isinstance(vehicle_dic["fleet_name"], unicode) else vehicle_dic["fleet_name"]
except Exception as e:
self.fleet_name = '\0'
self.alias = '\0'
try:
self.alias = vehicle_dic["alias"].encode('utf8') if isinstance(vehicle_dic["alias"], unicode) else vehicle_dic["alias"]
except Exception as e:
self.alias = '\0'
try:
self.voice = vehicle_dic["voice"]
except Exception as e:
self.voice = 'N'
try:
self.sale = vehicle_dic["sale"]
except Exception as e:
self.sale = 'N'
# alternate_fleet
self.alternate_fleet = 10*[0]
if vehicle_dic.has_key("alternate_fleet"):
for k in range(len(vehicle_dic["alternate_fleet"])):
try:
if k < 10:
self.alternate_fleet[k] = int(vehicle_dic["alternate_fleet"][k])
except Exception as e:
continue
self.drivers = 10*[0]
if vehicle_dic.has_key("drivers"):
for k in range(len(vehicle_dic["drivers"])):
try:
if k < 10:
self.drivers[k] = long(vehicle_dic["drivers"][k])
except Exception as e:
continue
try:
self.cur_driver = int(vehicle_dic["cur_driver"])
except Exception as e:
self.cur_driver = 0
self.veh_class_32 = 0
if vehicle_dic.has_key("veh_class_32"):
for i in vehicle_dic["veh_class_32"]:
try:
print i, int(i), self.veh_class_32
itype = int(i)
if itype in range(32):
self.veh_class_32 = self.veh_class_32 + (1 << itype)
except Exception as e:
pass
self.disallowedchannels = ""
temp = 16*['N']
if vehicle_dic.has_key('disallowedchannels'):
for i in vehicle_dic["disallowedchannels"]:
try:
ic = int(i)
if ic in range(16):
temp[ic] = 'Y'
except Exception as e:
pass
for c in temp:
self.disallowedchannels = self.disallowedchannels + c
self.baseunit = 0
cnt = 0
for c in temp:
if c == 'N':
break
cnt = cnt + 1
self.baseunit = cnt
print 'base unit ', self.baseunit
self.VDM_MDTControlEnabled = 'N'
try:
self.VDM_MDTControlEnabled = vehicle_dic['VDM_MDTControlEnabled']
except Exception as e:
pass
self.RearSeatVivotechEnabled = 'N'
try:
self.RearSeatVivotechEnabled = vehicle_dic['RearSeatVivotechEnabled']
except Exception as e:
pass
self.VDM_MDTControl = 0
try:
self.VDM_MDTControl = vehicle_dic["VDM_MDTControl"]
except Exception as e:
pass
#'license_expiry_date': 1577854
#self.expiryTime, # expiryTime
self.expiryTime = 0
try:
self.expiryTime = vehicle_dic["license_expiry_date"]
except Exception as e:
pass
self.spare = 28*' '
self.new_alias = '\0'
try:
self.new_alias = vehicle_dic["long_alias"].encode('utf8') if isinstance(vehicle_dic["long_alias"], unicode) else vehicle_dic["long_alias"]
except Exception as e:
self.new_alias = '\0'
#self.new_alias 'long_alias':
#'mobile_phone': '22222222\x00', 'cur_driver': 0, 'phone': '11111111\x00
self.phone = '\0'
try:
self.phone = vehicle_dic["phone"]
except Exception as e:
self.phone = '\0'
self.mobile_phone = '\0'
try:
self.mobile_phone = vehicle_dic["mobile_phone"]
except Exception as e:
self.mobile_phone = '\0'
else:
self.init_object()
return
except Exception as e:
print ' Exception in init object ', str(e)
def init_object(self):
self.new_alias = '\0'
self.mobile_phone = '\0'
self.taxi_no = -1
self.sys_ftypes = 0
self.usr_ftypes = 0 # fdv_types: usr_ftypes user defined fare types
self.dtypes = 0 # fdv_types: dtypes_32 user defined driver types
self.vtypes = 0 # fdv_types: vtypes user defined vehicle types
self.taxi_no = 0 # taxi_number
self.fleet = 0 # fleet
self.vehicle_type = 0 # vehicle_type
self.baseunit = 0
self.from_zone = 32*[0] # baseunit
self.to_zone = 32*[0]
self.training = 0 # training: is this taxi working the training system?
self.max_passengers = 0 # max_passengers
self.sspare = 0 # sspare
self.comp_num = 0 # comp_num
self.termtype = 0 # termtype: MDT3602 = '0', MDT4021 = '1'
self.termrev = 4*[0] # termrev[4]: not used in 4.3
self.fleet_name = 24*[0] # fleet_name[24]
self.alias = 4*[0] # alias[4]
self.voice = 0 # voice
self.sale = 0 # sale
self.cspare = 2*[0] # cspare[2]
self.alternate_fleet = 32 *[0]
self.drivers = 32*[0]
self.cur_drive = 0 # cur_driver
self.veh_class_32 = 0 # self.veh_class_32, # veh_class_32
self.disallowedchannels = 16*[0] # disallowedchannels[16]
self.VDM_MDTControlEnabled = 0 # self.VDM_MDTControlEnabled
self.RearSeatVivotechEnabled = 0 # RearSeatVivotechEnabled
self.cspare = 2*[0] # CSpare[2]
self.VDM_MDTControl = 0 # VDM_MDTControl
self.expiryTime = 0
self.phone = '\0'
def vehicle_info_to_tuple(self):
if len(self.alias) > 4:
self.alias = self.alias[:4]
values1 = (
self.sys_ftypes, #0x00000080,# fdv_types: sys_ftypes system defines fare types
self.usr_ftypes, #0, # fdv_types: usr_ftypes user defined fare types
self.dtypes, # fdv_types: dtypes_32 user defined driver types
self.vtypes, # fdv_types: vtypes user defined vehicle types
self.taxi_no, # taxi_number
self.fleet, # fleet
self.vehicle_type, # vehicle_type
self.baseunit, # baseunit
self.from_zone[0], # from_zone0
self.from_zone[1], # from_zone1
self.from_zone[2], # from_zone2
self.from_zone[3], # from_zone3
self.from_zone[4], # from_zone4
self.from_zone[5], # from_zone5
self.from_zone[6], # from_zone6
self.from_zone[7], # from_zone7
self.from_zone[8], # from_zone8
self.from_zone[9], # from_zone9
self.from_zone[10], # from_zone10
self.from_zone[11], # from_zone11
self.from_zone[12], # from_zone12
self.from_zone[13], # from_zone13
self.from_zone[14], # from_zone14
self.from_zone[15], # from_zone15
self.from_zone[16], # from_zone16
self.from_zone[17], # from_zone17
self.from_zone[18], # from_zone18
self.from_zone[19], # from_zone19
self.from_zone[20], # from_zone20
self.from_zone[21], # from_zone21
self.from_zone[22], # from_zone22
self.from_zone[23], # from_zone23
self.from_zone[24], # from_zone24
self.from_zone[25], # from_zone25
self.from_zone[26], # from_zone26
self.from_zone[27], # from_zone27
self.from_zone[28], # from_zone28
self.from_zone[29], # from_zone29
self.from_zone[30], # from_zone30
self.from_zone[31], # from_zone31
self.to_zone[0], # to_zone0
self.to_zone[1], # to_zone1
self.to_zone[2], # to_zone2
self.to_zone[3], # to_zone3
self.to_zone[4], # to_zone4
self.to_zone[5], # to_zone5
self.to_zone[6], # to_zone6
self.to_zone[7], # to_zone7
self.to_zone[8], # to_zone8
self.to_zone[9], # to_zone9
self.to_zone[10], # to_zone10
self.to_zone[11], # to_zone11
self.to_zone[12], # to_zone12
self.to_zone[13], # to_zone13
self.to_zone[14], # to_zone14
self.to_zone[15], # to_zone15
self.to_zone[16], # to_zone16
self.to_zone[17], # to_zone17
self.to_zone[18], # to_zone18
self.to_zone[19], # to_zone19
self.to_zone[20], # to_zone20
self.to_zone[21], # to_zone21
self.to_zone[22], # to_zone22
self.to_zone[23], # to_zone23
self.to_zone[24], # to_zone24
self.to_zone[25], # to_zone25
self.to_zone[26], # to_zone26
self.to_zone[27], # to_zone27
self.to_zone[28], # to_zone28
self.to_zone[29], # to_zone29
self.to_zone[30], # to_zone30
self.to_zone[31], # to_zone31
self.training, # training: is this taxi working the training system?
self.max_passengers, # max_passengers
self.sspare, # sspare
'0', # comp_num
'1', # termtype: MDT3602 = '0', MDT4021 = '1'
'0', # termrev[4]: not used in 4.3
self.fleet_name, # fleet_name[24]
self.alias, # alias[4]
'\0', # voice
self.sale, # sale
'SS', # cspare[2]
self.alternate_fleet[0], # alternate_fleet0
self.alternate_fleet[1], # alternate_fleet1
self.alternate_fleet[2], # alternate_fleet2
self.alternate_fleet[3], # alternate_fleet3
self.alternate_fleet[4], # alternate_fleet4
self.alternate_fleet[5], # alternate_fleet5
self.alternate_fleet[6], # alternate_fleet6
self.alternate_fleet[7], # alternate_fleet7
self.alternate_fleet[8], # alternate_fleet8
self.alternate_fleet[9], # alternate_fleet9
self.drivers[0], # drivers0
self.drivers[1], # drivers1
self.drivers[2], # drivers2
self.drivers[3], # drivers3
self.drivers[4], # drivers4
self.drivers[5], # drivers5
self.drivers[6], # drivers6
self.drivers[7], # drivers7
self.drivers[8], # drivers8
self.drivers[9], # drivers9
0, # cur_driver
self.veh_class_32, # self.veh_class_32, # veh_class_32
self.disallowedchannels, # disallowedchannels[16]
self.VDM_MDTControlEnabled, # self.VDM_MDTControlEnabled
self.RearSeatVivotechEnabled, # RearSeatVivotechEnabled
'SS', # CSpare[2]
0, # VDM_MDTControl
self.expiryTime, # expiryTime
"0123456789012345678901234567")
values2 = (
self.taxi_no,
self.new_alias,
self.phone,
self.mobile_phone
)
print 'values1 ', values1
print 'values2 ', values2
return (values1, values2)
def vehicle_add_cmt(self, dData, dData0):
ss = struct.Struct('5I 4c 320s')
s = struct.Struct('4I 71h 2c 4s 24s 4s 2c 2s 10h 12I 16s 2c 2s 2I 28s') # taxi info data
packed_data = s.pack( *dData)
data_size = s.size
cabmsg.gmsg_send(packed_data, data_size, msgconf.TFC, 0, msgconf.MT_ADD_TAXI, ss)
ss0 = struct.Struct('5I 4c 51s') # packet 1
s0 = struct.Struct('h 9s 20s 20s') # extended taxi info data
packed_data0 = s0.pack( *dData0)
data_size0 = s0.size
cabmsg.gmsg_send(packed_data0, data_size0, msgconf.TFC, 0, msgconf.MT_EXTENDED_TAXI_INFO, ss0)
return
def vehicle_modify_cmt(self, dData, dData0):
#ss = struct.Struct('5I 4c 320s')
s = struct.Struct('4I 71h 2c 4s 24s 4s 2c 2s 10h 12I 16s 2c 2s 2I 28s') # taxi info data
packed_data = s.pack( *dData)
data_size = s.size
ss = struct.Struct(cabmsg.base_fmt + '%ds' % (s.size))
cabmsg.gmsg_send(packed_data, data_size, msgcof.TFC, 0, msgconf.MT_MODTAXINFO, ss)
#ss0 = struct.Struct('5I 4c 51s') # packet 1
s = struct.Struct('h 9s 20s 20s') # extended taxi info data
packed_data0 = s.pack( *dData0)
data_size = s.size
ss = struct.Struct(cabmsg.base_fmt + '%ds' % (s.size))
cabmsg.gmsg_send(packed_data, data_size, msgconf.TFC, 0, msgconf.MT_EXTENDED_TAXI_INFO, ss)
return
def vehicle_info_to_struct(self):
frmt = '4I 71h 2c 4s 24s 4s 2c 2s 10h 12I 16s 2c 2s 2I 28s'
s = struct.Struct(frmt)
dData, dDataE = self.vehicle_info_to_tuple()
print 'vehicle data: ', dData
print 'vehicle extended data: ', dDataE
packed_data = s.pack(*dData)
data_size = s.size
print 'data packed ... '
sE = struct.Struct("h 9s 20s 20s") # extended taxi info data
packed_dataE = sE.pack( *dDataE)
data_sizeE = sE.size
print 'returning packed stuff'
return (packed_data, data_size, packed_dataE, data_sizeE)
def add_modify_vehicle(self, action):
try:
if self.taxi_no > -1:
print 'taxi_no ', self.taxi_no, 'copying to sturct '
packed_data, data_size, packed_dataE, data_sizeE = self.vehicle_info_to_struct()
print 'data_size ', data_size, ' data_sizeE ', data_sizeE
ss = struct.Struct('I I I I I c c c c 208s')
print 'add_modify_vehicle action ', action, ' code ', msgconf.vehicle_msg[action]
packed_msg = ss.pack(msgconf.CWEBS,
msgconf.TFC,
0,
msgconf.vehicle_msg[action],
data_size,
'0',
'0',
'1',
'a',
packed_data)
print 'first message ... is prepared...'
ssE = struct.Struct('5I 4c 51s')
packed_msgE = ss.pack(msgconf.CWEBS,
msgconf.TFC,
0,
msgconf.MT_EXTENDED_TAXI_INFO,
data_sizeE,
'0',
'0',
'1',
'a',
packed_dataE)
print 'second message ... is prepared...'
print 'msgconf.MT_EXTENDED_TAXI_INFO ', msgconf.MT_EXTENDED_TAXI_INFO
mq = sysv_ipc.MessageQueue(msgconf.TFC, flags=sysv_ipc.IPC_CREAT, mode=0666, max_message_size = 8064)
try:
print 'sending 1st message'
mq.send(packed_msg, block=False, type=msgconf.ROUT_PRI)
#except sysv_ipc.BusyError:
except Exception as e:
#print "Queue is full"
print str(e)
print '1st message - done'
try:
print 'sending 2nd message'
mq.send(packed_msgE, block=False, type=msgconf.ROUT_PRI)
#except sysv_ipc.BusyError:
except Exception as e:
print str(e)
#mq.remove()
print '2nd message - done'
else:
print 'invalid vehicle'
except Exception as e:
print 'exception is raised in add_modify_vehicle', str(e)
return
def delete_vehicle(self, taxi_no):
try:
if taxi_no > -1:
driver_id = 0
fleet = 0
dData = (driver_id, taxi_no, fleet)
s = struct.Struct('2I h') # tif index data
packed_data = s.pack(*dData)
data_size = s.size
ss = struct.Struct('I I I I I c c c c 10s')
packed_msg = ss.pack(msgconf.CWEBS,
msgconf.TFC,
0,
msgconf.MT_DELETE_TAXI,
data_size,
'0',
'0',
'1',
'a',
packed_data)
mq = sysv_ipc.MessageQueue(msgconf.TFC, flags=sysv_ipc.IPC_CREAT, mode=0666, max_message_size = 8064)
try:
mq.send(packed_msg, block=False, type=msgconf.ROUT_PRI)
except sysv_ipc.BusyError:
print "Queue is full - will not send"
#mq.remove()
except ValueError:
pass
def read_taxirec(self, taxinum):
bError = False
taxifile = "/data/taxirecs.fl"
taxirec_fmts = [
'8I 2f',
'540c',
'9I 10h 12c',
#'4I 1h 1h 300c', # == taxi info struct =320 bytes (320c)
'4I 1h 1h 1h 1h 32h 32h 1h 1h 1h 2c 4s 24s 4s 2c 2s 10h 10I 1I 1I 16s 2c 2s 2I 28s', ## # == taxi info struct =320 bytes (320c)
'24s 2I 2h 4I 2h 1I 1h 2c',
'1h 2c 64I',
'3I 33s 33s 2s 3I 16I 4c 1I 4h 1I',
'1h 9c 18s 4s 1c 9s 20s 277c '
'5I 1I 2c 1h 2f 2I 2c 1h 5f 2c 1h 4I 1c 20s 1c 130s'
]
rec_fmt = ''.join(taxirec_fmts)
rec_size = struct.Struct(rec_fmt).size
bError = False
try:
with open(taxifile, "rb") as f:
count = 0
bRun=True
while bRun:
count = count + 1
data = f.read(rec_size)
if not data:
bError=True
fp.close()
break
else:
if (len(data) == rec_size):
udata = struct.Struct(rec_fmt).unpack(data)
tis_start = 581
taxi_number_offset = 4
fleetname_offset = 4 + 71 + 2 + 1
alias_offset = fleetname_offset + 1
alternate_offset = alias_offset + 4
driver_offset = alternate_offset + 10
curdrv_offset = driver_offset + 10
if udata[tis_start + taxi_number_offset ] == taxinum:
print ' Found it ==> taxi#[%d] fleet#[%d] fleetname=[%s] alias[%s] curdrv [%d] \n' \
% ( udata[585] \
, udata[586] \
, udata[tis_start+fleetname_offset] \
, udata[tis_start+alias_offset] \
, udata[tis_start+curdrv_offset ] \
)
for i in range(10):
print 'alternate fleet %d ' % (udata[tis_start+alternate_offset + i])
for i in range(10):
print 'drivers %d ' % (udata[tis_start+driver_offset + i])
bRun=False
else:
bRun=False
except Exception as e:
bError = True
print 'exception ', e
return bError
if __name__ == "__main__":
try:
print 'test: do nothing'
mytaxi = 9006
dic = {}
myTaxiRec = VehicleParams(dic)
myTaxiRec.read_taxirec(mytaxi)
except Exception as e:
print 'Exception in main ', str(e)
| [
"sneedham@MacBook-Pro.local"
] | sneedham@MacBook-Pro.local |
201469154abd03cbf07a934dc0cc53c89c917b00 | 5ea51b48e1db9b401f397cec703481f9c817ce62 | /Exercícios/ex003 - Imprime Soma.py | 8d196e0f807e277a3bb313c364c24d53fdd64014 | [
"MIT"
] | permissive | Dioclesiano/Estudos_sobre_Python | a75040bfab5c425d43e4a28ada75ab0d7ef73adf | ba9166048fe824fef96fcf6cf3696714e79f4235 | refs/heads/main | 2023-04-01T17:34:16.533197 | 2021-04-14T13:50:06 | 2021-04-14T13:50:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | n1 = int(input('Digite um Número: '))
n2 = int(input('Digite outro Número: '))
s = n1 + n2
print('A soma entre {} e {} é igual a {}'.format(n1, n2, s))
| [
"diowpaz@gmail.com"
] | diowpaz@gmail.com |
5abc5f914f15ec1a78a3917a9d9825fa90c0d333 | 0dd49a6ddd206477b0651396ff7d8a9811b19ac0 | /Learning_ML/face_detection.py | 5120542e3c0909f3764fdde66dc5c66596d05d44 | [] | no_license | gauravpratihast/Python_Projects | b7a7fd5309c3b86bffb78a34aea1c01e02c9f72e | 0c0a50a9691ae755749e238c176c5b8423f2e402 | refs/heads/master | 2023-02-08T19:00:15.541537 | 2021-01-05T06:35:57 | 2021-01-05T06:35:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | import cv2
cap = cv2.VideoCapture(0)
classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
while cap.isOpened():
ret, frame = cap.read()
if ret:
faces = classifier.detectMultiScale(frame)
for face in faces:
x, y, w, h = face
frame = cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 4)
cv2.imshow('My window', frame)
key = cv2.waitKey(1)
if key == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| [
"pratihastgaurav@gmail.com"
] | pratihastgaurav@gmail.com |
ea0207d1f4614c56c66b011cec3e7d9ecefe2d10 | 58f6184fbfe4782bccf7803fbb978b5a5f93bb50 | /src/scs_analysis/cmd/cmd_sample_tally.py | a7dff9ca4f518978eee941ce646bb2796fd1ea4b | [
"MIT"
] | permissive | seoss/scs_analysis | d41db35a1c7d97d75776a797df099749dbced824 | c203093fd6728eafe576a1798bd9040ca18c73f8 | refs/heads/master | 2020-04-04T20:14:48.026665 | 2018-11-05T12:51:23 | 2018-11-05T12:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | """
Created on 22 Aug 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdSampleTally(object):
"""unix command line handler"""
def __init__(self):
"""
Constructor
"""
self.__parser = optparse.OptionParser(usage="%prog [-t TALLY] [-p PRECISION] [-v] [PATH]", version="%prog 1.0")
# optional...
self.__parser.add_option("--tally", "-t", type="int", nargs=1, action="store", dest="tally",
help="generate a rolling aggregate for TALLY number of data points (default all)")
self.__parser.add_option("--prec", "-p", type="int", nargs=1, action="store", default=None, dest="precision",
help="precision (default 0 decimal places)")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
if self.tally is not None and self.tally < 1:
return False
return True
# ----------------------------------------------------------------------------------------------------------------
@property
def tally(self):
return self.__opts.tally
@property
def precision(self):
return self.__opts.precision
@property
def verbose(self):
return self.__opts.verbose
@property
def path(self):
return self.__args[0] if len(self.__args) > 0 else None
@property
def args(self):
return self.__args
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdSampleTally:{tally:%s, tally:%s, verbose:%s, path:%s, args:%s}" % \
(self.tally, self.precision, self.verbose, self.path, self.args)
| [
"bruno.beloff@southcoastscience.com"
] | bruno.beloff@southcoastscience.com |
602d5661471469217459de0236ec43a9a1f0e8de | 81344c55ed60bf12818d1a0ec246f3c24c79cb4c | /力扣习题/8字符串转整数/atoi.py | 73fb9e626ac6852e2287bfbded03dddd0161775c | [
"MIT"
] | permissive | lollipopnougat/AlgorithmLearning | 7d5c4a37bd5c814c5caea6963e81fbe0cb44b7b7 | cb13caa0159f0179d3c1bacfb1801d156c7d1344 | refs/heads/master | 2023-05-11T04:47:09.758889 | 2023-05-07T06:55:48 | 2023-05-07T06:55:48 | 194,078,151 | 7 | 2 | MIT | 2023-03-25T01:23:44 | 2019-06-27T10:53:08 | Python | UTF-8 | Python | false | false | 144 | py | class Solution:
def myAtoi(self, str: str) -> int:
return max(min(int(*re.findall('^[\+\-]?\d+', str.lstrip())), 2**31 - 1), -2**31) | [
"ab2defg145@gmail.com"
] | ab2defg145@gmail.com |
ee05a5e82535721478ceacafffb4b4bdcce04bfa | 37ef9240250351287c9558cea8a646ca5d8431fc | /docs/conf.py | fb0d9b458e8a73cbd3779ab66263ad7f67761a32 | [
"MIT"
] | permissive | lucashn/peakutils | f45b10eabc53f6f0565e9ab63747df033691c77f | f48d65a9b55f61fb65f368b75a2c53cbce132a0c | refs/heads/master | 2022-09-18T17:37:22.453986 | 2022-08-05T13:09:35 | 2022-08-05T13:09:35 | 102,883,046 | 28 | 7 | MIT | 2022-08-05T13:03:51 | 2017-09-08T16:40:03 | Jupyter Notebook | UTF-8 | Python | false | false | 6,799 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
import peakutils
from better import better_theme_path
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PeakUtils'
copyright = '2014 - 2020, Lucas Hermann Negri'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = peakutils.__version__
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# Theme path/name come from the `better` package imported at the top of
# this file.
html_theme_path = [better_theme_path]
html_theme = 'better'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PeakUtilsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'PeakUtils.tex', 'PeakUtils Documentation',
     'Lucas Hermann Negri', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Man page output: one entry per page
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'peakutils', 'PeakUtils Documentation',
    ['Lucas Hermann Negri'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'PeakUtils', 'PeakUtils Documentation',
     'Lucas Hermann Negri', 'PeakUtils', 'Peak detection utilities',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"lucashnegri@gmail.com"
] | lucashnegri@gmail.com |
610bec6c5aaa43962cc9e88912c2a38b2f7e20fc | f7d84bcecbc9ed55465a2d6c5a205c1a51ee4422 | /mongo.py | 88a487833bee84eead2ad5ae0ebbdd98d30f010f | [] | no_license | sophie-apples/Mongodblessons | 6948b40366372d6a52e1050d82d493d0fa7ad066 | a9418a293085572ad4da9210e33fe30a4a2aa7fe | refs/heads/master | 2023-01-02T03:04:58.222214 | 2020-10-27T15:46:44 | 2020-10-27T15:46:44 | 307,695,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | import os
import pymongo
# Load a local env.py when present -- presumably it populates environment
# variables (e.g. MONGO_URI) for development; confirm against that file.
if os.path.exists("env.py"):
    import env
# Connection settings; MONGO_URI is None when the env var is unset.
MONGO_URI = os.environ.get("MONGO_URI")
DATABASE = "myFirstDb"
COLLECTION = "celebrities"
def mongo_connect(url):
    """Build a MongoDB client for *url*.

    Returns the ``MongoClient`` on success, or ``None`` when construction
    raises ``ConnectionFailure``.
    NOTE(review): MongoClient connects lazily, so this does not prove the
    server is reachable -- confirm whether an explicit ping is wanted.
    """
    try:
        conn = pymongo.MongoClient(url)
        print("Mongo is connected")
        return conn
    except pymongo.errors.ConnectionFailure as e:
        # Bug fix: the original wrote `print("...: %s") % e`, which applied
        # `%` to print()'s None return value and raised TypeError instead
        # of reporting the connection error.
        print("Could not connect to MongoDB: %s" % e)
# Connect and print every document matching {"first": "douglas"}.
# NOTE(review): mongo_connect returns None on ConnectionFailure, in which
# case the subscript below raises TypeError -- consider guarding.
conn = mongo_connect(MONGO_URI)
coll = conn[DATABASE][COLLECTION]
documents = coll.find({"first": "douglas"})
for doc in documents:
    print(doc)
| [
"sophieannemurray@gmail.com"
] | sophieannemurray@gmail.com |
33adfce55baf4f800d3148500f069515676b25c7 | df4811e2427ef948820a42a34a6c4b39d08db668 | /streams/migrations/0007_videoquality.py | 4fdac644c0f159b7239ffa0e0d13dfec8a325b15 | [] | no_license | AnbangZhao/liveStreamingWeb | c58a4c7b36a04f76c3cf6a94ce42a2dc46564c7b | 80cb3a62bd0523f07f9d1c04d8cbc0f226b9571b | refs/heads/master | 2016-09-08T00:32:00.695423 | 2014-12-04T20:32:10 | 2014-12-04T20:32:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: creates the `videoQuality` model.

    Maps a video identifier to a quality label for the streams app.
    """
    dependencies = [
        ('streams', '0006_auto_20141118_1919'),
    ]
    operations = [
        migrations.CreateModel(
            name='videoQuality',
            fields=[
                # sVideo doubles as the primary key, so no auto `id` column
                # is generated for this table.
                ('sVideo', models.CharField(max_length=30, serialize=False, primary_key=True)),
                ('sQuality', models.CharField(max_length=10)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| [
"anbang@anbangs-mbp.wv.cc.cmu.edu"
] | anbang@anbangs-mbp.wv.cc.cmu.edu |
0ddbd6f2826fc77d43da601ac9710287be3d9766 | c780278735657733ef3f490008ed9424d9420842 | /base/DoubleCircleLinkedList.py | 19da8041225eea0d103aa9f273e8c4ea4f69e864 | [
"MIT"
] | permissive | MnkyC/Algorithms-Python | 085d1de45a3fbe4ec5d3a478f435d87cf1e5840c | 01d2c9099f185f549c07e6e3162b9b2d55ba6fd6 | refs/heads/master | 2020-12-03T23:45:27.268546 | 2020-05-20T09:30:17 | 2020-05-20T09:30:17 | 231,526,921 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,961 | py | from base import DoubleNode
from DoubleNode import Node
# Doubly linked circular list: the tail's next is the head, the head's prev
# is the tail; an empty list is represented by a None head.
class DoubleCircleLinkedList(object):
    def __init__(self):
        self.__head = None

    def add(self, data):
        """Prepend *data*; the new node becomes the head."""
        newNode = Node(data)
        if self.isEmpty():
            # Single node points at itself in both directions.
            newNode.setPrev(newNode)
            newNode.setNext(newNode)
            self.__head = newNode
        else:
            tail = self.__head.getPrev()
            newNode.setNext(self.__head)
            newNode.setPrev(tail)
            tail.setNext(newNode)
            self.__head.setPrev(newNode)
            self.__head = newNode

    def append(self, data):
        """Append *data* after the current tail; the head is unchanged."""
        if self.isEmpty():
            self.add(data)
        else:
            newNode = Node(data)
            tail = self.__head.getPrev()
            newNode.setNext(self.__head)
            newNode.setPrev(tail)
            tail.setNext(newNode)
            self.__head.setPrev(newNode)

    def insert(self, data, pos=0):
        """Insert *data* at index *pos* (clamped to the list ends)."""
        if pos <= 0:
            self.add(data)
        elif pos > self.size() - 1:
            self.append(data)
        else:
            newNode = Node(data)
            # Walk to the node at index pos-1 and splice the new node
            # in right after it.
            prev = self.__head
            for _ in range(pos - 1):
                prev = prev.getNext()
            newNode.setPrev(prev)
            newNode.setNext(prev.getNext())
            prev.getNext().setPrev(newNode)
            prev.setNext(newNode)

    def remove(self, data):
        """Remove nodes holding *data*.

        Preserves the original semantics: when the head matches, only the
        head node is removed; otherwise every matching non-head node is
        removed.
        """
        if self.isEmpty():
            return
        head = self.__head
        if head.getData() == data:
            if self.size() == 1:
                self.__head = None
            else:
                tail = head.getPrev()
                nxt = head.getNext()
                self.__head = nxt
                nxt.setPrev(tail)
                tail.setNext(nxt)
            return
        # Bug fix: the original ended with an unreachable "tail" branch
        # that, had it ever run, corrupted the ring by calling
        # head.setNext(tail.prev) instead of head.setPrev(...). The loop
        # below already unlinks the tail correctly, so that branch is gone.
        cur = head
        while cur.getNext() is not self.__head:
            nxt = cur.getNext()
            if nxt.getData() == data:
                nxt.getPrev().setNext(nxt.getNext())
                nxt.getNext().setPrev(nxt.getPrev())
            else:
                cur = nxt

    def search(self, data):
        """Return True when *data* is stored in the list."""
        return data in self.travel()

    def size(self):
        """Return the number of nodes in the list."""
        return len(self.travel())

    def isEmpty(self):
        """Return True when the list holds no nodes."""
        return self.__head is None

    def travel(self):
        """Return the node payloads in order, starting from the head."""
        if self.isEmpty():
            return []
        items = [self.__head.getData()]
        node = self.__head.getNext()
        while node is not self.__head:
            items.append(node.getData())
            node = node.getNext()
        return items
| [
"1589664970@qq.com"
] | 1589664970@qq.com |
e7b6c241b5f80e25dae1240d0b7d4aadb11d3c3b | 37d8a02e4976a8ca516500d5b9d2fa6626c2b9e3 | /B_Scorecard/featureEngineering/featureEncoding/woe_encoding.py | a55295c53be636df70c9000195dbbcad0b50335b | [] | no_license | sucre111/xiaoxiang_fengkong_peixun | b0bb59243346fc02fea8126d729af1fb29bf907d | 5eac4e3011e5bbc7e59e79296c12e81074166551 | refs/heads/master | 2021-09-17T05:16:19.362017 | 2018-06-28T11:33:29 | 2018-06-28T11:33:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,896 | py | import pandas as pd
import numpy as np
import pickle
from settings import *
from util.scorecard_functions import *
"""
对预处理后的特征进行分箱与WOE编码
"""
class WOEEncoding:
    """Chi-square binning and WOE encoding of preprocessed scorecard features."""
    def __init__(self, file):
        self.categoricalFeatures = []
        self.numericalFeatures = []
        self.WOE_IV_dict = {}
        self.train_data = pd.read_csv(file)
        self.toRemove_list = []
        self.bin_dict = []
    def feature_encoding_process(self):
        """End-to-end pipeline: split, encode, and persist the results."""
        # Split features by value distribution: more than NUM_BINS distinct
        # values -> numerical, otherwise categorical.
        featureList = [x for x in self.train_data.columns if x not in ['CUST_ID', 'label']]
        for f in featureList:
            if len(set(self.train_data[f])) > NUM_BINS:
                self.numericalFeatures.append(f)
            else:
                self.categoricalFeatures.append(f)
        # Chi-square binning and WOE encoding, handled separately for
        # categorical and numerical features.
        self.categorical_feature_encoding()
        self.numerical_feature_encoding()
        # Persist the final WOE encodings and binning info for the later
        # multivariate analysis and model training steps.
        self.save()
    def not_monotone_feature_process(self, not_monotone_list):
        """Hand-crafted binning for features whose bad rate is not monotone."""
        print(not_monotone_list)
        '''
        for var in not_monotone_list:
            print(self.train_data.groupby(var)['label'].mean())
            print(self.train_data.groupby(var)['label'].count())
        '''
        self.train_data['M1FreqL3M_Bin'] = self.train_data['M1FreqL3M'].apply(lambda x: int(x >= 1))
        print(self.train_data.groupby('M1FreqL3M_Bin')['label'].mean())
        self.train_data['M2FreqL3M_Bin'] = self.train_data['M2FreqL3M'].apply(lambda x : int(x >= 1))
        print(self.train_data.groupby('M2FreqL3M_Bin')['label'].mean())
        # Compute WOE values for the merged binary bins.
        self.compute_woe('M1FreqL3M_Bin')
        self.compute_woe('M2FreqL3M_Bin')
    def compute_woe(self, var):
        """Compute WOE/IV for *var* and add a <var>_WOE column to train_data."""
        new_var = var + '_WOE'
        self.WOE_IV_dict[new_var] = CalcWOE(self.train_data, var, 'label')
        self.train_data[new_var] = self.train_data[var].map(lambda x : self.WOE_IV_dict[new_var]['WOE'][x])
        print(self.WOE_IV_dict.get(new_var))
    def categorical_feature_encoding(self):
        """Bin-merge and WOE-encode all categorical features."""
        not_monotone = []
        # 1. First merge the categorical features whose bad rate is not
        # monotone and compute their WOE values.
        for var in self.categoricalFeatures:
            if not BadRateMonotone(self.train_data, var, target='label'):
                not_monotone.append(var)
        self.not_monotone_feature_process(not_monotone)
        # 2. For the remaining monotone categorical features, check whether
        # any bin holds less than 5% of the samples. Features with such a
        # bin go into small_bin_var, the rest into large_bin_var, and the
        # two groups are then processed separately.
        small_bin_var = []
        large_bin_var = []
        N = self.train_data.shape[0]
        for var in self.categoricalFeatures:
            if var not in not_monotone:
                total = self.train_data.groupby([var])[var].count()
                pcnt = total * 1.0 / N
                if min(pcnt) < 0.05:
                    small_bin_var.append({var : pcnt.to_dict()})
                else:
                    large_bin_var.append(var)
        # 2.1 Handle the variables in small_bin_var.
        for i in small_bin_var:
            print(i)
        '''
        正常的变量:
        {'maxDelq1M': {0: 0.60379372931421049, 1: 0.31880138083205806, 2: 0.069183956724438597, 3: 0.0082209331292928574}}
        {'maxDelq3M': {0: 0.22637816292394747, 1: 0.57005587387451506, 2: 0.18068258656891703, 3: 0.022883376632620377}}
        {'maxDelq6M': {0: 0.057226235809103528, 1: 0.58489625965336844, 2: 0.31285810882949572, 3: 0.045019395708032317}}
        需要被删除的无效变量:
        {'M2FreqL1M': {0: 0.99177906687070716, 1: 0.0082209331292928574}}
        {'M2FreqL6M': {0: 0.95498060429196774, 1: 0.04003701199330937, 2: 0.0045909107085661408, 3: 0.00032029609594647497,
        4: 7.1176910210327775e-05}}
        {'M2FreqL12M': {0: 0.92334246770347694, 1: 0.066514822591551295, 2: 0.0092174098722374465, 3: 0.00081853446741876937,
        4: 0.00010676536531549166}}
        '''
        # M2FreqL1M, M2FreqL6M and M2FreqL12M have extremely unbalanced bin
        # distributions that could hurt the model, so they are dropped.
        self.toRemove_list.append('M2FreqL1M')
        self.toRemove_list.append('M2FreqL6M')
        self.toRemove_list.append('M2FreqL12M')
        # maxDelq1M, maxDelq3M, maxDelq6M are well-behaved: merge their bins
        # (0 / 1 / >=2) and compute WOE values.
        self.train_data['maxDelqL1M_Bin'] = self.train_data['maxDelqL1M'].apply(lambda x: MergeByCondition(x,['==0','==1','>=2']))
        self.train_data['maxDelqL3M_Bin'] = self.train_data['maxDelqL3M'].apply(lambda x: MergeByCondition(x,['==0','==1','>=2']))
        self.train_data['maxDelqL6M_Bin'] = self.train_data['maxDelqL6M'].apply(lambda x: MergeByCondition(x,['==0','==1','>=2']))
        self.compute_woe('maxDelqL1M_Bin')
        self.compute_woe('maxDelqL3M_Bin')
        self.compute_woe('maxDelqL6M_Bin')
        # 2.2 Handle the variables in large_bin_var: their raw bad rates are
        # already monotone without merging, so compute WOE and IV directly.
        for var in large_bin_var:
            self.compute_woe(var)
    '''
    Numerical features are binned first and WOE/IV computed afterwards.
    The binning must satisfy:
    1: no more than 5 bins
    2: a monotone bad rate
    3: every bin holds at least 5% of the samples
    '''
    def numerical_feature_encoding(self):
        """Chi-square bin every numerical feature until its bad rate is monotone."""
        for var in self.numericalFeatures:
            max_bins = NUM_BINS
            print(var)
            # Chi-square binning first: discretize the feature and cap the
            # number of bins at max_bins via chi-square merging.
            # NOTE: `bin` shadows the builtin of the same name.
            bin = ChiMerge(self.train_data, var, 'label', max_interval=max_bins, minBinPcnt= 0.05)
            print('chiMerge bin: ', bin)
            new_var = var + '_Bin'
            self.train_data[new_var] = self.train_data[var].apply(lambda x : AssignBin(x, bin))
            while not BadRateMonotone(self.train_data, new_var, 'label'):
                print('not monotone, ChiMerge to make badrate monotone for var: ', var)
                max_bins -= 1 # reduce the bin count, merge further, then re-check monotonicity
                print('max bin:', max_bins)
                bin = ChiMerge(self.train_data, var, 'label', max_interval=max_bins, minBinPcnt= 0.05)
                self.train_data[new_var] = self.train_data[var].apply(lambda x: AssignBin(x, bin))
            # Once the bad rate is monotone, compute the WOE values.
            self.compute_woe(new_var)
            self.bin_dict.append({var: bin})
    def save(self):
        """Persist WOE/IV results, encoded data and feature lists to disk."""
        # Save all WOE-encoded features and their WOE/IV values locally.
        with open(ROOT_DIR + 'featureEngineering/WOE_IV_dict.pkl', 'wb') as f:
            print(self.WOE_IV_dict)
            pickle.dump(self.WOE_IV_dict, f)
        #print(self.train_data.columns)
        self.train_data.to_csv(ROOT_DIR + 'featureEngineering/train_WOE_data.csv', index=None)
        with open(ROOT_DIR + 'featureEngineering/numericalFeatures.pkl', 'wb') as f1:
            pickle.dump(self.numericalFeatures, f1)
        with open(ROOT_DIR + 'featureEngineering/categoricalFeatures.pkl', 'wb') as f2:
            pickle.dump(self.categoricalFeatures, f2)
        with open(ROOT_DIR + 'featureEngineering/bin_dict.pkl', 'wb') as f3:
            pickle.dump(self.bin_dict, f3)
if __name__ == '__main__':
woeEncoding = WOEEncoding(ROOT_DIR + 'featureEngineering/train_derived_feature_data.csv')
woeEncoding.feature_encoding_process() | [
"526633123@qq.com"
] | 526633123@qq.com |
027cde9e9d81df2cc6ae16184a4efc50dc2bef3d | c8844d322204bdad5743874ed67b235287f9494b | /backend/schemas/user.py | b77e6887113f08e8a888e6245587b0a238482799 | [] | no_license | monstermahi982/studentCorner | 44daacb2fed7213ea7eff2a3c59d6aa948746627 | b99a3429a3874f060de533181d4a6d89681e7109 | refs/heads/main | 2023-08-29T12:42:54.806112 | 2021-10-15T15:15:34 | 2021-10-15T15:15:34 | 396,654,555 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | def userEntity(item) -> dict:
return {
"id": str(item['_id']),
"name": item['name'],
"email": item['email'],
"gender": item['gender'],
"profile": item['image']
}
def userPassword(item) -> dict:
    """Project a raw Mongo user document onto its id and password."""
    return {"id": str(item["_id"]), "password": item["password"]}
def userLogin(item) -> dict:
    """Project a raw Mongo user document onto its login credentials.

    NOTE(review): byte-for-byte identical to userPassword -- consider
    consolidating the two serializers.
    """
    return {"id": str(item["_id"]), "password": item["password"]}
def usersEntity(entity) -> list:
    """Serialize an iterable of user documents via userEntity."""
    return list(map(userEntity, entity))
| [
"maheshgaikwad8892@gmail.com"
] | maheshgaikwad8892@gmail.com |
360b8cbd5f40666df3195b013af0d257512aac24 | b57bce6b9668ebad5c26452469ad5faf79fee5bf | /tasashop/migrations/0030_auto_20210625_2039.py | 27806efc9cb692d2acc1953269e938eb17e3a950 | [] | no_license | DelaCernaJal/TasaShop | c86e95674a0d7727894b4f501a6166b7b06060fb | 2dc077ab2650659a979dc3e415cc8f0bd288442f | refs/heads/main | 2023-06-12T21:24:01.177354 | 2021-06-28T04:01:38 | 2021-06-28T04:01:38 | 377,349,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # Generated by Django 3.1.6 on 2021-06-25 20:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: moves the `cdesign` FK from Product to CartItem.

    The field is nullable with on_delete=SET_NULL, so cart items survive
    deletion of the referenced `entries` row (the FK is just nulled out).
    """
    dependencies = [
        ('tasashop', '0029_auto_20210625_2032'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='product',
            name='cdesign',
        ),
        migrations.AddField(
            model_name='cartitem',
            name='cdesign',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tasashop.entries'),
        ),
    ]
| [
"sharalyn.delacerna@gsfe.tupcavite.edu.ph"
] | sharalyn.delacerna@gsfe.tupcavite.edu.ph |
01c6d90a4e5dba7d1adfa4c83e8e3c64205ce6eb | c5aba731eed7ccbcdd733c9234579a31027949ba | /src/narma/helper.py | 7d39756588d398020d71bcf4254f2aee3147cf1b | [
"MIT"
] | permissive | Hritikbansal/MultivariateTimeSeries_Generalization | 75cba7afeaf0ef048d17cbd39c352a9e46ee7158 | eebf3bb142acf3b85390e41c5ff067c8c8fef55d | refs/heads/main | 2023-03-10T17:37:32.769579 | 2021-02-20T14:11:27 | 2021-02-20T14:11:27 | 313,085,597 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,749 | py | import argparse
import torch
import utils
import datetime
import os
import pickle
import random
import numpy as np
from torch.utils import data
from torch import nn
import torch.nn.functional as F
from collections import defaultdict
import sys
def save_predictions(save_folder, used_params, target_, predicted_, i):
    """Append target/prediction pairs to a per-run CSV file.

    Both tensors are expected to be 3-D; each is flattened to
    (batch, seq*features) and the two halves are written side by side,
    targets first, one CSV row per batch element.
    """
    targets = target_.cpu()
    preds = predicted_.detach().cpu().numpy()
    flat_targets = torch.reshape(targets, (targets.shape[0], targets.shape[1] * targets.shape[2])).float()
    flat_preds = np.reshape(preds, (preds.shape[0], preds.shape[1] * preds.shape[2]))
    out_path = save_folder + used_params + "target_prediction_" + str(i) + ".csv"
    # Append mode so successive calls for the same run accumulate rows.
    with open(out_path, 'ab') as f:
        np.savetxt(f, np.concatenate((flat_targets, flat_preds), axis=1), delimiter=',')
def get_indices(control):
    """Return every index i where control[i] differs from control[i+1].

    *control* is a 1-D tensor (only size(-1) and item indexing are used).
    """
    last = control.size(-1) - 1
    return [i for i in range(last) if control[i] != control[i + 1]]
def getHitsandMRR(pred_states, next_states):
	"""Rank-based evaluation of predicted states vs. ground-truth next states.

	pred_states / next_states: lists of tensors, concatenated along dim 0.
	Returns (num_samples, topk, hits_at, rr_sum): hits_at[k] counts samples
	whose true next state is among the k nearest predictions, and rr_sum
	accumulates reciprocal ranks (numerator of MRR).
	NOTE(review): `save_folder` and `used_params` are not defined in this
	module -- the np.savetxt call below raises NameError unless they are
	injected as globals elsewhere. Confirm before running.
	"""
	topk = [1, 2, 5, 10]
	hits_at = defaultdict(int)
	num_samples = 0
	rr_sum = 0
	pred_state_cat = torch.cat(pred_states, dim=0)
	next_state_cat = torch.cat(next_states, dim=0)
	full_size = pred_state_cat.size(0)
	# Flatten object/feature dimensions
	next_state_flat = next_state_cat.view(full_size, -1)
	pred_state_flat = pred_state_cat.view(full_size, -1)
	dist_matrix = utils.pairwise_distance_matrix(
		next_state_flat, pred_state_flat)
	# Prepend the diagonal (true-pair distances) as column 0 so that the
	# correct match competes against all candidates under label index 0.
	dist_matrix_diag = torch.diag(dist_matrix).unsqueeze(-1)
	dist_matrix_augmented = torch.cat(
		[dist_matrix_diag, dist_matrix], dim=1)
	# Workaround to get a stable sort in numpy.
	dist_np = dist_matrix_augmented.detach().numpy()
	np.savetxt(save_folder+used_params+"dist_np.csv",dist_np,delimiter=',',fmt='%10.3f')#.shape,dist_matrix.shape,dist_matrix_diag.shape,dist_matrix_augmented.shape)
	indices = []
	for row in dist_np:
		keys = (np.arange(len(row)), row)
		indices.append(np.lexsort(keys))
	indices = np.stack(indices, axis=0)
	indices = torch.from_numpy(indices).long()
	labels = torch.zeros(
		indices.size(0), device=indices.device,
		dtype=torch.int64).unsqueeze(-1)
	num_samples += full_size
	print('Size of current topk evaluation batch: {}'.format(
		full_size))
	for k in topk:
		match = indices[:, :k] == labels
		num_matches = match.sum()
		hits_at[k] += num_matches.item()
	match = indices == labels
	_, ranks = match.max(1)
	reciprocal_ranks = torch.reciprocal(ranks.double() + 1)
	rr_sum += reciprocal_ranks.sum()
	return num_samples, topk, hits_at, rr_sum
def per_obj_mse(x1, x2):
    """Mean squared error per object.

    x1, x2: tensors of shape (bsz, o); returns the per-column MSE with
    shape (o,) (squeezed).
    """
    return (x1 - x2).pow(2).mean(dim=0).squeeze()
def getStages_norm(model):
    """Sum of mean absolute values over all first-stage weight matrices.

    Scans model.named_parameters() and accumulates ||w||_1 / numel(w) for
    every parameter whose name contains both "first_stage" and "weight".
    """
    total = 0
    for name, param in model.named_parameters():
        if "first_stage" in name and "weight" in name:
            flat = param.view(-1)
            total += torch.norm(flat, 1) / flat.size(0)
    return total
def getTM_norm(model):
    """Sum of mean absolute values over all transition-model weight matrices.

    Accumulates ||w||_1 / numel(w) for every parameter whose name contains
    both "transition_model_" and "weight".
    """
    total = 0
    for name, param in model.named_parameters():
        if "transition_model_" in name and "weight" in name:
            flat = param.view(-1)
            total += torch.norm(flat, 1) / flat.size(0)
    return total
def applyGroupLassoStages(model, lr, lambda_1, emb_dim):
	"""In-place proximal group-lasso step on first-stage weight matrices.

	Weights named 'first_stage_0_1'/'first_stage_1_0' are split column-wise
	into two groups ([:, :emb_dim] and [:, emb_dim:]); each group is scaled
	by softshrink(||g||_2) / ||g||_2, which zeroes any group whose L2 norm
	is at or below the threshold lr*lambda_1.
	"""
	softShrink = nn.Softshrink(lr*lambda_1)
	n = emb_dim
	with torch.no_grad():
		for name, param in model.named_parameters():
			if "first_stage_0_1" in name or "first_stage_1_0" in name:
				if "weight" in name:
					normTensor = torch.norm(param[:,:n], p=2, keepdim = True)
					# clamp floor (0.1 * threshold) avoids dividing by a near-zero norm
					param[:,:n] = param[:,:n]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
					normTensor = torch.norm(param[:,n:], p=2, keepdim = True)
					param[:,n:] = param[:,n:]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
			# if "second_stage" in name:
			# 	if "weight" in name:
			# 		normTensor = torch.norm(param[:,:n], p=2, keepdim = True)
			# 		param[:,:n] = param[:,:n]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
			# 		normTensor = torch.norm(param[:,n:], p=2, keepdim = True)
			# 		param[:,n:] = param[:,n:]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
	'''
	if "extract_state_ls" in name:
		if "weight" in name:
			normTensor = torch.norm(param[:,:n], p=2, keepdim = True)
			param[:,:n] = param[:,:n]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
			normTensor = torch.norm(param[:,n:], p=2, keepdim = True)
			param[:,n:] = param[:,n:]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
	'''
def applyGroupLassoTM(model, lr, lambda_1, emb_dim):
	"""In-place proximal group-lasso step on transition-model weights.

	Same column-group soft-thresholding as applyGroupLassoStages, applied
	to every weight whose parameter name contains 'transition_model_'.
	"""
	softShrink = nn.Softshrink(lr*lambda_1)
	n = emb_dim
	with torch.no_grad():
		for name, param in model.named_parameters():
			if "transition_model_" in name:
				if "weight" in name:
					normTensor = torch.norm(param[:,:n], p=2, keepdim = True)
					# clamp floor avoids dividing by a near-zero norm
					param[:,:n] = param[:,:n]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
					normTensor = torch.norm(param[:,n:], p=2, keepdim = True)
					param[:,n:] = param[:,n:]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
def applyGroupLassoBaseLine(model, lr, lambda_1, emb_dim):
	"""In-place block-wise group lasso on the baseline transition MLP.

	The weights of model.transition_model[0] (and the square layer
	transition_model[2]) are tiled into emb_dim x emb_dim blocks; each
	block is soft-thresholded by its L2 norm so whole blocks can be zeroed.
	"""
	softShrink = nn.Softshrink(lr*lambda_1)
	n = emb_dim
	size = model.transition_model[0].weight.data.shape
	# Both dimensions must tile exactly into n x n blocks.
	assert size[0]%n == 0
	assert size[1]%n == 0
	assert model.transition_model[2].weight.data.shape[0] == model.transition_model[2].weight.data.shape[1]
	O, I = size[0]//n, size[1]//n
	with torch.no_grad():
		for name, param in model.named_parameters():
			if "transition_model.0" in name:
				if "weight" in name:
					for i in range(O):
						for j in range(I):
							normTensor = torch.norm(param[i*n:(i+1)*n,j*n:(j+1)*n], p=2, keepdim = True)
							param[i*n:(i+1)*n,j*n:(j+1)*n] = param[i*n:(i+1)*n,j*n:(j+1)*n]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
			if "transition_model.2" in name:
				if "weight" in name:
					for i in range(O):
						for j in range(O):
							normTensor = torch.norm(param[i*n:(i+1)*n,j*n:(j+1)*n], p=2, keepdim = True)
							param[i*n:(i+1)*n,j*n:(j+1)*n] = param[i*n:(i+1)*n,j*n:(j+1)*n]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_1*0.1)
def applyGroupLassoDecoder(model, lr, lambda_2, emb_dim, num_objects):
	"""In-place group lasso on the decoder weight, one group per embedding slice.

	The decoder weight (num_objects x K*emb_dim) is split into K column
	slices of width emb_dim; each slice is soft-thresholded by its L2 norm
	so entire embedding slots can be pruned.
	"""
	softShrink = nn.Softshrink(lr*lambda_2)
	n = emb_dim
	size = model.decoder.weight.data.shape
	assert size[0] == num_objects
	assert size[1]%n == 0
	K = size[1]//n
	with torch.no_grad():
		for i in range(K):
			normTensor = torch.norm(model.decoder.weight.data[:,i*n:(i+1)*n], p=2, keepdim=True)
			# clamp floor avoids dividing by a near-zero norm
			model.decoder.weight.data[:,i*n:(i+1)*n] = model.decoder.weight.data[:,i*n:(i+1)*n]*softShrink(normTensor)/torch.clamp(normTensor, min=lr*lambda_2*0.1)
| [
"hritik@LAPTOP-OAMAAP5J.localdomain"
] | hritik@LAPTOP-OAMAAP5J.localdomain |
8dade03e82f228670399492b9ce5f608fe0057ae | 1979fdf3275c6b6bc1abb1f5531346d5777e8b5d | /app/groceries.py | fb302b7cdbd9f64bd7cfd48ef2424ac48b6c1635 | [] | no_license | NYRChang/groceries-exercise | 14920fff0654becdabb3dfa518285236289fddfd | 5342e0710ddf9ac10b0b70ebe7a187a84d929089 | refs/heads/master | 2022-09-09T19:53:19.144440 | 2020-06-03T01:03:52 | 2020-06-03T01:03:52 | 267,189,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,277 | py | # groceries.py
#from pprint import pprint
products = [
{"id":1, "name": "Chocolate Sandwich Cookies", "department": "snacks", "aisle": "cookies cakes", "price": 3.50},
{"id":2, "name": "All-Seasons Salt", "department": "pantry", "aisle": "spices seasonings", "price": 4.99},
{"id":3, "name": "Robust Golden Unsweetened Oolong Tea", "department": "beverages", "aisle": "tea", "price": 2.49},
{"id":4, "name": "Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce", "department": "frozen", "aisle": "frozen meals", "price": 6.99},
{"id":5, "name": "Green Chile Anytime Sauce", "department": "pantry", "aisle": "marinades meat preparation", "price": 7.99},
{"id":6, "name": "Dry Nose Oil", "department": "personal care", "aisle": "cold flu allergy", "price": 21.99},
{"id":7, "name": "Pure Coconut Water With Orange", "department": "beverages", "aisle": "juice nectars", "price": 3.50},
{"id":8, "name": "Cut Russet Potatoes Steam N' Mash", "department": "frozen", "aisle": "frozen produce", "price": 4.25},
{"id":9, "name": "Light Strawberry Blueberry Yogurt", "department": "dairy eggs", "aisle": "yogurt", "price": 6.50},
{"id":10, "name": "Sparkling Orange Juice & Prickly Pear Beverage", "department": "beverages", "aisle": "water seltzer sparkling water", "price": 2.99},
{"id":11, "name": "Peach Mango Juice", "department": "beverages", "aisle": "refrigerated", "price": 1.99},
{"id":12, "name": "Chocolate Fudge Layer Cake", "department": "frozen", "aisle": "frozen dessert", "price": 18.50},
{"id":13, "name": "Saline Nasal Mist", "department": "personal care", "aisle": "cold flu allergy", "price": 16.00},
{"id":14, "name": "Fresh Scent Dishwasher Cleaner", "department": "household", "aisle": "dish detergents", "price": 4.99},
{"id":15, "name": "Overnight Diapers Size 6", "department": "babies", "aisle": "diapers wipes", "price": 25.50},
{"id":16, "name": "Mint Chocolate Flavored Syrup", "department": "snacks", "aisle": "ice cream toppings", "price": 4.50},
{"id":17, "name": "Rendered Duck Fat", "department": "meat seafood", "aisle": "poultry counter", "price": 9.99},
{"id":18, "name": "Pizza for One Suprema Frozen Pizza", "department": "frozen", "aisle": "frozen pizza", "price": 12.50},
{"id":19, "name": "Gluten Free Quinoa Three Cheese & Mushroom Blend", "department": "dry goods pasta", "aisle": "grains rice dried goods", "price": 3.99},
{"id":20, "name": "Pomegranate Cranberry & Aloe Vera Enrich Drink", "department": "beverages", "aisle": "juice nectars", "price": 4.25}
] # based on data from Instacart: https://www.instacart.com/datasets/grocery-shopping-2017
#print(products)
#pprint(products)
# TODO: write some Python code here to produce the desired output
# HEADER: print how many products are in the catalog.
products_count = len(products)
separator = "--------------"
print(separator)
print("THERE ARE", products_count, "PRODUCTS:")
print(separator)
# LOOP THROUGH THE PRODUCTS AND PRINT EACH ONE
# "id":1
# "name": "Chocolate Sandwich Cookies"
# "department": "snacks"
# "aisle": "cookies cakes"
# "price": 3.50
def sort_by_name(product):
    """Sort key: a product's display name."""
    return product["name"]
# Print each product alphabetically with its price formatted as USD.
sorted_products = sorted(products, key=sort_by_name)
for product in sorted_products:
    # format() already returns a str, so the original's extra str() wrapper
    # around price_usd was redundant and is removed.
    price_usd = "${0:.2f}".format(product["price"])
    print("+ " + product["name"] + " (" + price_usd + ")")
# --------------
# THERE ARE 20 PRODUCTS:
# --------------
# + All-Seasons Salt ($4.99)
# + Chocolate Fudge Layer Cake ($18.50)
# + Chocolate Sandwich Cookies ($3.50)
# + Cut Russet Potatoes Steam N' Mash ($4.25)
# + Dry Nose Oil ($21.99)
# + Fresh Scent Dishwasher Cleaner ($4.99)
# + Gluten Free Quinoa Three Cheese & Mushroom Blend ($3.99)
# + Green Chile Anytime Sauce ($7.99)
# + Light Strawberry Blueberry Yogurt ($6.50)
# + Mint Chocolate Flavored Syrup ($4.50)
# + Overnight Diapers Size 6 ($25.50)
# + Peach Mango Juice ($1.99)
# + Pizza For One Suprema Frozen Pizza ($12.50)
# + Pomegranate Cranberry & Aloe Vera Enrich Drink ($4.25)
# + Pure Coconut Water With Orange ($3.50)
# + Rendered Duck Fat ($9.99)
# + Robust Golden Unsweetened Oolong Tea ($2.49)
# + Saline Nasal Mist ($16.00)
# + Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce ($6.99)
# + Sparkling Orange Juice & Prickly Pear Beverage ($2.99)
# HEADER2: report each department (alphabetical) with its product count.
# A single counting pass replaces the original's list-membership dedup
# followed by a redundant list(set(...)) dedup, plus a full rescan of
# `products` for every department. Printed output is unchanged.
dept_counts = {}
for p in products:
    dept = p["department"]
    dept_counts[dept] = dept_counts.get(dept, 0) + 1
department_count = len(dept_counts)
print("--------------")
print("THERE ARE", department_count, "DEPARTMENTS:")
print("--------------")
for d in sorted(dept_counts):
    dept_products_count = dept_counts[d]
    # Singular/plural label depending on the count.
    label = "products" if dept_products_count > 1 else "product"
    print("+ " + d.title() + " (" + str(dept_products_count) + " " + label + ")")
# --------------
# THERE ARE 10 DEPARTMENTS:
# --------------
# + Babies (1 product)
# + Beverages (5 products)
# + Dairy Eggs (1 product)
# + Dry Goods Pasta (1 product)
# + Frozen (4 products)
# + Household (1 product)
# + Meat Seafood (1 product)
# + Pantry (2 products)
# + Personal Care (2 products)
# + Snacks (2 products)
| [
"65420925+NYRChang@users.noreply.github.com"
] | 65420925+NYRChang@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.