blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M โ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
01d0e72016da46d391601df674992e03cde1592e | c02fa471348a8839fe021cd5c08180b4bf1db9bf | /21.05.12/mootube.py | e726f34cb64061ca452a028f57103bd5abb9cb54 | [] | no_license | kangsungsu/python3_coding | 75da0c1648c5a6bfa9a9aa92907ff2e43c8739ca | c9f26faaabd7e113440afa62a43ecc8dc1db34d8 | refs/heads/main | 2023-04-23T16:55:59.317901 | 2021-05-16T04:29:44 | 2021-05-16T04:29:44 | 352,661,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | from collections import deque
# Read the tree: n nodes and g queries, followed by n-1 weighted edges.
n,g=map(int,input().split())
# Adjacency list: board[u] holds (neighbor, edge weight); index 0 is unused
# because nodes are numbered from 1.
board=[[] for _ in range(n+1)]
#print(board)
for _ in range(n-1):
    p,q,r=map(int,input().split())
    # The tree is undirected, so record the edge in both directions.
    board[p].append((q,r))
    board[q].append((p,r))
def bfs(board, start, visited, distance):
    """Breadth-first traversal recording, for every reachable node, the
    smallest edge weight encountered on the path from `start`.

    `distance` must arrive pre-filled with a large sentinel; the start
    node's own entry is intentionally never updated.
    """
    frontier = deque([start])
    visited[start] = True
    while frontier:
        node = frontier.popleft()
        for neighbor, weight in board[node]:
            if visited[neighbor]:
                continue
            visited[neighbor] = True
            # Bottleneck value: min of this edge and the path so far.
            distance[neighbor] = min(distance[neighbor], weight, distance[node])
            frontier.append(neighbor)
    return distance
# For each query (k, v): starting from node v, count the nodes whose
# bottleneck distance (minimum edge weight along the path) is at least k.
for i in range(g):
    k,v=map(int,input().split())
    visited=[False for _ in range(n+1)]
    # 1000000000 is the sentinel for "unreached" (and the start node itself).
    distance=[1000000000 for _ in range(n+1)]
    distance=bfs(board,v,visited,distance)
    count=0
    for j in distance:
        if j==1000000000 or j<k:
            continue
        count+=1
    print(count)
    #print('m',i,q)
| [
"tmdtn0312@naver.com"
] | tmdtn0312@naver.com |
bcbd72200497efbfa37d4aa594662a472c482df3 | 63d500150c735bd82a1c50dd8f07120ad9e81023 | /cirrus_py/__init__.py | 7694eda6c44a0baf80ba255b23a9da787abbd725 | [
"MIT"
] | permissive | elbow-jason/cirrus_py | 49321a23793c72994a524812e8df133d4999f965 | 5ea41518a168709de40b1e1dcd4378acbd72e88d | refs/heads/master | 2021-01-20T01:25:21.099025 | 2017-04-24T22:02:41 | 2017-04-24T22:02:41 | 89,276,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py |
import cirrus_py.credentials
import cirrus_py.google_jwt
| [
"jlgoldb2@asu.edu"
] | jlgoldb2@asu.edu |
a42a9c55720280e1d5a817ef39c97018f2d3d454 | 84e4430be61a6925e41f6fb1792228f7e4c048d5 | /chapter_09/06_zoom_augmentation.py | 41c8134036e9bb848f4755f282bc05d5e4ea3ba0 | [] | no_license | athertahir/deep-learning-python | 193d7f493554223887040fc1752ea11151a50d9f | 4d9f5df1f8e79382b5c045bcbb095344e22b6fce | refs/heads/master | 2020-08-18T12:48:21.749587 | 2019-10-17T16:36:24 | 2019-10-17T16:36:24 | 213,608,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,224 | py | # %%
'''
## Random Zoom Augmentation
A zoom augmentation randomly zooms the image and either adds new pixel values around
the image or interpolates pixel values respectively. Image zooming can be configured by the
zoom range argument to the ImageDataGenerator constructor. You can specify the percentage
of the zoom as a single float or a range as an array or tuple. If a float is specified, then the range
for the zoom will be [1-value, 1+value]. For example, if you specify 0.3, then the range will be
[0.7, 1.3], or between 70% (zoom in) and 130% (zoom out). The zoom amount is uniformly
randomly sampled from the zoom region for each dimension (width, height) separately.
The zoom may not feel intuitive. Note that zoom values less than 1.0 will zoom the image
in, e.g. [0.5,0.5] makes the object in the image 50% larger or closer, and values larger than 1.0
will zoom the image out by 50%, e.g. [1.5, 1.5] makes the object in the image smaller or further
away. A zoom of [1.0,1.0] has no effect. The example below demonstrates zooming the image in,
e.g. making the object in the photograph larger.
'''
# %%
# example of zoom image augmentation
from numpy import expand_dims
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import ImageDataGenerator
# NOTE: the next line is an IPython/Jupyter magic; this file is a notebook
# export and will not run as a plain Python script.
%matplotlib notebook
from matplotlib import pyplot
# load the image
img = load_img('bird.jpg')
# convert to numpy array
data = img_to_array(img)
# expand dimension to one sample (Keras generators expect a batch axis)
samples = expand_dims(data, 0)
# create image data augmentation generator
# zoom_range=[0.5, 1.0]: factors < 1.0 zoom IN, so this samples zoom-in only
datagen = ImageDataGenerator(zoom_range=[0.5,1.0])
# prepare iterator
it = datagen.flow(samples, batch_size=1)
# generate samples and plot a 3x3 grid of augmented variants
for i in range(9):
    # define subplot
    pyplot.subplot(330 + 1 + i)
    # generate batch of images
    batch = it.next()
    # convert to unsigned integers for viewing
    image = batch[0].astype('uint8')
    # plot raw pixel data
    pyplot.imshow(image)
# show the figure
pyplot.show()
# %%
'''
Running the example generates examples of the zoomed image, showing a random zoom
in that is different on both the width and height dimensions that also randomly changes the
aspect ratio of the object in the image.
''' | [
"ather.tahir@bentley.com"
] | ather.tahir@bentley.com |
1984b2372353e09b81adc9d1fa2e823147e69195 | 42c3a5c6ace14aea23a9c130a7fd0f83b08ee727 | /data_management/megadb/sequences_schema_check.py | 1f5cae1a1e44367b674566136f2e748719871512 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | CoreyJaskolski/CameraTraps | cc9bf8d8f235cd3fdfae68a27675bf2a3fe66480 | 290dcca3f93cc2e6160d3c6e8ad08ab2ff23ba9d | refs/heads/master | 2020-07-03T16:06:14.447998 | 2019-11-01T20:46:27 | 2019-11-01T20:46:27 | 201,962,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,708 | py | import argparse
import json
import sys
import os
import jsonschema
"""
This script takes one argument, path to the JSON file containing the entries to be ingested
into MegaDB in a JSON array. It then verifies it against the schema in schema.json in this directory.
"""
def check_frame_num(seq):
    """Assert that a multi-image sequence has a unique frame_num per image.

    The JSON schema already guarantees frame_num >= 1 where present; this
    adds the cross-image uniqueness check the schema cannot express.
    """
    if 'images' not in seq:
        return
    images = seq['images']
    if len(images) <= 1:
        return
    frame_nums = []
    for image in images:
        assert 'frame_num' in image, 'sequence {} has more than one image but not all images have frame_num'.format(seq['seq_id'])
        frame_nums.append(image['frame_num'])
    assert len(set(frame_nums)) == len(images), 'sequence {} has frame_num that are not unique'.format(seq['seq_id'])
def check_class_on_seq_or_image(seq):
    """
    Checks if the 'class' property is on either the sequence or on each image.

    Sequences or images whose 'class' label is unavailable should be denoted
    by '__label_unavailable'.

    Args:
        seq: a sequence object

    Raises:
        AssertionError
    """
    labeled_at_seq_level = 'class' in seq
    labeled_on_every_image = (
        'images' in seq and all('class' in image for image in seq['images'])
    )
    assert labeled_at_seq_level or labeled_on_every_image, 'sequence {} does not have the class property on either sequence or image level'.format(seq['seq_id'])
def main():
    """Validate a batch of sequence items for ingestion into MegaDB.

    Loads the JSON array named on the command line, validates it against
    sequences_schema.json (located next to this script), then applies the
    extra checks the schema cannot express: batch-wide seq_id uniqueness,
    per-sequence class placement, and frame_num uniqueness.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('items_json', action='store', type=str,
                        help='.json file to ingest into MegaDB')
    # With no arguments, print usage instead of letting argparse error out.
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()
    args = parser.parse_args()
    # The schema file is resolved relative to the script's own directory.
    cur_dir = os.path.dirname(sys.argv[0])
    with open(os.path.join(cur_dir, 'sequences_schema.json')) as f:
        schema = json.load(f)
    with open(args.items_json) as f:
        instance = json.load(f)
    # Raises jsonschema.ValidationError on any structural violation.
    jsonschema.validate(instance, schema)
    print('Verified that the sequence items conform to the schema.')
    # checks across all sequence items
    seq_ids = set([seq['seq_id'] for seq in instance])
    assert len(seq_ids) == len(instance), 'Not all seq_id in this batch are unique.'
    # per sequence item checks
    for seq in instance:
        check_class_on_seq_or_image(seq)
        check_frame_num(seq)
    print('Verified that the sequence items meet the additional requirements.')
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | CoreyJaskolski.noreply@github.com |
3c44b9336e437dbabe651550049f02b4222540bb | b1e4c744e37ae2cda707c2319ccc5eca4dfc94d0 | /tensorflow/ไฝฟ็จC++ๅฏผๅ
ฅๆจกๅ็ปไน /model_test.py | 575d70c57d9b6e16c5df09ab07513070135c7922 | [] | no_license | 1124418652/deep_learning | 671da50c1651d1d791b32b736de80c88eda0c8f5 | df16bd59cdbe9d3606d7f6863b46a37c2803251f | refs/heads/master | 2020-04-07T17:19:10.538036 | 2019-05-23T14:06:18 | 2019-05-23T14:06:18 | 150,749,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | # -*- coding: utf-8 -*-
import os
import cv2
import numpy as np
import tensorflow as tf
def recognize(imgname):
    """Classify an image with the frozen TF graph in model_trained.pb.

    Loads the image, resizes it to the 64x64 input the network expects,
    scales pixels to [0, 1], runs inference, and prints the raw output.

    Raises:
        ValueError: if the file does not exist or cannot be decoded.

    Note: the original CJK comments were mojibake that had spilled onto
    their own lines, making the file invalid Python; they are translated
    to English here. The code itself is unchanged.
    """
    if not os.path.exists(imgname):
        raise ValueError("Don't have this file.")
    img = cv2.imread(imgname)
    if not isinstance(img, np.ndarray):
        raise ValueError("Can't open this image.")
    img = cv2.resize(img, (64, 64)) / 255

    with tf.Graph().as_default():
        output_graph_def = tf.GraphDef()
        pb_file_path = 'model_trained.pb'

        with open(pb_file_path, 'rb') as f:
            output_graph_def.ParseFromString(f.read())
            # Import the computation graph from output_graph_def into the
            # current default graph.
            _ = tf.import_graph_def(output_graph_def, name = '')

        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            input_x = sess.graph.get_tensor_by_name('input:0')        # fetch the input tensor
            prediction = sess.graph.get_tensor_by_name('output:0')
            # Feed the image into the network to run inference.
            pre = sess.run(prediction, feed_dict = {input_x: [img]})
            print(pre)
recognize('model_in_C++/model_in_C++/test.jpg')
| [
"1124418652@qq.com"
] | 1124418652@qq.com |
24ab81ff2c511dd5587eebf58083e235fd9bdec7 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/fc/apinninglbl.py | f7b4c92b96d82adacf7230e8ed621d61e9384b9f | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,523 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class APinningLbl(Mo):
    """Abstract base MO for Fibre Channel uplink pinning labels.

    Machine-generated cobra model class: it only declares metadata
    (ClassMeta/PropMeta) consumed by the cobra framework; concrete
    subclasses are fc.PinningLbl and fc.PinningLblDef.
    """

    # Class-level metadata describing this managed object type.
    meta = ClassMeta("cobra.model.fc.APinningLbl")

    meta.isAbstract = True
    meta.moClassName = "fcAPinningLbl"
    # NOTE(review): duplicated assignment left as-is (generated code).
    meta.moClassName = "fcAPinningLbl"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Abstract Fibre Channel Uplink Pinning Label"
    meta.writeAccessMask = 0x601
    meta.readAccessMask = 0x601
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False
    # Containment / inheritance relationships.
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.concreteSubClasses.add("cobra.model.fc.PinningLbl")
    meta.concreteSubClasses.add("cobra.model.fc.PinningLblDef")
    meta.rnPrefixes = [
    ]
    # Property metadata: one PropMeta per exposed attribute.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)
    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # Abstract class: no naming properties of its own.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
655eeceaa52781e8ac09c4a52121849fed89947f | eb5049f543d2a50b8bce5f13d25d9688fccc52d7 | /make_lerp_positions.py | e5bdf9dda742fc3547b5bc39e038e2fbc324eb0a | [] | no_license | migroble/gan | 196a146806a9c5a66b0780cbaa813dccd3b7cc0d | b8480e886b3256a51da4727b04c52926cf42dd96 | refs/heads/master | 2021-09-13T12:00:29.595347 | 2018-04-29T17:20:36 | 2018-04-29T17:20:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | import numpy as np
# Generate STEPS points linearly interpolated between two random latent
# vectors (and two random type vectors) and dump them as JSON files.
import json

import numpy as np

STEPS = 64       # number of interpolation samples
INPUT_DIM = 150  # dimensionality of the latent position vectors
TYPE_DIM = 5     # dimensionality of the type vectors


def interpolate(a, b, steps):
    """Return `steps` points stepping linearly from `a` toward `b`.

    The first point is `a` itself; the last point is one step short of `b`,
    matching the original loop that sampled i = 0 .. steps-1.
    """
    delta = b - a
    dist = np.linalg.norm(delta)
    unit = delta / dist
    step = dist / steps
    return np.array([a + unit * step * i for i in range(steps)])


def write_json(path, key, points):
    """Write `points` to `path` as a JSON object {key: nested list}."""
    # json.dump replaces the original hand-built string concatenation, and
    # the with-block guarantees the file handle is closed.
    with open(path, "w") as f:
        json.dump({key: points.tolist()}, f)


A_p = np.random.uniform(-1, 1, size=INPUT_DIM).astype(np.float32)
B_p = np.random.uniform(-1, 1, size=INPUT_DIM).astype(np.float32)
A_t = np.random.uniform(0, 1, size=TYPE_DIM).astype(np.float32)
B_t = np.random.uniform(0, 1, size=TYPE_DIM).astype(np.float32)

write_json("types_lerp.txt", "types", interpolate(A_t, B_t, STEPS))
write_json("pos_lerp.txt", "positions", interpolate(A_p, B_p, STEPS))
"miguel.robledo.casal@alumnos.upm.es"
] | miguel.robledo.casal@alumnos.upm.es |
7052f55ba468cdaa47d8f6f5976593ac13e75505 | 47a973b2c17797221d282d8782d5743163b53d6a | /omni/libs/cmdb/host.py | 2d4f2dada4118d3a972d923a0676068fb424e954 | [
"Apache-2.0"
] | permissive | cathywife/omni | f944785e1ed3b712a199f697c979dfe53a972c0f | 9a681b38ec552d5d2f5fc129bb7f2551c00424d8 | refs/heads/master | 2020-12-03T05:13:13.990415 | 2017-06-02T11:47:48 | 2017-06-02T11:47:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | # -*- coding:utf8 -*-
"""
Created on 16/8/17 ไธๅ1:05
@author: fmc
"""
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function
from . import cmdb_client
from cmdblib.client import RequestFailException
import logging
log = logging.getLogger(__name__)
server_logic_schema = 'server_logic'
server_model_schema = 'dict_conf_model'
server_life_cycle_schema = 'dict_srv_useStatus'
def get_host(name):
    """Look up a logical server entity by hostname.

    Returns:
        The first matching entity, or None when the CMDB answers 404
        (unknown host) or the search yields no results.

    Raises:
        RequestFailException: for any non-404 request failure.
    """
    entity_obj_list = None
    try:
        entity_obj_list = cmdb_client.search_entities(schema=server_logic_schema, hostname=name)
    except RequestFailException as e:
        if not e.message.split()[2] == '404':
            raise RequestFailException(e.message)
    # Bug fix: the original indexed entity_obj_list[0] unconditionally, which
    # raised TypeError after a 404 set it to None (and IndexError on an
    # empty result list).
    return entity_obj_list[0] if entity_obj_list else None
def mget_all_host():
    """Fetch every logical server entity from the CMDB.

    A 404 response is treated as "no entities" and yields an empty list;
    any other request failure is re-raised.
    """
    try:
        return cmdb_client.search_entities(schema=server_logic_schema, page=1, size=100000000)
    except RequestFailException as e:
        if e.message.split()[2] != '404':
            raise RequestFailException(e.message)
        return []
def mget_all_host_model():
    """Fetch every server model entry (dict_conf_model) from the CMDB.

    A 404 response is treated as "no entries" and yields an empty list;
    any other request failure is re-raised.
    """
    try:
        return cmdb_client.search_entities(schema=server_model_schema, page=1, size=100000000)
    except RequestFailException as e:
        if e.message.split()[2] != '404':
            raise RequestFailException(e.message)
        return []
def mget_all_host_life_cycle_status():
    """Fetch every host life-cycle status entry (dict_srv_useStatus).

    A 404 response is treated as "no entries" and yields an empty list;
    any other request failure is re-raised.
    """
    try:
        return cmdb_client.search_entities(schema=server_life_cycle_schema, page=1, size=100000000)
    except RequestFailException as e:
        if e.message.split()[2] != '404':
            raise RequestFailException(e.message)
        return []
| [
"xiaofei4915@163.com"
] | xiaofei4915@163.com |
31212698b833a9003fd16b7a5fc99096aa8e5d13 | b39b0625795b0640a6a68151f2012ce139f423b8 | /iaas/test/test_flavor_profile_api.py | 095a47c5a2b57f517a3c35c6945e5b54508299a9 | [] | no_license | darrylcauldwell/casCodegen | 8e82b1f08e8260482996aec3d8be10934a65dd03 | 1f1ff9ab8a33102bcfcb8be276d51992d96bcb61 | refs/heads/master | 2020-07-27T14:42:28.550855 | 2019-09-17T18:30:28 | 2019-09-17T18:30:28 | 209,127,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | # coding: utf-8
"""
VMware Cloud Assembly IaaS API
A multi-cloud IaaS API for Cloud Automation Services # noqa: E501
OpenAPI spec version: 2019-01-15
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from api.flavor_profile_api import FlavorProfileApi # noqa: E501
from swagger_client.rest import ApiException
class TestFlavorProfileApi(unittest.TestCase):
    """FlavorProfileApi unit test stubs"""

    def setUp(self):
        # Bug fix: the module imports the class directly
        # (`from api.flavor_profile_api import FlavorProfileApi`), so the
        # generated reference `api.flavor_profile_api.FlavorProfileApi()`
        # raised NameError ('api' is not defined) as soon as any test ran.
        self.api = FlavorProfileApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_create_flavor_profile(self):
        """Test case for create_flavor_profile

        Create flavor profile  # noqa: E501
        """
        pass

    def test_delete_flavor_profile(self):
        """Test case for delete_flavor_profile

        Delete flavor profile  # noqa: E501
        """
        pass

    def test_get_flavor_profile(self):
        """Test case for get_flavor_profile

        Get flavor profile  # noqa: E501
        """
        pass

    def test_get_flavor_profiles(self):
        """Test case for get_flavor_profiles

        Get flavor profile  # noqa: E501
        """
        pass

    def test_update_flavor_profile(self):
        """Test case for update_flavor_profile

        Update flavor profile  # noqa: E501
        """
        pass
if __name__ == '__main__':
unittest.main()
| [
"dcauldwell@dcauldwell-a01.vmware.com"
] | dcauldwell@dcauldwell-a01.vmware.com |
76192db2b7c2eca6eb3abce5f4eae8f3e2ff6e73 | fbae6d84b4a8a38dca677c3874ef54b29083837b | /timewalk/packages/pygments/lexers/capnproto.py | 203523a1adf126b3394768e418ba7a32921a26f7 | [
"MIT",
"BSD-2-Clause"
] | permissive | desmondlzy/timewalk-core | 9de1457694956b8009af65a6e63d9b25f66736eb | e7ec9e8cd23e15ff49b2344d1c9142230b9c7b43 | refs/heads/master | 2021-05-24T13:06:53.201462 | 2020-04-17T04:40:53 | 2020-04-17T04:40:53 | 253,576,060 | 12 | 0 | MIT | 2020-04-17T04:40:54 | 2020-04-06T17:59:02 | Python | UTF-8 | Python | false | false | 2,188 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.capnproto
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Cap'n Proto schema language.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default
from pygments.token import Text, Comment, Keyword, Name, Literal
__all__ = ['CapnProtoLexer']
class CapnProtoLexer(RegexLexer):
    """
    For `Cap'n Proto <https://capnproto.org>`_ source.

    .. versionadded:: 2.2
    """
    name = 'Cap\'n Proto'
    filenames = ['*.capnp']
    aliases = ['capnp']

    flags = re.MULTILINE | re.UNICODE

    # State machine: 'root' dispatches on the sigils =, :, $ into dedicated
    # states; each paired *exp/*type/*annexp state handles nested brackets
    # by pushing itself and popping on the closing bracket.
    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            (r'@[0-9a-zA-Z]*', Name.Decorator),
            (r'=', Literal, 'expression'),
            (r':', Name.Class, 'type'),
            (r'\$', Name.Attribute, 'annotation'),
            (r'(struct|enum|interface|union|import|using|const|annotation|'
             r'extends|in|of|on|as|with|from|fixed)\b',
             Keyword),
            (r'[\w.]+', Name),
            (r'[^#@=:$\w]+', Text),
        ],
        # Type annotation after ':'; brackets recurse via 'parentype'.
        'type': [
            (r'[^][=;,(){}$]+', Name.Class),
            (r'[[(]', Name.Class, 'parentype'),
            default('#pop'),
        ],
        'parentype': [
            (r'[^][;()]+', Name.Class),
            (r'[[(]', Name.Class, '#push'),
            (r'[])]', Name.Class, '#pop'),
            default('#pop'),
        ],
        # Value expression after '='; brackets recurse via 'parenexp'.
        'expression': [
            (r'[^][;,(){}$]+', Literal),
            (r'[[(]', Literal, 'parenexp'),
            default('#pop'),
        ],
        'parenexp': [
            (r'[^][;()]+', Literal),
            (r'[[(]', Literal, '#push'),
            (r'[])]', Literal, '#pop'),
            default('#pop'),
        ],
        # Annotation after '$'; brackets recurse via 'annexp'.
        'annotation': [
            (r'[^][;,(){}=:]+', Name.Attribute),
            (r'[[(]', Name.Attribute, 'annexp'),
            default('#pop'),
        ],
        'annexp': [
            (r'[^][;()]+', Name.Attribute),
            (r'[[(]', Name.Attribute, '#push'),
            (r'[])]', Name.Attribute, '#pop'),
            default('#pop'),
        ],
    }
| [
"lzy7155@gmail.com"
] | lzy7155@gmail.com |
b01d8bfad69f8575908aeed12be474261228dc12 | 1f7e89bf8fa33caf834d62a2f88bbd36f7ca2c17 | /่่ฉฆๆชๆก/week2/ubike_practice-2.py | bd54cba37ebf1f96be50c3ca5ee8d48499444fe0 | [] | no_license | yachiwu/python_lesson3 | 31626e1b26fc50e01a3b6563be56918ef3d23cb0 | f0d63593a4680d2bbffe3b79d50c155dad4eba27 | refs/heads/master | 2020-05-18T12:14:02.900439 | 2019-05-01T10:47:06 | 2019-05-01T10:47:06 | 184,401,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | import csv,os,datetime
import matplotlib.pyplot as py
# Aggregate per-station YouBike availability during the evening peak
# (17:00-18:59) and scatter-plot each station colour-coded by its average
# availability ratio.  (The original CJK comments were mojibake that had
# spilled onto their own lines, breaking the file; translated to English.)
f = open('ubike.csv','r')
station = {}   # station id -> summed available-bike counts over samples
count = {}     # station id -> number of samples accumulated
lat = {}       # station id -> latitude
lon = {}       # station id -> longitude
capacity = {}  # station id -> summed total slot ("lot") counts over samples
for row in csv.DictReader(f):
    time = datetime.datetime.strptime(row['time'],"%Y/%m/%d %H:%M")
    hour = time.hour
    if hour==17 or hour==18:  # evening rush hour only
        id = int(row['id'])
        if id not in station:
            # First sample for this station: also record its coordinates.
            lat[id] = float(row["latitude"])
            lon[id] = float(row["longitude"])
            station[id]= int(row["bike"])
            capacity[id]= int(row["lot"])
            count[id]=1
        else:
            station[id]+=int(row["bike"])
            capacity[id]+= int(row["lot"])
            count[id]+=1
f.close()
id_seq = sorted(station.keys())
# Bucket the stations into four availability bands for plotting.
redlat, redlon = [], []
yellowlat, yellowlon = [], []
greenlat, greenlon = [], []
bluelat, bluelon = [], []
for k in id_seq:
    capacity[k] = float(capacity[k]) / count[k]  # average slot count of the station
    # Average availability ratio: mean bikes / mean slots.
    station[k] = (float(station[k]) / count[k]) / capacity[k]
    if station[k]<0.2:
        redlat.append(lat[k])
        redlon.append(lon[k])
    elif 0.2<=station[k]<0.3:
        yellowlat.append(lat[k])
        yellowlon.append(lon[k])
    elif 0.3<=station[k]<0.4:
        greenlat.append(lat[k])
        greenlon.append(lon[k])
    else:
        bluelat.append(lat[k])
        bluelon.append(lon[k])
py.xlabel('latitude')
py.ylabel('longitude')
py.title('bike distribution')
py.plot(redlat,redlon,'ro',label='<20%')
py.plot(yellowlat,yellowlon,'yo',label='20~30%')
py.plot(greenlat,greenlon,'go',label='30~40%')
py.plot(bluelat,bluelon,'bo',label='>40%')
py.axis([25.01,25.05,121.52,121.56])
py.legend(loc = 'lower right')
py.show()
"n0975116268@gmail.com"
] | n0975116268@gmail.com |
5ddbce86bfe417238560aa950da65547d45e964c | 99f384a7e8981f39b84dfb6b153827d7676074f1 | /serve.py | eb67e9b6f5eecf856e0630342017b9d2dbb0862d | [] | no_license | vivym/mmkg-demo | f5fc2daa15d890d2bd3f52c19922150d3fe89aa8 | 8bb5e6888ca4b3339fdd63bb848529d53b9844be | refs/heads/main | 2023-08-16T05:21:49.348743 | 2021-10-15T04:29:05 | 2021-10-15T04:29:05 | 416,675,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | import requests
from flask import Flask, request
from flask_cors import CORS
app = Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
@app.route("/", methods=["GET"])
def index_page():
return app.send_static_file("index.html")
@app.route("/structuring", methods=["POST"])
def structuring():
rsp = requests.post("http://101.200.120.155:8705/structuring", data={"text": request.form["text"]})
return rsp.text
if __name__ == "__main__":
app.run(host="localhost", port=28810)
| [
"ymviv@qq.com"
] | ymviv@qq.com |
afeaf8646f38274d9f263158e4701dcf223226c8 | 1f24c959106eede5c810c93e66cd1d7c74cc145f | /algorithms/arithmetic/lcm.py | 9dfa71126b583570ed2b8ae2696c057f80e842b0 | [
"MIT"
] | permissive | mchao409/python-algorithms | deae48aa12d5022e28fbb61d887d9fe0c1d8097b | acadeaf8e584737ec308442d03812179a2315ac5 | refs/heads/master | 2021-04-27T11:40:25.402064 | 2018-02-23T06:04:40 | 2018-02-23T06:04:40 | 122,567,163 | 0 | 0 | MIT | 2018-02-23T03:11:58 | 2018-02-23T03:11:57 | null | UTF-8 | Python | false | false | 1,139 | py | """
The least common multiple, lowest common multiple, or smallest common multiple of two integers a and b,
usually denoted by LCM(a, b), is the smallest positive integer that is divisible by both a and b. [Wikipedia]
"""
from functools import reduce
from algorithms.arithmetic.gcd import gcd
def _lcm(integer_a: int, integer_b: int) -> int:
"""
Private function for calculating LCM [least common multiple] of 2 integers
Args:
integer_a: first integer
integer_b: second integer
Returns:
Least common multiple of 2 positive integers.
"""
# Move here to have extra check that we have in GCD
_gcd = gcd(integer_a, integer_b)
return int(abs(integer_a * integer_b) / _gcd)
def lcm(*integer_nums: int) -> int:
    """Calculate the LCM [least common multiple] of N integers.

    Args:
        *integer_nums: integer arguments

    Returns:
        Least common multiple of N positive integers.

    Examples:
        >>> lcm(16, 20)
        80
        >>> lcm(8, 9, 21)
        504
    """
    # reduce folds _lcm pairwise over the arguments.  _lcm already returns
    # an int, so the previous `lambda i, j: _lcm(i, j)` wrapper and the
    # outer int() cast were redundant.  (Docstring also fixed: this is the
    # public entry point, not a "private function".)
    return reduce(_lcm, integer_nums)
| [
"arseny.antonov@gmail.com"
] | arseny.antonov@gmail.com |
0b0317bb35cdd5fb0d298371e67cf8604ce45cb0 | 22da469a6083d3c1d9979e26186e1c14a39c2b6a | /gan_training/metrics/inception.py | a45b4e519fa992992b4d33b6978bb0495d0704e6 | [
"MIT"
] | permissive | ErikValle/controllable_image_synthesis | 5b5971c267bf3505a13b1fcc4bb3ba9a87bc3a66 | 296fd5115ec385f9fcb94529afff2c138abae3c8 | refs/heads/master | 2022-11-22T09:58:39.698623 | 2020-07-23T15:48:23 | 2020-07-23T15:48:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,649 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
try:
from torchvision.models.utils import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Inception weights ported to Pytorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,   # First max pooling features
        192: 1,  # Second max pooling features
        768: 2,  # Pre-aux classifier features
        2048: 3  # Final average pooling features
    }

    def __init__(self,
                 output_blocks=[DEFAULT_BLOCK_INDEX],
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : list of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1)
        requires_grad : bool
            If true, parameters of the model require gradients. Possibly useful
            for finetuning the network
        use_fid_inception : bool
            If true, uses the pretrained Inception model used in Tensorflow's
            FID implementation. If false, uses the pretrained Inception model
            available in torchvision. The FID Inception model has different
            weights and a slightly different structure from torchvision's
            Inception model. If you want to compute FID scores, you are
            strongly advised to set this parameter to true to get comparable
            results.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        # Sorted so that features are appended in ascending block order
        # during the sequential forward pass.
        self.output_blocks = sorted(output_blocks)

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3,
            inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        block1 = [
            inception.Conv2d_3b_1x1,
            inception.Conv2d_4a_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        block2 = [
            inception.Mixed_5b,
            inception.Mixed_5c,
            inception.Mixed_5d,
            inception.Mixed_6a,
            inception.Mixed_6b,
            inception.Mixed_6c,
            inception.Mixed_6d,
            inception.Mixed_6e,
        ]
        self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        block3 = [
            inception.Mixed_7a,
            inception.Mixed_7b,
            inception.Mixed_7c,
            nn.AdaptiveAvgPool2d(output_size=(1, 1))
        ]
        self.blocks.append(nn.Sequential(*block3))

        # Fully connected
        self.fc = inception.fc

        # Freeze (or unfreeze) all weights in one pass.
        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, x):
        """Get Inception feature maps

        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)

        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []

        if self.resize_input:
            x = F.interpolate(
                x,
                size=(299, 299),
                mode='bilinear',
                align_corners=False
            )

        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)

        net = x
        # Run blocks sequentially, collecting the requested intermediates.
        for idx, block in enumerate(self.blocks):
            net = block(net)
            if idx in self.output_blocks:
                outp.append(net)

        # N x 2048 x 1 x 1
        net = F.dropout(net, training=self.training)
        # N x 2048 x 1 x 1
        net = torch.flatten(net, 1)
        # N x 2048
        logits = self.fc(net)
        # Keep at most the first 1000 logits (the FID inception fc has 1008
        # outputs; the slice is a no-op for the 1000-way torchvision fc).
        return logits[:, :1000], outp
def fid_inception_v3():
    """Build pretrained Inception model for FID computation.

    The Inception model for FID computation uses a different set of weights
    and has a slightly different structure than torchvision's Inception.
    This method first constructs torchvision's Inception and then patches
    the necessary parts that differ in the FID Inception model.
    """
    base = models.inception_v3(num_classes=1008,
                               aux_logits=False,
                               pretrained=False)
    # Swap in the FID-specific variants of the mixed blocks.
    replacements = {
        'Mixed_5b': FIDInceptionA(192, pool_features=32),
        'Mixed_5c': FIDInceptionA(256, pool_features=64),
        'Mixed_5d': FIDInceptionA(288, pool_features=64),
        'Mixed_6b': FIDInceptionC(768, channels_7x7=128),
        'Mixed_6c': FIDInceptionC(768, channels_7x7=160),
        'Mixed_6d': FIDInceptionC(768, channels_7x7=160),
        'Mixed_6e': FIDInceptionC(768, channels_7x7=192),
        'Mixed_7b': FIDInceptionE_1(1280),
        'Mixed_7c': FIDInceptionE_2(2048),
    }
    for attr_name, module in replacements.items():
        setattr(base, attr_name, module)
    # Load the FID reference weights on top of the patched architecture.
    base.load_state_dict(load_state_dict_from_url(FID_WEIGHTS_URL, progress=True))
    return base
class FIDInceptionA(models.inception.InceptionA):
    """InceptionA block patched for FID computation.

    Identical to torchvision's InceptionA except that the pooling branch
    excludes the zero padding from its average, matching TensorFlow.
    """

    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(
            self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation, hence count_include_pad=False.
        out_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                         count_include_pad=False))
        return torch.cat([out_1x1, out_5x5, out_3x3dbl, out_pool], 1)
class FIDInceptionC(models.inception.InceptionC):
    """InceptionC block patched for FID computation.

    Identical to torchvision's InceptionC except that the pooling branch
    excludes the zero padding from its average, matching TensorFlow.
    """

    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_7x7 = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))
        out_7x7dbl = x
        for layer in (self.branch7x7dbl_1, self.branch7x7dbl_2,
                      self.branch7x7dbl_3, self.branch7x7dbl_4,
                      self.branch7x7dbl_5):
            out_7x7dbl = layer(out_7x7dbl)
        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation, hence count_include_pad=False.
        out_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                         count_include_pad=False))
        return torch.cat([out_1x1, out_7x7, out_7x7dbl, out_pool], 1)
class FIDInceptionE_1(models.inception.InceptionE):
    """First InceptionE block patched for FID computation.

    Identical to torchvision's InceptionE except that the pooling branch
    excludes the zero padding from its average, matching TensorFlow.
    """

    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)

        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = torch.cat(
            [self.branch3x3_2a(stem_3x3), self.branch3x3_2b(stem_3x3)], 1)

        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = torch.cat(
            [self.branch3x3dbl_3a(stem_dbl), self.branch3x3dbl_3b(stem_dbl)], 1)

        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation, hence count_include_pad=False.
        out_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                         count_include_pad=False))
        return torch.cat([out_1x1, out_3x3, out_3x3dbl, out_pool], 1)
class FIDInceptionE_2(models.inception.InceptionE):
    """Second InceptionE block patched for FID computation.

    Uses max pooling in the pooling branch. This is likely an error in the
    original FID Inception implementation, as other Inception models use
    average pooling here (which matches the description in the paper), but
    it is reproduced faithfully for weight compatibility.
    """

    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)

        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = torch.cat(
            [self.branch3x3_2a(stem_3x3), self.branch3x3_2b(stem_3x3)], 1)

        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = torch.cat(
            [self.branch3x3dbl_3a(stem_dbl), self.branch3x3dbl_3b(stem_dbl)], 1)

        # Patch: max pooling instead of average pooling (see class docstring).
        out_pool = self.branch_pool(
            F.max_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([out_1x1, out_3x3, out_3x3dbl, out_pool], 1)
| [
"yiyi.liao@tue.mpg.de"
] | yiyi.liao@tue.mpg.de |
4e8fe94dd07a1e37a8e67ef1573b73d710893ce4 | 47b1f12ce4c4de419d2e080eb881fe2f30808984 | /pages/locators.py | 7c606edf788ad3aec5d4d5e0d83bde3d5f9f6ad9 | [] | no_license | twymyn/stepik-selenium-project | e8fca3c5cab8c7f0305b9b975a6e0ad1a274126c | 4f8595a705b3aea5f89d1532b5eb7c0040621ba7 | refs/heads/master | 2021-08-08T14:39:48.544599 | 2021-04-20T19:35:19 | 2021-04-20T19:35:19 | 252,729,207 | 0 | 0 | null | 2021-06-02T01:21:30 | 2020-04-03T12:40:50 | Python | UTF-8 | Python | false | false | 1,500 | py | from selenium.webdriver.common.by import By
class BasePageLocators():
    """Locators shared by every page (site-header widgets)."""
    LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")
    VIEW_BASKET_BUTTON = (By.CSS_SELECTOR, ".basket-mini .btn-group > a.btn-default")
    USER_ICON = (By.CSS_SELECTOR, ".icon-user")
class BasketPageLocators():
    """Locators for the shopping-basket page."""
    BASKET_ITEMS = (By.CSS_SELECTOR, ".basket_items")
    EMPTY_BASKET_MESSAGE = (By.CSS_SELECTOR, ".content p")
class MainPageLocators():
    """No main-page-specific locators are needed yet."""
    pass
class LoginPageLocators():
    """Locators for the combined login / registration page."""
    # Existing-user login form.
    LOGIN_FORM = (By.CSS_SELECTOR, "#login_form")
    LOGIN_USERNAME = (By.CSS_SELECTOR, "#id_login-username")
    LOGIN_PASSWORD = (By.CSS_SELECTOR, "#id_login-password")
    LOGIN_BUTTON = (By.CSS_SELECTOR, ".btn-lg[name='login_submit']")
    # New-user registration form.
    REGISTER_FORM = (By.CSS_SELECTOR, "#register_form")
    REGISTER_EMAIL = (By.CSS_SELECTOR, "#id_registration-email")
    REGISTER_PASSWORD = (By.CSS_SELECTOR, "#id_registration-password1")
    REGISTER_PASSWORD_CONFIRM = (By.CSS_SELECTOR, "#id_registration-password2")
    REGISTER_BUTTON = (By.CSS_SELECTOR, ".btn-lg[name='registration_submit']")
class ProductPageLocators():
    """Locators for a single product's page and its success alerts."""
    ADD_TO_BASKET_BUTTON = (By.CSS_SELECTOR, "button.btn-add-to-basket")
    PRODUCT_NAME = (By.CSS_SELECTOR, ".product_main > h1")
    PRODUCT_PRICE = (By.CSS_SELECTOR, ".price_color")
    # Alert boxes shown after adding a product to the basket.
    SUCCESS_MESSAGE = (By.CSS_SELECTOR, ".alert-success")
    SUCCESS_PRODUCT_IN_BASKET = (By.CSS_SELECTOR, ".alert-success .alertinner strong")
    SUCCESS_BASKET_TOTAL = (By.CSS_SELECTOR, ".alert-info .alertinner strong")
| [
"lc.tinatran@gmail.com"
] | lc.tinatran@gmail.com |
56a9b5a8d7698086474480fadc0fb87b5ffcfba8 | 9e6c133ff440426d16c21699e2176fd185d12d6d | /data/Xe133/result_xe133_tripleRun4.py | 7d6d9f4b193d3d00c8331205085baa3200bd824e | [] | no_license | MarielleAkser/Master_thesis | 1e8c0596d2b7ba7f9667e9ec94eb208bf1f88ffd | ee415c2af23b60094d573d53641624656a547953 | refs/heads/main | 2023-06-01T09:39:59.985406 | 2021-06-17T14:46:37 | 2021-07-01T08:56:30 | 339,763,134 | 1 | 3 | null | 2021-03-10T08:13:59 | 2021-02-17T15:13:07 | C++ | UTF-8 | Python | false | false | 10,333 | py | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from numpy.lib.function_base import average
import pandas as pd
import seaborn as sns
# Opens the corresponding files, Run5 has 10 000 events
eDep_file_t0 = np.genfromtxt('xe133_tripleRun4_t0.csv', delimiter=',')
eDep_file_t1 = np.genfromtxt('xe133_tripleRun4_t1.csv', delimiter=',')
# From files to array:
eDep = []
for rad in eDep_file_t0:
a_row = []
for tal in rad:
a_row.append(tal)
eDep.append(a_row)
for rad in eDep_file_t1:
a_row = []
for tal in rad:
a_row.append(tal)
eDep.append(a_row)
# To calculate when the number of events converges
nr_events = 0
nr_coinc = 0
nr_array = []
# Create a triple coincidence arrays
coincidence = []
# Creating a coincidence array for beta- and CE-energies for when gamma = 31.6 keV
coin_CEbeta = []
# Creating a beta_3 array for the beta energy when all 3 particle is deposit energy > 0
beta_3 = []
for rad in eDep:
nr_events += 1
"""gamma:[1] = 31.6 +- 5 keV --> 0.0266 & 0.0366 MeV,
conversion electron:[7] = 45 +- 5 keV --> 0.04 & 0.05,
beta:[11] > 0"""
a_row = []
t = []
if ( (rad[1] > 0) and (rad[7] > 0) and (rad[11] > 0) ): # All particle gets detected
# if ( (0.0266 <= rad[1] <= 0.0366) and (0.04 <= rad[7] <= 0.5) and (rad[11] > 0) ):
a_row.append(rad[1])
a_row.append(rad[7])
a_row.append(rad[11])
coincidence.append(a_row)
nr_coinc += 1
a = []
if (rad[1] == 0.0316): # getting the values when beta en CE are in coincidence
a.append(rad[7])
a.append(rad[11])
coin_CEbeta.append(a)
if ( (rad[1] > 0) and (rad[7] > 0) and (rad[11] > 0) ): # All particle gets detected
beta_3.append(rad[11])
t.append(nr_events)
t.append(nr_coinc)
nr_array.append(t)
c = np.array(coincidence)
# Getting all the beta, gamma and CE energies in the given intervall
eDep_beta = []
eDep_gamma = []
eDep_CE = []
eDep_CEbeta = []
for rad in eDep:
    if (0.0266 <= rad[1] <= 0.0366):
        eDep_gamma.append(rad[1])
    if (0.04 <= rad[7] <= 0.5):
        eDep_CE.append(rad[7])
    if (rad[11] > 0):
        eDep_beta.append(rad[11])
    a = []
    if (rad[1] == 0.0316): # getting all the values for beta and CE when gamma = 31.6 keV
        a.append(rad[7])
        a.append(rad[11])
        eDep_CEbeta.append(a)
# Get the correct values for the beta distribution:
# (reference spectrum file: space-separated "energy probability" pairs)
with open('XE133_beta_spectrum.txt') as f:
    values = []
    for line in f:
        rad = line.strip("\n").split(" ")
        en_rad =[]
        for tal in rad:
            en_rad.append( tal )
        values.append(en_rad)
values.pop() # Remove the last empty line
# Create a array with float insted of strings
h = []
for row in values:
    r = []
    for nr in row:
        r.append( float(nr) )
    h.append(r)
x = [] # energy
y = [] # probability, not normalized
for i in range(len(h)):
    x.append(h[i][0] / 1000 ) # To get the energy in MeV
    y.append(h[i][1])
# Need to normalize y:
y_norm = (np.array(y) / sum(y) ) * len(eDep) # len(eDep) = nr of events
# Console report: detection and coincidence statistics per particle type.
print("Total number if particles generated: ", len(eDep) )
print("......................... GAMMA ...............................")
print("Total nr of gamma particles detected: ", len(eDep_gamma),
     "\n which gives the %: ", len(eDep_gamma) / len(eDep))
print("Nr of gamma particles detected in coincidence: ", len(c[:,0]),
     "\n which gives the %: ", len(c[:,0]) / len(eDep))
print("% that are detected that are in coincidence ", len(c[:,0]) / len(eDep_gamma) )
print("...............................................................")
print("-------------------------- BETA --------------------------")
print("Total nr of beta particles detected: ", len(eDep_beta),
     "\n which gives the %: ", len(eDep_beta) / len(eDep))
print("Nr of beta particles detected in coincidence: ", len(c[:,2]),
     "\n which gives the %: ", len(c[:,2]) / len(eDep))
print("% that are detected that are in coincidence: ", len(c[:,2]) / len(eDep_beta) )
print("----------------------------------------------------------")
print("************************** CE ****************************")
print("Total nr of CE particles detected: ", len(eDep_CE),
     "\n which gives the %: ", len(eDep_CE) / len(eDep))
print("Nr of CE particles detected in coincidence: ", len(c[:,1]),
     "\n which gives the %: ", len(c[:,1]) / len(eDep))
print("% that are detected that are in coincidence: ", len(c[:,1]) / len(eDep_CE) )
print("**********************************************************")
# print(coincidence)  # prints every row
# print(c)  # prints only the first three and last three rows
coin_procent = (len(c) / len(eDep)) * 100
print("% of coincidence for tripleRun4 xe133:", coin_procent )
####################################################
# Number of events that is needed util it converges:
#---------------------------------------------------
# Running coincidence percentage after each event.
p = []
for i in range(len(nr_array)):
    p.append( (nr_array[i][1] / nr_array[i][0]) * 100 ) # 'nr of coinc' / 'nr of events'
print("with the standard deviation: ", np.std(p) )
std = [1]
everyNth = 1000
for i in range(len(p)):
    if ( i > 0 and i%everyNth == 0): # To only calc the std of 'everyNth' value
        std.append( np.std( p[0:i] ) )
# Down-sample every 1000th point so the error-bar plot stays readable.
x = np.arange(1,len(nr_array)+1, 1)
xPlot = x[0::everyNth]
pPlot = p[0::everyNth]
plt.errorbar(xPlot, pPlot, yerr=std )
plt.xlabel("Number of events")
plt.ylabel("% of three detected particles")
plt.title(" Events needed to get the detection \n rate of three particles to converge")
plt.savefig("plot_convergesAll_tripleRun4_xe133.png")
plt.show()
# # Zoomed in:
# plt.errorbar(xPlot, pPlot, yerr=std )
# plt.xlabel("Number of events")
# plt.ylabel("% of coincidence")
# plt.title("Events needed to get coincidence rate to converges")
# plt.xlim(len(eDep)-100001, len(eDep))
# plt.ylim(coin_procent-3*np.std(p), coin_procent+3*np.std(p))
# plt.savefig("plot_convergesZoom_tripleRun4_xe133.png")
# plt.show()
######################################################
######################################################
# 2D coincidence histogram over beta and gamma energy:
# ----------------------------------------------------
# plt.hist2d(c[:,2], c[:,0], bins=50, density=False, cmap="Greys")
# plt.ylabel(r'$E_\gamma$ [MeV]')
# plt.xlabel(r'$E_\beta$ [MeV]')
# plt.title("Coincidence histogram over \n the gamma and beta energies")
# plt.ylim(0, 0.1)
# plt.xlim(0, 0.346)
# plt.colorbar()
# plt.savefig("plot_beta_gamma_coin_tripleRun.png")
# plt.show()
######################################################
######################################################
# 2D coincidence histogram over conversion and gamma energy:
# ----------------------------------------------------
# plt.hist2d(c[:,1], c[:,0], bins=50, density=False, cmap="Greys")
# plt.ylabel(r'$E_\gamma$ [MeV]')
# plt.xlabel(r'$E_{CE}$ [MeV]')
# plt.title("Coincidence histogram over \n the gamma and CE energies")
# # plt.ylim(0, 0.1)
# plt.xlim(0, 0.06)
# plt.colorbar()
# plt.show()
# ######################################################
######################################################
# 2D coincidence histogram over beta and CE energy:
# ----------------------------------------------------
# plt.hist2d(c[:,2], c[:,1], bins=50, density=False, cmap="Greys")
# plt.xlabel(r'$E_\beta$ [MeV]')
# plt.ylabel(r'$E_{CE}$ [MeV]')
# plt.title("Coincidence histogram over \n the beta and CE energies")
# plt.ylim(0, 0.05)
# plt.xlim(0, 0.346)
# plt.colorbar()
# plt.show()
######################################################
#######################################################################
# 1D histogram plot over Beta energy together with the 'correct' values
# #--------------------------------------------------------------------
# Plot
# plt.plot(x,y_norm, label="True", color="black")
# plt.hist(eDep_beta, bins=130, label="All beta from simulation", color="#4575b4")
# plt.hist(beta_3, bins=130, label="Beta when three particles \n\
# are detected", color="#d73027")
# plt.hist(c[:,2], bins=130, label="From simulation in \n\
# triple coincidence", color="#ffffbf")
# plt.xlabel("Energy [MeV]")
# plt.ylabel("Counts")
# plt.title("Energy distribution for the beta particle of Xe-133")
# plt.legend()
# plt.xlim(0,0.346)
# plt.savefig("plot_BetaEnergy_tripleRun4_xe133.png")
# plt.show()
#######################################################################
# ####################################
# 1D histogram plot over gamma
# #-----------------------------------
# print( min(c[:,0]))
# plt.hist(eDep_gamma, bins=100, label="All detected gamma")
# plt.hist(c[:,0], bins=100, label="gamma in coincidence")
# plt.legend()
# plt.title("Histogram over the gamma")
# plt.xlabel("Energy [MeV]")
# plt.ylabel("Counts")
# plt.xlim(0)
# plt.ylim(0)
# plt.show()
##############
# 3D:
# # -------------
# data = pd.DataFrame(coin_CEbeta, columns=['CE energy [MeV]','Beta energy [MeV]'])
# print(data)
# for col in data:
# plt.hist(data[col], density=True, alpha=0.5)
# with sns.axes_style('white'):
# sns.jointplot("CE energy [MeV]", "Beta energy [MeV]", data, kind='kde', cbar=True)
# plt.savefig("plot_CEbeta_histo_xe133.png")
# plt.show()
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# c_betaCE = np.array(coin_betaCE)
# x = c_betaCE[:,0] #CE
# y = c_betaCE[:,1] #beta
# plt.hist2d(x, y, bins=100)
# plt.colorbar()
# plt.xlabel("CE")
# plt.ylabel("beta")
# plt.show()
# hist , _ ,_ = np.histogram2d(x, y, bins=100)
# fig = plt.figure()
# ax = fig.add_subplot(projection='3d')
# ax.bar3d(x,y,z)
# z =
# z = c[:,2]
# k = np.random.standard_normal(len(c[:,0]))
# img = ax.scatter(x, y, cmap="Greys")
# fig.colorbar(img)
# ax.set_xlabel('gamma')
# ax.set_ylabel('CE')
# ax.set_zlabel('Beta')
# plt.show() | [
"marielle.akser@gmail.com"
] | marielle.akser@gmail.com |
31504abdd5dddc46c6321a982a457d1e55bcb295 | 758b6ecb4783b26d172c41bbec961e3691c009d5 | /blockchain_poc/blockchain.py | 5208d30ebb86d14d976ad8bf6ba30a817a78c0bc | [
"MIT"
] | permissive | jmcph4/blockchain-poc | 605d02295349ba9034ab6137fa351b68494d4381 | 252b4ce4cd8b23a8b196e154a1091d210b95eb2d | refs/heads/master | 2020-06-22T13:05:38.563328 | 2019-07-23T23:42:55 | 2019-07-23T23:42:55 | 197,719,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,234 | py | from copy import deepcopy
import time
import datetime
import json
from .block import Block
# Target time between mined blocks, in seconds.
BLOCK_GENERATION_INTERVAL = 10 # 10 seconds
# Re-evaluate the mining difficulty every this many blocks.
DIFFICULTY_ADJUSTMENT_INTERVAL = 10 # 10 blocks
# Initial difficulty (number of leading zero bytes required in a block hash).
DEFAULT_INITIAL_DIFFICULTY = 1
class Blockchain(object):
    """An in-memory proof-of-work blockchain.

    Maintains an append-only list of ``Block`` objects starting from a
    generated genesis block.  Difficulty is re-adjusted every
    ``DIFFICULTY_ADJUSTMENT_INTERVAL`` blocks so that mining one block
    takes roughly ``BLOCK_GENERATION_INTERVAL`` seconds.
    """

    def __init__(self):
        genesis_block = self.__generate_genesis_block()
        self.__blocks = [genesis_block]

    @property
    def blocks(self):
        """Deep copy of the chain, so callers cannot mutate internal state."""
        return deepcopy(self.__blocks)

    def get_latest_block(self):
        """Return a deep copy of the most recently appended block."""
        return deepcopy(self.__blocks[-1])

    def add_data(self, data):
        """Mine a new block containing ``data``, append it and return it."""
        prev_block = self.get_latest_block()
        new_index = prev_block.index + 1
        new_timestamp = int(time.time())
        difficulty = self.get_difficulty()

        # Proof of work: increment the nonce until the candidate block's
        # hash starts with `difficulty` zero bytes.
        nonce = 0
        while True:
            new_block = Block(new_index,
                              prev_block.hash,
                              new_timestamp,
                              data,
                              difficulty,
                              nonce)
            print(datetime.datetime.now(), nonce, new_block.hash.hex(), flush=True) # DEBUG
            if self.__validate_hash(new_block.hash, difficulty):
                self.__blocks.append(new_block)
                return new_block
            nonce += 1

    def add_block(self, block):
        """Append an externally produced block if it keeps the chain valid.

        Invalid blocks are silently ignored.
        """
        candidate_blocks = self.__blocks + [block]
        if Block.is_valid(block) and \
           Blockchain.__validate_raw_blocks(candidate_blocks):
            self.__blocks.append(block)

    def get_difficulty(self):
        """Return the difficulty to use for the next block.

        Re-adjusts at every adjustment-interval boundary (except for the
        genesis block); otherwise reuses the latest block's difficulty.
        """
        latest_block = self.get_latest_block()
        if latest_block.index % DIFFICULTY_ADJUSTMENT_INTERVAL == 0 and \
           latest_block.index > 0:
            return self.__get_adjusted_difficulty()
        else:
            return latest_block.difficulty

    def load_from_json(self, json_string):
        """Rebuild blocks from a JSON array (as produced by ``as_json``)
        and append each one via ``add_block``."""
        for block_json in json.loads(json_string):
            # Parsed for validation only; the stored hash is not passed to
            # the constructor (presumably Block derives its own hash --
            # TODO confirm against block.py).
            bytes.fromhex(block_json["hash"])
            block = Block(
                block_json["index"],
                bytes.fromhex(block_json["prev"]),
                block_json["timestamp"],
                block_json["data"],
                block_json["difficulty"],
                block_json["nonce"])
            self.add_block(block)

    def __get_adjusted_difficulty(self):
        """Return the difficulty adjusted by comparing the actual mining
        time of the last interval against the expected time."""
        prev_adjustment_block = self.__blocks[len(self) - \
            DIFFICULTY_ADJUSTMENT_INTERVAL]
        # Bug fix: `latest_block` was referenced but never defined here.
        latest_block = self.get_latest_block()
        expected_time = BLOCK_GENERATION_INTERVAL * \
            DIFFICULTY_ADJUSTMENT_INTERVAL
        actual_time = latest_block.timestamp - prev_adjustment_block.timestamp
        # Bug fix: the comparisons referenced an undefined `time_taken`;
        # they now use the `actual_time` computed above.
        if actual_time < expected_time / 2:
            return prev_adjustment_block.difficulty + 1
        elif actual_time > expected_time * 2:
            return prev_adjustment_block.difficulty - 1
        else:
            return prev_adjustment_block.difficulty

    def as_json(self):
        """Serialize the whole chain as a JSON array of block dicts."""
        return json.dumps([block.as_dict() for block in self.__blocks])

    def __len__(self):
        return len(self.__blocks)

    @staticmethod
    def __generate_genesis_block():
        """Build the fixed first block of every chain."""
        index = 0
        prev = None
        timestamp = int(time.time())
        data = b""
        difficulty = DEFAULT_INITIAL_DIFFICULTY
        nonce = 0

        genesis_block = Block(index, prev, timestamp, data, difficulty, nonce)

        return deepcopy(genesis_block)

    @staticmethod
    def __validate_hash(hash, difficulty):
        """True when `hash` starts with `difficulty` zero bytes."""
        return hash[:difficulty] == b"\0" * difficulty

    @staticmethod
    def is_valid(blockchain):
        """Return True if every block and link in ``blockchain`` is valid."""
        # Bug fix: this staticmethod referenced `self`.
        return Blockchain.__validate_raw_blocks(blockchain.blocks)

    @staticmethod
    def __validate_raw_blocks(blocks):
        """Validate each block and the prev-hash / index links between them."""
        for i in range(1, len(blocks)):
            curr_block = blocks[i]
            prev_block = blocks[i-1]

            if not Block.is_valid(curr_block):
                return False

            # Bug fix: the original condition was double-negated
            # (`if not curr_block.prev != prev_block.hash`), so broken
            # links were never rejected.
            if curr_block.prev != prev_block.hash:
                return False

            if curr_block.index != prev_block.index + 1:
                return False

        return True
| [
"jmcph4@users.noreply.github.com"
] | jmcph4@users.noreply.github.com |
1ba90dd656c8980eff31b4972d50accaaff84971 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Quantization/trend_MovingAverage/cycle_30/ar_/test_artificial_1024_Quantization_MovingAverage_30__20.py | 1d8cd7fcd6989efe67729b85e14bd6887518a581 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 273 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
6c5c0347d432b2582ccdf305067a2b196ad93947 | 41bc01dd5de49ae26ed6fd640a0e75dd197fb34f | /customapp/models.py | 4d5840911f86f8e8519c73bce4ca52f802bf2c2c | [] | no_license | Shilpa106/customumodel.github.io | a7d65b4cabb6a49f281e2aa95c996b67e7484ebd | cd338e331d16818fb3fc417247deb4727fa1b513 | refs/heads/master | 2023-07-07T23:02:48.461391 | 2021-04-24T11:24:26 | 2021-04-24T11:24:26 | 361,143,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | from django.db import models
# Create your models here.
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from datetime import date
class User(AbstractUser):
    """Custom user model that authenticates with email instead of username."""

    # Username is optional (login is by email) but still unique when given.
    username = models.CharField(max_length = 50, blank = True, null = True, unique = True)
    email = models.EmailField(_('email address'), unique = True)
    # NOTE(review): max_length=5 looks unusually small for a name field --
    # confirm whether this is intentional.
    native_name = models.CharField(max_length = 5)
    phone_no = models.CharField(max_length = 10)

    # Authenticate with the email field; the listed fields are additionally
    # prompted for by `createsuperuser`.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username', 'first_name', 'last_name']

    def __str__(self):
        return "{}".format(self.email)
| [
"shilpayadav108@gmail.com"
] | shilpayadav108@gmail.com |
4856ff37929574d65c945f9fbbda013b668bf9d4 | 17a7c2e28afaf9907de4ffd083358b67d6a6a589 | /blog/migrations/0001_initial.py | bddde1bfd9851a0b80fd753425ca41bc224951ca | [] | no_license | lmdragun/my-first-blog | a07d15ed115852582d4cc2463032f1977c7b550c | 41294980646992a03cb747053e876301dbbe6fd8 | refs/heads/master | 2021-01-19T07:43:37.050287 | 2017-08-17T21:13:57 | 2017-08-17T21:13:57 | 100,645,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-17 20:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the blog ``Post`` model."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"lmdragun@gmail.com"
] | lmdragun@gmail.com |
e6343d4aa9ce5e620e7342abeafd3c802ca57978 | 2f5ffdea706074fbff9228078e16f9475fe58f8e | /EXAMEN/examen.py | a43476a12b68cea3f5f870a8e69fbb9ff985744b | [] | no_license | brayner573/examen | 40108359556e226d6445a4afee35882ef713903a | 1c6cbf439cb19ae2da58eb67906ed3cffbd56584 | refs/heads/main | 2023-04-05T19:22:25.287828 | 2021-05-13T00:55:43 | 2021-05-13T00:55:43 | 366,888,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | def notafinaldeprogramacionBAMC():
#Definir variables y otros
print("nota final del curso de Fundamentos de programaciรณn")
#Datos de entrada
n1BAMC=int(input("Nota primera unidad:"))
n2BAMC=int(input("Nota segunda unidad:"))
n3BAMC=int(input("Nota tercera unidad:"))
nf4BAMC=int(input("Nota trabajo final:"))
#Proceso
promediofinalBAMC=(n1BAMC*0.2+n2BAMC*0.15+n3BAMC*0.15+nf4BAMC*0.5)
#Datos de salida
print("La nota final es:", promediofinalBAMC)
def premiodocenteBAMC():
    """Read the minimum salary and a teacher's score, then print the total
    salary including the score-based bonus."""
    # Default total used when the score is below 50; note the base salary
    # is NOT added in that case -- presumably intentional, TODO confirm.
    premioObtenidoBAMC=930
    # Input
    salarioMinimo=float(input("Ingrese el salario minimo:"))
    puntuacionObtenida=float(input("Ingrese la puntuacion que ha obtenido:"))
    # Bonus tiers: 50-100 -> +93, 101-150 -> +372, above 150 -> +651
    if puntuacionObtenida<=100 and puntuacionObtenida>=50:
        premioObtenidoBAMC=salarioMinimo+93
    elif puntuacionObtenida>=101 and puntuacionObtenida<=150:
        premioObtenidoBAMC=salarioMinimo+372
    elif puntuacionObtenida>150:
        premioObtenidoBAMC=salarioMinimo+651
    # Output
    print("El docente obtendra un sueldo total de:", premioObtenidoBAMC)
# Entry point: run the course-grade exercise; the salary exercise stays disabled.
notafinaldeprogramacionBAMC()
#premiodocenteBAMC()
""
] | |
22757da163182112c235044cdbd8c35ad87fc018 | ebd491367ca7c12f67b3da31a24ad93c2db552c8 | /main/users/apps.py | 78fc0ee57663f8791315a6ab644f0dc34f7f1f57 | [
"MIT"
] | permissive | shibaPuppy/django-settings | a8159d881a3f9710333ab554b7f9dd0ee798a547 | 82bd08dd4572b3638d3f1ac9095b86b82bbbffab | refs/heads/master | 2023-01-14T00:51:50.400132 | 2019-01-11T06:00:05 | 2019-01-11T06:00:05 | 164,996,737 | 0 | 0 | MIT | 2022-12-26T20:38:51 | 2019-01-10T05:36:01 | Python | UTF-8 | Python | false | false | 270 | py | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'main.users'
verbose_name = "Users"
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
pass
| [
"dg.kwon@navercorp.com"
] | dg.kwon@navercorp.com |
821b9aab0cbf582c5cbda4874a5061281ad858f2 | b9646a2d58bad4ad0d0e713c277deb530933ffa1 | /Damas/Damas/Draw/__init__.py | e8aa8048b1a5a921dd72fed9c1fd20c907244914 | [] | no_license | CaioBadner/Damas | 244a33512668e9f414b0b651443ebcea5e21f6f9 | ab78bf4d5a164d425646d57ceec3f4d487ebfead | refs/heads/main | 2023-02-18T03:57:53.686316 | 2021-01-22T11:58:04 | 2021-01-22T11:58:04 | 328,147,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | #__init__.py
from .Constants import Constants
from .Objects import *
from .Screen import Screen
| [
"noreply@github.com"
] | CaioBadner.noreply@github.com |
e0d15eea5b6d89432ba750f5c3a61bdb7bd0ce84 | 730103ddecd23142238defe2a2b1ab3c582cdc45 | /onnx2tf/ops/ReverseSequence.py | c2c8dc8337c257539be89abccb5dab2eb3372482 | [
"Apache-2.0",
"MIT"
] | permissive | PINTO0309/onnx2tf | dcfb0fd8a4810ef1262aa565ba42b5124012bdb2 | b0e7d106cc69c0ea0fd464c4dd9064a5b0d6668b | refs/heads/main | 2023-08-30T23:28:56.386741 | 2023-08-29T01:48:40 | 2023-08-29T01:48:40 | 541,831,874 | 345 | 45 | MIT | 2023-09-14T16:53:12 | 2022-09-27T00:06:32 | Python | UTF-8 | Python | false | false | 3,308 | py | import random
random.seed(0)
import numpy as np
np.random.seed(0)
import tensorflow as tf
import onnx_graphsurgeon as gs
from onnx2tf.utils.common_functions import (
get_constant_or_variable,
print_node_info,
inverted_operation_enable_disable,
make_tf_node_info,
get_replacement_parameter,
pre_process_transpose,
post_process_transpose,
)
@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
    *,
    graph_node: gs.Node,
    tf_layers_dict: dict,
    **kwargs: dict,
):
    """ReverseSequence

    Converts an ONNX ReverseSequence node into ``tf.reverse_sequence`` and
    records the resulting tensor (plus debug info) in ``tf_layers_dict``.

    Parameters
    ----------
    graph_node: gs.Node
        graph_surgeon Node

    tf_layers_dict: dict
        optype, shape, dtype, tensorflow graph
    """
    # Whether the producer of input 0 already emitted transposed output;
    # defaults to True when the input has no recorded entry.
    before_op_output_shape_trans_1 = \
        tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
    before_op_output_shape_trans = \
        before_op_output_shape_trans_1

    graph_node_input_1 = get_constant_or_variable(
        graph_node.inputs[0],
        before_op_output_shape_trans,
    )
    # Resolve to the concrete TF tensor when the helper returned a Variable.
    input_tensor = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
        if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
    graph_node_input_2 = get_constant_or_variable(
        graph_node.inputs[1],
        before_op_output_shape_trans,
    )
    sequence_lens = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
        if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
    graph_node_output: gs.Variable = graph_node.outputs[0]
    shape = graph_node_output.shape
    dtype = graph_node_output.dtype

    # ONNX attribute defaults: batch on axis 1, time on axis 0.
    batch_axis = graph_node.attrs.get('batch_axis', 1)
    time_axis = graph_node.attrs.get('time_axis', 0)

    # Preserving Graph Structure (Dict)
    tf_layers_dict[graph_node_output.name] = {
        'optype': graph_node.op,
        'shape': shape,
        'dtype': dtype,
    }

    # Pre-process transpose
    input_tensor = pre_process_transpose(
        value_before_transpose=input_tensor,
        param_target='inputs',
        param_name=graph_node.inputs[0].name,
        **kwargs,
    )

    # Generation of TF OP
    tf_layers_dict[graph_node_output.name]['tf_node'] = \
        tf.reverse_sequence(
            input=input_tensor,
            seq_lengths=sequence_lens,
            seq_axis=time_axis,
            batch_axis=batch_axis,
            name=graph_node.name,
        )

    # Post-process transpose
    tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
        value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
        param_target='outputs',
        param_name=graph_node.outputs[0].name,
        **kwargs,
    )

    # Generation of Debug Info
    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
        make_tf_node_info(
            node_info={
                'tf_op_type': tf.reverse_sequence,
                'tf_inputs': {
                    'input': input_tensor,
                    'seq_lengths': sequence_lens,
                    'seq_axis': time_axis,
                    'batch_axis': batch_axis,
                },
                'tf_outputs': {
                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
                },
            }
        )
"rmsdh122@yahoo.co.jp"
] | rmsdh122@yahoo.co.jp |
574c54f096735802874cf3f2da7a5862b6a1a07c | 663b0fe7cc6e81586cbdf0baa7daf384f9e0bc9c | /preprocess/components/main.py | 63d5ece5323abcd63714afb43359480a993b85aa | [] | no_license | hvu53/ml | 396a71e41b056a08dcd3fdad1208887a3ef7cf7e | b011404f469c369d9c30c8f589d1e3cfd09802bb | refs/heads/master | 2021-01-10T10:10:03.802914 | 2015-12-10T06:00:51 | 2015-12-10T06:00:51 | 47,584,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,685 | py | import efficiency, sales, products, survey
import convert
import os, sys
# Keywords for splits stored in meats, seafoods, vegetables
# This accuracy depends on the keywords being split on
# We'll split on meat first, then seafood, then vegetarian
# Input directories (raw data) and matching output directories.
Monthly_dir = '../data/Monthly_csv_converted/'
Weekly_dir = '../data/Weekly_Sales_Records/'
SKU_dir = '../data/SKU_Master/'
Monthly_out_dir = '../output/Monthly_Sold_Waste/'
Weekly_out_dir = '../output/Weekly_Sales_Records/'
SKU_out_dir = '../output/SKU_Master/'
raw_dir = '../data/raw_xlsx/'
convert_outdir = '../data/Monthly_csv_converted/'
survey_dir = '../data/survey/'
survey_out_dir = '../output/survey/'
def process_file(process_fun, file_name, out_file, meats=False, seafoods=False, vegetables=False):
    """Apply `process_fun` to every line of `file_name` and write the
    non-empty results, one per line, to `out_file`.

    When `meats` is truthy, each line is stripped and `process_fun` is
    called as process_fun(line, meats, seafoods, vegetables); otherwise it
    is called with the raw line only.  Falsy results are dropped.
    """
    with open(file_name) as f:
        inputs = f.readlines()

    # NOTE(review): this tests truthiness, so an empty keyword list takes
    # the non-keyword branch -- confirm that is intended.
    if meats:
        outputs = map(lambda s : process_fun(s.strip(), meats, seafoods, vegetables), inputs)
    else:
        outputs = map(process_fun, inputs)

    outputs = filter(lambda s : s, outputs)

    # Bug fix: the output file was never closed; `with` guarantees it.
    with open(out_file, 'w+') as out:
        for line in outputs:
            out.write("%s\n" % line)
# A simple test case for each file type
def test():
with open('keywords/meats') as f:
meats = map(lambda s : s.strip(), f.readlines())
with open('keywords/seafoods') as f:
seafoods = map(lambda s : s.strip(), f.readlines())
with open('keywords/vegetables') as f:
vegetables = map(lambda s : s.strip(), f.readlines())
try:
efficiency.process_line('1910,,,,,3,0,0,0,,3,0,100.00%,0.00%')
sales.process_line('Chinese Chicken Salad Wrap,238908,1,null,$5.75 ,$5.75 ,$0.00 ,Y,9/9/2015 19:42,CMU-1')
products.process_line('Sandwich,1823,B&B Grilled Steak Sandwich,$7.25,"Herb Marinated Grilled Skirt Steak, Bacon, Butter Lettuce, Blue Cheese Spread, Whole Grain Mustard, Sliced Sourdough Bread",Dairy/ Gluten,Yes', meats, seafoods, vegetables)
products.process_line('Sandwich,1804,Classic Tuna Sandwich,$7.25,"Classic Tuna Salad, Swiss Cheese, Lettuce, Tomato, Sliced Red Onion, Croissant",Dairy/ Gluten,Yes', meats, seafoods, vegetables)
except:
print "Test failed!"
return False
return True
def monthly_file(fname):
    """Process one Monthly CSV: read Monthly_dir/fname through
    efficiency.process_line, write cleaned rows to Monthly_out_dir/fname,
    and return the output path."""
    if not os.path.exists(Monthly_out_dir):
        os.makedirs(Monthly_out_dir)
        # Single-arg print() call is valid on both Python 2 and 3,
        # unlike the original py2-only print statement.
        print("Created Monthly output directory...")
    fullname = Monthly_dir + fname
    outname = Monthly_out_dir + fname
    process_file(efficiency.process_line, fullname, outname)
    return outname
def weekly_file(fname):
    """Process one Weekly CSV: read Weekly_dir/fname through
    sales.process_line, write results to Weekly_out_dir/fname, and
    return the output path."""
    if not os.path.exists(Weekly_out_dir):
        os.makedirs(Weekly_out_dir)
        # print() call form is portable across Python 2 and 3.
        print("Created Weekly output directory...")
    fullname = Weekly_dir + fname
    outname = Weekly_out_dir + fname
    process_file(sales.process_line, fullname, outname)
    return outname
def survey_file(fname):
    """Process one survey CSV: read survey_dir/fname through
    survey.process_line, write results to survey_out_dir/fname, and
    return the output path."""
    if not os.path.exists(survey_out_dir):
        os.makedirs(survey_out_dir)
        # print() call form is portable across Python 2 and 3.
        print("Created Survey output directory...")
    fullname = survey_dir + fname
    outname = survey_out_dir + fname
    process_file(survey.process_line, fullname, outname)
    return outname
def sku_file(fname):
    """Process one SKU-master CSV.

    Loads the meat/seafood/vegetable keyword lists, categorizes each
    product row via products.process_line, writes the results to
    SKU_out_dir/fname, and returns the output path.
    """
    if not os.path.exists(SKU_out_dir):
        os.makedirs(SKU_out_dir)
        # print() call form is portable across Python 2 and 3.
        print("Created SKU output directory...")
    fullname = SKU_dir + fname
    outname = SKU_out_dir + fname
    with open('keywords/meats') as f:
        meats = map(lambda s: s.strip(), f.readlines())
    with open('keywords/seafoods') as f:
        seafoods = map(lambda s: s.strip(), f.readlines())
    with open('keywords/vegetables') as f:
        vegetables = map(lambda s: s.strip(), f.readlines())
    process_file(products.process_line, fullname, outname, meats, seafoods, vegetables)
    return outname
def convert_file(fname):
    """Convert one raw .xlsx workbook to CSV via convert.convert_xls_to_csv
    and return the basenames of the CSV files produced."""
    if not os.path.exists(convert_outdir):
        # print() call form is portable across Python 2 and 3.
        print("Making output directory")
        os.makedirs(convert_outdir)
    converted = convert.convert_xls_to_csv(fname)
    return map(lambda s: s.split('/')[-1], converted)
def main():
    """Command-line driver.

    With no arguments (or 'all'), converts the raw xlsx files and
    processes every directory.  Otherwise each flag selects a step:
    h/help, c/convert, m (monthly), w (weekly), sku, s (survey).
    """
    if not test():
        print("Preliminary tests did not pass!")
        print("Check that the keyword files and components are all present")
        return
    args = sys.argv
    if len(args) <= 1 or 'all' in args:
        print("Converting and Processing all directories")
        args = ['m', 'w', 'sku', 'c', 's']
    if 'h' in args or 'help' in args:
        print("Usage: h for help")
        print("c or convert for converting xlsx to csv files")
        print("m to process monthly directory")
        print("w to process weekly directory")
        print("sku to process SKU master")
        print("Default: nothing for all 3 directories\n")
    if 'c' in args or 'convert' in args:
        print("Converting xlsx files to .csv")
        for fname in os.listdir(raw_dir):
            convert_file(fname)
        print("Finished converting files to csv")
        # Original quirk preserved: stop early unless args has exactly 4
        # entries; otherwise drop the leading program name.
        if len(args) != 4:
            return
        else:
            args = args[1:]
    if 'm' in args:
        print("Processing Monthly directory now...")
        for fname in os.listdir(Monthly_dir):
            monthly_file(fname)
        print("Monthly Processing Done.\n")
    if 'w' in args:
        print("Processing Weekly directory now")
        for fname in os.listdir(Weekly_dir):
            weekly_file(fname)
        print("Weekly Processing Done.\n")
    if 'sku' in args:
        print("Processing SKU Master directory now")
        for fname in os.listdir(SKU_dir):
            sku_file(fname)
        print("SKU Processing Done.\n")
    if 's' in args:
        print("Processing Survey directory now")
        for fname in os.listdir(survey_dir):
            # Bug fix: this loop previously called sku_file on survey
            # files; survey rows must go through survey.process_line.
            survey_file(fname)
        print("Survey Processing Done.\n")
#main()
| [
"hoavt040789@gmail.com"
] | hoavt040789@gmail.com |
979b699a367d604f9353cf9805004d4f0d43b7c5 | 966280ab617298a3ced79bc60189b301c795067a | /Sliding-Window/239_sliding_window_maximum.py | 445ece104ef138fc8ad1d83b3627505908fe52ce | [] | no_license | Rishabhh/LeetCode-Solutions | c0382e5ba5b77832322c992418f697f42213620f | 2536744423ee9dc7da30e739eb0bca521c216f00 | refs/heads/master | 2020-06-10T02:37:42.103289 | 2019-05-29T06:38:02 | 2019-05-29T06:38:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | import collections
class Solution:
def max_sliding_window(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
res = []
q = collections.deque()
n = len(nums)
for i in range(n):
while q and q[-1][1] <= nums[i]:
q.pop()
q.append((i, nums[i]))
if i >= k:
while q and q[0][0] <= i - k:
q.popleft()
if i >= k - 1:
res.append(q[0][1])
return res
| [
"weihewang2012@gmail.com"
] | weihewang2012@gmail.com |
879ed203a95faf1ad6a9ca1ed7ab98c3695fd4b6 | bd2a975f5f6cd771393f994ebd428e43142ee869 | /new_render_data/input/p/script/abort/kafka/consumer/group.py | 54a3711aeb79c67b8eeaec2a9f8a97e0c5b52feb | [] | no_license | sol87/Pycharm_python36 | 1a297c9432462fc0d3189a1dc7393fdce26cb501 | fa7d53990040d888309a349cfa458a537b8d5f04 | refs/heads/master | 2023-03-16T10:35:55.697402 | 2018-11-08T09:52:14 | 2018-11-08T09:52:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,351 | py | from __future__ import absolute_import
import copy
import logging
import socket
import sys
import time
from kafka.errors import KafkaConfigurationError, UnsupportedVersionError
from kafka.vendor import six
from kafka.client_async import KafkaClient, selectors
from kafka.consumer.fetcher import Fetcher
from kafka.consumer.subscription_state import SubscriptionState
from kafka.coordinator.consumer import ConsumerCoordinator
from kafka.coordinator.assignors.range import RangePartitionAssignor
from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor
from kafka.metrics import MetricConfig, Metrics
from kafka.protocol.offset import OffsetResetStrategy
from kafka.structs import TopicPartition
from kafka.version import __version__
log = logging.getLogger(__name__)
class KafkaConsumer(six.Iterator):
"""Consume records from a Kafka cluster.
The consumer will transparently handle the failure of servers in the Kafka
cluster, and adapt as topic-partitions are created or migrate between
brokers. It also interacts with the assigned kafka Group Coordinator node
to allow multiple consumers to load balance consumption of topics (requires
kafka >= 0.9.0.0).
The consumer is not thread safe and should not be shared across threads.
Arguments:
*topics (str): optional list of topics to subscribe to. If not set,
call :meth:`~kafka.KafkaConsumer.subscribe` or
:meth:`~kafka.KafkaConsumer.assign` before consuming records.
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): A name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
group_id (str or None): The name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. If None, auto-partition assignment (via
group coordinator) and offset commits are disabled.
Default: None
key_deserializer (callable): Any callable that takes a
raw message key and returns a deserialized key.
value_deserializer (callable): Any callable that takes a
raw message value and returns a deserialized value.
fetch_min_bytes (int): Minimum amount of data the server should
return for a fetch request, otherwise wait up to
fetch_max_wait_ms for more data to accumulate. Default: 1.
fetch_max_wait_ms (int): The maximum amount of time in milliseconds
the server will block before answering the fetch request if
there isn't sufficient data to immediately satisfy the
requirement given by fetch_min_bytes. Default: 500.
fetch_max_bytes (int): The maximum amount of data the server should
return for a fetch request. This is not an absolute maximum, if the
first message in the first non-empty partition of the fetch is
larger than this value, the message will still be returned to
ensure that the consumer can make progress. NOTE: consumer performs
fetches to multiple brokers in parallel so memory usage will depend
on the number of brokers containing partitions for the topic.
Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 Mb).
max_partition_fetch_bytes (int): The maximum amount of data
per-partition the server will return. The maximum total memory
used for a request = #partitions * max_partition_fetch_bytes.
This size must be at least as large as the maximum message size
the server allows or else it is possible for the producer to
send messages larger than the consumer can fetch. If that
happens, the consumer can get stuck trying to fetch a large
message on a certain partition. Default: 1048576.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 40000.
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. To avoid connection storms, a
randomization factor of 0.2 will be applied to the backoff
resulting in a random range between 20% below and 20% above
the computed value. Default: 1000.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
auto_offset_reset (str): A policy for resetting offsets on
OffsetOutOfRange errors: 'earliest' will move to the oldest
available message, 'latest' will move to the most recent. Any
other value will raise the exception. Default: 'latest'.
enable_auto_commit (bool): If True , the consumer's offset will be
periodically committed in the background. Default: True.
auto_commit_interval_ms (int): Number of milliseconds between automatic
offset commits, if enable_auto_commit is True. Default: 5000.
default_offset_commit_callback (callable): Called as
callback(offsets, response) response will be either an Exception
or an OffsetCommitResponse struct. This callback can be used to
trigger custom actions when a commit request completes.
check_crcs (bool): Automatically check the CRC32 of the records
consumed. This ensures no on-the-wire or on-disk corruption to
the messages occurred. This check adds some overhead, so it may
be disabled in cases seeking extreme performance. Default: True
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata, even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
partition_assignment_strategy (list): List of objects to use to
distribute partition ownership amongst consumer instances when
group management is used.
Default: [RangePartitionAssignor, RoundRobinPartitionAssignor]
heartbeat_interval_ms (int): The expected time in milliseconds
between heartbeats to the consumer coordinator when using
Kafka's group management feature. Heartbeats are used to ensure
that the consumer's session stays active and to facilitate
rebalancing when new consumers join or leave the group. The
value must be set lower than session_timeout_ms, but typically
should be set no higher than 1/3 of that value. It can be
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
session_timeout_ms (int): The timeout used to detect failures when
using Kafka's group management facilities. Default: 30000
max_poll_records (int): The maximum number of records returned in a
single call to :meth:`~kafka.KafkaConsumer.poll`. Default: 500
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). The java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). The java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
consumer_timeout_ms (int): number of milliseconds to block during
message iteration before raising StopIteration (i.e., ending the
            iterator). Default: block forever [float('inf')].
skip_double_compressed_messages (bool): A bug in KafkaProducer <= 1.2.4
caused some messages to be corrupted via double-compression.
By default, the fetcher will return these messages as a compressed
blob of bytes with a single offset, i.e. how the message was
actually published to the cluster. If you prefer to have the
fetcher automatically detect corrupt messages and skip them,
set this option to True. Default: False.
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): Flag to configure whether ssl handshake
should verify that the certificate matches the brokers hostname.
Default: True.
ssl_cafile (str): Optional filename of ca file to use in certificate
verification. Default: None.
ssl_certfile (str): Optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. Default: None.
ssl_keyfile (str): Optional filename containing the client private key.
Default: None.
ssl_password (str): Optional password to be used when loading the
certificate chain. Default: None.
ssl_crlfile (str): Optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
Default: None.
api_version (tuple): Specify which Kafka API version to use. If set to
None, the client will attempt to infer the broker version by probing
various APIs. Different versions enable different functionality.
Examples:
(0, 9) enables full group coordination features with automatic
partition assignment and rebalancing,
(0, 8, 2) enables kafka-storage offset commits with manual
partition assignment only,
(0, 8, 1) enables zookeeper-storage offset commits with manual
partition assignment only,
(0, 8, 0) enables basic functionality but requires manual
partition assignment and offset management.
For the full list of supported versions, see
KafkaClient.API_VERSIONS. Default: None
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version set to 'auto'
metric_reporters (list): A list of classes to use as metrics reporters.
Implementing the AbstractMetricsReporter interface allows plugging
in classes that will be notified of new metric creation. Default: []
metrics_num_samples (int): The number of samples maintained to compute
metrics. Default: 2
metrics_sample_window_ms (int): The maximum age in milliseconds of
samples used to compute metrics. Default: 30000
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
exclude_internal_topics (bool): Whether records from internal topics
(such as offsets) should be exposed to the consumer. If set to True
the only way to receive records from an internal topic is
subscribing to it. Requires 0.10+ Default: True
sasl_mechanism (str): String picking sasl mechanism when security_protocol
is SASL_PLAINTEXT or SASL_SSL. Currently only PLAIN is supported.
Default: None
sasl_plain_username (str): Username for sasl PLAIN authentication.
Default: None
sasl_plain_password (str): Password for sasl PLAIN authentication.
Default: None
Note:
Configuration parameters are described in more detail at
https://kafka.apache.org/documentation/#newconsumerconfigs
"""
DEFAULT_CONFIG = {
'bootstrap_servers': 'localhost',
'client_id': 'kafka-python-' + __version__,
'group_id': None,
'key_deserializer': None,
'value_deserializer': None,
'fetch_max_wait_ms': 500,
'fetch_min_bytes': 1,
'fetch_max_bytes': 52428800,
'max_partition_fetch_bytes': 1 * 1024 * 1024,
'request_timeout_ms': 40 * 1000,
'retry_backoff_ms': 100,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'auto_offset_reset': 'latest',
'enable_auto_commit': True,
'auto_commit_interval_ms': 5000,
'default_offset_commit_callback': lambda offsets, response: True,
'check_crcs': True,
'metadata_max_age_ms': 5 * 60 * 1000,
'partition_assignment_strategy': (RangePartitionAssignor, RoundRobinPartitionAssignor),
'heartbeat_interval_ms': 3000,
'session_timeout_ms': 30000,
'max_poll_records': 500,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'consumer_timeout_ms': float('inf'),
'skip_double_compressed_messages': False,
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_crlfile': None,
'ssl_password': None,
'api_version': None,
'api_version_auto_timeout_ms': 2000,
'connections_max_idle_ms': 9 * 60 * 1000,
'metric_reporters': [],
'metrics_num_samples': 2,
'metrics_sample_window_ms': 30000,
'metric_group_prefix': 'consumer',
'selector': selectors.DefaultSelector,
'exclude_internal_topics': True,
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
}
    def __init__(self, *topics, **configs):
        """Create the consumer and (optionally) subscribe to *topics*.

        Overlays user ``configs`` onto DEFAULT_CONFIG, validates the
        timeout relationships, then constructs the network client,
        subscription state, fetcher and group coordinator.

        Raises:
            KafkaConfigurationError: if request_timeout_ms is not strictly
                greater than both session_timeout_ms and fetch_max_wait_ms.
            AssertionError: if an unrecognized config key is supplied.
        """
        self.config = copy.copy(self.DEFAULT_CONFIG)
        # pop() recognized keys so anything left in `configs` is unknown.
        for key in self.config:
            if key in configs:
                self.config[key] = configs.pop(key)
        # Only check for extra config keys in top-level class
        assert not configs, 'Unrecognized configs: %s' % configs
        # Map deprecated auto_offset_reset spellings to their replacements.
        deprecated = {'smallest': 'earliest', 'largest': 'latest'}
        if self.config['auto_offset_reset'] in deprecated:
            new_config = deprecated[self.config['auto_offset_reset']]
            log.warning('use auto_offset_reset=%s (%s is deprecated)',
                        new_config, self.config['auto_offset_reset'])
            self.config['auto_offset_reset'] = new_config
        request_timeout_ms = self.config['request_timeout_ms']
        session_timeout_ms = self.config['session_timeout_ms']
        fetch_max_wait_ms = self.config['fetch_max_wait_ms']
        # The request timeout must dominate both values checked below.
        if request_timeout_ms <= session_timeout_ms:
            raise KafkaConfigurationError(
                "Request timeout (%s) must be larger than session timeout (%s)" %
                (request_timeout_ms, session_timeout_ms))
        if request_timeout_ms <= fetch_max_wait_ms:
            raise KafkaConfigurationError("Request timeout (%s) must be larger than fetch-max-wait-ms (%s)" %
                                          (request_timeout_ms, fetch_max_wait_ms))
        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                     time_window_ms=self.config['metrics_sample_window_ms'],
                                     tags=metrics_tags)
        reporters = [reporter() for reporter in self.config['metric_reporters']]
        self._metrics = Metrics(metric_config, reporters)
        # TODO _metrics likely needs to be passed to KafkaClient, etc.
        # api_version was previously a str. Accept old format for now
        if isinstance(self.config['api_version'], str):
            str_version = self.config['api_version']
            if str_version == 'auto':
                self.config['api_version'] = None
            else:
                self.config['api_version'] = tuple(map(int, str_version.split('.')))
            log.warning('use api_version=%s [tuple] -- "%s" as str is deprecated',
                        str(self.config['api_version']), str_version)
        self._client = KafkaClient(metrics=self._metrics, **self.config)
        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = self._client.config['api_version']
        self._subscription = SubscriptionState(self.config['auto_offset_reset'])
        self._fetcher = Fetcher(
            self._client, self._subscription, self._metrics, **self.config)
        self._coordinator = ConsumerCoordinator(
            self._client, self._subscription, self._metrics,
            assignors=self.config['partition_assignment_strategy'],
            **self.config)
        self._closed = False
        self._iterator = None
        self._consumer_timeout = float('inf')
        # Eagerly subscribe if topics were passed positionally.
        if topics:
            self._subscription.subscribe(topics=topics)
            self._client.set_topics(topics)
def assign(self, partitions):
"""Manually assign a list of TopicPartitions to this consumer.
Arguments:
partitions (list of TopicPartition): Assignment for this instance.
Raises:
IllegalStateError: If consumer has already called
:meth:`~kafka.KafkaConsumer.subscribe`.
Warning:
It is not possible to use both manual partition assignment with
:meth:`~kafka.KafkaConsumer.assign` and group assignment with
:meth:`~kafka.KafkaConsumer.subscribe`.
Note:
This interface does not support incremental assignment and will
replace the previous assignment (if there was one).
Note:
Manual topic assignment through this method does not use the
consumer's group management functionality. As such, there will be
no rebalance operation triggered when group membership or cluster
and topic metadata change.
"""
self._subscription.assign_from_user(partitions)
self._client.set_topics([tp.topic for tp in partitions])
def assignment(self):
"""Get the TopicPartitions currently assigned to this consumer.
If partitions were directly assigned using
:meth:`~kafka.KafkaConsumer.assign`, then this will simply return the
same partitions that were previously assigned. If topics were
subscribed using :meth:`~kafka.KafkaConsumer.subscribe`, then this will
give the set of topic partitions currently assigned to the consumer
(which may be None if the assignment hasn't happened yet, or if the
partitions are in the process of being reassigned).
Returns:
set: {TopicPartition, ...}
"""
return self._subscription.assigned_partitions()
def close(self, autocommit=True):
"""Close the consumer, waiting indefinitely for any needed cleanup.
Keyword Arguments:
autocommit (bool): If auto-commit is configured for this consumer,
this optional flag causes the consumer to attempt to commit any
pending consumed offsets prior to close. Default: True
"""
if self._closed:
return
log.debug("Closing the KafkaConsumer.")
self._closed = True
self._coordinator.close(autocommit=autocommit)
self._metrics.close()
self._client.close()
try:
self.config['key_deserializer'].close()
except AttributeError:
pass
try:
self.config['value_deserializer'].close()
except AttributeError:
pass
log.debug("The KafkaConsumer has closed.")
def commit_async(self, offsets=None, callback=None):
"""Commit offsets to kafka asynchronously, optionally firing callback.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
This is an asynchronous call and will not block. Any errors encountered
are either passed to the callback (if provided) or discarded.
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
callback (callable, optional): Called as callback(offsets, response)
with response as either an Exception or an OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
Returns:
kafka.future.Future
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if offsets is None:
offsets = self._subscription.all_consumed_offsets()
log.debug("Committing offsets: %s", offsets)
future = self._coordinator.commit_offsets_async(
offsets, callback=callback)
return future
def commit(self, offsets=None):
"""Commit offsets to kafka, blocking until success or error.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
Blocks until either the commit succeeds or an unrecoverable error is
encountered (in which case it is thrown to the caller).
Currently only supports kafka-topic offset storage (not zookeeper).
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if offsets is None:
offsets = self._subscription.all_consumed_offsets()
self._coordinator.commit_offsets_sync(offsets)
def committed(self, partition):
"""Get the last committed offset for the given partition.
This offset will be used as the position for the consumer
in the event of a failure.
This call may block to do a remote call if the partition in question
isn't assigned to this consumer or if the consumer hasn't yet
initialized its cache of committed offsets.
Arguments:
partition (TopicPartition): The partition to check.
Returns:
The last committed offset, or None if there was no prior commit.
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
if self._subscription.is_assigned(partition):
committed = self._subscription.assignment[partition].committed
if committed is None:
self._coordinator.refresh_committed_offsets_if_needed()
committed = self._subscription.assignment[partition].committed
else:
commit_map = self._coordinator.fetch_committed_offsets([partition])
if partition in commit_map:
committed = commit_map[partition].offset
else:
committed = None
return committed
    def topics(self):
        """Get all topics the user is authorized to view.

        Returns:
            set: topics
        """
        cluster = self._client.cluster
        # If a metadata refresh for a restricted topic set is already in
        # flight, wait for it to finish so the flag flip below applies to
        # a fresh request.
        if self._client._metadata_refresh_in_progress and self._client._topics:
            future = cluster.request_update()
            self._client.poll(future=future)
        # Temporarily request metadata for *all* topics, then restore the
        # previous setting once the update completes.
        stash = cluster.need_all_topic_metadata
        cluster.need_all_topic_metadata = True
        future = cluster.request_update()
        self._client.poll(future=future)
        cluster.need_all_topic_metadata = stash
        return cluster.topics()
def partitions_for_topic(self, topic):
"""Get metadata about the partitions for a given topic.
Arguments:
topic (str): Topic to check.
Returns:
set: Partition ids
"""
return self._client.cluster.partitions_for_topic(topic)
def poll(self, timeout_ms=0, max_records=None):
"""Fetch data from assigned topics / partitions.
Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.
Incompatible with iterator interface -- use one or the other, not both.
Arguments:
timeout_ms (int, optional): Milliseconds spent waiting in poll if
data is not available in the buffer. If 0, returns immediately
with any records that are available currently in the buffer,
else returns empty. Must not be negative. Default: 0
max_records (int, optional): The maximum number of records returned
in a single call to :meth:`~kafka.KafkaConsumer.poll`.
Default: Inherit value from max_poll_records.
Returns:
dict: Topic to list of records since the last fetch for the
subscribed list of topics and partitions.
"""
assert timeout_ms >= 0, 'Timeout must not be negative'
if max_records is None:
max_records = self.config['max_poll_records']
# Poll for new data until the timeout expires
start = time.time()
remaining = timeout_ms
while True:
records = self._poll_once(remaining, max_records)
if records:
return records
elapsed_ms = (time.time() - start) * 1000
remaining = timeout_ms - elapsed_ms
if remaining <= 0:
return {}
    def _poll_once(self, timeout_ms, max_records):
        """Do one round of polling. In addition to checking for new data, this does
        any needed heart-beating, auto-commits, and offset updates.

        Arguments:
            timeout_ms (int): The maximum time in milliseconds to block.
            max_records (int): Maximum number of records to return.

        Returns:
            dict: Map of topic to list of records (may be empty).
        """
        # Group-managed consumers need a live coordinator and an active
        # generation before any fetch.
        if self._use_consumer_group():
            self._coordinator.ensure_coordinator_known()
            self._coordinator.ensure_active_group()
        # 0.8.2 brokers support kafka-backed offset storage via group coordinator
        elif self.config['group_id'] is not None and self.config['api_version'] >= (0, 8, 2):
            self._coordinator.ensure_coordinator_known()
        # Fetch positions if we have partitions we're subscribed to that we
        # don't know the offset for
        if not self._subscription.has_all_fetch_positions():
            self._update_fetch_positions(self._subscription.missing_fetch_positions())
        # If data is available already, e.g. from a previous network client
        # poll() call to commit, then just return it immediately
        records, partial = self._fetcher.fetched_records(max_records)
        if records:
            # Before returning the fetched records, we can send off the
            # next round of fetches and avoid block waiting for their
            # responses to enable pipelining while the user is handling the
            # fetched records.
            if not partial:
                self._fetcher.send_fetches()
            return records
        # Send any new fetches (won't resend pending fetches)
        self._fetcher.send_fetches()
        self._client.poll(timeout_ms=timeout_ms, sleep=True)
        records, _ = self._fetcher.fetched_records(max_records)
        return records
def position(self, partition):
"""Get the offset of the next record that will be fetched
Arguments:
partition (TopicPartition): Partition to check
Returns:
int: Offset
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
offset = self._subscription.assignment[partition].position
if offset is None:
self._update_fetch_positions([partition])
offset = self._subscription.assignment[partition].position
return offset
def highwater(self, partition):
"""Last known highwater offset for a partition.
A highwater offset is the offset that will be assigned to the next
message that is produced. It may be useful for calculating lag, by
comparing with the reported position. Note that both position and
highwater refer to the *next* offset -- i.e., highwater offset is
one greater than the newest available message.
Highwater offsets are returned in FetchResponse messages, so will
not be available if no FetchRequests have been sent for this partition
yet.
Arguments:
partition (TopicPartition): Partition to check
Returns:
int or None: Offset if available
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
return self._subscription.assignment[partition].highwater
def pause(self, *partitions):
"""Suspend fetching from the requested partitions.
Future calls to :meth:`~kafka.KafkaConsumer.poll` will not return any
records from these partitions until they have been resumed using
:meth:`~kafka.KafkaConsumer.resume`.
Note: This method does not affect partition subscription. In particular,
it does not cause a group rebalance when automatic assignment is used.
Arguments:
*partitions (TopicPartition): Partitions to pause.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
for partition in partitions:
log.debug("Pausing partition %s", partition)
self._subscription.pause(partition)
def paused(self):
"""Get the partitions that were previously paused using
:meth:`~kafka.KafkaConsumer.pause`.
Returns:
set: {partition (TopicPartition), ...}
"""
return self._subscription.paused_partitions()
def resume(self, *partitions):
"""Resume fetching from the specified (paused) partitions.
Arguments:
*partitions (TopicPartition): Partitions to resume.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
for partition in partitions:
log.debug("Resuming partition %s", partition)
self._subscription.resume(partition)
def seek(self, partition, offset):
"""Manually specify the fetch offset for a TopicPartition.
Overrides the fetch offsets that the consumer will use on the next
:meth:`~kafka.KafkaConsumer.poll`. If this API is invoked for the same
partition more than once, the latest offset will be used on the next
:meth:`~kafka.KafkaConsumer.poll`.
Note: You may lose data if this API is arbitrarily used in the middle of
consumption to reset the fetch offsets.
Arguments:
partition (TopicPartition): Partition for seek operation
offset (int): Message offset in partition
Raises:
AssertionError: If offset is not an int >= 0; or if partition is not
currently assigned.
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert isinstance(offset, int) and offset >= 0, 'Offset must be >= 0'
assert partition in self._subscription.assigned_partitions(), 'Unassigned partition'
log.debug("Seeking to offset %s for partition %s", offset, partition)
self._subscription.assignment[partition].seek(offset)
def seek_to_beginning(self, *partitions):
"""Seek to the oldest available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to beginning of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST)
def seek_to_end(self, *partitions):
"""Seek to the most recent available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to end of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST)
def subscribe(self, topics=(), pattern=None, listener=None):
"""Subscribe to a list of topics, or a topic regex pattern.
Partitions will be dynamically assigned via a group coordinator.
Topic subscriptions are not incremental: this list will replace the
current assignment (if there is one).
This method is incompatible with :meth:`~kafka.KafkaConsumer.assign`.
Arguments:
topics (list): List of topics for subscription.
pattern (str): Pattern to match available topics. You must provide
either topics or pattern, but not both.
listener (ConsumerRebalanceListener): Optionally include listener
callback, which will be called before and after each rebalance
operation.
As part of group management, the consumer will keep track of the
list of consumers that belong to a particular group and will
trigger a rebalance operation if one of the following events
trigger:
* Number of partitions change for any of the subscribed topics
* Topic is created or deleted
* An existing member of the consumer group dies
* A new member is added to the consumer group
When any of these events are triggered, the provided listener
will be invoked first to indicate that the consumer's assignment
has been revoked, and then again when the new assignment has
been received. Note that this listener will immediately override
any listener set in a previous call to subscribe. It is
guaranteed, however, that the partitions revoked/assigned
through this interface are from topics subscribed in this call.
Raises:
IllegalStateError: If called after previously calling
:meth:`~kafka.KafkaConsumer.assign`.
AssertionError: If neither topics or pattern is provided.
TypeError: If listener is not a ConsumerRebalanceListener.
"""
# SubscriptionState handles error checking
self._subscription.subscribe(topics=topics,
pattern=pattern,
listener=listener)
# Regex will need all topic metadata
if pattern is not None:
self._client.cluster.need_all_topic_metadata = True
self._client.set_topics([])
self._client.cluster.request_update()
log.debug("Subscribed to topic pattern: %s", pattern)
else:
self._client.cluster.need_all_topic_metadata = False
self._client.set_topics(self._subscription.group_subscription())
log.debug("Subscribed to topic(s): %s", topics)
def subscription(self):
"""Get the current topic subscription.
Returns:
set: {topic, ...}
"""
return self._subscription.subscription.copy()
    def unsubscribe(self):
        """Unsubscribe from all topics and clear all assigned partitions."""
        self._subscription.unsubscribe()
        # Leave the consumer group (if any) before dropping topic metadata.
        self._coordinator.close()
        # Stop tracking all-topic metadata and clear the topic set.
        self._client.cluster.need_all_topic_metadata = False
        self._client.set_topics([])
        log.debug("Unsubscribed all topics or patterns and assigned partitions")
def metrics(self, raw=False):
"""Warning: this is an unstable interface.
It may change in future releases without warning"""
if raw:
return self._metrics.metrics
metrics = {}
for k, v in self._metrics.metrics.items():
if k.group not in metrics:
metrics[k.group] = {}
if k.name not in metrics[k.group]:
metrics[k.group][k.name] = {}
metrics[k.group][k.name] = v.value()
return metrics
def offsets_for_times(self, timestamps):
"""Look up the offsets for the given partitions by timestamp. The
returned offset for each partition is the earliest offset whose
timestamp is greater than or equal to the given timestamp in the
corresponding partition.
This is a blocking call. The consumer does not have to be assigned the
partitions.
If the message format version in a partition is before 0.10.0, i.e.
the messages do not have timestamps, ``None`` will be returned for that
partition. ``None`` will also be returned for the partition if there
are no messages in it.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
timestamps (dict): ``{TopicPartition: int}`` mapping from partition
to the timestamp to look up. Unit should be milliseconds since
beginning of the epoch (midnight Jan 1, 1970 (UTC))
Returns:
``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
to the timestamp and offset of the first message with timestamp
greater than or equal to the target timestamp.
Raises:
ValueError: If the target timestamp is negative
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
for tp, ts in timestamps.items():
timestamps[tp] = int(ts)
if ts < 0:
raise ValueError(
"The target time for partition {} is {}. The target time "
"cannot be negative.".format(tp, ts))
return self._fetcher.get_offsets_by_times(
timestamps, self.config['request_timeout_ms'])
def beginning_offsets(self, partitions):
"""Get the first offset for the given partitions.
This method does not change the current consumer position of the
partitions.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
partitions (list): List of TopicPartition instances to fetch
offsets for.
Returns:
``{TopicPartition: int}``: The earliest available offsets for the
given partitions.
Raises:
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms.
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
offsets = self._fetcher.beginning_offsets(
partitions, self.config['request_timeout_ms'])
return offsets
def end_offsets(self, partitions):
"""Get the last offset for the given partitions. The last offset of a
partition is the offset of the upcoming message, i.e. the offset of the
last available message + 1.
This method does not change the current consumer position of the
partitions.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
partitions (list): List of TopicPartition instances to fetch
offsets for.
Returns:
``{TopicPartition: int}``: The end offsets for the given partitions.
Raises:
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
offsets = self._fetcher.end_offsets(
partitions, self.config['request_timeout_ms'])
return offsets
def _use_consumer_group(self):
"""Return True iff this consumer can/should join a broker-coordinated group."""
if self.config['api_version'] < (0, 9):
return False
elif self.config['group_id'] is None:
return False
elif not self._subscription.partitions_auto_assigned():
return False
return True
def _update_fetch_positions(self, partitions):
"""Set the fetch position to the committed position (if there is one)
or reset it using the offset reset policy the user has configured.
Arguments:
partitions (List[TopicPartition]): The partitions that need
updating fetch positions.
Raises:
NoOffsetForPartitionError: If no offset is stored for a given
partition and no offset reset policy is defined.
"""
if (self.config['api_version'] >= (0, 8, 1) and
self.config['group_id'] is not None):
# Refresh commits for all assigned partitions
self._coordinator.refresh_committed_offsets_if_needed()
# Then, do any offset lookups in case some positions are not known
self._fetcher.update_fetch_positions(partitions)
    def _message_generator(self):
        """Yield messages until self._consumer_timeout, driving group
        coordination, position resolution, and fetching in between yields."""
        assert self.assignment() or self.subscription() is not None, 'No topic subscription or manual partition assignment'
        while time.time() < self._consumer_timeout:
            if self._use_consumer_group():
                self._coordinator.ensure_coordinator_known()
                self._coordinator.ensure_active_group()
            # 0.8.2 brokers support kafka-backed offset storage via group coordinator
            elif self.config['group_id'] is not None and self.config['api_version'] >= (0, 8, 2):
                self._coordinator.ensure_coordinator_known()
            # Fetch offsets for any subscribed partitions that we arent tracking yet
            if not self._subscription.has_all_fetch_positions():
                partitions = self._subscription.missing_fetch_positions()
                self._update_fetch_positions(partitions)
            poll_ms = 1000 * (self._consumer_timeout - time.time())
            # Don't block in poll unless there are in-flight fetches to reap.
            if not self._fetcher.in_flight_fetches():
                poll_ms = 0
            self._client.poll(timeout_ms=poll_ms, sleep=True)
            # We need to make sure we at least keep up with scheduled tasks,
            # like heartbeats, auto-commits, and metadata refreshes
            timeout_at = self._next_timeout()
            # Because the consumer client poll does not sleep unless blocking on
            # network IO, we need to explicitly sleep when we know we are idle
            # because we haven't been assigned any partitions to fetch / consume
            if self._use_consumer_group() and not self.assignment():
                sleep_time = max(timeout_at - time.time(), 0)
                if sleep_time > 0 and not self._client.in_flight_request_count():
                    log.debug('No partitions assigned; sleeping for %s', sleep_time)
                    time.sleep(sleep_time)
                    continue
            # Short-circuit the fetch iterator if we are already timed out
            # to avoid any unintentional interaction with fetcher setup
            if time.time() > timeout_at:
                continue
            for msg in self._fetcher:
                yield msg
                if time.time() > timeout_at:
                    log.debug("internal iterator timeout - breaking for poll")
                    break
            # An else block on a for loop only executes if there was no break
            # so this should only be called on a StopIteration from the fetcher
            # We assume that it is safe to init_fetches when fetcher is done
            # i.e., there are no more records stored internally
            else:
                self._fetcher.send_fetches()
    def _next_timeout(self):
        """Return the absolute (epoch-seconds) time of the next internal
        deadline: consumer timeout, next delayed task, or metadata TTL."""
        timeout = min(self._consumer_timeout,
                      self._client._delayed_tasks.next_at() + time.time(),
                      self._client.cluster.ttl() / 1000.0 + time.time())
        # Although the delayed_tasks timeout above should cover processing
        # HeartbeatRequests, it is still possible that HeartbeatResponses
        # are left unprocessed during a long _fetcher iteration without
        # an intermediate poll(). And because tasks are responsible for
        # rescheduling themselves, an unprocessed response will prevent
        # the next heartbeat from being sent. This check should help
        # avoid that.
        if self._use_consumer_group():
            heartbeat = time.time() + self._coordinator.heartbeat.ttl()
            timeout = min(timeout, heartbeat)
        return timeout
    def __iter__(self):  # pylint: disable=non-iterator-returned
        # The consumer is its own iterator; __next__ lazily drives
        # _message_generator().
        return self
def __next__(self):
if not self._iterator:
self._iterator = self._message_generator()
self._set_consumer_timeout()
try:
return next(self._iterator)
except StopIteration:
self._iterator = None
raise
def _set_consumer_timeout(self):
# consumer_timeout_ms can be used to stop iteration early
if self.config['consumer_timeout_ms'] >= 0:
self._consumer_timeout = time.time() + (
self.config['consumer_timeout_ms'] / 1000.0)
    # Old KafkaConsumer methods are deprecated; each stub raises with a
    # pointer to the replacement API.
    def configure(self, **configs):
        # Configuration now happens exclusively in __init__.
        raise NotImplementedError(
            'deprecated -- initialize a new consumer')

    def set_topic_partitions(self, *topics):
        raise NotImplementedError(
            'deprecated -- use subscribe() or assign()')

    def fetch_messages(self):
        raise NotImplementedError(
            'deprecated -- use poll() or iterator interface')

    def get_partition_offsets(self, topic, partition,
                              request_time_ms, max_num_offsets):
        raise NotImplementedError(
            'deprecated -- send an OffsetRequest with KafkaClient')

    def offsets(self, group=None):
        raise NotImplementedError('deprecated -- use committed(partition)')

    def task_done(self, message):
        raise NotImplementedError(
            'deprecated -- commit offsets manually if needed')
| [
"superdkk@gmail.com"
] | superdkk@gmail.com |
6d42aa8011c6d68079539b9748e2a3c3cd6946e7 | 1cccd3d03a9c63950491121a10030efb122a3926 | /cipherwallet/constants.sample.py | 5de054ff22f36b737c6f0617a3640bf565e5a176 | [
"MIT"
] | permissive | drivefast/pycipherwallet | f21a27ff40d673c914b182310fbcb140a12f53b7 | 04e23f6c4291bf61812d8621238658207e2d70f3 | refs/heads/master | 2021-01-10T21:42:42.293677 | 2015-01-09T02:30:51 | 2015-01-09T02:30:51 | 26,985,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,929 | py |
# your customer ID and API secret key, as set on https://cipherwallet.com/dashboard.html
CUSTOMER_ID = "YOUR_CIPHERWALLET_CUSTOMER_ID"
API_SECRET = "YOUR_CIPHERWALLET_API_SECRET"
# API location
API_URL = "http://api.cqr.io"
# preferred hashing method to use on message encryption: md5, sha1, sha256 or sha512
H_METHOD = "sha256"
# how long (in seconds) do we delay a "still waiting for user data" poll response
POLL_DELAY = 2
# service id, always "cipherwallet"
SERVICE_ID = "cipherwallet"
# an alphabet with characters used to generate random strings
ALPHABET = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_@"
# depending on your temporary datastore of choice, uncomment one of the following sections
# and adjust the settings accordingly
# memcached:
#TMP_DATASTORE = 'memcached'; MCD_CONFIG = ['localhost:11211', 'localhost:11212']
# redis:
#TMP_DATASTORE = 'redis'; REDIS_HOST = "localhost"; REDIS_PORT = 6379; REDIS_DB = 0
# plaintext files:
#TMP_DATASTORE = 'sessionfiles'; TMPSTORE_DIR = "/path/to/session/directory/"
# how long are we supposed to retain the information about a QR scanning session
# the value should be slightly larger than the maximum QR time-to-live that you use
CW_SESSION_TIMEOUT = 610
# for logins via QR code scanning, you need to provide access to a SQL database where your users
# information is stored (we're assuming here you are using a SQL database). cipherwallet only
# needs read/write access to a table it creates (cw_logins), so feel free to restrict as needed.
# we use the core sqlalchemy to create an uniform database access layer; for more details about
# sqlalchemy see http://docs.sqlalchemy.org/en/rel_0_8/core/connections.html
# to set the database connection, uncomment and configure one of the lines below
#DB_CONNECTION_STRING = "postgresql+psycopg2://{0}:{1}@server_host:port/database_name"
#DB_CONNECTION_STRING = "mysql+mysqldb://{0}:{1}@server_host:port/database_name"
#DB_CONNECTION_STRING = "oracle+cx_oracle://{0}:{1}@tnsname"
#DB_CONNECTION_STRING = "mssql+pymssql://{0}:{1}@server_host:port/database_name"
#DB_CONNECTION_STRING = "sqlite:////path/to/your/dbfile.db"
DB_CONNECTION_USERNAME = "god"
DB_CONNECTION_PASSWORD = "zzyzx"
# in your database, YOU MUST create a table called 'cw_logins', which will have a 1-1 relationship with
# your users table; something like this (but check the correct syntax for on your SQL server type):
"""
CREATE TABLE cw_logins (
user_id VARCHAR(...) PRIMARY KEY, -- or whatever type your unique user ID is
cw_id VARCHAR(20),
secret VARCHAR(128),
reg_tag CHAR(), -- it's an UUID
hash_method VARCHAR(8), -- can be md5, sha1, sha256
created INTEGER
);
"""
# 'user_id' is the unique identifier of an user in your main users table, and should be declared as
# a primary key and foreign index in your users table; if your SQL server supports it, cascade
# the changes and deletes
# 'cw_id' is the cipherwallet ID assigned to the user
# 'secret' is the secret encryption key assigned to the user; YOU MUST ENCRYPT THIS!
# 'reg_tag' is an identifier that the cipherwallet API maintains; use this identifier when you need
# to remove an user's registration. it is an UUID, so you may use a more appropriate data type if
# your database supports one
# 'hash_method' is the hash type the user will hash their user credentials with, on QR scan logins;
# can be md5, sha1, sha256
# 'created' is the date when the record was created, epoch format (feel free to change this field type
# to a date/time field, if you find it more convenient)
# you should also create an index on cw_id, it will help during your queries
# your user's secret keys must be stored in an encrypted form in the cw_logins table
# we use an AES-256 encryption algorithm for that, with the encryption key below
# the encryption itself comes in play in db-interface.lib.php
# the AES-256 encryption key must be 32-bytes long; example:
#CW_SECRET_ENC_KEY = "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F"
# hint: to easily generate a 32-byte encryption key like needed here, just generate 2 random UUIDs,
# concatenate them, and remove the formatting dashes
OP_SIGNUP = "signup"
OP_LOGIN = "login"
OP_CHECKOUT = "checkout"
OP_REGISTRATION = "reg"
# provide a service descriptor entry in this map for every cipherwallet QR code you are using
# on each entry, you provide:
# - 'operation': the operation type, one of the OP_* constants above
# - 'qr_ttl': a time-to-live for the QR code, in seconds
# - 'callback_url': the URL used by the mobile app to transfer the data back to your web app
# - 'display': a message that gets displayed at the top of the screen, in the mobile app, when
# the user is asked to select the data they want to send; you may provide a string, or
# a function that returns a string (for example, you can customize the message for a
# checkout service, such that it mentions the amount to be charged to the credit card)
# - 'confirmation': a message to be displayed as a popup-box in the mobile app, that informs if
# the last QR code scanning and data transfer operations was successful or not; you may
# provide a string, or a function that returns a string
# the service descriptor parameters specified here will override the ones pre-programmed with the
# the dashboard page
# the 'operation' must be specified; 'qr_ttl' has default and max values for each type of service;
# 'display' is only effective for the signup and checkout services; and 'confirm' is only
# effective for signup, checkout and registration services
# here is an example that contains 4 services: a signup, a registration, a login and a checkout
# commented out values indicate default values
qr_requests = {
'signup-form-qr': {
'operation': OP_SIGNUP,
'qr_ttl': 300,
'display': "Simulate you are signing up for a new account at\\your.website.com",
'confirm': "Your signup data has been submitted.",
},
'login-form-qr': {
'operation': OP_LOGIN,
# 'qr_ttl': 60,
# 'callback_url': "https://thiswebsite.com/cipherwallet/login",
},
'checkout-form-qr': {
'operation': OP_CHECKOUT,
# 'qr_ttl': 120,
# 'callback_url': "https://thiswebsite.com/php-cipherwallet/checkout",
# 'display': get_message_for_cart_value(), # implement this function in hooks.py
'confirm': "Thank you for your purchase.",
},
'reg-qr': {
'operation': OP_REGISTRATION,
# 'qr_ttl': 30,
# 'callback_url': "https://thiswebsite.com/cipherwallet/login",
'confirm': {
'title': "cipherwallet registration",
'message': "Thank you. You may now use cipherwallet to log in to this website.",
},
},
}
| [
"radu@socal.rr.com"
] | radu@socal.rr.com |
959a82f3526c2ae93838acf4582165b449e6fd6a | 7099d19b54afb3be921aefb164d57beec6cf0f21 | /2445.py | f863c3285a9811de36e18f4ed6e2f5453d32a545 | [] | no_license | kpreference/pythonProject | 8b172d0c5a5bc9fc67b5ea0dd8ec6f8bbc11b983 | 48e71495550ecbd524b83217adce1bec00733b1a | refs/heads/master | 2023-09-04T10:39:50.279412 | 2021-11-17T06:10:03 | 2021-11-17T06:10:03 | 366,012,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | n=int(input())
for i in range(1,n+1):
for j in range(i):
print("*",end="")
for k in range(2*(n-i)):
print(" ",end="")
for l in range(i):
print("*",end="")
print()
for m in range(n-1,0,-1):
for o in range(m):
print("*",end="")
for p in range(2*(n-m)):
print(" ",end="")
for q in range(m):
print("*",end="")
print()
| [
"ksh091411@naver.com"
] | ksh091411@naver.com |
c16526cc565c48f7f41dbc963e284d4f5ce44160 | 3e1fcf34eae508a3f3d4668edfb334069a88db3d | /court_scraper/configs.py | 3c97d17d3c3bde34e18c1f667fb59a09be10a102 | [
"ISC"
] | permissive | mscarey/court-scraper | 26d32cb7354b05bb5d5d27a55bf4042e5dde1a4d | e29135331526a11aa5eb0445a9223fc3f7630895 | refs/heads/main | 2023-07-14T20:23:33.488766 | 2020-08-31T14:02:19 | 2020-08-31T14:02:19 | 384,977,976 | 0 | 0 | ISC | 2021-07-11T15:04:57 | 2021-07-11T15:04:57 | null | UTF-8 | Python | false | false | 539 | py | import os
from pathlib import Path
class Configs:
    """Resolve the filesystem locations used by court-scraper.

    Attributes:
        cache_dir (str): Root cache directory; taken from the
            COURT_SCRAPER_DIR environment variable, defaulting to
            ``~/.court-scraper``.
        config_file_path (str): Path to ``config.yaml`` inside cache_dir.
        db_path (str): Path to the ``cases.db`` SQLite file inside cache_dir.
    """

    def __init__(self):
        # os.environ.get with a default replaces the try/except KeyError
        # dance; behavior is identical.
        default_cache = str(Path(os.path.expanduser('~')) / '.court-scraper')
        self.cache_dir = os.environ.get('COURT_SCRAPER_DIR', default_cache)
        base = Path(self.cache_dir)
        self.config_file_path = str(base / 'config.yaml')
        self.db_path = str(base / 'cases.db')
| [
"zstumgoren@gmail.com"
] | zstumgoren@gmail.com |
ca23ea32aa269a5daf4fefa0781ef57d55f08917 | 19c2721a24cf763b6783fd3374cacd31a375e965 | /TwosumTarget.py | feb72c99df1acaf81e4fbf7d3e1ac52b6060c8bb | [] | no_license | aliabbas1031/Pythonexercises | 125d424a8661831740ca9268780b88e9c9ae4e8f | e3f0d33fc812c06885c72b7ee23ea19381b3d9d7 | refs/heads/master | 2021-03-27T05:46:26.412311 | 2020-12-17T16:24:52 | 2020-12-17T16:24:52 | 247,792,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | def two_sum_brute_force(A,arr):
#A = [1,3,4,2,5,6,9,-2,-4,-9]
#arr = 6
for i in range(len(A)-1):
for j in range(i+1,len(A)):
if A[i] + A[j] == arr:
return [A[i],A[j]]
return [0,1]
print(two_sum_brute_force([1,3,4,2,5,6,9,-2,-4,-9], 7)) | [
"noreply@github.com"
] | aliabbas1031.noreply@github.com |
4686304e5272d38d5559b24f2410068350599bea | 9130bdbd90b7a70ac4ae491ddd0d6564c1c733e0 | /venv/lib/python3.8/site-packages/pylsp/python_lsp.py | 78c538c324a0083a40dd1865f74748fc992446cd | [] | no_license | baruwaa12/Projects | 6ca92561fb440c63eb48c9d1114b3fc8fa43f593 | 0d9a7b833f24729095308332b28c1cde63e9414d | refs/heads/main | 2022-10-21T14:13:47.551218 | 2022-10-09T11:03:49 | 2022-10-09T11:03:49 | 160,078,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/aa/42/2f/669662002785de21312365d0e6f7beb46001f06bc2fa9fd2af8f9da7e6 | [
"45532744+baruwaa12@users.noreply.github.com"
] | 45532744+baruwaa12@users.noreply.github.com |
5f2aa8ca47d120e199ee3e9b6210bbc7d474e2f3 | 4a0348ccb890c73ebd88feafafc279af26e05f25 | /django/django_intro/first2/manage.py | 9fdc1b41afe7cc2a027d9ccbcbc59b711bd4fda3 | [] | no_license | wadeeeawwad/python_stack | 00936837103b9f78f66961d88ae3a6233adbbea3 | 6d2c3712c40b035e0d43cc7a27b2e2f48d4a8281 | refs/heads/master | 2023-07-11T14:59:02.617899 | 2021-08-23T11:37:15 | 2021-08-23T11:37:15 | 364,533,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings."""
    # Point Django at this project's settings module unless already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first2.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint: the usual cause is a missing/unactivated
        # virtual environment rather than a broken Django install.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
"wadee_awwad@hotmail.com"
] | wadee_awwad@hotmail.com |
a94d4f6646875930d94d09068b21013e8e11c0b4 | 19d47d47c9614dddcf2f8d744d883a90ade0ce82 | /pynsxt/swagger_client/models/app_info_host_vm_list_in_csv_format.py | c68bd8aec7c8d133e43bc961f5b83387b9a11720 | [] | no_license | darshanhuang1/pynsxt-1 | 9ed7c0da9b3a64e837a26cbbd8b228e811cee823 | fb1091dff1af7f8b8f01aec715682dea60765eb8 | refs/heads/master | 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,356 | py | # coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.app_info_host_vm_csv_record import AppInfoHostVmCsvRecord # noqa: F401,E501
from swagger_client.models.csv_list_result import CsvListResult # noqa: F401,E501
class AppInfoHostVmListInCsvFormat(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually -- regenerate from the OpenAPI spec instead.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> declared Swagger type (used by to_dict()).
    swagger_types = {
        'file_name': 'str',
        'results': 'list[AppInfoHostVmCsvRecord]'
    }

    # Maps attribute name -> JSON key in the API payload.
    attribute_map = {
        'file_name': 'file_name',
        'results': 'results'
    }

    def __init__(self, file_name=None, results=None):  # noqa: E501
        """AppInfoHostVmListInCsvFormat - a model defined in Swagger"""  # noqa: E501
        self._file_name = None
        self._results = None
        self.discriminator = None
        if file_name is not None:
            self.file_name = file_name
        if results is not None:
            self.results = results

    @property
    def file_name(self):
        """Gets the file_name of this AppInfoHostVmListInCsvFormat.  # noqa: E501

        File name set by HTTP server if API returns CSV result as a file.  # noqa: E501

        :return: The file_name of this AppInfoHostVmListInCsvFormat.  # noqa: E501
        :rtype: str
        """
        return self._file_name

    @file_name.setter
    def file_name(self, file_name):
        """Sets the file_name of this AppInfoHostVmListInCsvFormat.

        File name set by HTTP server if API returns CSV result as a file.  # noqa: E501

        :param file_name: The file_name of this AppInfoHostVmListInCsvFormat.  # noqa: E501
        :type: str
        """
        self._file_name = file_name

    @property
    def results(self):
        """Gets the results of this AppInfoHostVmListInCsvFormat.  # noqa: E501

        List of appplications discovered during an application discovery session  # noqa: E501

        :return: The results of this AppInfoHostVmListInCsvFormat.  # noqa: E501
        :rtype: list[AppInfoHostVmCsvRecord]
        """
        return self._results

    @results.setter
    def results(self, results):
        """Sets the results of this AppInfoHostVmListInCsvFormat.

        List of appplications discovered during an application discovery session  # noqa: E501

        :param results: The results of this AppInfoHostVmListInCsvFormat.  # noqa: E501
        :type: list[AppInfoHostVmCsvRecord]
        """
        self._results = results

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AppInfoHostVmListInCsvFormat):
            return False

        # Generated models compare by full attribute dict.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"tcraft@pivotal.io"
] | tcraft@pivotal.io |
1358e9c534f5e5c9bddf63c6019496723641868b | fe57b067d46973b3807e184c385472fa7accac42 | /Django_posgresql/Django_posgresql/settings.py | ab5e126ef6d1eef9825cf82c1e465ae0e1afd493 | [] | no_license | imanursar/REST_APIs_with_Django | 84a6cafe084e7b2e34b8a3994d3dc08a40d4b3bb | c0c3ac35dece0fa528535f9716e65684debbcf93 | refs/heads/master | 2022-12-07T17:40:47.979147 | 2020-08-27T15:49:56 | 2020-08-27T15:49:56 | 286,973,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,565 | py | """
Django settings for Django_posgresql project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in plaintext here — load it from an
# environment variable (e.g. os.environ) before deploying.
SECRET_KEY = 'onvfsn5j8l+%umk08_r&3gftk)=z=&b2i1(3++wx3yt(mfq1z$'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True with an empty ALLOWED_HOSTS is a development-only
# configuration; both must change for production.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Django REST framework
    'rest_framework',
    # Tutorials application
    'tutorials.apps.TutorialsConfig',
    # CORS
    'corsheaders',
]
MIDDLEWARE = [
    # django-cors-headers: CorsMiddleware must be placed before any
    # middleware that can generate responses, notably CommonMiddleware.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # Fix: CommonMiddleware was listed a second time here, making it run
    # twice per request; the duplicate entry has been removed.
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Only the explicitly whitelisted origins below may make CORS requests.
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
    'http://localhost:8081',
)
ROOT_URLCONF = 'Django_posgresql.urls'
# Default Django template configuration (app-level template dirs enabled).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Django_posgresql.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'Django_postgresql',
        'USER': 'postgres',
        # NOTE(review): plaintext database password committed to source —
        # move it to an environment variable or secrets store.
        'PASSWORD': '2468264!',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Jakarta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"imanursar@gmail.com"
] | imanursar@gmail.com |
6f1ce69f66b79c11989426517bab38e317a3e9f1 | 0b63f38c7fb468e478e5be82c685de1b7ddb87e5 | /meiduo/meiduo_mall/meiduo_mall/apps/goods/serializers.py | 5f87ef206f4094af198abe31f08914950ba75438 | [
"MIT"
] | permissive | Highsir/Simplestore | fcf5ef81a754604c0953a3c1433a7bc09290c121 | 5fc4d9930b0cd1e115f8c6ebf51cd9e28922d263 | refs/heads/master | 2020-09-01T07:55:45.362457 | 2019-11-01T04:55:48 | 2019-11-01T04:55:48 | 218,913,913 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | from drf_haystack.serializers import HaystackSerializer
from rest_framework import serializers
from goods.models import GoodsCategory, GoodsChannel, SKU
from goods.search_indexes import SKUIndex
class CategorySerializer(serializers.ModelSerializer):
    """Serializer for a goods category (id and display name only)."""
    class Meta:
        model = GoodsCategory
        fields = ('id','name')
class ChannelSerializer(serializers.ModelSerializer):
    """Serializer for a goods channel with its nested category.

    Fix: a declared nested serializer must be an *instance*; the bare
    class (``category = CategorySerializer``) is not a DRF Field, so the
    declaration was silently ignored and ``category`` fell back to the
    model's default representation.
    """
    category = CategorySerializer()
    class Meta:
        model = GoodsChannel
        fields = ('category','url')
class SKUSerializer(serializers.ModelSerializer):
    """
    Serializer producing the basic info of a product SKU.
    """
    class Meta:
        # Fields serialized for a SKU listing.
        model = SKU
        fields = ('id','name','price','default_image_url','comments')
class SKUIndexSerializer(HaystackSerializer):
    """Serializer for SKU search-index (Haystack) query results."""
    class Meta:
        index_classes = [SKUIndex]
        fields = ('text', 'id', 'name', 'price', 'default_image_url', 'comments')
"highsir421@163.com"
] | highsir421@163.com |
66861fb7bbc255b95f8d51fc314c98a9645eb0c1 | 70b07fb6516bfc6f05ce8ced8796e135fceb34d6 | /transactionid.py | a87080b9c36e3036751cec9c37eda066d9ec2395 | [] | no_license | MirandaLv/Script_Level0 | ebada9080f7a13f4e6db0fe892e5b72588f5dfc9 | 25454a3add5f36c9c4fbed5ad1e06eab2c884f50 | refs/heads/master | 2021-01-10T16:23:35.376122 | 2015-06-04T15:23:57 | 2015-06-04T15:23:57 | 36,877,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # Create a UUID for transaction table
# Name: Miranda Lv
# Date: 2/16/2015
import uuid  # kept: documents the earlier uuid4-based approach noted below
from pandas import DataFrame as df  # NOTE(review): imported but unused here
import pandas as pd
import shortuuid
# Input/output paths (hard-coded for the original environment).
inf = r"/home/snowhui/itpir_repo/minerva-colombia-geocoded-dataset/products/Level_0/new/transactions_new.tsv.csv"
outf = r"/home/snowhui/itpir_repo/minerva-colombia-geocoded-dataset/products/Level_0/new/transactions.tsv"
data = pd.read_csv(inf, sep='\t')
idlist = list(data.project_id)
# One short, URL-safe unique id per project row.
# (uuid.uuid4() was the original approach; shortuuid gives shorter ids.)
transactionid = [shortuuid.uuid() for _ in idlist]
data['transaction_id'] = transactionid
data.to_csv(outf, sep='\t', encoding='utf-8', index=False)
| [
"zhonghui.lv@gmail.com"
] | zhonghui.lv@gmail.com |
1979d64a1540d510194a1064ab3dd19ceaa3585b | b511bcf3b3c8724a321caa95f381956f56c81197 | /collective/wpadmin/widgets/draft.py | c1c4dd4bfba27029e4bbf9f9d56d38ede2eb8eca | [] | no_license | toutpt/collective.wpadmin | 6957f8fadd5f62a12e4b5cd3eb40794874712cea | b5f2384ff2421f1529f7f844d75c1cb4073ac959 | refs/heads/master | 2016-08-05T00:30:36.097097 | 2013-01-18T10:37:26 | 2013-01-18T10:37:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | from zope import component
from plone import api
from plone.registry.interfaces import IRegistry
from collective.wpadmin.widgets import widget
from collective.wpadmin import i18n
_ = i18n.messageFactory
class Draft(widget.Widget):
    """Dashboard widget listing the current user's unpublished blog posts."""
    name = "draft"
    title = _(u"Draft")
    content_template_name = "draft.pt"
    def get_drafts(self):
        """Query the catalog for private posts created by the current user."""
        registry = component.getUtility(IRegistry)
        registry_key = 'collective.wpadmin.settings.WPAdminSettings.blog_type'
        blog_type = registry.get(registry_key, 'News Item')
        query = self.get_query()
        query.update({
            'review_state': 'private',
            'Creator': api.user.get_current().getId(),
            'portal_type': blog_type,
        })
        return self.query_catalog(query)
| [
"toutpt@gmail.com"
] | toutpt@gmail.com |
8a31191831ee69ccd5d9018a724e1d82d3351362 | b4b99a973182a1caf7c4954b7e884365001e7bc8 | /02-create_planet_dataframe.py | 4aec07e78040fbcd19be9053d4439ddf10d370f6 | [] | no_license | DeSouzaSR/Vulcan | 69be92e1b904e1e1d29bf0088628906cf159310f | 37b0d336d30dc4d326a44a048b2aa15bfcc12fb8 | refs/heads/master | 2021-01-23T00:44:54.207114 | 2017-06-13T21:06:53 | 2017-06-13T21:06:53 | 92,839,515 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,192 | py | #!/usr/bin/env python
# coding: utf-8
# Import modules
import os
import sys
import yaml
import pandas as pd
import numpy as np
import rebound
from oe2pv import orbel_el2xv
from read_config_file import read_config_file
# Transforms orbel_el2xv in vectorized function
orbel_el2xv_vec = np.vectorize(orbel_el2xv)
def main():
    """Build one DataFrame per planet and dump each to '<name>.csv'.

    Terrestrial planets get randomized orbital elements that are converted
    to state vectors via the vectorized orbel_el2xv; giant planets take
    positions and velocities from a rebound simulation (NASA data).

    NOTE(review): the per-planet frames are created and updated via exec()
    on the planet *name* strings from the config; a dict of DataFrames
    would be safer, but the exec approach is preserved here.
    """
    #Read config file
    config = read_config_file("config.yaml")
    # Read variable in config file
    planets_names = config["planets_names"]
    vulcans_variants = len(config["vulcans_semi_axis"])
    vulcans_clones = config["vulcans_clones"]
    # Mass of the Sum [kg]
    mass_sun_kg = config["mass_sun_kg"]
    # Mass of the Sun, considering G = 1
    mass_sun_grav = config["mass_sun_grav"]
    # Conic section is ellipse # Constant used in oe2pv function
    ialpha = config["ialpha"]
    # Gravitational factor of the Sun
    gm = config["gm"]
    # Initial dataframe
    for i in planets_names:
        # Create raw dataframe
        exec("{0} = pd.DataFrame(config['{0}'], index = [0])".format(i))
    # Create gravitational mass
    for i in planets_names:
        exec("{0}['mass_grav'] = {0}['mass'] * mass_sun_grav / mass_sun_kg".format(i))
    # Create gmpl column
    for i in planets_names:
        exec("{0}['gmpl'] = {0}['mass_grav'] + gm".format(i))
    # Replicate initial values in each simulate
    for i in planets_names:
        exec("{0} = {0}.append([{0}] * (vulcans_variants * vulcans_clones - 1),\
            ignore_index=True)".format(i))
    # Data for terrestrial planets
    terrestrial = planets_names[0:4]
    # Create random eccentricity
    # Usin numpy.random.ranf. For range = (a,b): (b - a) * random_sample() + a
    for i in terrestrial:
        exec("{0}['e'] = 0.01 * np.random.ranf((vulcans_variants * \
            vulcans_clones,))".format(i))
    # Create random inclination
    # Usin numpy.random.ranf. For range = (a,b): (b - a) * random_sample() + a
    for i in terrestrial:
        exec("{0}['inc'] = np.deg2rad(0.01 * np.random.ranf((vulcans_variants *\
            vulcans_clones,)))".format(i))
    # Create capom angle
    for i in terrestrial:
        exec("{0}['capom'] = np.deg2rad(np.random.randint(0, 361, \
            vulcans_variants * vulcans_clones))".format(i))
    # Create omega angle
    for i in terrestrial:
        exec("{0}['omega'] = np.deg2rad(np.random.randint(0, 361,\
            vulcans_variants * vulcans_clones))".format(i))
    # Create M angle - Mean Anomaly
    for i in terrestrial:
        exec("{0}['capm'] = np.deg2rad(np.random.randint(0, 361,\
            vulcans_variants * vulcans_clones))".format(i))
    # Create postions and velocities
    for i in terrestrial:
        exec('x, y, z, vx, vy, vz = orbel_el2xv_vec({0}["gmpl"],\
            ialpha,{0}["a"], {0}["e"], {0}["inc"], {0}["capom"],\
            {0}["omega"],{0}["capm"])'.format(i))
        for j in ['x', 'y', 'z', 'vx', 'vy', 'vz']:
            exec("{0}['{1}'] = {1} ".format(i, j))
    # Data for giants planets
    giants = planets_names[4:8]
    sim = rebound.Simulation()
    for i in giants:
        sim.add(i) # Read data from NASA
    # for j in giants:
    #     for p in sim.particles:
    #         exec("{0}['x'] = {1}".format(j,p.x))
    #         exec("{0}['y'] = {1}".format(j,p.y))
    #         exec("{0}['z'] = {1}".format(j,p.z))
    #         exec("{0}['vx'] = {1}".format(j,p.vx))
    #         exec("{0}['vy'] = {1}".format(j,p.vy))
    #         exec("{0}['vz'] = {1}".format(j,p.vz))
    # One pass per coordinate: pair each giant with its rebound particle.
    for j, p in zip(giants, sim.particles):
        exec("{0}['x'] = {1}".format(j,p.x))
    for j, p in zip(giants, sim.particles):
        exec("{0}['y'] = {1}".format(j,p.y))
    for j, p in zip(giants, sim.particles):
        exec("{0}['z'] = {1}".format(j,p.z))
    for j, p in zip(giants, sim.particles):
        exec("{0}['vx'] = {1}".format(j,p.vx))
    for j, p in zip(giants, sim.particles):
        exec("{0}['vy'] = {1}".format(j,p.vy))
    for j, p in zip(giants, sim.particles):
        exec("{0}['vz'] = {1}".format(j,p.vz))
    # Save planet dataframe
    for i in planets_names:
        exec("{0}.to_csv('{0}.csv', index=False)".format(i))
if __name__ == '__main__':
main() | [
"sandro.fisica@gmail.com"
] | sandro.fisica@gmail.com |
309fe09df642bd2ce4d1fa2dc6a854f6a5564c61 | 3b4ba450ed0ae3b52c74d529fefe43210dc49b84 | /example_app/strings_demo.py | 0738a6c8be98ba2f5298cb86003df7b9a1a5899f | [
"BSD-3-Clause"
] | permissive | MongoEngine/flask-mongoengine | 477d3ed7f02be2f9f543c7d54f168facdab95dd6 | d4526139cb1e2e94111ab7de96bb629d574c1690 | refs/heads/master | 2023-08-23T12:14:31.087462 | 2022-08-16T14:02:03 | 2022-08-16T14:02:03 | 4,335,022 | 721 | 234 | NOASSERTION | 2023-08-15T10:52:24 | 2012-05-15T11:33:26 | Python | UTF-8 | Python | false | false | 1,056 | py | """Strings and strings related fields demo model."""
import re
from example_app.models import db
from flask_mongoengine.wtf import fields as mongo_fields
class StringsDemoModel(db.Document):
    """Documentation example model."""
    # Unconstrained free-text field.
    string_field = db.StringField()
    # Value must match an https:// URL pattern.
    regexp_string_field = db.StringField(
        regex=re.compile(
            r"^(https:\/\/)[\w.-]+(?:\.[\w\.-]+)+[\w\-\._~:/?#[\]@!\$&'\(\)\*\+,;=]+$"
        )
    )
    # Requires at least 5 characters.
    sized_string_field = db.StringField(min_length=5)
    # Uses MongoTelField as the generated WTForms field class.
    tel_field = db.StringField(wtf_field_class=mongo_fields.MongoTelField)
    # Uses MongoPasswordField; mandatory and at least 5 characters.
    password_field = db.StringField(
        wtf_field_class=mongo_fields.MongoPasswordField,
        required=True,
        min_length=5,
    )
    email_field = db.EmailField()
    url_field = db.URLField()
StringsDemoForm = StringsDemoModel.to_wtf_form()
def strings_demo_view(pk=None):
    """Render the generic demo page for :class:`StringsDemoModel`."""
    from example_app.views import demo_view
    view_kwargs = {
        "model": StringsDemoModel,
        "view_name": strings_demo_view.__name__,
        "pk": pk,
    }
    return demo_view(**view_kwargs)
| [
"ashpak@ashpak.ru"
] | ashpak@ashpak.ru |
54586360599e7dd218b73f42c67c23d7636413ad | 3c80fe9029065d47a5c16f4d2f6b55e6d7876c6e | /arrivalAndLeaveMonitor/arrivalMonitor.py | 125e1e78f10f2b752e9bad872f7ccf355a81abb1 | [] | no_license | veagseass/matrix | 6b296373d3773a1925fb5eaab7c09b9fc456cc83 | d65c51e2b32f9d399005f3312d0271b9fe731aec | refs/heads/master | 2020-07-30T06:07:43.072521 | 2019-10-01T12:05:22 | 2019-10-01T12:05:22 | 210,112,917 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,970 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
**********************************************************************
This moudle can monitor arrival list
and when a new car arrivaes,it pops up a Dialog window
**********************************************************************
version:1.1.0 ๆฐๅข็ๆงไธv6็ธ็ปๅ
add V6 obj in ArrivalMonitor class
----------------------------------------------------------------------
version:1.1.1 ๅฎ้
ๅฐ่ฝฆ็ๆงไผๅจๅฏๅจๆถๅ
่ทๅไธๆฌก
----------------------------------------------------------------------
author:dingjian
last date:2019-1-11 10:53
version:1.1.1
'''
import sys
sys.path.append("..")
from yunli import yunli
from v6web import v6web
import tkinter as tk
import tkinter.messagebox as messagebox
import time
import json
import threading
def getArrivalInfoByRecordList(recordList,center):
'''
'ไป่ๅค่ฎฐๅฝไธญ่ทๅๅฐ่พพไฟกๆฏ '
'ๅฆๆ็ฆ็ๆ่
ๅๆจๆๅก ่ฟๅ(True,scanTime)'
'ๅฆๆๆฒกๆ่ฟๆธฏ ่ฟๅ(False,None)'
recordList ่ๅค่ฎฐๅฝ
center ๆฌ็ซ
return (True,eachRecord) or (False,None)
'''
if center is not None:
if '้้
็ซ' in center:
center = center
elif 'ๅๆจ' in center:
center = center
else:
center += 'ๅๆจ'
else:
print('getArrivalInfoByRecordList center error')
raise Exception('can not be None')
if recordList is not None:
if len(recordList) != 0:
for eachRecord in recordList:
if eachRecord['inout'] == 'IN' and (eachRecord['scanType'] == 'DRIVER_PLATFORM' or eachRecord['scanType'] == 'CLIENT') and eachRecord['nodeName'] == center:
return (True,eachRecord)
return (False,None)
else:
print('getArrivalInfoByRecordList recordList no record')
return (False,None)
else:
print('getArrivalInfoByRecordList recordList is None')
raise Exception('can not be None')
def extractListFromPlanArrivalList(arrivalList):
    """Collect the not-yet-arrived entries from planned-arrival JSON pages.

    Each element of ``arrivalList`` is a JSON string holding
    ``pageList.list``; only items whose ``type`` marks them as not yet
    arrived are kept.  Returns ``None`` (after logging) for ``None`` input.
    """
    if arrivalList is None:
        print('extractListFromPlanArrivalList fail')
        return None
    pending = []
    for page_json in arrivalList:
        page = json.loads(page_json)
        for item in page['pageList']['list']:
            # Keep only entries that have not arrived yet.
            if item['type'] == 'ๆชๅฐ่พพ':
                pending.append(item)
    return pending
def extractListFromActualArrivalList(arrivalList):
    """Flatten a list of actual-arrival JSON pages into one list of records.

    ['{"pageList": {"list": [...]}}', ...]  ->  [{...}, {...}, ...]
    Returns ``None`` (after logging) for ``None`` input.
    """
    if arrivalList is None:
        print('extractListFromActualArrivalList fail')
        return None
    records = []
    for page_json in arrivalList:
        records.extend(json.loads(page_json)['pageList']['list'])
    return records
def isItemInArrivalList(item,ArrivalList):
    """Return True when a record with the same jobCode as *item* exists.

    Only the job number is compared (an earlier revision also compared
    scanTime).  Raises when either argument is ``None``; an empty list
    yields False.
    """
    if item is None or ArrivalList is None:
        raise Exception('can not be None')
    return any(record['jobCode'] == item['jobCode'] for record in ArrivalList)
'''
def popWindow():
def return_callback(event):
print('quit...')
print(entry.get())
root.quit()
def close_callback():
print('message', 'no click...')
root.quit()
root = tkinter.Tk(className='title')
root.wm_attributes('-topmost', 1)
screenwidth, screenheight = root.maxsize()
width = 300
height = 100
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
root.geometry(size)
root.resizable(0, 0)
lable = tkinter.Label(root, height=2)
lable['text'] = 'message'
lable.pack()
entry = tkinter.Entry(root)
entry.bind('<Return>', return_callback)
entry.pack()
btn = tkinter.Button(root)
btn.pack()
entry.focus_set()
root.protocol("WM_DELETE_WINDOW", close_callback)
root.mainloop()
str = entry.get()
root.destroy()
'''
class ArrivalMonitor:
'''
main class
'''
#็ๆง็ๅๆจ
center = None
#ArrivalMonitor็ฑปไธญ็Yunliๅฏน่ฑก
yunli = None
#ๅผๅง็ๆง็ๆถ้ด๏ผๅจstartMonitor()ไธญ่ขซ่ตๅผ๏ผๅชๅจ็ฑปๅฎไพๅๅนถๅผๅง็ๆงๆถ่ตๅผไธๆฌก
firstStartMonitorTimeLong = None
#ๆฏๅฆๅฏๅจๅฎ้
ๅฐ่พพ็ๆง
isMonitorWithActualArrivalList = False
#ๆฏๅฆๅฏๅจ่ฎกๅๅฐ่พพ็ๆง
isMonitorWithPlanArrivalList = False
#ๅๅงๅฐ่พพๅ่กจ
firstStartArrivalList = None
#็จไบๅๆดพๅทฅๅ็v6web
v6 = None
def __init__(self,center=None,mYunli=None,mV6=None,userName=None,psw=None):
'''
__init__()ๅๅงๅๅฝๆฐ
'ๅๅงๅArrivalMonitorไธญ็yunli'
'ๅฆๆไผ ๅไธบ็ฉบ๏ผๅ่ชๅทฑๅฎไพๅ'
center ็ๆง็ๅๆจ
mYunli ไผ ๅ็yunliๅฏน่ฑก ๅฆๆไผ ๅไธบ็ฉบ๏ผๅ่ชๅทฑๅฎไพๅ
mV6 ็จไบๅๆดพๅทฅๅ็v6web
userName ่ฟๅ็userName
psw ่ฟๅ็psw
'''
if center is not None:
self.center = center
if mYunli is None:
self.yunli = yunli.Yunli(userName,psw)
self.yunli.login()
if self.yunli.testIfLogin() == True:
print('New Yunli success')
elif mYunli is not None:
self.yunli = mYunli
if self.yunli.testIfLogin() == False:
self.yunli.login()
else:
print('Parameter Yunli success')
if mV6 is None:
self.v6 = v6web.V6()
self.v6.loginWithWindow()
if self.v6.testIfLogin():
if self.v6.workTeamList == None:
print('่ทๅ็ญ็ปไธญ...')
self.v6.workTeamList = self.v6.getCenterWorkTeam()
if self.v6.workTeamList is not None:
print('ๅ
ฑ่ทๅ%dไธช็ญ็ป'%len(self.v6.workTeamList))
self.center = self.v6.centerName
else:
print('่ทๅ็ญ็ปๅคฑ่ดฅ...')
self.v6 = None
else:
print('่ทๅ็ญ็ปๅคฑ่ดฅ...')
self.v6 = None
else:
self.v6 = mV6
self.v6.loginWithWindow()
if self.v6.workTeamList == None:
print('่ทๅ็ญ็ปไธญ...')
self.v6.workTeamList = self.v6.getCenterWorkTeam()
if self.v6.workTeamList is not None:
print('ๅ
ฑ่ทๅ%dไธช็ญ็ป'%len(self.v6.workTeamList))
self.center = self.v6.centerName
else:
print('่ทๅ็ญ็ปๅคฑ่ดฅ...')
self.v6 = None
def monitor(self,dataList=None):
'''
not ok
'''
if dataList is not None:
self.firstStartArrivalList=extractListFromActualArrivalList(dataList)
nowArrivalList = []
nowArrivalList = extractListFromActualArrivalList(self.yunli.getActualArrivalList(thisCenter=self.center,actualTimeBegin=self.firstStartMonitorTimeLong,actualTimeEnd=None))
#print(nowArrivalList)
if nowArrivalList is not None and self.firstStartArrivalList is not None:
for each in nowArrivalList:
if isItemInArrivalList(each, self.firstStartArrivalList) == False:
#print(each)
t = threading.Thread(target=self.popWindow)
t.start()
time.sleep(delay)
else:
pass
def startMonitorWithActualArrivalList(self,delaySecond=None,updatePeriodMinute=None):
'''
ArrivalMonitor With ActualArrivalList main loop
delaySecond ๅทๆฐ้ด้
updatePeriodMinute ็ณป็ปๆดๆฐ็ถๆ้ด้
'็ฑไบ่ฐๅบฆ็ณป็ป็ๅ่กจๆฏๆด็นๅทๆฐ๏ผ0ๆ5ๅ้ๆด็นๅทๆฐ๏ผๅ ๆญคๆด็นๆถๆ็ๆง'
'ๅฎ้
ๅฐ่พพๅ่กจ็ๆงไธปๅพช็ฏ'
'''
if delaySecond is None:
delaySecond = 20
if updatePeriodMinute is None:
updatePeriodMinute = 1
#ๅๅงๅๅผๅงๆถ้ด
if self.firstStartMonitorTimeLong is None:
self.firstStartMonitorTimeLong = int(yunli.getCurrentLongTime())
#ๅๅงๅๅ่กจ
if self.firstStartArrivalList is None:
self.firstStartArrivalList=extractListFromActualArrivalList(self.yunli.getActualArrivalList(thisCenter=self.center,actualTimeBegin=self.firstStartMonitorTimeLong-5*60*1000,actualTimeEnd=None))
#print(self.firstStartArrivalList)
self.isMonitorWithActualArrivalList = True
print("start monitor with ActualArrivalList... ")
#็ฌฌไธๆฌกๅฏๅจ่ชๅทฑๅผๅง่ทๅไธๆฌก
nowArrivalList = extractListFromActualArrivalList(self.yunli.getActualArrivalList(thisCenter=self.center,actualTimeBegin=None,actualTimeEnd=None))
if nowArrivalList is not None and self.firstStartArrivalList is not None:
for each in nowArrivalList:
if isItemInArrivalList(each, self.firstStartArrivalList) == False:
self.firstStartArrivalList.append(each)
task = threading.Thread(target=self.popWindow,args=(each,None))
task.start()
time.sleep(1)
#่ทๅๅฎๅ๏ผๅผๅงไธปๅพช็ฏ
while self.isMonitorWithActualArrivalList == True:
nowArrivalList = None
nowTimeLong = int(yunli.getCurrentLongTime())
nowTimeStr = yunli.parseLongTimeToDateString(nowTimeLong)
nowMinute = int(nowTimeStr.split(":")[1])
#print(nowTimeStr)
if nowMinute % updatePeriodMinute == 0:
#print("geting....")
nowArrivalList = extractListFromActualArrivalList(self.yunli.getActualArrivalList(thisCenter=self.center,actualTimeBegin=None,actualTimeEnd=None))
if nowArrivalList is not None and self.firstStartArrivalList is not None:
for each in nowArrivalList:
if isItemInArrivalList(each, self.firstStartArrivalList) == False:
#print(each)
self.firstStartArrivalList.append(each)
task = threading.Thread(target=self.popWindow,args=(each,None))
task.start()
time.sleep(1)
else:
print("start monitor with ActualArrivalList fail")
print("reStart")
self.startMonitorWithActualArrivalList(delaySecond=delaySecond,updatePeriodMinute=updatePeriodMinute)
time.sleep(delaySecond)
else:
time.sleep(delaySecond)
def startMonitorWithPlanArrivalList(self,delaySecond=None):
'''
ArrivalMonitor With ActualPlanList main loop
delaySecond ๅทๆฐ้ด้
'่ฎกๅๅฐ่พพๅ่กจ็ๆงไธปๅพช็ฏ'
'''
if delaySecond is None:
delaySecond = 5
#ๅๅงๅๅ่กจ
if self.firstStartArrivalList is None:
self.firstStartArrivalList = []
self.isMonitorWithPlanArrivalList = True
print("start monitor with PlanArrivalList... ")
while self.isMonitorWithPlanArrivalList == True:
#ๅๅพๆชๅฐ่พพ่ฎกๅๅ่กจ
planList=extractListFromPlanArrivalList(self.yunli.getPlanArrivalList(thisCenter=self.center,planArrTimeBegin=None,planArrTimeEnd=None))
#print(planList)
#print(len(planList))
if planList is not None:
for each in planList:
task = threading.Thread(target=self.popWindowIfArrival,args=(each,))
task.start()
time.sleep(delaySecond)
def startMonitor(self,ActualDelay=None,ActualUpdatePeriod=None,planDelay=None):
t1 = threading.Thread(target=monitor.startMonitorWithActualArrivalList,args=(ActualDelay,ActualUpdatePeriod))
t2 = threading.Thread(target=monitor.startMonitorWithPlanArrivalList,args=(planDelay,))
t1.start()
t2.start()
    def stopMonitor(self):
        """Signal both monitoring loops to exit on their next poll."""
        print("stop monitor")
        self.isMonitorWithActualArrivalList = False
        self.isMonitorWithPlanArrivalList = False
def popWindow(self,eachInfo,scanTime=None):
'''
eachInfo ้่ฆๅผนๆกๆ็คบ็ไปปๅกๅๅงไฟกๆฏ
scanTime ๅฎ้
่ฟๆธฏๆซๆๆถ้ด ๅฆๆไธบNone ๅๅจeachInfoไธญ่ทๅ
'ๆพ็คบๆ็คบๆก'
'''
#print('popWindow start')
app = tk.Tk()
app.wm_attributes('-topmost', 1)
screenwidth, screenheight = app.maxsize()
width = 300
height = 250
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width)/2, (screenheight - height)/2)
app.geometry(size)
app.resizable(width=False, height=False)
app.title("ๅฐ่พพ้ข่ญฆ")
def buttonCallBack():
stringInEntry = entry.get()
if stringInEntry == '':
app.destroy()
return
scanCode = eachInfo['pinCode']
workTeamCode = stringInEntry
if self.v6.assignUnloadTaskByScanCode(scanCode,workTeamCode):
messagebox.showinfo('ๆ็คบ', 'ๅๆดพๆๅ')
#self.popMessageBox('ๆ็คบ', 'ๅๆดพๆๅ')
#win32api.MessageBox(0, "ๅๆดพๆๅ", "ๆ็คบ",win32con.MB_OK)
app.destroy()
#print('ๅๆดพๆๅ')
else:
messagebox.showinfo('ๆ็คบ', 'ๅๆดพๅคฑ่ดฅ')
#win32api.MessageBox(0, "ๅๆดพๅคฑ่ดฅ", "ๆ็คบ",win32con.MB_OK)
#handle = win32gui.FindWindow("ๆ็คบ", None)
#print(handle)
#win32gui.SetForegroundWindow (handle)
#win32gui.SetWindowPos(handle, win32con.HWND_TOPMOST, 0,0,0,0, win32con.SWP_NOMOVE | win32con.SWP_NOACTIVATE| win32con.SWP_NOOWNERZORDER|win32con.SWP_SHOWWINDOW)
#popMessageBox('ๆ็คบ', 'ๅๆดพๅคฑ่ดฅ')
app.destroy()
time.sleep(0.5)
if eachInfo['type'] == 'ๆชๅฐ่พพ':
self.popWindow(eachInfo, scanTime=None)
else:
self.popWindow(eachInfo, scanTime)
#self.popWindowIfArrival(eachItem)
#print('ๅๆดพๅคฑ่ดฅ')
def entryReturnCallBack(event):
buttonCallBack()
label = tk.Label(app,text=eachInfo['laneName']+'ๅฐไบ',font=('้ปไฝ',16),pady=20)
label.pack()
scanTimeStr = ''
if scanTime is not None:
scanTimeStr = yunli.parseLongTimeToDateString(int(scanTime))
else:
scanTimeStr = yunli.parseLongTimeToDateString(int(eachInfo['scanTime']))
textStr = '่ฝฆ็บฟ:'+eachInfo['laneName']+'\n'\
+'ไปปๅกๅ๏ผ'+eachInfo['jobCode']+'\n'\
+'่ๅค็ ๏ผ'+eachInfo['pinCode']+'\n'\
+'่ฝฆ็๏ผ'+eachInfo['licensePlate']+' ' + yunli.getValueInDic(eachInfo,'trailerLicensePlate')+'\n'\
+'ๆๅกๆถ้ด๏ผ'+scanTimeStr
text = tk.Text(app,font=('ๅฎไฝ',10),height=6)
text.insert(tk.INSERT, textStr)
text.config(state=tk.DISABLED)
text.pack()
frame = tk.Frame(app)
label2 = tk.Label(frame,text="ๅๆดพๅธ่ฝฆๅทฅๅ๏ผ")
label2.pack(side=tk.LEFT)
entry = tk.Entry(frame)
entry.bind('<Return>', entryReturnCallBack)
entry.pack(side=tk.RIGHT)
frame.pack(pady=20)
button = tk.Button(app,text='็กฎๅฎ',font=('้ปไฝ',14),command=buttonCallBack)
button.pack()
app.mainloop()
def popWindowIfArrival(self,eachItem):
'''
eachItem ่ฎกๅไฝ่ฟๆช่ฟๆธฏ็ไปปๅก
'่ฎกๅ่ฟๆธฏๅ่กจไธญ ๆช่ฟๆธฏ็ไปปๅก ๅฆๆๆฃๆฅๅฐๅจๆฌ็ซๆ็ฆ็ๆๅกๆ่
ๅๆจๆๅก่ฎฐๅฝ๏ผๅๅผนๆกๆ้'
'''
jobCode = eachItem['jobCode']
recordList = self.yunli.getClientBarCodeRecordListByJobCode(jobCode)
arrivalInfo = getArrivalInfoByRecordList(recordList,self.center)
if arrivalInfo[0]:
eachItem['scanTime'] = arrivalInfo[1]['scanTime']
if arrivalInfo[0] == True and isItemInArrivalList(eachItem, self.firstStartArrivalList) == False:
#print(recordList)
self.firstStartArrivalList.append(eachItem)
self.popWindow(eachItem,arrivalInfo[1]['scanTime'])
if __name__ == "__main__":
#popWindow()
monitor = ArrivalMonitor(center=None,userName='BG269073',psw='123456789Mm')
#monitor.startMonitorWithActualArrivalList(delaySecond=3,updatePeriodMinute=1)
'''
t1 = threading.Thread(target=monitor.startMonitorWithPlanArrivalList)
t2 = threading.Thread(target=monitor.startMonitorWithActualArrivalList)
t1.start()
t2.start()
'''
if monitor.v6 is not None:
t = threading.Thread(target=monitor.startMonitor)
t.start()
| [
"noreply@github.com"
] | veagseass.noreply@github.com |
8133abf5f7c5e4db646298fa6d4710fad9243d12 | ebe68411c9dd36fe86f54325c3b69b2927792c64 | /controlevue.py | 747580e13de47b68105a1eb7e755f393cc991596 | [] | no_license | vernichon/openerp_tools | fc18def18be9e41b31bd0678732e8823ac8b35e4 | 6f88f76e67e85937876e1016fa11441b077ad7cf | refs/heads/master | 2021-01-01T15:50:59.088200 | 2014-03-15T08:46:53 | 2014-03-15T08:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,889 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import xmlrpclib
from optparse import OptionParser
from odf.table import Table, TableColumn, TableRow, TableCell
from odf.opendocument import OpenDocumentSpreadsheet
def addcell(tr, val, lestyle):
    """Append a text cell with style *lestyle* containing *val* to row *tr*.

    String booleans are localised to French: 'True' -> "Oui",
    'False' -> "Non".
    """
    text = {'True': "Oui", 'False': "Non"}.get(val, val)
    cell = TableCell(stylename=lestyle)
    tr.addElement(cell)
    cell.addElement(P(text=text))
def addcelldate(tr, val, stylename="dcs"):
    """Append a date-valued cell (ISO date string *val*) to row *tr*.

    Fix: removed a stray bare ``TableCell()`` call that created an
    unused, never-attached element.
    """
    tc = TableCell(valuetype='date', datevalue=val, stylename=stylename)
    tr.addElement(tc)
def cellpos(row, col):
    """Return a dict describing the spreadsheet cell at (row, col).

    Result keys:
      'value'   -> the cell's text, or False when the cell is empty/missing
      'formule' -> (only when present) the cell formula with its 'of:='
                   prefix stripped
    NOTE(review): relies on a module-level ``rows`` sequence of table rows
    that is not defined anywhere in this script — confirm the caller
    injects it before use.
    """
    global rows
    cells = rows[row].getElementsByType(TableCell)
    retour = {}
    if col > (len(cells) - 1):
        # Column index beyond the last populated cell of this row.
        retour['value'] = False
        return retour
    cell = cells[col]
    res = cell.getElementsByType(P)
    retour = {}
    if len(res) > 0:
        res = cell.getElementsByType(P)[0].firstChild
        #print dir(res)
        # odf text nodes expose their payload via .data; other nodes do not.
        if 'data' in dir(res):
            retour['value'] = res.data
            if 'table:formula' in cell.attributes:
                retour['formule'] = cell.getAttribute('formula').replace('of:=', '')
            return retour
        else:
            retour['value'] = False
    else:
        retour['value'] = False
    return retour
# --- Spreadsheet skeleton ----------------------------------------------
# Fix: Style/TextProperties/TableColumnProperties, DateStyle and P were
# referenced below but never imported; imports added before first use.
from odf.style import Style, TextProperties, TableColumnProperties
from odf.number import DateStyle
from odf.text import P

calc = OpenDocumentSpreadsheet()
# Bold cell style applied to every emitted cell.
WhiteStyle = Style(name='Blanc', family="table-cell")
WhiteStyle.addElement(TextProperties(fontweight="bold", fontfamily="Arial", fontsize="14pt"))
lestyle = WhiteStyle
# Date display style used by addcelldate().
dcs = DateStyle(name="dcs", formatsource="AAAA-MM-JJ")
widthshort = Style(name="Wshort", family="table-column")
widthshort.addElement(TableColumnProperties(columnwidth="5cm"))
# Fix: this column style duplicated the name "Wshort"; renamed to "Wlong".
widthlong = Style(name="Wlong", family="table-column")
widthlong.addElement(TableColumnProperties(columnwidth="15cm"))
calc.automaticstyles.addElement(dcs)
calc.automaticstyles.addElement(WhiteStyle)
calc.automaticstyles.addElement(widthshort)
calc.automaticstyles.addElement(widthlong)
row = 0

# --- Command line -------------------------------------------------------
parser = OptionParser()
parser.add_option("-d", "--db", dest="db", default='terp', help="Nom de la base ")
parser.add_option("-U", "--user", dest="user", default='terp', help="User Openerp")
parser.add_option("-W", "--passwd", dest="passwd", default='terp', help="mot de passe Openerp ")
parser.add_option("-H", "--host", dest="host", default='127.0.0.1', help="Adresse Serveur")
parser.add_option("-p", "--port", dest="port", default='8069', help="port du serveur")
parser.add_option("-P", "--protocole", dest="protocole", default='https', help="protocole http/https")
(options, args) = parser.parse_args()
user = options.user
# Fix: the login/execute calls below referenced an undefined ``password``.
password = options.passwd
base = options.db
host = options.host
port = options.port
prot = options.protocole

table = Table(name=base)
print("")
print("---------------------- " + base + " ------------------------------------------")
print("")
table.addElement(TableColumn(numbercolumnsrepeated=1, stylename=widthshort))
table.addElement(TableColumn(numbercolumnsrepeated=2, stylename=widthlong))
table.addElement(TableColumn(numbercolumnsrepeated=3, stylename=widthshort))

# --- OpenERP XML-RPC connection ----------------------------------------
# Fix: the '://' scheme separator was missing from both endpoint URLs.
url_base = prot + '://' + host + ':' + port
server = xmlrpclib.ServerProxy(url_base + '/xmlrpc/common', allow_none=True)
uid = server.login(base, user, password)
sock = xmlrpclib.ServerProxy(url_base + '/xmlrpc/object', allow_none=True)

# Report every non-view account that still has child accounts.
account_ids = sock.execute(base, uid, password, 'account.account', 'search',
                           [('active', 'in', ('True', 'False')), ('type', '<>', 'view')], 0, 8000, 'code')
trouve = False
for account_id in account_ids:
    search_child = sock.execute(base, uid, password, 'account.account', 'search',
                                [('active', 'in', ('True', 'False')), ('parent_id', '=', account_id)], 0, 1000)
    if len(search_child) > 0:
        if not trouve:
            # First offending account: emit the header row once.
            trouve = True
            tr = TableRow()
            table.addElement(tr)
            addcell(tr, "", lestyle)
            addcell(tr, base, lestyle)
        account = sock.execute(base, uid, password, 'account.account', 'read', [account_id],
                               ['code', 'name', 'type', 'balance'])
        tr = TableRow()
        table.addElement(tr)
        addcell(tr, "", lestyle)
        row += 1
        tr = TableRow()
        table.addElement(tr)
        addcell(tr, account[0]['code'], lestyle)
        addcell(tr, account[0]['name'], lestyle)
        addcell(tr, account[0]['type'], lestyle)
        addcell(tr, len(search_child), lestyle)
        addcell(tr, account[0]['balance'], lestyle)
        for child_id in search_child:
            account = sock.execute(base, uid, password, 'account.account', 'read', [child_id],
                                   ['code', 'name', 'type', 'balance'])
            # Grandchildren count shown on the child row (own name so the
            # outer iteration over search_child stays untouched).
            grandchild_ids = sock.execute(base, uid, password, 'account.account', 'search',
                                          [('active', 'in', ('True', 'False')), ('parent_id', '=', child_id)],
                                          0, 1000)
            row += 1
            tr = TableRow()
            table.addElement(tr)
            addcell(tr, "", lestyle)
            addcell(tr, account[0]['code'], lestyle)
            addcell(tr, account[0]['name'], lestyle)
            addcell(tr, account[0]['type'], lestyle)
            addcell(tr, len(grandchild_ids), lestyle)
            addcell(tr, account[0]['balance'], lestyle)
if trouve:
    calc.spreadsheet.addElement(table)
    calc.save('/home/evernichon/controle_vue', True)
| [
"eric@vernichon.fr"
] | eric@vernichon.fr |
f741dcc7d0e9e08af10b0b8fdc7103f4490677e2 | d0c664d31ac71c664101e4984c21fafb77c536f3 | /is103/Project2 (v1.0)/p2q2 -v2 (cluster).py | 62ce483dcf6c03bb798845dff4c3f6b80b443368 | [] | no_license | thao1923/projects | 9562a316e5e07fc3c8169734117ce65466eafe56 | 5c319abc936db6572e0d66af86f3a3d1e6ea6b25 | refs/heads/master | 2023-08-20T22:19:16.370344 | 2021-10-07T18:51:05 | 2021-10-07T18:51:05 | 249,877,492 | 0 | 0 | null | 2021-05-06T20:02:43 | 2020-03-25T03:25:06 | Jupyter Notebook | UTF-8 | Python | false | false | 9,516 | py | # <Your Team ID>
# <Team members' names>
# project 2 Q2
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import pandas as pd
import numpy as np
# replace the content of this function with your own algorithm
# inputs:
# p: min target no. of points team must collect. p>0
# v: 1 (non-cycle) or 2 (cycle)
# flags: 2D list [[flagID, value, x, y], [flagID, value, x, y]....]
# returns:
# A list of n lists. Each "inner list" represents a route. There must be n routes in your answer
def two_opt(arr, flags_dict):
    """Iteratively apply 2-opt moves to route *arr* until no two legs cross.

    arr        -- ordered list of flag IDs describing one route
    flags_dict -- maps flagID -> [flagID, points, x, y]
    Returns the stabilised route.
    """
    line_arr = generate_line_arr(arr, flags_dict)
    next_arr = resolve_lines(arr, line_arr)
    # Keep un-crossing legs until resolve_lines() makes no further change.
    while next_arr != arr:
        arr = next_arr
        line_arr = generate_line_arr(arr, flags_dict)
        next_arr = resolve_lines(arr, line_arr)
    # Fix: dropped an unused trailing recomputation of line_arr that the
    # original performed right before returning.
    return next_arr
def resolve_lines(result, lineArr):
    # One 2-opt pass: find the first pair of non-adjacent crossing segments
    # and return a new route with the span between them reversed.
    # lineArr[k] is the segment from result[k] to result[k + 1].
    for i in range(len(lineArr) - 2):
        for j in range(i + 2, len(lineArr)):
            if do_lines_intersect(lineArr[i], lineArr[j]):
                temp = result[:i + 1]
                # result[j:i:-1] yields indices j, j-1, ..., i+1 -- the
                # classic 2-opt segment reversal.
                temp += result[j:i : -1]
                temp += result[j + 1:]
                return temp
    # No crossing found: route returned unchanged (same list object).
    return result
def has_intersect_lines(lineArr):
    """Return True when any two non-adjacent segments in *lineArr* cross."""
    return any(
        do_lines_intersect(lineArr[i], lineArr[j])
        for i in range(len(lineArr) - 2)
        for j in range(i + 2, len(lineArr))
    )
def generate_line_arr(result, flags_dict):
    """Build the Line segments joining consecutive flags of route *result*."""
    def _point_of(flag_id):
        # flags_dict rows are [flagID, points, x, y].
        rec = flags_dict[flag_id]
        return Point(x=rec[2], y=rec[3])

    return [Line(_point_of(a), _point_of(b))
            for a, b in zip(result, result[1:])]
class Point:
    """A 2-D point; pass *v* (another point) to copy its coordinates."""

    def __init__(self, x=0, y=0, v=None):
        if v is not None:
            self.x, self.y = v.x, v.y
        else:
            self.x, self.y = x, y

    def get_x(self):
        return self.x

    def get_y(self):
        return self.y

    def __repr__(self):
        return '(' + str(self.x) + ', ' + str(self.y) + ')'
class Line:
    """A segment between two endpoints (stored as given, not copied)."""

    def __init__(self, p1, p2):
        self.point1, self.point2 = p1, p2

    def get_first(self):
        return self.point1

    def get_second(self):
        return self.point2

    def __repr__(self):
        return 'p1: ' + str(self.point1) + ', p2: ' + str(self.point2)
def cross_product(a, b):
    """Z component of the 2-D cross product a x b (objects with .x/.y)."""
    return (a.x * b.y) - (a.y * b.x)
def is_point_on_line(line, point):
    # Translate so the line starts at the origin, then test collinearity via
    # the cross product, with a 1e-6 tolerance for float error.
    tempLine = Line(Point(0, 0), Point(line.get_second().x - line.get_first().x, line.get_second().y - line.get_first().y))
    tempPoint = Point(point.x - line.get_first().x, point.y - line.get_first().y)
    r = cross_product(tempLine.get_second(), tempPoint)
    return abs(r) < 0.000001
def is_point_right_of_line(line, point):
    """True when *point* lies strictly to the right of the directed line."""
    origin = line.get_first()
    direction = Point(line.get_second().x - origin.x,
                      line.get_second().y - origin.y)
    relative = Point(point.x - origin.x, point.y - origin.y)
    # Negative cross product => clockwise turn => point is on the right.
    return cross_product(direction, relative) < 0
def line_segment_touches_or_crosses_line(line1, line2):
    # XOR: line2's endpoints lie on opposite sides of the line through line1.
    # NOTE(review): a cross product of exactly 0 counts as "not right", so
    # collinear/touching endpoints may be missed -- confirm acceptable.
    return (is_point_right_of_line(line1, line2.get_first()) ^ is_point_right_of_line(line1, line2.get_second()))
def do_lines_intersect(line1, line2):
    # Segments intersect iff each straddles the infinite line through the other.
    return line_segment_touches_or_crosses_line(line1, line2) and line_segment_touches_or_crosses_line(line2, line1)
def get_distance(node_A, node_B):
    """Euclidean distance between nodes laid out as [id, points, x, y]."""
    dx = node_A[2] - node_B[2]
    dy = node_A[3] - node_B[3]
    return (dx * dx + dy * dy) ** 0.5
def generate_flags_dict(flags_list):
    """Index flags by ID: flagID -> [flagID, int(points), float(x), float(y)]."""
    return {
        rec[0]: [rec[0], int(rec[1]), float(rec[2]), float(rec[3])]
        for rec in flags_list
    }
def get_dist_and_points1(your_route, flags_dict, v):
    """Total travel distance and collected points for one route.

    The walk starts at the origin (0, 0); when v == 2 the route is a cycle
    and the leg back to the origin is included in the distance.
    Returns (distance, points).
    """
    start_node = ["Start", 0, 0, 0]
    dist = 0
    points = 0
    prev = start_node
    for flag_id in your_route:
        node = flags_dict[flag_id]
        dist += get_distance(prev, node)
        points += node[1]
        prev = node
    if v == 2:
        # Cycle variant: return to the starting point.
        dist += get_distance(prev, start_node)
    return dist, points
def get_dist_and_points2(your_routes, flags_dict, v, n):
    """Sum distance and points over all routes (*n* is accepted but unused)."""
    totals = [get_dist_and_points1(route, flags_dict, v)
              for route in your_routes]
    tot_dist = sum(d for d, _ in totals)
    tot_points = sum(p for _, p in totals)
    return tot_dist, tot_points
def get_most_suitable_flag(last_node, flags, touched_flags, flags_dict):
    # Greedy heuristic: among flags not yet visited, score each by
    # value / distance-from-last_node and return [flagID, fitness] of the
    # best.  The stable descending sort keeps the first-seen flag on ties.
    lst = []
    for flag in flags:
        if flag[0] not in touched_flags:
            distance = get_distance(flags_dict[last_node], flags_dict[flag[0]])
            fitness = float(flag[1]) / distance
            lst.append([flag , fitness])
    lst.sort(key= lambda x: -x[1])
    # NOTE(review): raises IndexError when every flag is already touched.
    return [lst[0][0][0], lst[0][1]]
# def get_most_suitable_flag(route, flags, touched_flags, flags_dict, v):
# lst = []
# curr_dist, curr_point = get_dist_and_points1(route, flags_dict, v)
# for flag in flags:
# if flag[0] not in touched_flags:
# temp = route[:]
# temp.append(flag[0])
# distance, p = get_dist_and_points1(temp, flags_dict, v)
# fitness = float(flag[1]) / (distance - curr_dist)
# lst.append([flag , fitness])
# lst.sort(key= lambda x: -x[1])
# return [lst[0][0][0], lst[0][1]]
# def get_best_position(route, flags, v, flags_dict, touched_flags):
# flags_at_best_position = []
# curr_dist, curr_point = get_dist_and_points1(route, flags_dict, v)
# for flag in flags:
# if flag[0] not in touched_flags:
# best_fitness_score = 0
# index = None
# pointt = float(flag[1])
# for i in range(len(route)):
# temp_route = route[:]
# temp_route.insert(i, flag[0])
# dist, point = get_dist_and_points1(temp_route, flags_dict, v)
# fitness_score = pointt / (dist - curr_dist)
# if fitness_score > best_fitness_score:
# best_fitness_score = fitness_score
# index = i
# flags_at_best_position.append([flag, index, best_fitness_score])
# flags_at_best_position.sort(key= lambda x: -x[2])
# insert_flag = flags_at_best_position[0][0]
# insert_index = flags_at_best_position[0][1]
# flags.remove(insert_flag)
# return insert_flag[0], insert_index, flags
# def cluster_flags(flags,n):
# df = pd.DataFrame(flags, columns = ['flagID', "point", "X", "Y"])
# df1 = df.loc[:,"X":"Y"]
# model1 = KMeans(n_clusters = n, random_state = 99)
# model1.fit(df1)
# df['player'] = model1.labels_
# clustered_flags = df.values.tolist()
# return clustered_flags
# def choose_cluster(flags, flags_dict):
# temp = []
# for flag in flags:
# dist = get_distance([0,0,0,0], flags_dict[flag[0]])
# point = float(flag[1])
# temp.append([flag[0], point, dist])
# data = pd.DataFrame(temp, columns = ['flagID', "point", "dist"])
# prepared_df = data[['point', 'dist']]
# sil = []
# for k in range(2, 11):
# kmeans = KMeans(n_clusters = k).fit(prepared_df)
# labels = kmeans.labels_
# sil.append([k, silhouette_score(prepared_df, labels)])
# sil.sort(key=lambda x: -x[1])
# n_clusters = sil[0][0]
# model = KMeans(n_clusters = n_clusters, random_state = 99)
# model.fit(prepared_df)
# data['cluster'] = model.labels_
# a = data.groupby('cluster')['dist'].max().values.tolist()
# min_dist = a[0]
# chosen = 0
# for i in range(1, len(a)):
# if a[i] < min_dist:
# min_dist = a[i]
# chosen = i
# return_flags = data[data['cluster'] == i]['flagID'].to_list()
# return return_flags
def get_routes(p, v, flags, n):
    """Build *n* routes that together collect at least *p* points.

    p     -- minimum total points the team must collect (p > 0)
    v     -- 1 for open routes, 2 for cycles back to the start
    flags -- [[flagID, value, x, y], ...]
    n     -- number of routes
    Returns a list of n routes (each a list of flag IDs).
    """
    flags_dict = generate_flags_dict(flags)
    # Rank every flag by value / distance-from-origin, best first.
    temp = []
    for flag in flags:
        dist = get_distance([0, 0, 0, 0], flags_dict[flag[0]])
        temp.append([flag, float(flag[1]) / dist])
    temp.sort(key=lambda x: -x[1])
    comparing = []
    # Run 10 greedy constructions and keep the cheapest.
    # NOTE(review): routes are seeded from temp[k] (not temp[j]), so every
    # iteration currently builds the same seed set; possibly temp[j] was
    # intended -- left unchanged to preserve behaviour.
    for i in range(0, n * 10, n):
        return_lst = [[] for _ in range(n)]
        fitness_lst = [[] for _ in range(n)]
        touched_flags = []
        j = i
        for k in range(len(return_lst)):
            last_node = temp[k][0][0]
            return_lst[k].append(last_node)
            touched_flags.append(last_node)
            fitness_lst[k] = [k] + get_most_suitable_flag(
                last_node, flags, touched_flags, flags_dict)
            j += 1
        tot_dist, tot_points = get_dist_and_points2(return_lst, flags_dict, v, n)
        # Greedily extend whichever route currently holds the fittest candidate.
        while tot_points < p:
            fitness_lst.sort(key=lambda x: -x[2])
            insert_flag = fitness_lst[0][1]
            insert_route = fitness_lst[0][0]
            if insert_flag not in touched_flags:
                return_lst[insert_route].append(insert_flag)
                touched_flags.append(insert_flag)
                tot_dist, tot_points = get_dist_and_points2(
                    return_lst, flags_dict, v, n)
                if tot_points >= p:
                    break
            last_node = return_lst[insert_route][-1]
            fitness_lst[0] = [insert_route] + get_most_suitable_flag(
                last_node, flags, touched_flags, flags_dict)
        comparing.append([return_lst, tot_dist])
    comparing.sort(key=lambda x: x[1])
    return_lst = comparing[0][0]
    # Fix: the original wrote "for lst in return_lst: lst = two_opt(...)",
    # which rebinds the loop variable and discards every optimised route.
    return_lst = [two_opt(route, flags_dict) for route in return_lst]
    return return_lst
| [
"ptbui.2018@sis.smu.edu.sg"
] | ptbui.2018@sis.smu.edu.sg |
d89dd7b8b51d948c8c80287f46f58e498aa2275c | 1f2625b09a851b43f51c6dcbacf4826c68416086 | /rooms/migrations/0004_room_slug.py | 14fe75938b7728178773c4252d3b4c92d072880d | [] | no_license | wbrefvem/conference-room-api | eee0aee0f5f90be799c7f4cf635f234a9fd0e9e3 | 5195db678d361fd6ee0224db65c64051d88e3f82 | refs/heads/master | 2021-01-10T02:14:01.592882 | 2018-02-02T03:55:07 | 2018-02-02T03:55:07 | 36,463,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a slug field to Room (rooms app)."""
    dependencies = [
        ('rooms', '0003_building_slug'),
    ]
    operations = [
        migrations.AddField(
            model_name='room',
            name='slug',
            field=models.SlugField(default=''),
            # The '' default only back-fills existing rows; it is not kept
            # on the model afterwards.
            preserve_default=False,
        ),
    ]
| [
"wbrefvem@gmail.com"
] | wbrefvem@gmail.com |
244cf370ca24c26d0db9a20a8aaa4b735f4021b2 | 8a752dc77828ecbfa6e0c52c382fac8fd0269429 | /neteasenews/run.py | 6f0f73f01f11ae79ce2a2115996ea26549b3057c | [] | no_license | Dpuntu/scrapy | 3f908585b281b2bf78a93ed690b3b7cce359c9b1 | b90f118bf648a91238b9cff0c4b1baaa62c0e721 | refs/heads/master | 2021-04-30T13:58:31.560485 | 2018-02-12T06:11:46 | 2018-02-12T06:11:46 | 121,205,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | # -*- coding: utf-8 -*-
from scrapy import cmdline
# Equivalent to typing "scrapy crawl neteasenews" in the shell; the crawl
# starts immediately when this module is executed.
cmdline.execute("scrapy crawl neteasenews".split())
"fangmingxing@kezaihui.com"
] | fangmingxing@kezaihui.com |
97bf61ba48700eba44b7c4c4a29727d8cf717721 | 22ed4ce6a56a1147332d6fb28638482756f7ed98 | /mybook/settings.py | 420e4ee18c4a9aa0ef29d74d7abea65307487075 | [] | no_license | taikomegane/django-practice-mybook | a8503dfb03473e2e975f98dff88048392eca21c4 | d99f87d2d879f214ba183f4abe57b334346109f1 | refs/heads/master | 2021-05-06T18:19:37.408625 | 2017-11-25T04:09:48 | 2017-11-25T04:09:48 | 111,974,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,239 | py | """
Django settings for mybook project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'zgij)b*!ou)%42^br-ch@dgi-#u1c=*uq9dn)8ut31p8+07)^c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bootstrapform',
    'cms',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mybook.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mybook.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Place static files in a shared location (original comment was mojibake'd
# Japanese).
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "mybook/static"),
)
"taiko.megane.gobou@gmail.com"
] | taiko.megane.gobou@gmail.com |
8d86a2ae1826be7472e7ce97855909581605f18c | 7d34c85937d080b08ad6a6ccd08f978f59b3aa04 | /goal_app/migrations/0004_auto_20180414_0105.py | f94e01c59dce9343cde7c36f48da61873e656c71 | [] | no_license | homeahmed2012/life-planner | ba93b19553adceb6c16656d802e3b66574548307 | 8ac7cf4cb4cca930e2fd9ece37473ad4841530de | refs/heads/master | 2022-12-13T15:34:01.475678 | 2019-09-05T13:01:34 | 2019-09-05T13:01:34 | 128,756,085 | 0 | 0 | null | 2022-05-26T21:10:56 | 2018-04-09T10:41:47 | JavaScript | UTF-8 | Python | false | false | 1,041 | py | # Generated by Django 2.0.2 on 2018-04-14 01:05
from django.db import migrations
class Migration(migrations.Migration):
    """Renames FK fields from '<name>_id' to '<name>' on comment, goal and
    task (the ORM derives the DB column's _id suffix itself)."""
    dependencies = [
        ('goal_app', '0003_auto_20180414_0038'),
    ]
    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='goal_id',
            new_name='goal',
        ),
        migrations.RenameField(
            model_name='goal',
            old_name='parent_id',
            new_name='parent',
        ),
        migrations.RenameField(
            model_name='goal',
            old_name='user_id',
            new_name='user',
        ),
        migrations.RenameField(
            model_name='task',
            old_name='day_id',
            new_name='day',
        ),
        migrations.RenameField(
            model_name='task',
            old_name='goal_id',
            new_name='goal',
        ),
        migrations.RenameField(
            model_name='task',
            old_name='user_id',
            new_name='user',
        ),
    ]
| [
"ahmed_mohamed_3@azhar.edu.eg"
] | ahmed_mohamed_3@azhar.edu.eg |
f7a396551040d29bf25b4c493b8b32db6133418e | 65ccbf98d87f8e3c61e5df53aba67b542a360499 | /case/migrations/0004_auto_20201214_2234.py | d3252f59b8bd801f90612d3978065bbbb6776a17 | [] | no_license | CMKJwebsite/CMKJ_website | d043499167e3b94cf01af50327e6c3c4c1d774f1 | f5fc8aee606d846427b8df88e85d9b1812702cc2 | refs/heads/master | 2023-02-12T07:32:04.960277 | 2021-01-05T13:29:07 | 2021-01-05T13:29:07 | 318,972,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2020-12-14 22:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters Case.c_picture to an ImageField uploading to project_images/."""
    dependencies = [
        ('case', '0003_auto_20201213_1722'),
    ]
    operations = [
        migrations.AlterField(
            model_name='case',
            name='c_picture',
            # NOTE(review): verbose_name below is mojibake'd Chinese split
            # across lines by the data dump; preserved byte-for-byte.
            field=models.ImageField(upload_to='project_images/', verbose_name='้กน็ฎ็
 ง็'),
        ),
    ]
| [
"onlyweiyi852951@outlook.com"
] | onlyweiyi852951@outlook.com |
00766e298a33dcae5f92d7859cc87d876ccca112 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2463/60782/304860.py | a0914fcd8b479f7c6f75f9999f2477a83b960f6a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | """
้ข็ฎๆ่ฟฐ
็ปๅฎไธไธชๅทฒๆ็
งๅๅบๆๅ ็ๆๅบๆฐ็ป๏ผๆพๅฐไธคไธชๆฐไฝฟๅพๅฎไปฌ็ธๅ ไนๅ็ญไบ็ฎๆ ๆฐใ
ๅฝๆฐๅบ่ฏฅ่ฟๅ่ฟไธคไธชไธๆ ๅผ index1 ๅ index2๏ผๅ
ถไธญ index1 ๅฟ
้กปๅฐไบ index2ใ
่ฏดๆ:
่ฟๅ็ไธๆ ๅผ๏ผindex1 ๅ index2๏ผไธๆฏไป้ถๅผๅง็ใ
ไฝ ๅฏไปฅๅ่ฎพๆฏไธช่พๅ
ฅๅชๅฏนๅบๅฏไธ็็ญๆก๏ผ่ไธไฝ ไธๅฏไปฅ้ๅคไฝฟ็จ็ธๅ็ๅ
็ด ใ
"""
class Solution(object):
    """Two-sum via complement lookup: one pass, O(n) time, O(n) space."""

    def twoSum(self, numbers, target):
        """Return [index1, index2] (1-based, index1 < index2) or None.

        :type numbers: List[int]
        :type target: int
        :rtype: List[int]
        """
        # complement value -> index of the element that is waiting for it
        pending = {}
        for idx, value in enumerate(numbers):
            if value in pending:
                return [pending[value] + 1, idx + 1]
            pending[target - value] = idx
        return None
s = Solution()
# Reads the array as ", "-separated ints on one line and the target on the
# next, then prints the resulting index pair.
print(s.twoSum(list(map(int, input().split(", "))), int(input())))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
72482de2ffb3429bf4106a937ef0d209a699fabb | 9441d42c40a628111cf8d72734f68c35a4bf0fdd | /src/hurricane/webrpc/api/user.py | de62ee580406d82ae1410909b93ea821310bd0de | [] | no_license | johnson-li/hurricane | 2057557e146af8b0231586b95a6aea4ff5fee0cb | 5a1c6cf6c9f38e857ffd732d9fb58ee9a2b5aac9 | refs/heads/master | 2020-09-28T00:26:36.768353 | 2016-10-05T14:33:07 | 2016-10-05T14:33:31 | 67,291,355 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | def get_user(client, user_id):
user_id = int(user_id)
return client.get_user(user_id=user_id)
def create_user(client, name, email, password, bio=''):
    """Create a user through *client* and return the backend's response."""
    return client.create_user(
        name=name,
        email=email,
        password=password,
        bio=bio,
    )
def update_user(client, user_id, name=None, email=None, password=None, bio=None):
    """Update a user through *client* and return the backend's response.

    Fix: the original accepted *email* but silently dropped it; it is now
    forwarded like the other optional fields.  (Confirm the backend RPC
    accepts an ``email`` keyword.)
    """
    return client.update_user(
        user_id=user_id,
        name=name,
        email=email,
        password=password,
        bio=bio,
    )
| [
"johnsonli1993@163.com"
] | johnsonli1993@163.com |
a9003fdff24c89d3d9fa50bcfc64c24a0cc79586 | a24a03163cf643249922edc29bc2086517615e53 | /thewema/urls.py | 7bcf11a899a1294d7c8cbb12dff05605f0faab60 | [] | no_license | ErickMwazonga/The-Wema-Academy | 165203e8e337459f6bae4f7178b3bfad715f052a | 61f9b778e423326d8dbd2c04f2dd6ce19e15e2a9 | refs/heads/master | 2021-01-19T14:22:00.568982 | 2017-04-13T10:41:06 | 2017-04-13T10:41:06 | 88,153,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,004 | py | """wema URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import AuthenticationForm
# URL namespace for {% url 'thewema:...' %} reversing.
app_name = 'thewema'
# Routes map to class-based views in mysite.views plus the stock auth views
# (Django 1.x url() syntax).
urlpatterns = [
    # url(r'^$', views.index_view, name='index'),
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^students$', views.StudentListView.as_view(), name='students'),
    url(r'^student$', views.StudentCreateView.as_view(), name='student'),
    url(r'^student/(?P<pk>[0-9]+)/$', views.StudentDetailView.as_view(), name='student_detail'),
    url(r'^class$', views.StudentClassCreateView.as_view(), name='sclass'),
    url(r'^classes$', views.StudentClassListView.as_view(), name='classes'),
    url(r'^exam$', views.ExamCreateView.as_view(), name='exam'),
    url(r'^score$', views.ScoreCreateView.as_view(), name='score'),
    url(r'^scores$', views.ScoreListView.as_view(), name='scores'),
    url(r'^scores/(?P<pk>[0-9]+)/$', views.ScoreDetailView.as_view(), name='score_detail'),
    url(r'^feedback$', views.FeedbackCreateView.as_view(), name='feedback'),
    url(r'^login$', auth_views.login, {
        'template_name': 'thewema/login.html',
        'authentication_form': AuthenticationForm
        },
        name='login'
        ),
    url(r'^logout/$', auth_views.logout_then_login, {'login_url': 'thewema:login'}, name='logout'),
]
| [
"erickmwazonga@gmail.com"
] | erickmwazonga@gmail.com |
8be965fe3d91be2e4660755b7aac07fbd420f966 | 8b5f58a8c21cea7d6f4197eeb24fe7d0b836d280 | /src/blog/migrations/0010_auto_20190604_0930.py | 375813330ddcb30a4bd0dd2ebe580ce410c35dbb | [] | no_license | Sabrigan/monsitedjango | c775ac5342f8bb1dc2fd6be5768e56d6f416a5c9 | ba4dbd67e29ce6b4b97c43d2ba2bd9f33b5c53ab | refs/heads/master | 2020-05-31T03:05:16.841475 | 2019-07-02T17:12:19 | 2019-07-02T17:12:19 | 190,073,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | # Generated by Django 2.1.7 on 2019-06-04 09:30
from django.db import migrations
class Migration(migrations.Migration):
    """Sets Post's verbose_name_plural to 'PostS' (capitalisation kept as
    recorded; editing an applied migration would desync migration state)."""
    dependencies = [
        ('blog', '0009_auto_20190424_1947'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={'verbose_name_plural': 'PostS'},
        ),
    ]
| [
"pierre.dauphin@me.com"
] | pierre.dauphin@me.com |
8041d74cf42e2fd29ead13232244b00460b34b16 | acd57cdb216c10925800d31015ad817ecd17fe5e | /userQuerry/serializers.py | de8731b88fef64fd033f22cdd4bca917bc9ee941 | [] | no_license | vipinbharti121/serverAPIAI | 2521a4d3ee90f083b34412b96fda00fcb14fd5c3 | 099c1896aad2226666626205e7e703a31c144861 | refs/heads/master | 2021-01-22T18:01:43.821693 | 2017-04-06T05:54:21 | 2017-04-06T05:54:21 | 86,451,370 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from rest_framework import serializers
from .models import UserQuerries
class UserQuerriesSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the UserQuerries model."""
    class Meta:
        model = UserQuerries
        fields = '__all__'
| [
"vipinbharti121@gmail.com"
] | vipinbharti121@gmail.com |
6fd03bbe4f4ec5a79c35fd747a1a5b3594f217c1 | 453ce6b8e35ec6993ce40a2793218e629c72bafa | /pymarc/record.py | 614af2d336877030fa2da5c1ab388799820ce086 | [] | no_license | MagnusEnger/Libriotech-GAE | 0eb717112ae5b50675d6b2fccd7f5d691ce8284d | 5d3c2167249d7f76482f3bb374a3d87ac2b78e6b | refs/heads/master | 2020-12-24T14:52:36.455670 | 2010-02-23T10:49:20 | 2010-02-23T10:49:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,495 | py | import re
from pymarc.exceptions import BaseAddressInvalid, RecordLeaderInvalid, \
BaseAddressNotFound, RecordDirectoryInvalid, NoFieldsFound
from pymarc.constants import LEADER_LEN, DIRECTORY_ENTRY_LEN, END_OF_RECORD
from pymarc.field import Field, SUBFIELD_INDICATOR, END_OF_FIELD, \
map_marc8_field
from pymarc.marc8 import marc8_to_unicode
isbn_regex = re.compile(r'([0-9\-]+)')
class Record(object):
"""
A class for representing a MARC record. Each Record object is made up of
multiple Field objects. You'll probably want to look at the docs for Field
to see how to fully use a Record object.
Basic usage:
field = Field(
tag = '245',
indicators = ['0','1'],
subfields = [
'a', 'The pragmatic programmer : ',
'b', 'from journeyman to master /',
'c', 'Andrew Hunt, David Thomas.',
])
record.add_field(field)
Or creating a record from a chunk of MARC in transmission format:
record = Record(data=chunk)
Or getting a record as serialized MARC21.
raw = record.as_marc()
You'll normally want to use a MARCReader object to iterate through
MARC records in a file.
"""
    def __init__(self, data='', to_unicode=False, force_utf8=False):
        # Default leader: blanks plus '22' (indicator count / subfield code
        # length) and the '4500' directory entry map.
        self.leader = (' '*10) + '22' + (' '*8) + '4500'
        self.fields = list()
        self.pos = 0
        # Non-empty *data* is parsed immediately as raw MARC21.
        if len(data) > 0:
            self.decode_marc(data, to_unicode=to_unicode,
                force_utf8=force_utf8)
def __str__(self):
"""
In a string context a Record object will return a prettified version
of the record in MARCMaker format. See the docstring for Field.__str__
for more information.
"""
# join is significantly faster than concatenation
text_list = ['=LDR %s' % self.leader]
text_list.extend([str(field) for field in self.fields])
text = '\n'.join(text_list) + '\n'
return text
def __getitem__(self, tag):
"""
Allows a shorthand lookup by tag:
record['245']
"""
fields = self.get_fields(tag)
if len(fields) > 0:
return fields[0]
return None
    def __iter__(self):
        # Iterating a Record yields its fields; reset the cursor each time.
        self.__pos = 0
        return self

    def next(self):
        # Python 2 iterator protocol (only next() is defined, no __next__).
        if self.__pos >= len(self.fields):
            raise StopIteration
        self.__pos += 1
        return self.fields[self.__pos - 1]
def add_field(self, *fields):
"""
add_field() will add pymarc.Field objects to a Record object.
Optionally you can pass in multiple fields.
"""
self.fields.extend(fields)
def get_fields(self, *args):
"""
When passed a tag ('245'), get_fields() will return a list of all the
fields in a record with a given tag.
title = record.get_fields('245')
If no fields with the specified
tag are found then an empty list is returned. If you are interested
in more than one tag you can pass in a list:
subjects = record.get_fields('600', '610', '650')
If no tag is passed in to fields() a list of all the fields will be
returned.
"""
if (len(args) == 0):
return self.fields
return [f for f in self.fields if f.tag in args]
    def decode_marc(self, marc, to_unicode=False, force_utf8=False):
        """
        decode_marc() accepts a MARC record in transmission format as a
        a string argument, and will populate the object based on the data
        found. The Record constructor actually uses decode_marc() behind
        the scenes when you pass in a chunk of MARC data to it.

        Raises RecordLeaderInvalid, BaseAddressNotFound, BaseAddressInvalid,
        RecordDirectoryInvalid or NoFieldsFound on malformed input.
        (Python 2 code: byte-string slicing, str.decode, integer '/'.)
        """
        # extract record leader
        self.leader = marc[0:LEADER_LEN]
        if len(self.leader) != LEADER_LEN:
            raise RecordLeaderInvalid
        # extract the byte offset where the record data starts
        base_address = int(marc[12:17])
        if base_address <= 0:
            raise BaseAddressNotFound
        if base_address >= len(marc):
            raise BaseAddressInvalid
        # extract directory, base_address-1 is used since the
        # director ends with an END_OF_FIELD byte
        directory = marc[LEADER_LEN:base_address-1]
        # determine the number of fields in record
        if len(directory) % DIRECTORY_ENTRY_LEN != 0:
            raise RecordDirectoryInvalid
        field_total = len(directory) / DIRECTORY_ENTRY_LEN
        # add fields to our record using directory offsets
        field_count = 0
        while field_count < field_total:
            entry_start = field_count * DIRECTORY_ENTRY_LEN
            entry_end = entry_start + DIRECTORY_ENTRY_LEN
            entry = directory[entry_start:entry_end]
            # directory entry layout: 3-char tag, 4-char length, 5-char offset
            entry_tag = entry[0:3]
            entry_length = int(entry[3:7])
            entry_offset = int(entry[7:12])
            entry_data = marc[base_address + entry_offset :
                base_address + entry_offset + entry_length - 1]
            # assume controlfields are numeric; replicates ruby-marc behavior
            if entry_tag < '010' and entry_tag.isdigit():
                field = Field(tag=entry_tag, data=entry_data)
            else:
                subfields = list()
                subs = entry_data.split(SUBFIELD_INDICATOR)
                # the chunk before the first subfield holds both indicators
                first_indicator = subs[0][0]
                second_indicator = subs[0][1]
                for subfield in subs[1:]:
                    if len(subfield) == 0:
                        continue
                    code = subfield[0]
                    data = subfield[1:]
                    if to_unicode:
                        # leader byte 9 == 'a' marks a UTF-8 encoded record
                        if self.leader[9] == 'a' or force_utf8:
                            data = data.decode('utf-8')
                        else:
                            data = marc8_to_unicode(data)
                    subfields.append(code)
                    subfields.append(data)
                field = Field(
                    tag = entry_tag,
                    indicators = [first_indicator, second_indicator],
                    subfields = subfields,
                )
            self.add_field(field)
            field_count += 1
        if field_count == 0:
            raise NoFieldsFound
    def as_marc(self):
        """
        returns the record serialized as MARC21
        (side effect: rewrites self.leader with the recomputed record
        length and base address)
        """
        fields = ''
        directory = ''
        offset = 0
        # build the directory
        # each element of the directory includes the tag, the byte length of
        # the field and the offset from the base address where the field data
        # can be found
        for field in self.fields:
            field_data = field.as_marc().encode('utf-8')
            fields += field_data
            if field.tag.isdigit():
                directory += '%03d' % int(field.tag)
            else:
                directory += '%03s' % field.tag
            directory += '%04d%05d' % (len(field_data), offset)
            offset += len(field_data)
        # directory ends with an end of field
        directory += END_OF_FIELD
        # field data ends with an end of record
        fields += END_OF_RECORD
        # the base address where the directory ends and the field data begins
        base_address = LEADER_LEN + len(directory)
        # figure out the length of the record
        record_length = base_address + len(fields)
        # update the leader with the current record length and base address
        # the lengths are fixed width and zero padded
        self.leader = '%05d%s%05d%s' % \
            (record_length, self.leader[5:12], base_address, self.leader[17:])
        # return the encoded record
        return self.leader + directory + fields
    # alias for backwards compatability
    as_marc21 = as_marc
def title(self):
"""
Returns the title of the record (245 $a an $b).
"""
try:
title = self['245']['a']
except TypeError:
title = None
if title:
try:
title += self['245']['b']
except TypeError:
pass
return title
    def isbn(self):
        """
        Returns the first ISBN in the record or None if one is not
        present. The returned ISBN will be all numeric; so dashes and
        extraneous information will be automatically removed. If you need
        this information you'll want to look directly at the 020 field,
        e.g. record['020']['a']
        """
        try:
            isbn_number = self['020']['a']
            # module-level isbn_regex grabs the leading digits/dashes run
            match = isbn_regex.search(isbn_number)
            if match:
                return match.group(1).replace('-', '')
        except TypeError:
            # ISBN not set
            pass
        return None
def author(self):
if self['100']:
return self['100'].format_field()
elif self['110']:
return self['110'].format_field()
elif self['111']:
return self['111'].format_field()
return None
def uniformtitle(self):
if self['130']:
return self['130'].format_field()
elif self['240']:
return self['240'].format_field()
return None
    def subjects(self):
        """
        Return all subject added-entry (6XX) fields as a list.

        Note: Fields 690-699 are considered "local" added entry fields but
        occur with some frequency in OCLC and RLIN records.
        """
        subjlist = self.get_fields('600', '610', '611', '630', '648', '650',
            '651', '653', '654', '655', '656', '657', '658', '662', '690',
            '691', '696', '697', '698', '699')
        return subjlist
    def addedentries(self):
        """
        Return all added-entry (7XX) fields as a list.

        Note: Fields 790-799 are considered "local" added entry fields but
        occur with some frequency in OCLC and RLIN records.
        """
        aelist = self.get_fields('700', '710', '711', '720', '730', '740',
            '752', '753', '754', '790', '791', '792', '793', '796', '797',
            '798', '799')
        return aelist
def location(self):
loc = self.get_fields('852')
return loc
    def notes(self):
        """
        Return all 5xx (note) fields in an array.
        """
        notelist = self.get_fields('500', '501', '502', '504', '505',
            '506', '507', '508', '510', '511', '513', '514', '515',
            '516', '518', '520', '521', '522', '524', '525', '526',
            '530', '533', '534', '535', '536', '538', '540', '541',
            '544', '545', '546', '547', '550', '552', '555', '556',
            '561', '562', '563', '565', '567', '580', '581', '583',
            '584', '585', '586', '590', '591', '592', '593', '594',
            '595', '596', '597', '598', '599')
        return notelist
    def physicaldescription(self):
        """
        Return all 300 (physical description) fields in an array.
        """
        return self.get_fields('300')
def publisher(self):
if self['260']:
return self['260']['b']
return None
def pubyear(self):
if self['260']:
return self['260']['c']
return None
def map_marc8_record(r):
    # Convert every field via map_marc8_field and flag the record as Unicode.
    # NOTE(review): map() is eager on Python 2 (this module's target); on
    # Python 3 it would leave a lazy iterator in r.fields.
    r.fields = map(map_marc8_field, r.fields)
    l = list(r.leader)
    l[9] = 'a' # see http://www.loc.gov/marc/specifications/speccharucs.html
    r.leader = "".join(l)
    return r
| [
"magnus@enger.priv.no"
] | magnus@enger.priv.no |
ad7f049e66ec545ce2f1dbbd918afc7e7508ee08 | 5daef14ee2ff9dc19f5f9c21366f719fd81765e0 | /IBMsite/mysite/views/sign_views.py | dfb123acb833043548b995a376c0711a8ef76c54 | [] | no_license | winter-eighteen/IBMclubBBS | 5427e1ed419c0138aaaca5d86fb3b04911fe19ad | 761d3c83ab14ac60d926b9949da399d32f260b18 | refs/heads/master | 2021-05-10T21:07:51.620283 | 2017-09-19T10:53:58 | 2017-09-19T10:53:58 | 118,216,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | #-*-coding:utf-8-*-
from django.shortcuts import render,redirect
from django.http import HttpResponse
from mysite.models import Sign_Model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.timezone import now
from django.views.decorators.csrf import csrf_exempt
import json
@login_required(login_url='/mysite/login/')
@csrf_exempt
def sign(request):
    """Daily sign-in endpoint (AJAX only).

    First visit creates the user's Sign_Model row with 5 points; later
    visits add 5 points at most once per calendar day.  Responds with a
    JSON body: post_success / post_again / ajax_required.
    """
    get_email = request.user.username
    if request.is_ajax():
        is_exists = Sign_Model.objects.filter(email=get_email).exists()
        if not is_exists:
            # Fix: call now() -- the original passed the *function* object
            # as the timestamp value.
            Sign_Model.objects.create(email=get_email, cost=5, last_sign=now())
            return HttpResponse(json.dumps({"data": "post_success"}))
        obj = Sign_Model.objects.get(email=get_email)
        if obj.last_sign.date() != now().date():
            obj.cost = obj.cost + 5
            obj.last_sign = now()
            obj.save()
            return HttpResponse(json.dumps({"data": "post_success"}))
        return HttpResponse(json.dumps({"data": "post_again"}))
    # Fix: non-AJAX requests previously fell through and returned None
    # (an error in Django); answer them explicitly instead.
    return HttpResponse(json.dumps({"data": "ajax_required"}), status=400)
| [
"632991940@qq.com"
] | 632991940@qq.com |
ebab21e6824750e51b35448b7e8d3f1cfec4b09b | 05d460acd3e15c42c4b1ddd2eead83dc48f5312f | /Web/Spider/spider_prototype.py | cf490101d2c8dfca50d18a1f10d859bbf9b4697e | [
"MIT"
] | permissive | OblackatO/OffensiveSecurity | 992e839c8d966eec0da5f89218772b31f249f18b | 4b2c5f981cf41cc463791f279c3ad0115c28d738 | refs/heads/master | 2021-06-26T16:31:31.973911 | 2020-09-26T11:42:13 | 2020-09-26T11:42:13 | 132,665,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | from bs4 import BeautifulSoup
import urllib.request as ur
import sys
urls = []
def get_links(website):
try:
print('im parsing this site',website)
web_page = ur.urlopen(website).read()
bsi = BeautifulSoup(web_page,'lxml')
if bsi == NoneType :
pass
else :
for link in bsi.find_all('a'):
link = link.get('href')
if 'http' in link[:4] :
urls.append(link)
if '/' in link[:2] :
urls.append(website+link)
except Exception as e:
print(e)
pass
get_links(sys.argv[1])
"""
for item in urls :
get_links(item)
print(urls)
urls2 = set(urls)
print('with set,',urls2)
"""
"""
READ_ME :
This script is supposed to be a spider. It would be useful to
parse all links of a website and the links provided by each link,
till no more links are found. I tried to use a while loop and adding
/removing links from a list, and while there were elements on this
list the while loop wouldn't stop. Anyway this could really take a lot of time
, multithreading or multiprocessing should be used. I let the script
able to only make a parse for links on the webpage,without looking for more
links in the found links. Do not forget to organize the list to remove
repeated items. See set() function.
""" | [
"pegom0896@gmail.com"
] | pegom0896@gmail.com |
eef43fcb87220118201d19d79fc0379b36a3bf3d | 7e9745ba5e3473cd01696ae31162f166ee5cf830 | /time_display/apps/timedisplay/views.py | cd483b7dc69776b399a58142246eb9bfe523249c | [] | no_license | Manzurka/Coding-Dojo-projects-Python-Django- | 34bb07e2d08b72f8c6a707c749edb370f2678cf0 | 0caeaf5d1f949b9309176506b6d72a16ff6ea514 | refs/heads/master | 2020-03-19T00:51:56.592622 | 2018-05-30T23:19:05 | 2018-05-30T23:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse, redirect
from time import gmtime, strftime
# Create your views here.
def index(request):
context = {
"time": strftime("%Y-%m-%d %H:%M %p", gmtime())
}
return render(request,'timedisplay/index.html', context) | [
"manzuraz@amazon.com"
] | manzuraz@amazon.com |
97407bac2fdf55befc9a5004ede3f53c605231fc | 4064646d8f333ef2aa1b9dc38e7a61b660188f11 | /python_work/simple_messages.py | f80b752829cafa23b6d1262473e9a7b8b19ee0ea | [] | no_license | OldRice42/PythonCrashCourse2e | 0f2926412e4cb1cc5f7e848b028c04afdf7487b9 | ee6baa2cb54a63e2deb913ce91c2ac75f0400ff0 | refs/heads/master | 2022-12-23T16:34:52.596003 | 2020-10-03T20:44:13 | 2020-10-03T20:44:13 | 298,918,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | fiz = "buzz"
print(fiz)
buzz = "fizz"
print(buzz) | [
"jessica.hylton42@gmail.com"
] | jessica.hylton42@gmail.com |
cd853e8871e7c4001d4bb369d8d185c0a013f170 | 3be1e2f1073e96ce86b6bacc178ea92a890b8f42 | /aplicacion/reporte/bd/db.py | d5548c78af1dccf6d5f3c11ad9e15b86c92eed7b | [] | no_license | eduardouio/sln_control | 48ac3d3bfe3aaa035d8aabffe3d8fd4e7d93bad2 | 2b1d2993dcf1145329f55f04274813fad2939c82 | refs/heads/master | 2020-04-30T20:41:07.429571 | 2013-10-22T19:51:58 | 2013-10-22T19:51:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,142 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Version 1.0
# Autor Eduardo Villota <eduardouio@hotmail.com> <@eduardouio>
# Package Conexion a la base de datos
# File DB.py
# Ubicacion reporte/basedatos/DB.py
# Copyright (c) 2012 Sรณlidos y Lodos Nacionales S.A. <http://sln-ec.com> <info@sln-ec.com>
from PyQt4 import QtCore, QtSql
import conn
class Model(object):
''' Modelo de datos, lo errores ocurridos en la capa son mostrados por lastError()
si un metodo no puede efectuar una accion retorna falso.
Los tipos de error a soportar son errores de conexiรณn y errores en consultas sql'''
def __init__(self):
'''Inicia la conexiรณn al servidor'''
self.Conn = conn.conectar()
def __consultDb(self,sql):
'''Ejecuta una consulta en la base de datos, las consultas son preparadas por el metodo
que invoca a este metodo'''
if (self.Conn):
sql.exec_()
if not sql.isActive():
QtGui.QMessageBox.warning(None,
QtGui.qApp.tr('Ocurriรณ un Error al enviar o recibir informaciรณn'),
QtGui.qApp.tr('La solicitud no se completรณ correctamente, intรฉntelo de nuevo \n'
'El servidor dice... \n' + sql.lastError().databaseText() +
'\n\nSi el problema continรบa comunรญquese con eduardouio7@gmail.com' + str(sql.lastQuery())),
QtGui.QMessageBox.Ok)
return False
return sql
def listTables(self):
''' Lista todas las tablas de la base de datos '''
sql = QtSql.QSqlQuery()
sql.prepare('SHOW TABLES FROM slnecc_control;')
result = self.__consultDb(sql)
if not result:
return False
return result
def listColumns(self,tabla):
''' Lista las columnas de una tabla '''
sql = QtSql.QSqlQuery()
sql.prepare("SHOW COLUMNS FROM " + tabla + " ;")
result = self.__consultDb(sql)
if not result:
return False
return result
def getQueryModel(self,columns,table):
'''Retorna un modelo de solo lectura de una tabla
se especifica las columnas con un diccionario, para
no escribir las cabeceras del model si la consulta tiene un error consultar
QSqlQueryModel.lastError()'''
query = 'SELECT '
#desde
i = 1
#hasta
x = len(columns)
#armamos la consulta
for item in columns:
if ( i < x ):
query = query + item + ' AS ' + columns[item] + ','
if ( i == x ):
query = query + item + ' AS ' + columns[item] + ' FROM ' + table
i += 1
modelo = QtSql.QSqlQueryModel()
modelo.setQuery(query)
return modelo
def getTableModel(self, table, condition):
'''Retorna un modelo editable de una tabla, la condicion string sql
lo errores estan en lastError() '''
modelo = QtSql.QSqlTableModel()
modelo.setTable(table)
#los cambios al modelo se almacenan en cache y se reguistran
#cuando llamemos al metodo modelo.submitAll(), se tiene la posibilada de revertir
modelo.setEditStrategy(QtSql.QSqlTableModel.OnManualSubmit)
modelo.setFilter(condition)
modelo.select()
return modelo
def selectQuery(self, table, columns ,condition, like, limit):
'''Ejecuta una consulta tipo SELECT en la BD
(str) table => nombre de la tabla a consultar
(list) columns => Columnas a mostrar
(str) condition => condicion si no existe "1=1"
(str) like => para filtras busquedas de no existir es False (columna = valor)
(int) limit => limite de registros si se desa la tabla completa vale 0
SELECT columns FROM table
WHERE conditions | like | 1=1
LIMIT limit | nothing
'''
query = 'SELECT '
# x(desde) i(hasta)
x = 1
i = len(columns)
for item in columns:
if x < i:
query = query + item + ','
if x == i:
query = query + item + ' FROM ' + table
x+=1
#analizamos la condicion
query = query + ' WHERE '
if not condition and not Like:
query = query + '1=1'
elif condition and like :
query = query + condition + ' ' + like
elif not condition and like:
query = query + like
elif not like and condition:
query = query + condition
#terminamos de armar la consulta
if limit == 0:
query = query + ';'
else:
query = query + ' LIMIT ' + limit + ';'
sql = QtSql.QSqlQuery()
sql.prepare(query)
#ejecutamos la consulta, si hay un error acudir a last error
result = self.__consultDb(sql)
if not result:
return False
return result
def insertQuery(self,table,values):
'''Ejecuta una consulta tipo INSERT en la BD, si se manda una columna sin valor se reemplaza por NULL
(str) table => nombre de la tabla
(dic) values => diccionario clave valor
INSERT INTO table (values[columns])
VALUES( values[value]);
'''
query = 'INSERT INTO ' + table +'('
i = 1
x = len(values)
for item in values:
if i<x:
query = query + item + ','
if i==x:
query = query + item + ')'
i+=1
query = query + 'VALUES('
i = 1
for item in values:
if values[item] == '':
#si el valor esta vacio se escribe NULL
values[item] = 'NULL'
if i < x:
query = query + values[item] + ','
if i == x:
query = query + values[item] + ');'
i +=1
sql = QtSql.QSqlQuery()
sql.prepare(query)
result = self.__consultDb(sql)
if not result:
return False
return result
def updateQuery(self,table,values,condition):
'''Ejecuta una Sentencia tipo update en la BD
(str) table => nombre de la tabla
(dic) values => diccionario clave valor para update
(srt) condition => condicion SQL
UPDATE table
SET
values[columns] = values[value]
'''
query = 'UPDATE ' + table + ' SET '
i = 1
x = len(values)
#armamos la consulta
for item in values:
if values[item] == '':
#si el valor esta vacio se escribe NULL
values[item] = 'NULL'
if i < x :
query = query + item + ' = ' + values[item] + ','
if i == x :
query = query + item + ' = ' + values[item]
query = query + ' ' + condition + ';'
sql = QtSql.QSqlQuery()
sql.prepare(query)
result = self.__consultDb(sql)
if not result:
return False
return result
def deleteQuery(self, table, condition ):
'''Metodo encargad de ejecutar una Sentencia tipo DELETE en la BD
(str) table => nombre de la tabla
(str) condition => condicion para el borrado
DELETE FOM table
WHERE condition
'''
sql = QtSql.QSqlQuery()
sql.prepare('DELETE FROM ' + table + ' WHERE ' + condition + ';')
result = sefl.__consultDb(sql)
if not result:
return False
return result
def lastInsertId(self):
'''Ultimo Id ingresado en la BD'''
sql = QtSql.QSqlQuery()
return sql.lastInsertId()
def lastQuery(self):
''' retorna el Sql de la รบltima consulta'''
sql = QtSql.QSqlQuery()
return sql.lastQuery()
def beginTransaction(self):
'''Inicia una transaccion'''
conn = QtSql.QSqlDatabase.database()
conn.transaction()
def commitTransaction(self):
'''Confirma una transaccion'''
conn = QtSql.QSqlDatabase.database()
conn.commit()
def rollBack(self):
'''Cancela y revierte los cambios de una transaccion'''
conn = QtSql.QSqlDatabase.database()
conn.rollback()
def lastError(self):
'''Retorna en ultimo error producido en la base de datos
ojo *** '''
return conn.lastError() | [
"accxell_ax3c@hotmail.es"
] | accxell_ax3c@hotmail.es |
e58d07b1a06f4053031cf1136c0eb2b006bd01a1 | 9fbf346ce416665593993a274aaea53854a049a8 | /places/migrations/0005_alter_place_category.py | 269a808ce8b8fecac5c52fd55ad353d53c96354d | [] | no_license | crigrande/sei-project-4 | e41f5253904b3ddb1e1726b61cdade5013342dae | fef6c76827c269f0e746d10ddcc6e536955009c0 | refs/heads/main | 2023-07-18T20:30:28.592657 | 2021-09-02T12:25:25 | 2021-09-02T12:25:25 | 388,132,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # Generated by Django 3.2.5 on 2021-07-30 09:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
('places', '0004_alter_place_category'),
]
operations = [
migrations.AlterField(
model_name='place',
name='category',
field=models.ManyToManyField(blank=True, related_name='places', to='categories.Category'),
),
]
| [
"cri.grande@gmail.com"
] | cri.grande@gmail.com |
fcfb1689706e7f8c67e679bad72b3bc338eb3680 | 43931b9c07da0a8e3cafaa4d34d005502259fb36 | /fun.py | 30fdecfc29e4b47d0f0024335f006069a7ed8022 | [] | no_license | Preethi-reddy/Assignment | f23dbfa0debacec054c447be64e36f9159d43902 | b7dcf005b05076526efa303b1a4c474bbb8013d2 | refs/heads/master | 2020-06-05T14:45:59.742455 | 2019-06-18T05:09:01 | 2019-06-18T05:09:01 | 192,463,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | # -*- coding: utf-8 -*-
def sum(n1,n2):
res=n1+n2
print('result:',res)
sum(10,20)
def sum(n1,n2):
res=n1+n2
return res
res=sum(299,201)
print('result:',res)
| [
"Preethi Reddy@DESKTOP-I2RJUU0.com"
] | Preethi Reddy@DESKTOP-I2RJUU0.com |
1f29d6713fac5aee3c7a3114a72fe8eb067e1aac | 804189532b58a81bd13c290d0969384eab237a19 | /venv/Lib/site-packages/seleniumbase/fixtures/page_utils.py | 058798a202aa3981cec5fe806ea89e7bc819881b | [] | no_license | georgerobertkplivi/DemoBlazeTesting | 651aa46e24d7aceb83291cc6758162c335cac248 | b2b8d3c4b6c67252ff647425c4980decd0f8b310 | refs/heads/master | 2023-04-30T10:18:22.104743 | 2021-05-19T00:42:10 | 2021-05-19T00:42:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,393 | py | """
This module contains useful utility methods.
"""
import codecs
import re
import requests
def get_domain_url(url):
"""
Use this to convert a url like this:
https://blog.xkcd.com/2014/07/22/what-if-book-tour/
Into this:
https://blog.xkcd.com
"""
if not url.startswith("http://") and not url.startswith("https://"):
return url
url_header = url.split('://')[0]
simple_url = url.split('://')[1]
base_url = simple_url.split('/')[0]
domain_url = url_header + '://' + base_url
return domain_url
def is_xpath_selector(selector):
"""
A basic method to determine if a selector is an xpath selector.
"""
if (selector.startswith('/') or selector.startswith('./') or (
selector.startswith('('))):
return True
return False
def is_link_text_selector(selector):
"""
A basic method to determine if a selector is a link text selector.
"""
if (selector.startswith('link=') or selector.startswith('link_text=') or (
selector.startswith('text='))):
return True
return False
def is_partial_link_text_selector(selector):
"""
A basic method to determine if a selector is a partial link text selector.
"""
if (selector.startswith('partial_link=') or (
selector.startswith('partial_link_text=') or (
selector.startswith('partial_text=')))):
return True
return False
def is_name_selector(selector):
"""
A basic method to determine if a selector is a name selector.
"""
if selector.startswith('name='):
return True
return False
def get_link_text_from_selector(selector):
"""
A basic method to get the link text from a link text selector.
"""
if selector.startswith('link='):
return selector.split('link=')[1]
elif selector.startswith('link_text='):
return selector.split('link_text=')[1]
elif selector.startswith('text='):
return selector.split('text=')[1]
return selector
def get_partial_link_text_from_selector(selector):
"""
A basic method to get the partial link text from a partial link selector.
"""
if selector.startswith('partial_link='):
return selector.split('partial_link=')[1]
elif selector.startswith('partial_link_text='):
return selector.split('partial_link_text=')[1]
elif selector.startswith('partial_text='):
return selector.split('partial_text=')[1]
return selector
def get_name_from_selector(selector):
"""
A basic method to get the name from a name selector.
"""
if selector.startswith('name='):
return selector.split('name=')[1]
return selector
def is_valid_url(url):
regex = re.compile(
r'^(?:http)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if regex.match(url) or ((url.startswith("about:") or (
url.startswith("data:") or url.startswith("chrome:") or (
url.startswith("edge:") or url.startswith("opera:") or (
url.startswith("file:")))))):
return True
else:
return False
def _get_unique_links(page_url, soup):
"""
Returns all unique links.
Includes:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src" links.
"""
if not page_url.startswith("http://") and (
not page_url.startswith("https://")):
return []
prefix = 'http:'
if page_url.startswith('https:'):
prefix = 'https:'
simple_url = page_url.split('://')[1]
base_url = simple_url.split('/')[0]
full_base_url = prefix + "//" + base_url
raw_links = []
raw_unique_links = []
# Get "href" from all "a" tags
links = soup.find_all('a')
for link in links:
raw_links.append(link.get('href'))
# Get "src" from all "img" tags
img_links = soup.find_all('img')
for img_link in img_links:
raw_links.append(img_link.get('src'))
# Get "href" from all "link" tags
links = soup.find_all('link')
for link in links:
raw_links.append(link.get('href'))
# Get "src" from all "script" tags
img_links = soup.find_all('script')
for img_link in img_links:
raw_links.append(img_link.get('src'))
for link in raw_links:
if link not in raw_unique_links:
raw_unique_links.append(link)
unique_links = []
for link in raw_unique_links:
if link and len(link) > 1:
if link.startswith('//'):
link = prefix + link
elif link.startswith('/'):
link = full_base_url + link
elif link.startswith('./'):
f_b_url = full_base_url
if len(simple_url.split('/')) > 1:
f_b_url = full_base_url + "/" + simple_url.split('/')[1]
link = f_b_url + link[1:]
elif link.startswith('#'):
link = full_base_url + link
elif '//' not in link:
f_b_url = full_base_url
if len(simple_url.split('/')) > 1:
f_b_url = full_base_url + "/" + simple_url.split('/')[1]
link = f_b_url + "/" + link
else:
pass
unique_links.append(link)
return unique_links
def _get_link_status_code(link, allow_redirects=False, timeout=5):
""" Get the status code of a link.
If the timeout is exceeded, will return a 404.
For a list of available status codes, see:
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
"""
status_code = None
try:
response = requests.get(
link, allow_redirects=allow_redirects, timeout=timeout)
status_code = response.status_code
except Exception:
status_code = 404
return status_code
def _print_unique_links_with_status_codes(page_url, soup):
""" Finds all unique links in the html of the page source
and then prints out those links with their status codes.
Format: ["link" -> "status_code"] (per line)
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src".
"""
links = _get_unique_links(page_url, soup)
for link in links:
status_code = _get_link_status_code(link)
print(link, " -> ", status_code)
def _download_file_to(file_url, destination_folder, new_file_name=None):
if new_file_name:
file_name = new_file_name
else:
file_name = file_url.split('/')[-1]
r = requests.get(file_url)
with open(destination_folder + '/' + file_name, "wb") as code:
code.write(r.content)
def _save_data_as(data, destination_folder, file_name):
out_file = codecs.open(
destination_folder + '/' + file_name, "w+", encoding="utf-8")
out_file.writelines(data)
out_file.close()
def make_css_match_first_element_only(selector):
# Only get the first match
last_syllable = selector.split(' ')[-1]
if ':' not in last_syllable and ':contains' not in selector:
selector += ':first'
return selector
| [
"georgerobertkplivi@gmail.com"
] | georgerobertkplivi@gmail.com |
bd0ba877cb6b849000ce9ea154a7506ab94dbb97 | 2d735cd72f1b2a17e58397a1214d3bcc2b8f113f | /PYTHON_FUNCTIONS/any_all_in_python.py | c4e84d22e60c5fd4da0ce9f654e5655dd7651839 | [] | no_license | shubhamrocks888/python | 3b95b5b53be8e0857efe72b8797e01e959d230f4 | 7313ddd0d09a0b478df928a07a6094930b597132 | refs/heads/master | 2022-12-15T00:03:40.261942 | 2020-08-29T18:00:42 | 2020-08-29T18:00:42 | 279,280,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | Truth table :-
any all
All true values True True
All false values False False
One True(all others are False) True False
One False(all others are True) True False
Empty False True
##Any and All are two built ins provided in python used for successive And/Or.
'''Any'''
Returns true if any of the items is True. It returns False if empty or all are false.
Any can be thought of as a sequence of OR operations on the provided iterables.
It short circuit the execution i.e. stop the execution as soon as the result is known.
Syntax : any(list of iterables)
# Since all are false, false is returned
print (any([False, False, False, False])) # Output: False
# Here the method will short-circuit at the
# second item (True) and will return True.
print (any([False, True, False, False])) # Output: True
# Here the method will short-circuit at the
# first (True) and will return True.
print (any([True, False, False, False])) # Output: True
'''All'''
Returns true if all of the items are True (or if the iterable is empty). All can be thought
of as a sequence of AND operations on the provided iterables. It also short circuit the
execution i.e. stop the execution as soon as the result is known.
Syntax : all(list of iterables)
# Here all the iterables are True so all
# will return True and the same will be printed
print (all([True, True, True, True])) # Output: True
# Here the method will short-circuit at the
# first item (False) and will return False.
print (all([False, True, True, False])) # Output: False
# This statement will return False, as no
# True is found in the iterables
print (all([False, False, False])) # Output: False
Practical Examples:
# This code explains how can we
# use 'any' function on list
list1 = []
list2 = []
# Index ranges from 1 to 10 to multiply
for i in range(1,11):
list1.append(4*i)
# Index to access the list2 is from 0 to 9
for i in range(0,10):
list2.append(list1[i]%5==0)
print('See whether at least one number is divisible by 5 in list 1=>')
print(any(list2))
Output:
See whether at least one number is divisible by 5 in list 1=>
True
# Illustration of 'all' function in python 3
# Take two lists
list1=[]
list2=[]
# All numbers in list1 are in form: 4*i-3
for i in range(1,21):
list1.append(4*i-3)
# list2 stores info of odd numbers in list1
for i in range(0,20):
list2.append(list1[i]%2==1)
print('See whether all numbers in list1 are odd =>')
print(all(list2))
Output:
See whether all numbers in list1 are odd =>
True
| [
"shubhamrocks888@gmail.com"
] | shubhamrocks888@gmail.com |
7a921265a5aea45921fed6857e46ce0e6166ba03 | 4847088068b303c8c786203474327118738ff45b | /notion/54_spiral_matrix.py | cf2e17811e8a5c5c12ddec3a7b94951696dcf557 | [] | no_license | cukejianya/leetcode | fd0166a83aafbcda036b88c4239170bc7556d287 | 10db2aab180aece1130b8da19094cf74f158e625 | refs/heads/master | 2022-04-27T21:21:30.368378 | 2022-04-08T01:54:22 | 2022-04-08T01:54:22 | 167,761,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
array = []
seen = set()
direction = [(0, 1), (1, 0), (0, -1), (-1, 0)]
direction_idx = 0
cell = (0, 0)
while cell:
row, col = cell
array.append(matrix[row][col])
seen.add(cell)
d = direction[direction_idx]
row_check = row + d[0] >= 0 and row + d[0] < len(matrix)
col_check = col + d[1] >= 0 and col + d[1] < len(matrix[0])
if row_check and col_check and not (row + d[0], col + d[1]) in seen:
cell = (row + d[0], col + d[1])
else:
direction_idx += 1
d = direction[direction_idx]
row_check = row + d[0] >= 0 and row + d[0] < len(matrix)
col_check = col + d[1] >= 0 and col + d[1] < len(matrix[0])
if row_check and col_check and not (row + d[0], col + d[1]) in seen:
cell = (row + d[0], col + d[1])
else:
break
return array
| [
"cukejianya@gmail.com"
] | cukejianya@gmail.com |
c22f8acacd79b8afcf53558dbd03b826832af27a | 8580fd92512c236deae692d155bdb5eab2e00508 | /DarkTrails/asgi.py | 7b723533039a12cf02182a7076964bb2881d83f3 | [] | no_license | JackSnowdon/DownDT | d5d7f04acf92b5102cf67c5aa70cda2ebc4062fd | 17924b0b64da39d29c892fee4c7746d09b76fd8c | refs/heads/master | 2023-04-01T00:25:16.382696 | 2021-03-28T16:19:26 | 2021-03-28T16:19:26 | 352,373,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for DarkTrails project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DarkTrails.settings')
application = get_asgi_application()
| [
"jacksnowdondrums@gmail.com"
] | jacksnowdondrums@gmail.com |
dedcf30da9c556f6c718f73a63447d0d9386a9e4 | dcc83732ec13c2af53bb86f9ca3d989d2e7ae399 | /Algorithms/scheduler_v1.py | 65d3d9113745a1081e8e937eee3112a7508bb339 | [] | no_license | MondayCat/PLGA | 83cc79bc583744cc58d4a35b6e3cff6449096155 | 6776107f0eebe0b83957ed5193cf4625ef84ab54 | refs/heads/main | 2023-08-16T13:23:45.857359 | 2021-10-05T09:34:42 | 2021-10-05T09:34:42 | 359,085,882 | 11 | 3 | null | null | null | null | UTF-8 | Python | false | false | 11,702 | py | import torch
import os
import json
import numpy as np
import copy
from Algorithms.servers.serverASO import ServerASO
from Algorithms.servers.serverFedAvg import ServerFedAvg
from Algorithms.servers.serverLGP import ServerLGP
from Algorithms.servers.serverPerFed import ServerPerFed
from Algorithms.servers.serverFedAsync import serverFedAsync
from Algorithms.servers.serverPLGP import ServerPLGP
from Algorithms.users.userASO import UserASO
from Algorithms.users.userFedAvgBase import UserFedAvg
# from Algorithms.users.userLGP import UserLGP
from Algorithms.users.userLGP_v1 import UserLGP
from Algorithms.users.userPerFed import UserPerFed
from Algorithms.users.userFedAsync import UserFedAsync
from Algorithms.users.userPLGP import UserPLGP
from utils.model_utils import read_data, read_user_data
from data.org_dataset import load_org_dataset
from data.dataset_v4 import dataset_federate_new
import torch
import pandas as pd
class Scheduler:
    def __init__(self, dataset,algorithm, model, async_process, batch_size, learning_rate, lamda, beta, num_glob_iters,
        local_epochs, optimizer, num_users, user_labels, niid, times, data_load, extra):
        """Build the federated-learning scheduler.

        Partitions a (hard-coded) dataset across simulated clients, creates one
        user object per client for the selected algorithm, creates the matching
        central server, and registers every user with that server.

        Args:
            dataset: dataset name (used for result-file naming only; the actual
                data loaded below is selected by the hard-coded DATASET_NAME).
            algorithm: one of 'FedAvg', 'ASO', 'LGP', 'PerFed', 'FedAsync', 'PLGP'.
            model: template model; deep-copied for the scheduler and passed to
                every user/server constructor.
            async_process: True for asynchronous aggregation, False for synchronous.
            batch_size, learning_rate, lamda, beta: hyper-parameters forwarded
                to every user.
            num_glob_iters: number of global communication rounds.
            local_epochs: local training epochs per round.
            optimizer: optimizer name forwarded to users.
            num_users: total number of simulated clients.
            user_labels: labels-per-user setting (kept for result naming; the
                data split below does not appear to use it).
            niid: non-IID flag (kept for result naming).
            times: NOTE(review): this argument is ignored -- self.times is
                hard-coded to 8 below; confirm that is intentional.
            data_load: data-loading mode string forwarded to users.
            extra: free-form tag appended to the result file name.
        """
        self.dataset = dataset
        self.model = copy.deepcopy(model)
        self.algorithm = algorithm
        self.optimizer = optimizer
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.async_process = async_process
        self.lamda = lamda
        self.beta = beta
        # NOTE(review): overrides the `times` constructor argument with a
        # constant; the first `self.times` users below get an extra
        # staleness/weight argument (i+1) -- confirm this is intended.
        self.times = 8
        self.data_load = data_load
        self.extra = extra
        self.num_users = num_users
        self.num_glob_iters = num_glob_iters
        self.local_epochs = local_epochs
        self.user_labels = user_labels
        self.niid = niid
        # Per-round bookkeeping filled in by evaluate()/run().
        self.users = []
        self.local_acc = []
        self.avg_local_acc = []
        self.avg_local_train_acc = []
        self.avg_local_train_loss = []
        self.server_acc = []
        # old data split
        # data = read_data(dataset, niid, num_users, user_labels)
        # NOTE(review): duplicate assignment -- num_users was already stored above.
        self.num_users = num_users
        # Pooled test data from all clients; handed to the server for central
        # model evaluation.
        test_data = []
        # id, train, test = read_user_data(0, data, dataset)
        # if algorithm == 'FedAvg':
        #     user = UserFedAvg(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load, self.times)
        # if algorithm == 'ASO':
        #     user = UserASO(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load, self.times)
        # if algorithm == 'LGP':
        #     user = UserLGP(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load, self.times)
        # if algorithm == 'PerFed':
        #     user = UserPerFed(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load, self.times)
        # self.users.append(user)
        # test_data.extend(test)
        # NOTE(review): dataset choice and data root are hard-coded here and
        # shadow the `dataset` argument -- consider wiring them together.
        DATASET_NAME = "MNIST"
        # DATASET_NAME = "CIFAR10"
        # DATASET_NAME = "EMNIST"
        # DATASET_NAME = "CIFAR100"
        # DATASET_NAME = "FashionMNIST"
        DATA_ROOT = "/home/yx/Fede_MAML/data/"
        current_dataset = load_org_dataset(DATASET_NAME,
                                           DATA_ROOT)
        print("load original dataset finished!,the dataset shape is {}".format(np.array(current_dataset.data).shape))
        # One (initially empty) sample-index bucket per client.
        machine_list = []
        for i in range(num_users):
            machine_list.append([])
        # create federate train and test dataset
        train_federate_dataset, test_federate_dataset, client2index_list = \
            dataset_federate_new(current_dataset, machine_list,
                                 distribution_mode="NIID",
                                 class_num_client=6,
                                 dataset_name="")
        # exit()
        # First self.times users: constructed with an extra trailing argument
        # (i+1) -- presumably a staleness/frequency setting; TODO confirm
        # against the user-class constructors.
        for i in range(self.times):
            # id, train, test = read_user_data(i, data, dataset)
            id = i
            train,test = train_federate_dataset[i],test_federate_dataset[i]
            if algorithm == 'FedAvg':
                user = UserFedAvg(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load, i+1)
            if algorithm == 'ASO':
                user = UserASO(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load, i+1)
            if algorithm == 'LGP':
                user = UserLGP(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load, i+1)
            if algorithm == 'PerFed':
                user = UserPerFed(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load, i+1)
            if algorithm == 'FedAsync':
                user = UserFedAsync(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load, i+1)
            if algorithm == "PLGP":
                user = UserPLGP(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta,
                                local_epochs, optimizer, data_load, i + 1)
            self.users.append(user)
            test_data.extend(test)
        # Remaining users: constructed without the extra trailing argument.
        for i in range(self.times, self.num_users):
        # for i in range(self.num_users):
            # id, train, test = read_user_data(i, data, dataset)
            id = i
            train, test = train_federate_dataset[i], test_federate_dataset[i]
            if algorithm == 'PerFed':
                user = UserPerFed(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load)
            if algorithm == 'FedAvg':
                user = UserFedAvg(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load)
            if algorithm == 'ASO':
                user = UserASO(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load)
            if algorithm == 'LGP':
                user = UserLGP(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load)
            if algorithm == 'FedAsync':
                user = UserFedAsync(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta, local_epochs, optimizer, data_load)
            if algorithm == "PLGP":
                user = UserPLGP(id, train, test, model, async_process, batch_size, learning_rate, lamda, beta,
                                local_epochs, optimizer, data_load)
            self.users.append(user)
            test_data.extend(test)
        # Central server for the chosen algorithm, evaluated on the pooled
        # test data of all clients.
        if algorithm == 'FedAvg':
            self.server = ServerFedAvg(algorithm, model, async_process, test_data, batch_size)
        if algorithm == 'PerFed':
            self.server = ServerPerFed(algorithm, model, async_process, test_data, batch_size)
        if algorithm == 'ASO':
            self.server = ServerASO(algorithm, model, async_process, test_data, batch_size)
        if algorithm == 'LGP':
            self.server = ServerLGP(algorithm, model, async_process, test_data, batch_size)
        if algorithm == 'FedAsync':
            self.server = serverFedAsync(algorithm, model, async_process, test_data, batch_size)
        if algorithm == 'PLGP':
            self.server = ServerPLGP(algorithm, model, async_process, test_data, batch_size)
        # Register every client (id + local training-set size) with the server.
        for user in self.users:
            self.server.append_user(user.id, user.train_data_samples)
def run(self):
for glob_iter in range(self.num_glob_iters):
print("-------------Round number: ",glob_iter, " -------------")
for user in self.users:
user.run(self.server,glob_iter)
if self.async_process == False:
self.server.clear_update_cache()
self.evaluate()
# sync not drop
# extra_iters = [800,800, 400, 267, 200, 160, 134,115, 100, 89]
# for i in range(extra_iters[self.times] - self.users[0].train_counter):
# user = self.users[0]
# user.train(list(self.server.model.parameters()))
# self.server.update_parameters(user.id, user.model.parameters(), user.train_data_samples)
# self.server.clear_update_cache()
# self.evaluate()
# async
# train_count = []
# for user in self.users:
# if user.trained:
# self.server.update_parameters(user.id, user.model.parameters(), user.train_data_samples)
# train_count.append(user.train_counter)
# self.server.clear_update_cache()
# self.evaluate()
# self.local_acc.append(train_count)
# self.server_acc.append(self.num_glob_iters)
self.save_results()
self.server.save_model()
# self.save_loss_log()
def save_loss_log(self):
for user in self.users:
loss_log = user.loss_log
name=range(21)
dataframe = pd.DataFrame(columns=name, data=loss_log)
fileName = "./logs/"+user.id+'.csv'
dataframe.to_csv(fileName, index=False, sep=',')
def evaluate(self):
self.evaluate_users()
self.evaluate_server()
def evaluate_users(self):
stats = self.users_test()
client_acc = [x*1.0/y for x, y in zip(stats[2], stats[1])]
self.local_acc.append(client_acc)
print("Local Accurancy: ", client_acc)
def evaluate_server(self):
stats = self.server.test()
server_acc = stats[0]*1.0/stats[1]
self.server_acc.append(server_acc)
print("Central Model Accurancy: ", server_acc)
def users_test(self):
num_samples = []
tot_correct = []
losses = []
for c in self.users:
ct, ns = c.test()
tot_correct.append(ct*1.0)
num_samples.append(ns)
ids = [c.id for c in self.users]
return ids, num_samples, tot_correct
def users_train_error_and_loss(self):
num_samples = []
tot_correct = []
losses = []
for c in self.users:
ct, cl, ns = c.train_error_and_loss()
tot_correct.append(ct*1.0)
num_samples.append(ns)
losses.append(cl*1.0)
ids = [c.id for c in self.users]
return ids, num_samples, tot_correct, losses
def save_results(self):
alg = self.dataset + "_" + self.algorithm + "_" + self.optimizer
if self.async_process == True:
alg = alg + "_async"
else:
alg = alg + "_sync"
if self.niid == True:
alg = alg + "_niid"
else:
alg = alg + "_iid"
alg = alg + "_" + str(self.learning_rate) + "_" + str(self.beta) + "_" + str(self.lamda) + "_" + str(self.num_users) + "u" + "_" + str(self.user_labels) + "l" + "_" + str(self.batch_size) + "b" + "_" + str(self.local_epochs) + "_" + str(self.num_glob_iters) + "ep" + "_" + self.data_load
alg = alg + "_" + str(self.times) + "_" + self.extra
if (len(self.server_acc) & len(self.local_acc) ) :
dictData={}
for i in range(self.num_users):
dictData['client_'+str(i)] = [x[i] for x in self.local_acc]
dictData['central_model_acc'] = self.server_acc[:]
dataframe = pd.DataFrame(dictData)
fileName = "./results/"+alg+'_v1.csv'
dataframe.to_csv(fileName, index=False, sep=',')
| [
"noreply@github.com"
] | MondayCat.noreply@github.com |
4eb49fd57d25c5ea69c6aa9a83268f4638e6c72d | ce84b74f38e7506cb822379c0438acbbc7eabf55 | /Meetup/migrations/0003_auto_20190331_1645.py | a6cc1c9b896a66e90c54a9c202a029e31c83702d | [] | no_license | BhanuPratapSIngh97/Project | c6543061e964f7d03aca4d9701fd5046dddc0214 | 1dbf82354105e12bdd9804da905b923f197e3907 | refs/heads/master | 2021-06-29T17:00:07.061583 | 2021-06-24T07:37:47 | 2021-06-24T07:37:47 | 181,318,159 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2019-03-31 11:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Meetup', '0002_wpost_upic'),
]
operations = [
migrations.AddField(
model_name='user',
name='cover',
field=models.ImageField(blank=True, null=True, upload_to='cover'),
),
migrations.AlterField(
model_name='user',
name='ppic',
field=models.ImageField(upload_to='ppic/%y%m%d'),
),
]
| [
"bhanu.oficial97@gmail.com"
] | bhanu.oficial97@gmail.com |
24446e918c6434db62fb519512d51e55a09fc05a | b426e444308e5787d252692c5d81b075b9af3c79 | /src/serving/application/lib/rsa/prime.py | 97fe632cfa3bb2b5ebc3dfc45c2c8ddbc570267e | [] | no_license | realfranser/MachineLearning-GoogleCloud | bb96142a2ebf7aa8a1ad205417007093bb00b970 | c135f03bccffff98c8bd35d03358eb79ff540fb9 | refs/heads/main | 2023-03-21T11:35:10.789427 | 2021-03-16T23:43:57 | 2021-03-16T23:43:57 | 322,024,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,272 | py | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stรผvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Numerical functions related to primes.
Implementation based on the book Algorithm Design by Michael T. Goodrich and
Roberto Tamassia, 2002.
"""
from rsa._compat import range
import rsa.common
import rsa.randnum
__all__ = ['getprime', 'are_relatively_prime']
def gcd(p, q):
"""Returns the greatest common divisor of p and q
>>> gcd(48, 180)
12
"""
while q != 0:
(p, q) = (q, p % q)
return p
def get_primality_testing_rounds(number):
"""Returns minimum number of rounds for Miller-Rabing primality testing,
based on number bitsize.
According to NIST FIPS 186-4, Appendix C, Table C.3, minimum number of
rounds of M-R testing, using an error probability of 2 ** (-100), for
different p, q bitsizes are:
* p, q bitsize: 512; rounds: 7
* p, q bitsize: 1024; rounds: 4
* p, q bitsize: 1536; rounds: 3
See: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
"""
# Calculate number bitsize.
bitsize = rsa.common.bit_size(number)
# Set number of rounds.
if bitsize >= 1536:
return 3
if bitsize >= 1024:
return 4
if bitsize >= 512:
return 7
# For smaller bitsizes, set arbitrary number of rounds.
return 10
def miller_rabin_primality_testing(n, k):
"""Calculates whether n is composite (which is always correct) or prime
(which theoretically is incorrect with error probability 4**-k), by
applying Miller-Rabin primality testing.
For reference and implementation example, see:
https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
:param n: Integer to be tested for primality.
:type n: int
:param k: Number of rounds (witnesses) of Miller-Rabin testing.
:type k: int
:return: False if the number is composite, True if it's probably prime.
:rtype: bool
"""
# prevent potential infinite loop when d = 0
if n < 2:
return False
# Decompose (n - 1) to write it as (2 ** r) * d
# While d is even, divide it by 2 and increase the exponent.
d = n - 1
r = 0
while not (d & 1):
r += 1
d >>= 1
# Test k witnesses.
for _ in range(k):
# Generate random integer a, where 2 <= a <= (n - 2)
a = rsa.randnum.randint(n - 3) + 1
x = pow(a, d, n)
if x == 1 or x == n - 1:
continue
for _ in range(r - 1):
x = pow(x, 2, n)
if x == 1:
# n is composite.
return False
if x == n - 1:
# Exit inner loop and continue with next witness.
break
else:
# If loop doesn't break, n is composite.
return False
return True
def is_prime(number):
"""Returns True if the number is prime, and False otherwise.
>>> is_prime(2)
True
>>> is_prime(42)
False
>>> is_prime(41)
True
"""
# Check for small numbers.
if number < 10:
return number in {2, 3, 5, 7}
# Check for even numbers.
if not (number & 1):
return False
# Calculate minimum number of rounds.
k = get_primality_testing_rounds(number)
# Run primality testing with (minimum + 1) rounds.
return miller_rabin_primality_testing(number, k + 1)
def getprime(nbits):
"""Returns a prime number that can be stored in 'nbits' bits.
>>> p = getprime(128)
>>> is_prime(p-1)
False
>>> is_prime(p)
True
>>> is_prime(p+1)
False
>>> from rsa import common
>>> common.bit_size(p) == 128
True
"""
assert nbits > 3 # the loop wil hang on too small numbers
while True:
integer = rsa.randnum.read_random_odd_int(nbits)
# Test for primeness
if is_prime(integer):
return integer
# Retry if not prime
def are_relatively_prime(a, b):
"""Returns True if a and b are relatively prime, and False if they
are not.
>>> are_relatively_prime(2, 3)
True
>>> are_relatively_prime(2, 4)
False
"""
d = gcd(a, b)
return d == 1
if __name__ == '__main__':
print('Running doctests 1000x or until failure')
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count % 100 == 0 and count:
print('%i times' % count)
print('Doctests done')
| [
"serranoarresejavier@gmail.com"
] | serranoarresejavier@gmail.com |
d8037dee1bbda982007ee6ed10a916a6955b0877 | 0e316a03af1aa585ecd9525aea499a3988f08c01 | /learning_logs/urls.py | 8c75ed3cb42d1f71d369e05e51ca872cddd6fcd0 | [] | no_license | xioumau/learning_log | 459964e69c837d6a55a1f038fd9a18112b7cffdc | 1f4f210d0ab1d7753f3b42ca75dd905be4c43458 | refs/heads/master | 2023-06-24T13:48:11.322379 | 2021-08-01T21:24:39 | 2021-08-01T21:24:39 | 378,497,167 | 0 | 1 | null | 2021-07-30T21:12:27 | 2021-06-19T20:14:28 | Python | UTF-8 | Python | false | false | 730 | py | """Define padrรตes de URL para learning_logs"""
from django.urls import path
from . import views
app_name = 'learning_logs'
urlpatterns = [
# pagina inicial
path('', views.index, name='index'),
# mostra todos os assuntos
path('topics/', views.topics, name='topics'),
# pagina de detalhes para um รบnico assunto
path('topics/<int:topic_id>/', views.topic, name='topic'),
# pagina para adicionar um novo assunto
path('new_topic/', views.new_topic, name='new_topic'),
# pagina para adicionar nova entrada
path('new_entry/<int:topic_id>', views.new_entry, name='new_entry'),
# pรกgina para editar uma entrada
path('edit_entry/<int:entry_id>', views.edit_entry, name='edit_entry'),
]
| [
"mauriciocon@gmail.com"
] | mauriciocon@gmail.com |
2bdaf389b5e48d429d1b3b05b8a493621a9a7ed6 | 144d8f2a5a5c751cebaabc73f2e2b82fa23c61c1 | /nebula_sniffer/nebula_sniffer/main.py | 6a4f42627e97abb132cf4cf0da49e18e7fe9ab3a | [
"Apache-2.0"
] | permissive | bradbann/sniffer | f248697cf4b483a7af1e43a08d3cc6e420b21d99 | 3ef3ad5316942669f32cda7d0c96f5a8c441efc2 | refs/heads/master | 2020-04-28T04:38:00.496351 | 2019-03-11T10:56:37 | 2019-03-11T10:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,663 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import os
import logging
import traceback
import gevent
import gevent.queue
import settings
from threathunter_common.metrics.metricsrecorder import MetricsRecorder
from nebula_parser.autoparser import get_current_generators
from .urltree import URLTree
from .utils import is_linux
from .bson.objectid import ObjectId
from .msg import TextMsg, HttpMsg
from .sessionmapping import *
max_body_length_config = configcontainer.get_config("sniffer").int_item("sniffer.httpmsg.max_body",
caching=3600, default=2048)
class Main(object):
def __init__(self, id, parser, driver, cpu=None, is_process=True):
self.parser = parser
self.driver = driver
self.id = id
self._running = False
self._rpc_task = None
self._events_task = None
self._health_task = None
self.queue = gevent.queue.Queue(maxsize=10000)
self.cpu = cpu
self.is_process = is_process
self.logger = settings.init_logging("main.{}".format(self.id))
self.error_mr = MetricsRecorder("sniffer.main.error")
self.msg_mr = MetricsRecorder("sniffer.main.msg")
self.event_mr = MetricsRecorder("sniffer.main.event")
self.rpc_mr = MetricsRecorder("sniffer.main.rpc")
self.main_mr = MetricsRecorder("sniffer.main.loop")
self.urltree = URLTree()
def add_error_metrics(self, data_type):
tags = {"id": self.id, "type": data_type}
self.error_mr.record(1, tags)
def start(self):
if self._running:
return
self.main_mr.record(1, {"id": self.id, "type": "start"})
# cpu binding
self.logger.info("process %s binding to cpu %s", os.getpid(), self.cpu)
if is_linux() and self.cpu and self.is_process:
# taskset ็จไบๆฅ็ใ่ฎพๅฎ CPU ๆ ธไฝฟ็จๆ
ๅต็ๅฝไปคใ ๅฏไปฅ็จ taskset ๅฏๅจไธไธชๅฝไปค๏ผ็ดๆฅ่ฎพ็ฝฎๅฎ็ CPU ๆ ธ็่ฟ่กไพ่ตๅ
ณ็ณปใ
# self.cpu = 1
subprocess.Popen(["taskset", "-cp", "{}".format(self.cpu), "{}".format(os.getpid())],
stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()
self._running = True
self.logger.info("sniffer instance is starting driver")
if self.driver:
self.driver.start()
self.logger.info("sniffer instance is starting rpc task")
self._rpc_task = gevent.spawn(self.rpc_processor)
self._rpc_task.start()
# parse event for httpmsg
self.logger.info("sniffer instance is starting events task")
self._events_task = gevent.spawn(self.event_processor)
self._events_task.start()
self.logger.info("sniffer instance is starting healthy task")
self._health_task = gevent.spawn(self.health_processor)
self._health_task.start()
self.urltree.synchronize()
def stop(self):
self._running = False
self.logger.info("sniffer instance is stopping rpc task")
self.main_mr.record(1, {"id": self.id, "type": "stop"})
if self._rpc_task:
self._rpc_task.kill()
self.logger.info("sniffer instance is stopping events task")
if self._events_task:
self._events_task.kill()
self.logger.info("sniffer instance is stopping healthy task")
if self._health_task:
self._health_task.kill()
self.logger.info("sniffer instance is stopping driver")
if self.driver:
self.driver.stop()
def close(self):
self.stop()
def __del__(self):
self.stop()
def event_processor(self):
idle_run = 0
while self._running:
# no events coming
if idle_run > 0 and idle_run % 5 == 0:
# idle sleep for 0.5 seconds
gevent.sleep(0.5)
if idle_run % 100 == 0:
self.logger.debug("no msg in the last short time")
self.main_mr.record(1, {"id": self.id, "type": "idle"})
try:
msg = self.driver.get_msg_nowait()
except Exception as ex:
# no msg yet
msg = None
if not msg:
idle_run += 1
continue
else:
idle_run = 0
# msg common processing
try:
self.msg_mr.record(1, {"id": self.id, "type": "input"})
self.logger.debug("start to process msg %s", msg)
# ๅผๅงbonesๆๅ
self.urltree.synchronize()
uri_stem = msg.uri_stem
page = msg.page
if msg.is_static:
# ้ๆ้กต้ข็นๆฎ้ป่พ
new_url = msg.host + '/****.' + msg.page.rsplit('.', 1)[-1]
msg.uri_stem = msg.page = new_url
elif page == uri_stem:
# no normalization yet
new_page, new_params = self.urltree.normalize_url(page)
if new_page != page:
msg.uri_stem = new_page
msg.page = new_page
new_params = '&'.join(['%s=%s' % (k, v) for k, v in new_params.iteritems()])
old_params = msg.uri_query
if old_params:
new_params = old_params + '&' + new_params
msg.uri_query = new_params
# msg specific processing per customer
if self.parser.filter(msg):
self.logger.debug("filtered by customparsers")
self.msg_mr.record(1, {"id": self.id, "type": "drop"})
continue
self.logger.debug("msg has passed the filter")
events = []
if isinstance(msg, HttpMsg):
# parse ๅฎ้
ๅ
ฅๅฃ๏ผๅฏนhttpไฟกๆฏ่ฟ่กๅค็๏ผ่ฟๅไธไธชevents๏ผไบไปถๅ่กจ๏ผ
events = self.parser.get_events_from_http_msg(msg)
elif isinstance(msg, TextMsg):
events = self.parser.get_events_from_text_msg(msg)
else:
self.logger.error("fail to process this type of event")
self.add_error_metrics("parse failure")
continue
http_events = [e for e in events if e.name in {"HTTP_DYNAMIC", "HTTP_STATIC"}]
if not http_events:
continue
# ๅ็ฌฌไธไธชๆฏๅ ไธบๆๆ็๏ผๅฎขๆทๅค็ๆจกๅไธญ็ฌฌไธไธชๅค็ๅฝๆฐ้ฝๆฏextract_http_log_event()
http_event = http_events[0]
# try autoparsers
for g in get_current_generators():
result = g.parse_event(http_event, msg)
if result:
events.append(result)
if not events:
continue
self.logger.debug("msg has generated %d events", len(events))
self.msg_mr.record(1, {"id": self.id, "type": "output"})
self.event_mr.record(len(events), {"id": self.id, "type": "input"})
# this is an ugly version, need a totally new one
# processing id and pid
httpid = "0" * 24
for ev in events:
if ev.name in {"HTTP_DYNAMIC", "HTTP_STATIC"}:
ev.property_values["pid"] = "0" * 24
httpid = ev.property_values["id"]
for ev in events:
if ev.name not in {"HTTP_DYNAMIC", "HTTP_STATIC"}:
ev.property_values["id"] = str(ObjectId())
ev.property_values["pid"] = httpid
# "processing uid/did/sid"
id_dict = {
"uid": "",
"did": "",
"sid": "",
}
for ev in events:
for key in id_dict.keys():
if ev.property_values.get(key):
id_dict[key] = ev.property_values[key]
if ev.name == "ACCOUNT_LOGIN":
id_dict["uid"] = ev.property_values["user_name"]
store_user_session_mapping(id_dict["uid"], id_dict["sid"])
if ev.name == "ACCOUNT_REGISTRATION":
id_dict["uid"] = ev.property_values["user_name"]
store_user_session_mapping(id_dict["uid"], id_dict["sid"])
if not id_dict["uid"] or id_dict["uid"].startswith("fake"):
t = get_user_from_session(id_dict["sid"])
if t:
id_dict["uid"] = t
self.logger.debug("get id for this batch of events %s", id_dict)
for ev in events:
ev.property_values.update(id_dict)
_max_length = max_body_length_config.get()
for ev in events:
# body should not be too long
if "s_body" in ev.property_values:
ev.property_values["s_body"] = ev.property_values["s_body"][:_max_length]
if "c_body" in ev.property_values:
ev.property_values["c_body"] = ev.property_values["c_body"][:_max_length]
# end of the ugly code
for ev in events:
self.logger.debug("get event %s", ev)
self.queue.put_nowait(ev)
self.event_mr.record(len(events), {"id": self.id, "type": "output"})
except:
# todo add metrics
self.add_error_metrics("main process failure")
self.msg_mr.record(1, {"id": self.id, "type": "drop"})
self.logger.error("fail to process, error %s", traceback.format_exc())
def health_processor(self):
while self._running:
if self.driver and not self.driver.is_alive():
self._running = False
gevent.sleep(5)
def rpc_processor(self):
mode = configcontainer.get_config("sniffer").get_string("sniffer.servicemode", "redis")
if mode == "redis":
import redisserviceclient
http_client = redisserviceclient.get_httplog_rpc_client()
misc_client = redisserviceclient.get_misclog_rpc_client()
elif mode == "rabbitmq":
import rabbitmqserviceclient
amqp_url = configcontainer.get_config("sniffer").get_string("sniffer.amqp_url", "")
http_client = rabbitmqserviceclient.get_httplog_rpc_client(amqp_url)
misc_client = rabbitmqserviceclient.get_misclog_rpc_client(amqp_url)
else:
self.add_error_metrics("invalid service")
raise RuntimeError("invalid service mode")
http_client.start()
misc_client.start()
idle_run = 0
events_sent = 0
r = 0
event = None
while self._running:
r += 1
try:
events_sent = 0
event = self.queue.get_nowait()
self.rpc_mr.record(1, {"id": self.id, "type": "input", "mode": mode, "name": event.name})
if event.name == "HTTP_DYNAMIC" or event.name == "HTTP_STATIC":
if event.property_values["is_static"]:
# remove redundant values
event.property_values["s_body"] = ""
event.property_values["c_body"] = ""
event.property_values["cookie"] = ""
event.key = event.property_values["c_ip"]
http_client.send(event, event.key, False)
self.logger.debug("sending an http event on key %s", event.key)
self.rpc_mr.record(1, {"id": self.id, "type": "output", "mode": mode, "name": event.name})
else:
misc_client.send(event, event.key, False)
self.logger.debug("sending an %s event on key %s", event.name, event.key)
self.rpc_mr.record(1, {"id": self.id, "type": "output", "mode": mode, "name": event.name})
events_sent = 1
event = None
except gevent.queue.Empty:
pass
except Exception as err:
import traceback
traceback.print_exc()
self.add_error_metrics("send event")
self.rpc_mr.record(1, {"id": self.id, "type": "error", "mode": mode,
"name": event.name if event else ""})
self.logger.error("fail to send event, error %s", err)
finally:
# sleep while idle
if not events_sent:
idle_run += 1
idle_run = min(idle_run, 5)
gevent.sleep(0.1 * idle_run)
else:
idle_run = 0
| [
"zengjinping@threathunter.cn"
] | zengjinping@threathunter.cn |
1a90a5b9c86f0f8a4bacd30deeeb5b802be1abd9 | 1cd965da612f3f0e4d458935cc60023de942b3a6 | /scripts/kill_servers.py | 19c4f5c5ef47794394cbc709e15195a18f171191 | [
"Apache-2.0"
] | permissive | weld-project/clamor | ad42d001078ceb9e74a4a9389050dc819bf6cdc3 | 72e7a75e608bb3407ab6f7ee47f24f707932b71a | refs/heads/master | 2023-09-03T12:56:48.623120 | 2021-11-04T18:00:46 | 2021-11-04T18:00:46 | 424,517,465 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,884 | py | import argparse
import csv
import itertools
import json
import math
import numpy as np
import subprocess
import sys
import time
aws_key = "prthaker-slate.pem"
def run_cmd(server_name, script):
cmd = "ssh -A -o StrictHostKeyChecking=no -i %s ubuntu@%s 'bash -s' < %s" % (aws_key, server_name, script)
output = subprocess.check_output(cmd, shell=True)
return output
def run_cmd_nonblock(server_name, script):
cmd = "ssh -A -o StrictHostKeyChecking=no -i %s ubuntu@%s 'bash -s' < %s" % (aws_key, server_name, script)
print cmd
subprocess.Popen(cmd, shell=True)
def start_manager(manager_ip, bench_name, nprocs, worker_ips):
run_cmd_nonblock(manager_ip, "run-manager.sh %s %d %s" % (bench_name, nprocs, worker_ips))
def start_master(master_ip, bench_name, nprocs, manager_name, worker_ips):
run_cmd_nonblock(master_ip, "run-master.sh %s %d %s %s" % (bench_name, nprocs, manager_name, worker_ips))
def start_workers(worker_ips, bench_name, nprocs, manager_name):
for ip in worker_ips:
for i in range(nprocs):
cmd = "runserver.sh %s %d %d %s & sleep 0.1" % (bench_name, i, i, manager_name)
print cmd
run_cmd_nonblock(ip, cmd)
def kill_servers(server_ips, bench_name):
for ip in server_ips:
try:
run_cmd(ip, "killserver.sh %s" % bench_name)
except:
pass
def read_ips(ip_fname):
with open(ip_fname, 'r') as f:
return [l.strip() for l in f.readlines()]
def main():
parser = argparse.ArgumentParser(
description="Run the performance suite for the passed in benchmarks"
)
parser.add_argument('-n', "--num_iterations", type=int, default=1,
help="Number of iterations to run each benchmark")
#parser.add_argument('-f', "--output_fname", type=str, required=True,
# help="Name of CSV to dump output in")
parser.add_argument('-b', "--benchmark", type=str, default=None,
help="Benchmark to run")
parser.add_argument('-s', "--server_names", type=str, default='../boto-scripts/servers.txt',
help="Filename containing list of server IPs")
parser.add_argument('-m', "--master_name", type=str, default='../boto-scripts/master.txt',
help="Filename containing master IP")
parser.add_argument('-k', "--nworkers", type=int, default=1,
help="Number of nodes to use")
parser.add_argument('-p', "--nprocs", type=int, default=1,
help="Number of processes per worker")
args = parser.parse_args()
server_names = read_ips(args.server_names)
master_name = read_ips(args.master_name)[0]
kill_servers([master_name], args.benchmark)
kill_servers(server_names, args.benchmark)
if __name__=="__main__":
main()
| [
"ubuntu@ip-172-31-11-211.ec2.internal"
] | ubuntu@ip-172-31-11-211.ec2.internal |
8b23a3fffb6859b0622210f0f50699c660b3ef3f | 50ee2f4f1a7d2e5ff7ac35118c5ac45f9b923865 | /0x01-python-if_else_loops_functions/1-last_digit.py | c7b28ae9d733661962aa47ddbb2e987589ebc1b4 | [] | no_license | spencerhcheng/holbertonschool-higher_level_programming | b489fbe8eba6109ef1eaa0d9363f3477e7eb16c4 | f8e1dbc24fcf8fb40ca135d2700872eb773e481e | refs/heads/master | 2021-01-20T06:54:35.044899 | 2018-05-20T05:09:59 | 2018-05-20T05:09:59 | 89,943,332 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | #!/usr/bin/python3
import random
number = random.randint(-10000, 10000)
if number > 0:
lastNum = number % 10
elif number <= 0:
lastNum = number % -10
print('Last digit of {:d} is {:d}'. format(number, lastNum), end=" ")
if lastNum > 5:
print('and is greater than 5')
elif lastNum == 0:
print('and is 0')
elif lastNum < 6:
print('and is less than 6 and not 0')
| [
"spencerhcheng@gmail.com"
] | spencerhcheng@gmail.com |
2333e797e2e9e2d031e0167063a60eddf093205a | 7f6ea40f4a6acb964a224cf860705ac04b446911 | /CCap.py | 6f4bba5d1eeb6ab548888270c94ca08f4a65bb3d | [] | no_license | ctfu/concernCapture | 65b9cd7a82e699973776579d54c0849718df45ed | 4860857ca67cfc323ea38942a39f47fd6a57ed96 | refs/heads/master | 2020-04-05T13:02:34.156774 | 2017-12-28T07:34:47 | 2017-12-28T07:34:47 | 95,034,303 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,039 | py | from Tkinter import *
import tkFileDialog
import subprocess
from tkintertable import TableCanvas, TableModel
import os
def raise_frame(frame):
    """Bring *frame* to the top of the stacked screen frames so it becomes visible."""
    frame.tkraise()
# --- Top-level window --------------------------------------------------------
root = Tk()
root.title("Concern Capture")
root.geometry("450x380")
# Screen stack: every screen is a Frame placed in the SAME grid cell, so
# raise_frame() switches screens by lifting one frame above the others.
frames = []
mainFrame = Frame(root)
aboutFrame = Frame(root)
docFrame = Frame(root)
fileConvertFrame = Frame(root)
fileCombineFrame = Frame(root)
callGraphFrame = Frame(root)
freqFrame = Frame(root)
irFrame = Frame(root)
frames.append(mainFrame)
frames.append(aboutFrame)
frames.append(docFrame)
frames.append(fileConvertFrame)
frames.append(fileCombineFrame)
frames.append(callGraphFrame)
frames.append(freqFrame)
frames.append(irFrame)
for frame in frames:
    frame.grid(row=0, column=0, sticky='news')
# --- Menu bar: every entry raises the matching screen frame -------------------
menu = Menu(root)
root.config(menu=menu)
introMenu = Menu(menu)
menu.add_cascade(label="Concern Capture", menu=introMenu)
introMenu.add_command(label="Generate Trace", command=lambda:raise_frame(mainFrame))
introMenu.add_command(label="About", command=lambda:raise_frame(aboutFrame))
introMenu.add_command(label="Documentation", command=lambda:raise_frame(docFrame))
fileMenu = Menu(menu)
menu.add_cascade(label="File", menu=fileMenu)
fileMenu.add_command(label="Convert File", command=lambda:raise_frame(fileConvertFrame))
fileMenu.add_command(label="Combine Files", command=lambda:raise_frame(fileCombineFrame))
analysisMenu = Menu(menu)
menu.add_cascade(label="Analysis", menu=analysisMenu)
analysisMenu.add_command(label="Dynamic Call Graph", command=lambda:raise_frame(callGraphFrame))
analysisMenu.add_command(label="Frequency Analysis", command=lambda:raise_frame(freqFrame))
analysisMenu.add_command(label="LDA/LSI", command=lambda:raise_frame(irFrame))
# javashot generate program trace menu
def genCallTrace(event):
    """Run the target jar under the Javashot agent so it records a call trace.

    Reads the agent path and the target jar path from the two entry widgets
    on the main frame, then blocks until the instrumented program exits.
    """
    agent_path = javashotE_entry.get()
    jar_path = projectE_entry.get()
    command = ["java", "-javaagent:" + agent_path, "-jar", jar_path]
    print(command)
    subprocess.call(command)
# --- "Generate Trace" screen widgets ------------------------------------------
# Two labelled entry fields (Javashot agent path, target jar path) and a
# button that launches the instrumented run via genCallTrace.
javashotP_label = Label(mainFrame, text="Javashot Path:")
projectP_label = Label(mainFrame, text="Project Path:")
javashotE_entry = Entry(mainFrame, bd = 3)
projectE_entry = Entry(mainFrame, bd=3)
javashotP_label.grid(row=0, sticky=E, padx=(50,0), pady=(50,0))
javashotE_entry.grid(row=0, column=1, pady=(50,0))
projectP_label.grid(row=1, sticky=E, padx=(50,0))
projectE_entry.grid(row=1, column=1)
genTrace_buttion = Button(mainFrame, text="Generate Call Trace")
# <Button-1> = left mouse click; the bound handler receives the event object.
genTrace_buttion.bind("<Button-1>", genCallTrace)
genTrace_buttion.grid(row=2, column=1)
# --- "About" screen -----------------------------------------------------------
# Static description shown on the About page.  NOTE(review): this is runtime
# UI text, so spelling issues in it ("Essense", "genertion") are preserved
# here rather than edited like a comment.
information ="Author: Chuntao Fu\n"\
"Supervisor: Dr. Harvey Siy\n\n"\
'I am a graduate student in Computer Science at University of Nebraska at Omaha.'\
'This tool is created for the support of my thesis "Tool Support in Capturing the Essense of Concern in Source Code".'\
'The tool allows you either works on dynamic program analysis with Javashot from scratch or perform the analysis with prepared files'\
'in the right format. It provides functionalities including structural analysis that based on dynamic call graph genertion and'\
'textual analysis that based on Information Retrieval techniques. It achieves the purpose of providing automatic support for'\
'minimized related code subset capturing from an given software concern.'
Label(aboutFrame, text=information, justify=LEFT, wraplength=450).pack()
# --- "Documentation" screen (placeholder) -------------------------------------
# NOTE(review): .pack() returns None, so `doc` is always None.
doc = Label(docFrame, text="This is documentation frame").pack()
# convert file frame
def convertFile(event):
    """Convert a Javashot .dot trace into "class1;class2;method" lines.

    Prompts for a .dot file named like "digraph_<name>.dot" and writes the
    converted trace to ./convert/converted_<name>.txt.  The sed/grep
    pipeline rewrites '.' and '$' in class names to '_', turns "->" edge
    arrows and '[' into ';', drops ']', and strips the digraph
    header/footer lines.  Requires a POSIX shell with sed and grep on PATH.
    """
    # pipes.quote is the Python 2 spelling; shlex.quote replaced it in 3.3+.
    try:
        from pipes import quote
    except ImportError:
        from shlex import quote
    absoluteFileName = tkFileDialog.askopenfilename()
    if not absoluteFileName:
        return  # user cancelled the file dialog
    print(absoluteFileName)
    commandLine = 'sed "s/\\\\$/_/g" | sed "s/->/;/g" | sed "s/\[/;/g" | sed "s/\]//g" | grep -v digraph | grep -v "^[}]$"'
    # Keep the basename, then strip up to the first '_' and from the first
    # '.' on: "digraph_Foo.dot" -> "Foo".  Assumes both characters are
    # present in the chosen file name (Javashot naming convention).
    relFileName = absoluteFileName.split("/")[-1]
    outFileName = "converted_" + relFileName[relFileName.index('_') + 1:relFileName.index('.')]
    print(outFileName)
    dir_path = "convert/"
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    # Quote the user-chosen path so spaces or shell metacharacters cannot
    # break (or inject into) the shell pipeline; the context manager closes
    # the output file even if the call raises.
    with open(os.path.join(dir_path, outFileName + ".txt"), "w") as outFile:
        result = subprocess.call('sed "s/\./_/g" ' + quote(absoluteFileName) + " | " + commandLine,
                                 shell=True, stdout=outFile)
    print(result)
# --- "Convert File" screen widgets --------------------------------------------
Label(fileConvertFrame, text="File Converstion").pack()
convertFileInfo = "Select a target file (in .dot format), convert it to the format of: (class1;class2;method)."
Label(fileConvertFrame, text=convertFileInfo, justify=LEFT, wraplength=450).pack()
# NOTE(review): .pack() returns None, so this name is always None.
convertFileChooser_label = Label(fileConvertFrame, text="Target File:", pady=10).pack()
convertFileChooser_button = Button(fileConvertFrame, text="Choose File")
convertFileChooser_button.bind("<Button-1>", convertFile)
convertFileChooser_button.pack()
# combine file frame
def combineFiles(event):
    """Concatenate several user-selected trace files with `cat`.

    The result is written to ./combine/combined_combine<N>.txt, where N is
    the number of files selected.  Requires `cat` on PATH.
    """
    fileNames = tkFileDialog.askopenfilenames()
    if not fileNames:
        return  # user cancelled the file dialog
    outFileName = "combined_" + "combine" + str(len(fileNames))
    print(outFileName)
    # List-form command (no shell) handles paths with spaces safely.
    combineCommand = ["cat"] + list(fileNames)
    dir_path = "combine/"
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    # Context manager closes the output file even if the call raises.
    with open(os.path.join(dir_path, outFileName + ".txt"), "w") as outFile:
        result = subprocess.call(combineCommand, stdout=outFile)
    print(result)
# --- "Combine Files" screen widgets -------------------------------------------
Label(fileCombineFrame, text="Concatenate Multiple Files ").pack()
combineFileInfo = "Select multiple files and combine them into a single file."
Label(fileCombineFrame, text=combineFileInfo, justify=LEFT, wraplength=450).pack()
# NOTE(review): .pack() returns None, so this name is always None.
combineFileChooser_label = Label(fileCombineFrame, text="Concatenate Files:", padx=20, pady=30).pack()
combineFileChooser_button = Button(fileCombineFrame, text="Choose Files")
combineFileChooser_button.bind("<Button-1>", combineFiles)
combineFileChooser_button.pack()
# call graph & dominator tree frame
def genDynamicCallGraph(event):
    """Render a dynamic call graph for a converted trace file.

    Runs scripts/tracer.py on the chosen "class1;class2;method" file to
    produce tracer_<name>.dot, renders it with Graphviz (`dot -Tpdf -O`
    creates tracer_<name>.dot.pdf alongside it), then opens the PDF with
    the macOS `open` command.
    """
    absoluteFileName = tkFileDialog.askopenfilename()
    if not absoluteFileName:
        return  # user cancelled the file dialog
    print(absoluteFileName)
    # "converted_Foo.txt" -> "tracer_Foo.dot" (assumes '_' and '.' present).
    relFileName = absoluteFileName.split("/")[-1]
    outFileName = "tracer_" + relFileName[relFileName.index('_') + 1:relFileName.index('.')] + ".dot"
    tracerCommand = ["python", "./scripts/tracer.py", absoluteFileName]
    # Context manager closes the .dot file before Graphviz reads it, even
    # if the tracer call raises (the original handle leaked on error).
    with open(outFileName, "w") as outFile:
        subprocess.call(tracerCommand, stdout=outFile)
    graphCommand = ["dot", "-Tpdf", "-O", outFileName]
    result = subprocess.call(graphCommand)
    print(result)
    # List form instead of shell=True: safe even if the name contains spaces.
    subprocess.call(["open", outFileName + ".pdf"])
def genDomTree(event):
    """Render a dominator tree for a converted trace file.

    Runs scripts/tracerDom.py on the chosen file to produce
    tracerDom_<name>.dot, renders it with Graphviz (`dot -Tpdf -O` creates
    tracerDom_<name>.dot.pdf), then opens the PDF with the macOS `open`
    command.
    """
    absoluteFileName = tkFileDialog.askopenfilename()
    if not absoluteFileName:
        return  # user cancelled the file dialog
    # "converted_Foo.txt" -> "tracerDom_Foo.dot" (assumes '_' and '.' present).
    relFileName = absoluteFileName.split("/")[-1]
    outFileName = "tracerDom_" + relFileName[relFileName.index('_') + 1:relFileName.index('.')] + ".dot"
    tracerDomCommand = ["python", "./scripts/tracerDom.py", absoluteFileName]
    # Context manager closes the .dot file before Graphviz reads it, even
    # if the tracer call raises (the original handle leaked on error).
    with open(outFileName, "w") as outFile:
        subprocess.call(tracerDomCommand, stdout=outFile)
    graphCommand = ["dot", "-Tpdf", "-O", outFileName]
    result = subprocess.call(graphCommand)
    print(result)
    # List form instead of shell=True: safe even if the name contains spaces.
    subprocess.call(["open", outFileName + ".pdf"])
# --- "Dynamic Call Graph" screen widgets --------------------------------------
Label(callGraphFrame, text="Call Graph Generation").pack()
genCallGraphInfo = "Select a graget file (in class1;class2;method format), generate a adjusted directed graph based on the input file. "
Label(callGraphFrame, text=genCallGraphInfo, justify=LEFT, wraplength=450).pack()
# NOTE(review): .pack() returns None, so this name is always None.
genFileChooser_label = Label(callGraphFrame, text="Target File:", pady=10).pack()
genFileChooser_button = Button(callGraphFrame, text="Gen Call Graph")
genFileChooser_button.bind("<Button-1>", genDynamicCallGraph)
genFileChooser_button.pack()
domTreeFileChooser_button = Button(callGraphFrame, text="Gen Dominator Tree")
domTreeFileChooser_button.bind("<Button-1>", genDomTree)
# (trailing semicolon is legal Python, just unconventional)
domTreeFileChooser_button.pack();
# Frequency analysis frame
# 1. combine all files in one execution scenario into one single file
# 2. calculate the frequency distribution over multiple execution scenarios
def calFrequency(event):
    """Run scripts/frequency.py over all user-selected trace files.

    The combined frequency distribution is written to frequency_output.txt
    in the current directory (read back later by viewFreqOutput).
    """
    files = tkFileDialog.askopenfilenames()
    fileList = list(files)
    print(fileList)
    freqCommand = ["python", "./scripts/frequency.py"] + fileList
    # 'with' ensures the output file is closed even if the call raises.
    with open("frequency_output.txt", "w") as outFile:
        subprocess.call(freqCommand, stdout=outFile)
# open a new window to view the frequency output
def viewFreqOutput(event):
    """Show frequency_output.txt in a table inside a new top-level window.

    Each line of the file is expected as "<label> <frequency>"; only the
    first occurrence of a label is kept.
    """
    top = Toplevel()
    analysis = {}
    # Original code never closed the file; 'with' fixes the leak.
    with open("frequency_output.txt") as f:
        for line in f:
            tokens = line.rstrip('\n').split(' ')
            if tokens[0] not in analysis:
                analysis[tokens[0]] = {"Label": tokens[0],
                                       "Frequency": tokens[1]}
    model = TableModel()
    model.importDict(analysis)
    table = TableCanvas(top, model=model)
    table.createTableFrame()
    top.mainloop()
# generate frequency colored graph based on the frequency analysis output for one execution scenario
def genFreqCallGraph(event):
    """Generate a frequency-colored call-graph PDF for one trace file.

    Pipeline: scripts/tracerFreq.py -> .dot file -> Graphviz PDF -> open.
    """
    absoluteFileName = tkFileDialog.askopenfilename()
    print(absoluteFileName)
    relFileName = absoluteFileName.split("/")[-1]
    # Output name keeps only the part between the first '_' and the first '.'.
    outFileName = "tracerFreq_" + relFileName[relFileName.index('_')+1:relFileName.index('.')] + ".dot"
    tracerCommand = ["python", "./scripts/tracerFreq.py", absoluteFileName]
    # 'with' guarantees the .dot file is closed before Graphviz reads it.
    with open(outFileName, "w") as outFile:
        subprocess.call(tracerCommand, stdout=outFile)
    graphCommand = ["dot", "-Tpdf", "-O", outFileName]
    result = subprocess.call(graphCommand)
    print(result)
    # NOTE(review): 'open' is a macOS command; assumes a macOS host.
    subprocess.call("open " + outFileName + ".pdf", shell=True)
def genFreqDomTree(event):
    """Generate a frequency-colored dominator-tree PDF for one trace file.

    Pipeline: scripts/tracerDomFreq.py -> .dot file -> Graphviz PDF -> open.
    """
    absoluteFileName = tkFileDialog.askopenfilename()
    relFileName = absoluteFileName.split("/")[-1]
    # Output name keeps only the part between the first '_' and the first '.'.
    outFileName = "tracerDomFreq_" + relFileName[relFileName.index('_')+1:relFileName.index('.')] + ".dot"
    tracerDomCommand = ["python", "./scripts/tracerDomFreq.py", absoluteFileName]
    # 'with' guarantees the .dot file is closed before Graphviz reads it.
    with open(outFileName, "w") as outFile:
        subprocess.call(tracerDomCommand, stdout=outFile)
    graphCommand = ["dot", "-Tpdf", "-O", outFileName]
    result = subprocess.call(graphCommand)
    print(result)
    # NOTE(review): 'open' is a macOS command; assumes a macOS host.
    subprocess.call("open " + outFileName + ".pdf", shell=True)
# --- "Frequency Analysis" tab widgets -----------------------------------
Label(freqFrame, text="Frequency Analysis").pack()
genFreqInfo = "Select multiple files (in class1;class2;method format), generate a class frequency output based on the selected files."
Label(freqFrame, text = genFreqInfo, justify=LEFT, wraplength=450).pack()
subFrame = Frame(freqFrame)
subFrame.pack()
# Four label/button rows laid out on a grid below.
calFreq_label = Label(subFrame, text="Calculate Frequency:")
calFreq_button = Button(subFrame, text="Choose Files")
freqOutput_label = Label(subFrame, text="View Frequency Output:")
freqOutput_button = Button(subFrame, text="View Output")
freqGraph_label = Label(subFrame, text="Frequency Call Graph:")
freqGraph_button = Button(subFrame, text="Choose File")
# NOTE(review): "Dome Tree" in the UI text looks like a typo for
# "Dominator Tree"; left as-is because this edit changes comments only.
freqDomTree_label = Label(subFrame, text="Frequency Dome Tree:")
freqDomTree_button = Button(subFrame, text="Choose File")
calFreq_label.grid(row=0, sticky=E, pady=(30, 0))
calFreq_button.bind("<Button-1>", calFrequency)
calFreq_button.grid(row=0, column=1, pady=(30, 0))
freqOutput_label.grid(row=1, sticky=E)
freqOutput_button.bind("<Button-1>", viewFreqOutput)
freqOutput_button.grid(row=1, column=1)
freqGraph_label.grid(row=2, sticky=E)
freqGraph_button.bind("<Button-1>", genFreqCallGraph)
freqGraph_button.grid(row=2, column=1)
freqDomTree_label.grid(row=3, sticky=E)
freqDomTree_button.bind("<Button-1>", genFreqDomTree)
freqDomTree_button.grid(row=3, column=1)
# LDA/LSI frame
def getAnalysisType():
    """Radio-button callback: enable the widgets of the chosen analysis
    (LDA or LSI) and clear + disable the widgets of the other one."""
    global analysisType
    print(str(var.get()))
    if str(var.get()) == "1":
        analysisType = "LDA"
        # Enable all LDA parameter entries.
        for entry in (ldaTopic_entry, ldaTopicWord_entry, ldaDecay_entry,
                      ldaPasses_entry, ldaIteration_entry):
            entry.config(state="normal")
        # Wipe any LSI input, then grey the LSI widgets out.
        for entry in (lsiTopic_entry, lsiDecay_entry,
                      lsiIteration_entry, lsiQuery_Entry):
            entry.delete(0, END)
        for widget in (lsiTopic_entry, lsiOnePass_checkbox, lsiDecay_entry,
                       lsiIteration_entry, lsiQuery_Entry):
            widget.config(state="disabled")
    else:
        analysisType = "LSI"
        # Wipe any LDA input and grey the LDA entries out.
        for entry in (ldaTopic_entry, ldaTopicWord_entry, ldaDecay_entry,
                      ldaPasses_entry, ldaIteration_entry):
            entry.delete(0, END)
        for entry in (ldaTopic_entry, ldaTopicWord_entry, ldaDecay_entry,
                      ldaPasses_entry, ldaIteration_entry):
            entry.config(state="disabled")
        # Enable the LSI widgets (the iteration entry stays controlled by
        # the Multi-Pass checkbox, so it is deliberately not enabled here).
        for widget in (lsiTopic_entry, lsiOnePass_checkbox,
                       lsiDecay_entry, lsiQuery_Entry):
            widget.config(state="normal")
    print(analysisType)
def getLSIPassStatus():
    """Checkbox callback: multi-pass LSI enables the power-iteration entry;
    one-pass LSI disables it."""
    global lsiOnePass
    multi_pass = str(var1.get()) == "1"
    lsiOnePass = "False" if multi_pass else "True"
    lsiIteration_entry.config(state="normal" if multi_pass else "disabled")
def populateData(type):
    """Load the latest LDA or LSI output file and display it as a table
    in a new top-level window.

    LDA lines look like "topic:p1*"w1" + p2*"w2" + ..."; LSI lines look
    like "doc_name:doc_id:probability". The first line of either file is
    a header and is skipped.
    """
    top = Toplevel()
    analysis = {}
    if type == "LDA":
        with open("./analysis/LDA_output.txt") as f:
            next(f)  # skip header line
            index = 0
            for line in f:
                tokens = line.rstrip('\n').split(':')
                topicWords = tokens[1].split('+')
                print(topicWords)
                # One table row per (topic, word) pair.
                for w in topicWords:
                    tw = w.split('*')
                    word = tw[1].replace('"', '').replace('"', '')
                    analysis[index] = {"Topic ID": tokens[0],
                                       "Probability": tw[0],
                                       "Word": word}
                    index = index + 1
    else:
        with open("./analysis/LSI_output.txt") as f:
            next(f)  # skip header line
            index = 0
            for line in f:
                tokens = line.rstrip('\n').split(':')
                analysis[index] = {"Document Name": tokens[0],
                                   "Document ID": tokens[1],
                                   "Probability": tokens[2]}
                index = index + 1
    model = TableModel()
    model.importDict(analysis)
    table = TableCanvas(top, model=model)
    table.createTableFrame()
    top.mainloop()
def irAnalysis(event):  # param was misspelled "evnet"; bound positionally, so safe
    """Run scripts/ir.py (LDA or LSI) on a user-selected corpus file.

    Collects the parameters from the GUI entries (falling back to the
    defaults shown in the widgets when a field is blank), writes the
    script's stdout to analysis/<TYPE>_output.txt, then shows the result
    table via populateData().
    """
    absoluteFileName = tkFileDialog.askopenfilename()
    print(absoluteFileName)
    analysisCommand = ["python3", "./scripts/ir.py",
                       absoluteFileName, analysisType]
    if analysisType == "LDA":
        topicNumber = ldaTopic_entry.get()
        topicWords = ldaTopicWord_entry.get()
        # Empty entry -> default value (same defaults the widgets display).
        ldaDecay = ldaDecay_entry.get() or "0.5"
        ldaPass = ldaPasses_entry.get() or "1"
        ldaIteration = ldaIteration_entry.get() or "50"
        analysisCommand += [topicNumber, topicWords,
                            ldaDecay, ldaPass, ldaIteration]
    else:
        topicNumber = lsiTopic_entry.get()
        lsiDecay = lsiDecay_entry.get() or "1.0"
        lsiIteration = "2"
        # The iteration count only applies when multi-pass LSI is selected.
        if lsiOnePass == "False" and len(lsiIteration_entry.get()) != 0:
            lsiIteration = lsiIteration_entry.get()
        lsiQuery = lsiQuery_Entry.get()
        analysisCommand += [topicNumber, lsiOnePass,
                            lsiDecay, lsiIteration, lsiQuery]
    dir_path = "analysis/"
    if not os.path.isdir("./" + dir_path):
        os.makedirs(dir_path)
    # 'with' closes the output file before populateData() reads it back.
    with open(os.path.join(dir_path, analysisType + "_output" + ".txt"), "w") as outFile:
        result = subprocess.call(analysisCommand, stdout=outFile)
    print(result)
    populateData(analysisType)
# --- "LDA / LSI" tab widgets --------------------------------------------
# Shared state read by the callbacks above.
var = IntVar()        # radio selection: 1 = LDA, 2 = LSI
var1 = IntVar()       # checkbox: 1 = multi-pass LSI
analysisType = "LDA"
lsiOnePass = "True"
irTypeFrame = Frame(irFrame)
irTypeFrame.pack()
Label(irTypeFrame, text="LDA / LSI Analysis").pack()
irType_label = Label(irTypeFrame, text="Type of analysis:")
irType_label.pack(side=LEFT)
irType_radio1 = Radiobutton(irTypeFrame, text="LDA", variable=var, value=1, command=getAnalysisType)
irType_radio1.pack(side=LEFT)
irType_radio2 = Radiobutton(irTypeFrame, text="LSI", variable=var, value=2, command=getAnalysisType)
irType_radio2.pack()
irSubframe = Frame(irFrame)
irSubframe.pack(side=BOTTOM)
# LDA parameter entries (pre-filled with defaults, disabled until the
# LDA radio button is selected).
ldaType_label = Label(irSubframe, text="For LDA:")
ldaTopic_label = Label(irSubframe, text="Topic Numbers:")
ldaTopic_entry = Entry(irSubframe, bd=2)
ldaTopicWord_label = Label(irSubframe, text="Topic Words:")
ldaTopicWord_entry = Entry(irSubframe, bd=2)
ldaDecay_label = Label(irSubframe, text="Decay(0-1)")
ldaDecay_entry = Entry(irSubframe, bd = 2)
ldaDecay_entry.insert(END, "0.5")
ldaDecay_entry.config(state="disabled")
ldaPasses_label = Label(irSubframe, text="Pass Numbers")
ldaPasses_entry = Entry(irSubframe, bd = 2)
ldaPasses_entry.insert(END, "1")
ldaPasses_entry.config(state="disabled")
ldaIteration_label = Label(irSubframe, text="Iteration Numbers")
ldaIteration_entry = Entry(irSubframe, bd = 2)
ldaIteration_entry.insert(END, "50")
ldaIteration_entry.config(state="disabled")
ldaType_label.grid(row=0, column=0)
ldaTopic_label.grid(row=1, sticky=E)
ldaTopic_entry.grid(row=1, column=1)
ldaTopicWord_label.grid(row=2, sticky=E)
ldaTopicWord_entry.grid(row=2, column=1)
ldaDecay_label.grid(row=3, sticky=E)
ldaDecay_entry.grid(row=3, column=1)
ldaPasses_label.grid(row=4, sticky=E)
ldaPasses_entry.grid(row=4, column=1)
ldaIteration_label.grid(row=5, sticky=E)
ldaIteration_entry.grid(row=5, column=1)
# LSI parameter entries (disabled until the LSI radio button is selected).
lsiType_label = Label(irSubframe, text="For LSI:")
lsiOnePass_checkbox = Checkbutton(irSubframe, text="Multi-Pass", variable=var1, onvalue=1, offvalue = 0, command=getLSIPassStatus)
lsiTopic_label = Label(irSubframe, text="Topic Numbers:")
lsiTopic_entry = Entry(irSubframe, bd=2)
lsiDecay_label = Label(irSubframe, text="Decay(0-1)")
lsiDecay_entry = Entry(irSubframe, bd = 2)
lsiDecay_entry.insert(END, "1.0")
lsiDecay_entry.config(state="disabled")
lsiIteration_label = Label(irSubframe, text="Power Iteration")
lsiIteration_entry = Entry(irSubframe, bd = 2)
lsiIteration_entry.insert(END, "2")
# FIX: the original disabled lsiIteration_entry twice; once is enough.
lsiIteration_entry.config(state="disabled")
lsiQuery_label = Label(irSubframe, text="Search Query:")
lsiQuery_Entry = Entry(irSubframe, bd=2)
lsiType_label.grid(row=6, column=0)
lsiOnePass_checkbox.grid(row=6, column=1)
lsiTopic_label.grid(row=7, sticky=E)
lsiTopic_entry.grid(row=7, column=1)
lsiDecay_label.grid(row=8, sticky=E)
lsiDecay_entry.grid(row=8, column=1)
lsiIteration_label.grid(row=9, sticky=E)
lsiIteration_entry.grid(row=9, column=1)
lsiQuery_label.grid(row=10, sticky=E)
lsiQuery_Entry.grid(row=10, column=1)
irAnalysis_button =Button(irSubframe, text="Start Analysis")
irAnalysis_button.bind("<Button-1>", irAnalysis)
irAnalysis_button.grid(row=11, column=1)
# Show the main frame and enter the event loop.
raise_frame(mainFrame)
root.mainloop()
| [
"cfu@my.bellevue.edu"
] | cfu@my.bellevue.edu |
6aaadd38872c563c7e3b4fd9a31a6d2edfb79945 | 41b73ecc4fa00a58609c1c3b8e717bbbc13cdee6 | /test/test_all.py | d7bd3837fc94c5de55e932b9801ad5547ef409f3 | [] | no_license | ahwillia/sinkdiv | 70c2f689af43cf80dd8c3951199885f3792d9ac3 | 85bd51f369855b78e5c0e1d5bb2aa8928d85c428 | refs/heads/master | 2023-01-31T10:56:08.481608 | 2020-12-18T04:41:26 | 2020-12-18T04:41:26 | 298,928,192 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,364 | py | import pytest
import numpy as np
from numpy.testing import assert_allclose
from sinkdiv import OTCost, ForwardKL, Balanced
from scipy.optimize import approx_fprime
def test_entropy_increases(make_fig=False):
    """
    Check that increasing epsilon increases blur in the
    transport plan.
    """
    epsilons = (0.01, 0.1, 1.0)
    margdiv = ForwardKL(1.0)

    # Two 1-D Gaussian-ish marginals on slightly different grids.
    x = np.linspace(-4, 4, 51)[:, None]
    y = np.linspace(-4, 4, 50)[:, None]
    a = np.squeeze(np.exp(-x ** 2))
    b = np.squeeze(np.exp(-y ** 2))
    a /= np.sum(a)
    b /= np.sum(b)

    # One optimal plan per epsilon.
    plans = [OTCost(margdiv, eps, 1e-6).fit(a, x, b, y).P_ for eps in epsilons]

    # Plan entropy must grow monotonically with the regularization strength.
    entropies = [np.sum(-P * np.log(P + 1e-10) - P + 1) for P in plans]
    assert np.all(np.diff(entropies) > 0)

    if make_fig:
        import matplotlib.pyplot as plt
        fig, axes = plt.subplots(1, 3, sharey=True, sharex=True)
        for P, eps, ax in zip(plans, epsilons, axes):
            ax.imshow(P, aspect="auto")
            ax.set_title("eps = {}".format(eps))
        fig.set_size_inches((4, 2))
        fig.tight_layout()
        plt.show()
# @pytest.mark.parametrize('eps', [0.01, 0.1, 1.0])
# @pytest.mark.parametrize('tol', [1e-6])
# def test_balanced_duality_gap(eps, tol):
# """
# Check agreement between primal and dual objectives,
# balanced transport case.
# """
# np.random.seed(1234)
# margdiv = Balanced()
# x = np.linspace(-4, 4, 51)[:, None]
# y = np.linspace(-4, 4, 50)[:, None]
# a = np.squeeze(np.exp(-x ** 2))
# b = np.squeeze(np.exp(-y ** 2))
# a /= a.sum()
# b /= b.sum()
# ot = OTCost(margdiv, eps, tol).fit(a, x, b, y)
# assert_allclose(ot.primal_obj_, ot.dual_obj_, atol=1e-3)
@pytest.mark.parametrize('seed', [123])
@pytest.mark.parametrize('eps', [1.0])
@pytest.mark.parametrize('lam', [1000])  # <-- !! currently works for large lam, but not small !!
@pytest.mark.parametrize('b_mass', [1.0])
@pytest.mark.parametrize('tol', [1e-6])
def test_reference_implementation(seed, eps, lam, b_mass, tol):
    """
    Compare transport plan to Python Optimal Transpot (POT)
    library.
    """
    from ot.unbalanced import sinkhorn_stabilized_unbalanced

    rs = np.random.RandomState(seed)

    # Random atom locations and random mass vectors, normalized to the
    # requested total masses.
    x = rs.randn(25, 1)
    y = rs.randn(24, 1)
    a = np.random.rand(x.size)
    b = np.random.rand(y.size)
    a *= (1.0 / a.sum())
    b *= (b_mass / b.sum())

    # Our solver.
    otcost = OTCost(ForwardKL(lam), eps, tol).fit(a, x, b, y)

    # Reference solver from POT, on the same cost matrix.
    reference_plan = sinkhorn_stabilized_unbalanced(
        a, b, otcost.C_, eps, lam, numItermax=10000
    )

    assert_allclose(otcost.P_, reference_plan, atol=1e-5, rtol=1e-2)
@pytest.mark.parametrize('seed', [123])
@pytest.mark.parametrize('tol', [1e-6])
@pytest.mark.parametrize('eps', [1e-6])
def test_zero_cost(seed, eps, tol):
    """
    Assert cost is zero if epsilon and lambda penalties are both very small.
    In this case, an optimal transport plan could just be the zeros matrix.
    """
    rs = np.random.RandomState(seed)

    # Random atoms with unit-mass marginals.
    x = rs.randn(25, 1)
    y = rs.randn(24, 1)
    a = np.random.rand(x.size)
    b = np.random.rand(y.size)
    a *= (1.0 / a.sum())
    b *= (1.0 / b.sum())

    # Tiny marginal penalty => transporting nothing is essentially free.
    otcost = OTCost(ForwardKL(1e-6), eps, tol).fit(a, x, b, y)

    assert_allclose(otcost.primal_obj_, 0.0, atol=1e-5)
    assert_allclose(otcost.dual_obj_, 0.0, atol=1e-5)
@pytest.mark.parametrize('seed', [123])
@pytest.mark.parametrize('eps', [0.1, 1.0, 10])
@pytest.mark.parametrize('lam', [0.1, 1.0, 10])
@pytest.mark.parametrize('b_mass', [0.5, 1.0, 2.0])
@pytest.mark.parametrize('tol', [1e-6])
def test_unbalanced_kl_duality_gap(seed, eps, lam, b_mass, tol):
    """
    Primal and dual objectives should agree (small duality gap),
    unbalanced forward-KL case.
    """
    rs = np.random.RandomState(seed)

    # Random atoms; marginals normalized to total masses 1 and b_mass.
    x = rs.randn(25, 1)
    y = rs.randn(24, 1)
    a = np.random.rand(x.size)
    b = np.random.rand(y.size)
    a *= (1.0 / a.sum())
    b *= (b_mass / b.sum())

    otcost = OTCost(ForwardKL(lam), eps, tol).fit(a, x, b, y)

    assert_allclose(otcost.primal_obj_, otcost.dual_obj_, atol=1e-4)
@pytest.mark.parametrize('seed', [123, 1234])
@pytest.mark.parametrize('eps', [0.1, 1.0, 10])
@pytest.mark.parametrize('lam', [0.1, 1.0, 10])
@pytest.mark.parametrize('b_mass', [0.5, 1.0, 2.0])
@pytest.mark.parametrize('tol', [1e-6])
def test_ot_kl_gradients(seed, eps, lam, b_mass, tol):
    """
    Analytic gradients of the OT cost w.r.t. both mass vectors should
    match finite-difference approximations.
    """
    rs = np.random.RandomState(seed)

    # Random atoms; marginals normalized to total masses 1 and b_mass.
    x = rs.randn(25, 1)
    y = rs.randn(24, 1)
    a = np.random.rand(x.size)
    b = np.random.rand(y.size)
    a *= (1.0 / a.sum())
    b *= (b_mass / b.sum())

    otcost = OTCost(ForwardKL(lam), eps, tol)
    otcost.fit(a, x, b, y)
    grad_a = otcost.grad_a_.copy()
    grad_b = otcost.grad_b_.copy()

    step = np.sqrt(np.finfo(float).eps)

    # Finite-difference gradient w.r.t. a.
    def primal_wrt_a(a_):
        otcost.fit(a_, x, b, y)
        return otcost.primal_obj_

    assert_allclose(grad_a, approx_fprime(a, primal_wrt_a, step),
                    atol=1e-4, rtol=1e-3)

    # Finite-difference gradient w.r.t. b.
    def primal_wrt_b(b_):
        otcost.fit(a, x, b_, y)
        return otcost.primal_obj_

    assert_allclose(grad_b, approx_fprime(b, primal_wrt_b, step),
                    atol=1e-4, rtol=1e-3)
| [
"alex.h.willia@gmail.com"
] | alex.h.willia@gmail.com |
6aca78d446a771d1bdc8bb31bbbc2bb778bacfba | 206c10808b6224f7d8236e27cc555e723af695d9 | /tests/test_empty_service.py | 8ab14bce925b0271890c48c84c359ad361d40e51 | [
"MIT"
] | permissive | xdmiodz/tomodachi | 3280209ae49100ec902e3b15c323b38e7480cdd3 | 7ca998a421dd724df5967d5baa0cf79f5112b79b | refs/heads/master | 2023-03-15T19:22:16.381212 | 2023-01-20T07:34:48 | 2023-01-20T07:34:48 | 200,020,833 | 0 | 2 | MIT | 2023-03-08T00:00:01 | 2019-08-01T09:30:22 | Python | UTF-8 | Python | false | false | 674 | py | from typing import Any
from run_test_service_helper import start_service
def test_empty_service(monkeypatch: Any, capsys: Any, loop: Any) -> None:
    """A service file with no transports should log an error on stderr."""
    services, future = start_service("tests/services/empty_service.py", monkeypatch)
    loop.run_until_complete(future)
    _, err = capsys.readouterr()
    assert "No transports defined in service file" in err
def test_non_decorated_service(monkeypatch: Any, capsys: Any, loop: Any) -> None:
    """A service without transport decorators should log an error on stderr."""
    services, future = start_service("tests/services/non_decorated_service.py", monkeypatch)
    loop.run_until_complete(future)
    _, err = capsys.readouterr()
    assert "No transports defined in service file" in err
| [
"hello@carloscar.com"
] | hello@carloscar.com |
cfb9ff1a1089622084ea929a8ceebf87da9d0687 | 45799ccc3a16c785ab3c65f3296d66f8463590dc | /docs/_downloads/b9951f29cd54bc08237c8fb75b9c2476/q1314.py | b487939c8e11b9a0513ff9639257664f5e82d07a | [
"MIT"
] | permissive | odys-z/hello | 9d29b7af68ea8c490b43994cf16d75c0e8ace08e | fedd0aec7273f3170aa77316d0d5f317cc18a979 | refs/heads/master | 2023-08-19T03:25:58.684050 | 2023-08-18T08:07:27 | 2023-08-18T08:07:27 | 154,006,292 | 0 | 0 | MIT | 2023-04-18T22:50:56 | 2018-10-21T12:34:12 | C++ | UTF-8 | Python | false | false | 2,347 | py | '''
1314. Matrix Block Sum
https://leetcode.com/problems/matrix-block-sum/
Given a m * n matrix mat and an integer K, return a matrix answer where each answer[i][j] is
the sum of all elements mat[r][c] for i - K <= r <= i + K, j - K <= c <= j + K, and (r, c)
is a valid position in the matrix.
Example 1:
Input: mat = [[1,2,3],[4,5,6],[7,8,9]], K = 1
Output: [[12,21,16],[27,45,33],[24,39,28]]
Example 2:
Input: mat = [[1,2,3],[4,5,6],[7,8,9]], K = 2
Output: [[45,45,45],[45,45,45],[45,45,45]]
Constraints:
m == mat.length
n == mat[i].length
1 <= m, n, K <= 100
1 <= mat[i][j] <= 100
Hint 1:
How to calculate the required sum for a cell (i,j) fast ?
Hint 2:
Use the concept of cumulative sum array.
Hint 3:
Create a cumulative sum matrix where dp[i][j] is the sum of all cells in the rectangle
from (0,0) to (i,j), use inclusion-exclusion idea.
'''
from unittest import TestCase
from typing import List
class Solution:
    '''
    70.85%
    '''
    def matrixBlockSum(self, mat: List[List[int]], K: int) -> List[List[int]]:
        """Replace each cell of `mat` (in place) with the sum of the
        (2K+1)x(2K+1) block centered on it, clipped to the matrix, and
        return `mat`.

        Uses a "clamped" 2-D prefix-sum table padded by K rows/columns
        that simply repeat the last real row/column, so i+K / j+K never
        need explicit bounds checks.
        """
        rows, cols = len(mat), len(mat[0])

        # pref[i][j] == sum of mat[0..min(i, rows-1)][0..min(j, cols-1)]
        pref = [[0] * (cols + K) for _ in range(rows + K)]

        # Row-wise prefix sums; columns past cols-1 repeat the last value.
        for i in range(rows):
            pref[i][0] = mat[i][0]
            for j in range(1, cols + K):
                pref[i][j] = pref[i][j - 1] + (mat[i][j] if j < cols else 0)

        # Column-wise accumulation; rows past rows-1 repeat the last row.
        for j in range(cols + K):
            for i in range(1, rows + K):
                pref[i][j] = pref[i - 1][j] + (pref[i][j] if i < rows else 0)

        # Standard inclusion-exclusion over the clamped prefix table.
        for i in range(rows):
            for j in range(cols):
                total = pref[i + K][j + K]
                if i - K - 1 >= 0:
                    total -= pref[i - K - 1][j + K]
                if j - K - 1 >= 0:
                    total -= pref[i + K][j - K - 1]
                if i - K - 1 >= 0 and j - K - 1 >= 0:
                    total += pref[i - K - 1][j - K - 1]
                mat[i][j] = total
        return mat
if __name__ == '__main__':
    # Quick self-check against the two examples from the problem statement.
    checker = TestCase()
    solver = Solution()
    checker.assertCountEqual(
        [[12, 21, 16], [27, 45, 33], [24, 39, 28]],
        solver.matrixBlockSum([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 1))
    checker.assertCountEqual(
        [[45, 45, 45], [45, 45, 45], [45, 45, 45]],
        solver.matrixBlockSum([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 2))
    print("OK!")
"odysseusj@163.com"
] | odysseusj@163.com |
bf7761e23bae9a63ca81061aa2aafaf3aff5226e | 48a960fcba752cdd54e8b9bcca26dc9620d8fee3 | /bookmarks/account/views.py | 3dd39a12b1aacaba6737d72ee305ba54976180d0 | [] | no_license | spacemonkeythe/django_social_website | 411172d602b3f0c82e294eb8f5d2001a4edb1016 | c7114a5f4e95e2a4b72a4b1a6055e961b4545b29 | refs/heads/master | 2021-01-19T23:47:05.238568 | 2017-05-14T19:18:53 | 2017-05-14T19:18:53 | 89,026,182 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,678 | py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
from .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm
from django.contrib.auth.decorators import login_required
from .models import Profile
from django.contrib import messages
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from common.decorators import ajax_required
from .models import Contact
from actions.utils import create_action
from actions.models import Action
def user_login(request):
    """Validate the login form and authenticate the user.

    GET renders an empty form; POST authenticates and reports one of
    three plain-text outcomes (success / disabled account / bad login).
    """
    if request.method != 'POST':
        return render(request, 'account/login.html', {'form': LoginForm()})

    form = LoginForm(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        user = authenticate(username=cd['username'], password=cd['password'])
        if user is None:
            return HttpResponse('Invalid login')
        if not user.is_active:
            return HttpResponse('Disabled account')
        login(request, user)
        return HttpResponse('Authenticated successfully')

    # POST with an invalid form: re-render it with its errors.
    return render(request, 'account/login.html', {'form': form})
@login_required
def dashboard(request):
    """Show the 10 most recent actions, restricted to followed users
    when the current user follows anyone."""
    actions = Action.objects.exclude(user=request.user)
    following_ids = request.user.following.values_list('id', flat=True)
    if following_ids:
        # Only actions from followed users; prefetch related rows to
        # avoid per-action queries in the template.
        actions = actions.filter(user_id__in=following_ids)\
                         .select_related('user', 'user__profile')\
                         .prefetch_related('target')
    context = {'section': 'dashboard', 'actions': actions[:10]}
    return render(request, 'account/dashboard.html', context)
def register(request):
    """Create a new user account together with its empty Profile."""
    if request.method == 'POST':
        user_form = UserRegistrationForm(request.POST)
        if user_form.is_valid():
            new_user = user_form.save(commit=False)
            # Hash the password instead of storing the raw value.
            new_user.set_password(user_form.cleaned_data['password'])
            new_user.save()
            Profile.objects.create(user=new_user)
            # BUG FIX: the action belongs to the newly created user;
            # request.user is still anonymous during registration.
            create_action(new_user, 'has created account')
            return render(request, 'account/register_done.html', {'new_user': new_user})
    else:
        user_form = UserRegistrationForm()
    # GET, or POST with an invalid form (re-rendered with its errors).
    return render(request, 'account/register.html', {'user_form': user_form})
@login_required
def edit(request):
    """Let the logged-in user edit both the User row and its Profile."""
    if request.method == 'POST':
        user_form = UserEditForm(instance=request.user, data=request.POST)
        profile_form = ProfileEditForm(instance=request.user.profile,
                                       data=request.POST,
                                       files=request.FILES)
        # Both forms must validate before anything is saved.
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, 'Profile updated successfully!')
        else:
            messages.error(request, 'Error updating your profile!')
    else:
        user_form = UserEditForm(instance=request.user)
        profile_form = ProfileEditForm(instance=request.user.profile)
    context = {'user_form': user_form, 'profile_form': profile_form}
    return render(request, 'account/edit.html', context)
@login_required
def user_list(request):
    """List all active users."""
    active_users = User.objects.filter(is_active=True)
    context = {'section': 'people', 'users': active_users}
    return render(request, 'account/user/list.html', context)
@login_required
def user_detail(request, username):
    """Show one active user's public profile page (404 if not found)."""
    user = get_object_or_404(User, username=username, is_active=True)
    context = {'section': 'people', 'user': user}
    return render(request, 'account/user/detail.html', context)
@ajax_required
@require_POST
@login_required
def user_follow(request):
    """AJAX endpoint: follow or unfollow the user given by POST 'id'.

    Returns {'status': 'ok'} on success (or missing parameters, matching
    the original behavior) and {'status': 'ko'} when the user does not
    exist.
    """
    user_id = request.POST.get('id')
    action = request.POST.get('action')
    if user_id and action:
        # FIX: the original bare `except:` swallowed every error; only a
        # missing user is an expected failure here.
        try:
            user = User.objects.get(id=user_id)
        except User.DoesNotExist:
            return JsonResponse({'status': 'ko'})
        if action == "follow":
            Contact.objects.get_or_create(user_from=request.user,
                                          user_to=user)
            create_action(request.user, 'is following', user)
        else:
            Contact.objects.filter(user_from=request.user, user_to=user).delete()
        return JsonResponse({'status': 'ok'})
    return JsonResponse({'status': 'ok'})
| [
"mladen.meseldzija@gmail.com"
] | mladen.meseldzija@gmail.com |
17fe19b4e80f15be0aa96d6afc0197167630396f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Yfksxs7kyJf6B3yvK_21.py | 3d96e93dc0ddaedcb2d4e9ec9ecf8a4618a5d7cd | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | """
Given a list of integers, return the smallest _positive_ integer _not present
in the list_.
Here is a representative example. Consider the list:
[-2, 6, 4, 5, 7, -1, 7, 1, 3, 6, 6, -2, 9, 10, 2, 2]
After reordering, the list becomes:
[-2, -2, -1, 1, 2, 2, 3, 4, 5, 6, 6, 6, 7, 7, 9, 10]
... from which we see that the smallest missing positive integer is `8`.
### Examples
min_miss_pos([-2, 6, 4, 5, 7, -1, 1, 3, 6, -2, 9, 10, 2, 2]) โ 8
# After sorting, list becomes [-2, -2, -1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 9, 10]
# So the smallest missing positive integer is 8
min_miss_pos([5, 9, -2, 0, 1, 3, 9, 3, 8, 9]) โ 2
# After sorting, list becomes [-2, 0, 1, 3, 3, 5, 8, 9, 9, 9]
# So the smallest missing positive integer is 2
min_miss_pos([0, 4, 4, -1, 9, 4, 5, 2, 10, 7, 6, 3, 10, 9]) โ 1
# After sorting, list becomes [-1, 0, 2, 3, 4, 4, 4, 5, 6, 7, 9, 9, 10, 10]
# So the smallest missing positive integer is 1
### Notes
For the sake of clarity, recall that `0` is not considered to be a positive
number.
"""
def min_miss_pos(lst):
    """Return the smallest positive integer not present in `lst`.

    The original scanned `range(1, 2 << 64)` with an O(n) membership test
    per candidate (quadratic overall). A set makes each test O(1), so the
    whole function is O(n) time and space. The answer is at most
    len(lst) + 1, so the loop always terminates.
    """
    present = set(lst)
    candidate = 1
    while candidate in present:
        candidate += 1
    return candidate
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
292d693d4951c885d76bf1f7ac7bf0a082ef6351 | 0967216649e468a9031493efccb54a3ca91e0605 | /gui.py | bae3f271b800ccaa12c62af2617dfe0e5ec487d9 | [] | no_license | Cegard/triqui | d6b1256176007326a914176cf4984a71eae6c5e9 | a6347a4a1a7b4a314f84c2eaa8f765a4c0f8bf53 | refs/heads/master | 2021-07-07T09:38:46.722689 | 2017-10-02T07:25:12 | 2017-10-02T07:25:12 | 105,507,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,400 | py | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from triqui import *
class Cell(Gtk.ToggleButton):
    """A board square; remembers its logical position on the 3x3 board."""
    def __init__(self, position):
        # BUG FIX: the original called Gtk.Label.__init__ even though this
        # class derives from Gtk.ToggleButton; initialize the real base.
        Gtk.ToggleButton.__init__(self)
        self.position = position
def __init__(self, parent, message):
Gtk.Dialog.__init__(self, "Fin del juego", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_default_size(150, 100)
label = Gtk.Label(message)
box = self.get_content_area()
box.add(label)
self.show_all()
class Game(Gtk.Window):
    """Main tic-tac-toe window: a header with X/O chooser buttons and,
    once a side is picked, a 3x3 grid of Cell toggle buttons.

    The board is the 10-slot list used by the triqui helper functions
    (slot 0 unused; positions 1-9 hold ' ', 'X' or 'O').
    """

    def __init__(self):
        Gtk.Window.__init__(self, title = "Triqui")
        self.board = [' '] * 10
        self.player_char = ""
        self.agent_char = ""
        # End-of-game prompts (kept in Spanish, as displayed to the user).
        self.win_message, self.lose_message, self.draw_message = \
            "Ganaste! Volver a jugar?", \
            "Perdiste! Volver a jugar?", \
            "Empate! Volver a jugar?"
        self.main_box = Gtk.Box(orientation = Gtk.Orientation.VERTICAL)
        self.header_box = Gtk.Box()
        self.add(self.main_box)
        self.main_box.pack_start(self.header_box, True, True, 0)
        self.button_x = Gtk.ToggleButton(label = "X")
        self.button_x.connect("clicked", self.__on_button_x_clicked)
        self.header_box.pack_start(self.button_x, True, True, 0)
        self.button_o = Gtk.ToggleButton(label = "O")
        self.button_o.connect("clicked", self.__on_button_o_clicked)
        self.header_box.pack_start(self.button_o, True, True, 0)

    def __disable_all(self):
        """Grey out every board cell (used once the game is over)."""
        for position in self.cells:
            self.cells[position].set_sensitive(False)

    def __clean(self):
        """Remove the board grid and reset the logical board state."""
        self.main_box.remove(self.table)
        self.board = [' '] * 10

    def __show_message(self, message):
        """Show the end-of-game dialog; OK restarts, Cancel closes."""
        dialog = Dialog(self, message)
        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            self.__clean()
            # Un-toggling the chooser button re-fires its "clicked"
            # handler, which rebuilds a table...
            if self.player_char == "X":
                self.button_x.set_active(False)
            else:
                self.button_o.set_active(False)
            # ...so a second __clean() removes that stale table again.
            self.__clean()
            self.button_o.set_sensitive(True)
            self.button_x.set_sensitive(True)
        else:
            self.close()
        dialog.destroy()

    def __end_game(self, message):
        """Freeze the board and announce the outcome."""
        self.__disable_all()
        self.__show_message(message)

    def __make_move(self):
        """Let the computer agent play one move and check for game end."""
        position = getComputerMove(self.board, self.agent_char)
        self.cells[position].set_label(self.agent_char)
        self.cells[position].set_sensitive(False)
        self.board[position] = self.agent_char
        if isWinner(self.board, self.agent_char):
            self.__end_game(self.lose_message)
        elif isBoardFull(self.board):
            self.__end_game(self.draw_message)

    def __on_cell_clicked(self, widget):
        """Record the human move on `widget`, then either finish the game
        or hand the turn to the agent."""
        widget.set_label(self.player_char)
        widget.set_sensitive(False)
        self.board[widget.position] = self.player_char
        if isWinner(self.board, self.player_char):
            self.__end_game(self.win_message)
        elif not isBoardFull(self.board):
            self.__make_move()
        else:
            self.__end_game(self.draw_message)

    def __deactivate_buttons(self):
        """Lock the X/O chooser buttons once a side has been picked."""
        self.button_x.set_sensitive(False)
        self.button_o.set_sensitive(False)

    def __assign_chars(self, player, agent):
        self.player_char = player
        self.agent_char = agent

    def __transform_index(self, position, factor, length):
        """Map a raw grid index to the 1-9 board numbering used by the
        triqui helpers (rows are flipped via the per-row `factor`)."""
        first_term = length*factor - 1
        new_position = first_term - position + 1
        return new_position

    def __add_table(self):
        """Build the 3x3 grid of Cell toggle buttons."""
        self.table = Gtk.Grid(column_homogeneous = True, column_spacing = 0,
                              row_spacing = 50)
        self.main_box.pack_start(self.table, True, True, 0)
        self.table.show()
        self.cells = {}
        length = 3
        width, height = 1, 2
        max_index = length**2 - 1
        factor = 7
        for row in range(length):
            factor -= 2
            for col in range(length):
                raw_position = max_index - (length*row + col)
                position = self.__transform_index(raw_position, factor, length)
                cell = Cell(position)
                cell.connect("toggled", self.__on_cell_clicked)
                cell.show()
                self.cells[position] = cell
                self.table.attach(cell, col, row, width, height)

    def __prepare_game(self, human, agent):
        """Start a round: assign symbols, lock the chooser, build the
        board, and flip a coin for who moves first."""
        self.__assign_chars(human, agent)
        self.__deactivate_buttons()
        self.__add_table()
        if random.randint(0, 1) == 0:
            self.__make_move()

    def __on_button_x_clicked(self, widget):
        # Human plays X, agent plays O.
        chars = "XO"
        self.__prepare_game(*chars)

    def __on_button_o_clicked(self, widget):
        # Human plays O, agent plays X.
        chars = "OX"
        self.__prepare_game(*chars)
# Build the window, wire window-close to quitting, and start the GTK loop.
win = Game()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
"cegard689@gmail.com"
] | cegard689@gmail.com |
3dca2cc757f66f60392a249939a7b606c5bcbaee | ec75e1748d2b2a10dc34cbc5c044132bb7f63bca | /getResolution.py | 5742d0add2d7b37945d9d0fcd33b7491b4758470 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fstab50/conky-system-monitor | 0d9856f1c84c4a2af6cd80130c7ba5163851f6fc | 7195318926e3b4e319517f96dbb691029f8225a0 | refs/heads/master | 2021-10-25T18:52:41.393184 | 2021-10-22T21:16:10 | 2021-10-22T21:16:10 | 211,334,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | #!/usr/bin/env python3
# modified version of getMonitorInfo.py
# (last line changed "current" monitor info
# change to enable start_conky_laptop.sh capabilities
import tkinter as tk
root = tk.Tk()
cols = root.winfo_screenwidth()
rows = root.winfo_screenheight()
# the screen contains all monitors
print("screen size: %d x %d" % (rows, cols))
# current monitor
print("%d x %d (current)" % (cols, rows))
| [
"blakeca00@gmail.com"
] | blakeca00@gmail.com |
7515a1a3a160b48f3077495d86f0d62cfd276aee | b0edf66e8360667f09c5a24a92ddcdeef0f94e99 | /AgeEstimationServer/src/AgeEstimationServer/views.py | 99f42f41a4c481ef7a5990e4ccd6d6da91dc88d8 | [] | no_license | LeslRo/AgeEstimation | 4b7cf374511a0f75ae6e657186eaf68ecc21ae85 | 9045d9cc87704cd8804ca3683970a5a79965b16f | refs/heads/master | 2020-04-12T18:38:56.489503 | 2018-12-21T07:42:01 | 2018-12-21T07:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | # -*- coding:utf-8 -*-
from django.http import HttpResponse
from . import services
import json
def hello(request):
    """Liveness-check view: always answers with a fixed greeting."""
    greeting = "Hello Age Gender Estimation!"
    return HttpResponse(greeting)
def detect(request):
    """POST endpoint: run age/gender detection on the photo named in the
    JSON request body.

    Expected body: {"photo_path": ..., "config": ... (optional)}.
    Always responds with a JSON document; on bad requests the payload
    carries successful=False plus a message (and code=1 when the photo
    path is missing).
    """
    result = {'successful': False}  # default for every illegal case
    if request.method != "POST":
        result['message'] = 'Request method is invalid'
    else:
        params = json.loads(request.body)
        if 'photo_path' not in params:
            result['message'] = 'No photo path input'
            result['code'] = 1
        else:
            # The optional config is forwarded only when supplied.
            args = [params['photo_path']]
            if 'config' in params:
                args.append(params['config'])
            result = services.detect_gender_age(*args)
    return HttpResponse(json.dumps(result, ensure_ascii=False),
                        content_type="application/json")
"hu_minghao@outlook.com"
] | hu_minghao@outlook.com |
ce23796651ea87049745a818cb08caafa35cc580 | 9eef3e4cf39a659268694cf08a4a799af8fb13e2 | /packages/dpdprops/dpdprops/__init__.py | c42c51871769928dd028add49df137aafa25b487 | [] | no_license | cselab/tRBC-UQ | c30ec370939b949c989d2e9cd30137073b53e7d2 | cd7711b76c76e86bc6382914111f4fa42aa78f2c | refs/heads/master | 2023-04-18T03:06:49.175259 | 2022-10-25T15:45:07 | 2022-10-25T15:45:07 | 483,407,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from .fluid import *
from .dpdparams import (DPDParams,
create_dpd_params_from_str,
create_dpd_params_from_Re_Ma,
create_dpd_params_from_props)
from .membrane import *
from .membraneparams import (MembraneParams,
KantorParams,
JuelicherParams,
WLCParams,
LimParams,
DefaultRBCParams,
KantorWLCRBCDefaultParams,
JuelicherLimRBCDefaultParams)
from .membraneforces import (extract_dihedrals,
compute_kantor_energy,
compute_juelicher_energy)
from .fsi import (get_gamma_fsi_DPD_membrane,
create_fsi_dpd_params)
from .rbcmesh import (load_stress_free_mesh,
load_equilibrium_mesh)
| [
"lucas.amoudruz@wanadoo.fr"
] | lucas.amoudruz@wanadoo.fr |
e27baac6ac5bffe812f3193a959cab81a5c8aff7 | fb71a6543a7c354fb1c60e4e8a99e195316fda5b | /SimpleLevel.py | 3eda15373d49505b9059a51a5f5da6d17b0ad9dd | [
"MIT"
] | permissive | tawnkramer/Adventure | dc9343513b3da9bba7b7b22b3c95bd84e58218e5 | ded25d7b49094c8ca4a79fc0aa18fca3afa394f2 | refs/heads/master | 2022-12-22T11:56:49.006454 | 2022-12-16T18:42:28 | 2022-12-16T18:42:28 | 89,750,955 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,516 | py | from adventure import *
from shop import *
from util import *
#######################################################
# Here is a simple starter level to start learning with.
# This creates three rooms in a cave and populates with
# some items and gets it ready for an adventure.
# Read the descriptions and you can guess how it might
# work. But copy this and use it to start your own!
# Good luck!!!
#######################################################
#You can make things to discover like this. This makes an Item. The first argument is the name. Keep it short. The second is a description. The third is the location where it's found. Optionally, you can specify how valuable it is. This makes it worth 10 gold pieces.
treasure = Item('bag of gems', 'countless gems and jewels', 'scattered around the room.', GP(10))
#Sometimes it's fun to make items that don't have much use, but make us laugh!
fries = Item('bag of fries', 'old, old, moldy, disgusting but somewhat tempting french fries', 'on the wet, soggy ground behind some rocks.')
#You can make monsters like this. You can also use the master list of pre-created monsters with the function Mon which takes a monster name as an argument.
#This Dragon has two attacks. The first part of the attack is what it looks like when it's used. The second is the max damage.
dragon = Monster('Dragon', "On top of the coins sits the largest red dragon you've ever seen!", 60, [ MonsterAttack('clawed', 7), MonsterAttack('spewed flames', 9) ], 300 )
#sometimes it's fun to connect rooms with a door and then hide the key. A key is a special item because of its name.
#Keep it 'key' until you know how to match it with doors specifically.
key = Item('key', 'a large silver dragon key', 'hidden in the pot under lots of spider webs.')
door = Door("a strong door with a silver dragon crest around the key hole.")
#Here's how to make some rooms. These can be indoor or outdoor spaces. Use your imagination to create a cool environment.
#The first argument is the name.
#The second, optional argument, is the description you see only when you first enter a room.
#The third argument is what you see every time after the first entry.
#The fourth argument finishes the sentence "[Direction] you see" in order to give the party some indication of what can be seen from the current space.
#The fifth optional argument is the list of items you will find if you search.
#The sixth argument is the list of monsters in the room.
cave = Room( "Cave", None, "This large cave has the smell of wet dirt and old grease." ,
"a glimmer of light shines through the distant cave entrance.", [fries] )
small_tunnel = Room("Small Tunnel", None, "Down the small tunnel you see a small golden pot. The tunnel stops here.", "a small tunnel.", [key])
dragon_room = Room("Dragon's Lair", None, "There is a huge pile of coins in the center of a large cave.", "a pile of coins.", [treasure], [dragon])
#If you want to make a fun ascii banner, like the Ragged Keep, check out
#http://patorjk.com/software/taag/#p=display&f=Graffiti&t=Type%20Something%20
#then pass it as the optional second argument to Level
#You need to make one level
level1 = Level ("The Cave of Fear")
#connect your rooms together like this. The door is optional.
cave.connect("East", dragon_room, door)
cave.connect("West", small_tunnel)
#and start the level like this, passing the level an the first room to begin.
run_adventure(level1, cave)
| [
"tawnkramer@gmail.com"
] | tawnkramer@gmail.com |
c454aa0dcc9b5fa6321a30c46ce20f68da64dc02 | 390b3ea1c01b693e71f7ac0bf8e9d626b084b09d | /luminous/migrations/0001_initial.py | 12a40aa23a0590bfbde41eecdd18a9a920894147 | [
"MIT"
] | permissive | rohithpr/luminous-backend | f409a58fc9d143fbfa0b52444f0fe38adb863d11 | 7aa45989993fcad8aa6d786dc3828691dbb96540 | refs/heads/master | 2020-07-29T05:24:46.676578 | 2016-08-28T10:33:37 | 2016-08-28T10:33:37 | 66,752,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-28 06:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('left_top', models.CharField(max_length=20)),
('resources', models.CharField(max_length=2000)),
],
),
migrations.CreateModel(
name='LocationUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='luminous.Location')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('resources', models.CharField(max_length=2000)),
],
),
migrations.AddField(
model_name='locationuser',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='luminous.User'),
),
]
| [
"praroh2@gmail.com"
] | praroh2@gmail.com |
011d6e04d23d555a74207ac5b3c6166e38926413 | 04f614f534471cb4a2ed72a875b3601cba87590a | /final_project/views.py | 103fa32b842c56839f22040e7a6995d4eabf0232 | [] | no_license | harrywoo/Squarrel-Tracker | 35636bfd100309cc4067f1fe5097d1b421b6a78d | 143043978e2089dec0273e242ed438a80a55ef37 | refs/heads/master | 2022-12-01T19:23:15.583090 | 2019-12-10T02:44:35 | 2019-12-10T02:44:35 | 242,660,388 | 1 | 1 | null | 2022-11-22T04:39:45 | 2020-02-24T06:12:09 | JavaScript | UTF-8 | Python | false | false | 93 | py | from django.shortcuts import render
# Create your views here.
def home(request):
return | [
"harryhz@yahoo.com"
] | harryhz@yahoo.com |
e9fb301ce413574e49d9b5dab04e7840eb52ae8b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnetonian.py | 43b62a44c0b174541904a814c37cf4917415a758 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 62 | py | ii = [('ClarGE3.py', 2), ('DibdTRL.py', 1), ('DibdTBR.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
ceed057825798d46c509ddab61ac189ced30ad29 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-oms/setup.py | b81b7515b7d134fa9438170ce81a39929b9463d6 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | # coding: utf-8
from os import path
from setuptools import setup, find_packages
NAME = "huaweicloudsdkoms"
VERSION = "3.0.52"
AUTHOR = "HuaweiCloud SDK"
AUTHOR_EMAIL = "hwcloudsdk@huawei.com"
URL = "https://github.com/huaweicloud/huaweicloud-sdk-python-v3"
DESCRIPTION = "OMS"
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README_PYPI.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
REQUIRES = ["huaweicloudsdkcore"]
OPTIONS = {
'bdist_wheel': {
'universal': True
}
}
setup(
name=NAME,
version=VERSION,
options=OPTIONS,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache LICENSE 2.0",
url=URL,
keywords=["huaweicloud", "sdk", "OMS"],
packages=find_packages(exclude=["tests*"]),
install_requires=REQUIRES,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*",
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development'
]
)
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
d5ddd74869a157b83c40a72dcab563c596578394 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /T083_ๆฑ็จ0โ7ๆ่ฝ็ปๆ็ๅฅๆฐไธชๆฐ.py | 0fb0007220933911a99ceca79ed911aaae9783bb | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # ๅจ่ฟ้ๅไธไฝ ็ไปฃ็ :-)
'''
้ข็ฎ083๏ผๆฑ0โ7ๆ่ฝ็ปๆ็ๅฅๆฐไธชๆฐใ
'''
def tm083():
'''
ใไธชไบบๅคๆณจใ๏ผๆฒก่ฏด็ปๆๅ ไฝๆฐๆๆฏๅฆ้ๅคไฝฟ็จใๅ่ฎพ1-8ไฝ้ฝๅฏไปฅ๏ผไธไธ่ฝ้ๅคไฝฟ็จใ
็ดๆฅ็จๆๅๅฝๆฐ๏ผ็ดฏๅ ็ถๅๅป้๏ผๅฐฑๅพๅฐ็ญๆกไบใ
'''
s = [i for i in '01234567']
import itertools #ๆๆๅไธ็ปๅๅฝๆฐ
arr = []
for i in range(1,9):
a = list(itertools.permutations(s,i)) # ้ฟๅบฆ1-8ๅทฆๅณๆๅ
l = list(map(lambda x:int(''.join(x)),a)) # ๆด็ๆๆฐๅญๅฝขๅผ๏ผ้ฟๅ
ๅบ็ฐ02่ฟ็งๆ
ๅต๏ผ02ๅฎ้
ไธๅฐฑๆฏ2๏ผ
arr+=l
print(i,len(l))
arr1 = set(arr) # ๅป้ๅค็
arr2 = list(filter(lambda x:x%2==1,arr1)) # ๅช็ๅฅๆฐ
print(len(arr),len(arr1),len(arr2)) # ็ญๆกๆฏ46972
tm083()
| [
"noreply@github.com"
] | xiang-daode.noreply@github.com |
5c124166166fe504cf5105f18a3316336ea82e20 | f282fa7a82cc357875acfaa81c37b42890d52972 | /CMPT825NLP/CMPT825-1141-wkrayenh/hw3/answer/smoothing_old.py | e48b5a884a24d02423cda4c477648d76f8412a2d | [] | no_license | wbkdef/NLP | 1d065b60c31d5d6da24b71b8f82f3b614e89341d | 4482525af82128b1be1b246c38cf891a313a6f09 | refs/heads/master | 2020-03-30T11:29:36.110967 | 2014-04-10T07:16:30 | 2014-04-10T07:16:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,506 | py | from __future__ import division
from nltk.corpus import brown
from nltk.probability import *
from itertools import islice
from math import pow, exp
from numpy import log2
_NINF = float('-1e300') # log probability value for log2(0.0) = Inf
def crossEntropy(Wt, Pt):
    """Per-word cross entropy: negate the total log-probability Pt and
    normalise by the word/event count Wt."""
    return -Pt / Wt
def perplexity(H):
    """Render 2**H as a fixed-point string; return 'Inf' when 2**H
    overflows the float range."""
    try:
        value = pow(2, H)
    except OverflowError:
        return 'Inf'
    return "%lf" % value
def logsum(values):
    """Sum a sequence of log2 probabilities, clamped to the _NINF floor.

    Returns _NINF as soon as any addend is _NINF (a zero-probability
    event) or when the running total underflows past _NINF.
    """
    total = 0.0  # renamed from `sum`, which shadowed the builtin
    for value in values:
        if value == _NINF:
            return _NINF
        total += value
    if total < _NINF:
        return _NINF
    return total
def do_train(tagged_sents):
    """Count tag-bigram frequencies over a corpus of tagged sentences.

    Each sentence is padded with a (None, None) token/tag pair on both
    ends so sentence-initial and sentence-final transitions are counted.
    Returns an nltk ConditionalFreqDist mapping previous tag -> FreqDist
    of following tags.  (Uses the old nltk 2.x FreqDist.inc API.)

    Note: the original accumulated a token count `Wt` that was never
    used or returned; that dead local has been removed.
    """
    bigramFreq = ConditionalFreqDist()
    pad = [(None, None)]  # empty token/tag pair marking sentence boundaries
    for sent in tagged_sents:
        # Pair each (token, tag) with its successor, including boundaries.
        for (prev, cur) in zip(pad + sent, sent + pad):
            bigramFreq[prev[1]].inc(cur[1])
    return bigramFreq
def compute_perplexity(bigramProb, tagged_sents):
    """Score *tagged_sents* under the tag-bigram model *bigramProb* and
    return the perplexity (a string, see perplexity()).

    NOTE: this module is Python 2 -- print-statement syntax below.
    """
    Wt = 0         # number of scored bigram events (entropy denominator)
    Pt = []        # per-event log2 probabilities
    p = [(None, None)] # empty token/tag pair
    for sent in tagged_sents:
        # Pad with the boundary pair so start/end transitions are scored.
        bigrams = zip(p+sent, sent+p)
        for (a,b) in bigrams:
            Wt += 1
            history = a[1]
            current_tag = b[1]
            logprob = None
            # Unseen transitions get the _NINF floor instead of log2(0).
            if bigramProb[history].prob(current_tag) > 0.0:
                logprob = log2(bigramProb[history].prob(current_tag))
            else:
                logprob = _NINF
            Pt.append(logprob)
    H = crossEntropy(Wt, logsum(Pt))
    print >>sys.stderr, "Wt =", Wt, "Pt =", logsum(Pt), "cross entropy =", H, "perplexity =", perplexity(H)
    return perplexity(H)
def usage(args):
    """Print command-line help to stderr and exit with status 2.

    Invoked with the full argv (so unknown arguments can be echoed) or
    with just [argv[0]] for a plain -h/--help request.
    """
    if len(args) > 1:
        print >>sys.stderr, "unknown args", args[1:]
    print >>sys.stderr, "usage: %s -h -i trainsection -o testsection -m method" % (args[0])
    print >>sys.stderr, """
    -h help
    -i training section ,e.g. 'news' or 'editorial'
    -o test section ,e.g. 'news' or 'editorial'
    -m method, e.g. 'no_smoothing', 'interpolation', 'add_one'
    -l lambda_vector, e.g. "0.5:0.3:0.2" for values of \lambda_1, \lambda_2 and \lambda_3.
       It must have 3 elements and sum to 1.0 (only used for interpolation)
    Do not type in the single quotes at the command line.
    """
    sys.exit(2)
if __name__ == '__main__':
    import sys
    import getopt
    try:
        # Defaults before option parsing: train on 'news', test on
        # 'editorial', interpolation weights 0.5/0.3/0.2.
        (trainsection, testsection, method, lambda_vector) = ('news', 'editorial', 'default', [0.5,0.3,0.2])
        opts, args = getopt.getopt(sys.argv[1:], "hi:o:m:l:", ["help", "train=", "test=", "method=", "lambda_vector="])
    except getopt.GetoptError:
        usage(sys.argv)
    for o, a in opts:
        if o in ('-h', '--help'): usage([sys.argv[0]])
        if o in ('-i', '--train'): trainsection = a
        if o in ('-o', '--test'): testsection = a
        if o in ('-m', '--method'): method = a
        if o in ('-l', '--lambda'): lambda_vector = map(float,a.split(':'))
    # Python 2: map() returns a list here, so len()/sum() below are valid.
    if len(lambda_vector) < 3:
        print >>sys.stderr, "error: lambda vector should have three elements"
        sys.exit(2)
    if sum(lambda_vector) != 1.0:
        print >>sys.stderr, "error: lambda vector should sum to one"
        sys.exit(2)
    train = brown.tagged_sents(categories=trainsection)
    # Only the first 300 test sentences are scored.
    # NOTE(review): islice() yields a one-shot iterator, but `test` is
    # passed to compute_perplexity twice below -- the second pass sees an
    # exhausted iterator (Wt == 0, division by zero in crossEntropy).
    test = islice(brown.tagged_sents(categories=testsection), 300)
    #test = brown.tagged_sents(categories=testsection)
    bigramFreq = do_train(train)
    # use the maximum likelihood estimate MLEProbDist to create
    # a probability distribution from the observed frequencies
    bigram = ConditionalProbDist(bigramFreq, MLEProbDist)
    # NOTE(review): 'interpolation' and 'interpolation_add_one' currently
    # score the plain MLE model -- lambda_vector is parsed but never used.
    if method == 'no_smoothing':
        print "%s:%s:%s" % (method, 'train', compute_perplexity(bigram, train))
        print "%s:%s:%s" % (method, 'test', compute_perplexity(bigram, test))
    elif method == 'interpolation':
        print "%s:%s:%s" % (method, 'train', compute_perplexity(bigram, train))
        print "%s:%s:%s" % (method, 'test', compute_perplexity(bigram, test))
    elif method == 'add_one':
        bigram = ConditionalProbDist(bigramFreq, LaplaceProbDist)
        print "%s:%s:%s" % (method, 'train', compute_perplexity(bigram, train))
        print "%s:%s:%s" % (method, 'test', compute_perplexity(bigram, test))
    elif method == 'interpolation_add_one':
        print "%s:%s:%s" % (method, 'train', compute_perplexity(bigram, train))
        print "%s:%s:%s" % (method, 'test', compute_perplexity(bigram, test))
    else:
        print >>sys.stderr, "unknown method"
        sys.exit(2)
| [
"wbrucek@gmail.com"
] | wbrucek@gmail.com |
d3f7083d27ccfd01ba535fcc5502ef199cf4ad4d | 569a4c7b2846e67f25453b2b588d2376d590a057 | /tests/test_unit/test_dynamo_unit.py | 497514c185f82771095cf2abaf3cca395a6d0121 | [] | no_license | sbowers-mdsol/datamigrator | 599e95201539bbbe9849cbcee71c1b29d811a502 | 893df3a693ea718610bb58be387629482118947e | refs/heads/master | 2021-01-22T18:43:21.997882 | 2017-03-22T23:49:33 | 2017-03-22T23:49:33 | 85,109,393 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,768 | py | import tests.configtests as _config
from mock import patch
from sharedlibs.tools_dynamo import ToolsDynamo
from unittest import TestCase
# Canned stand-ins for ToolsDynamo methods, installed with mock.patch by
# TestDynamoUnit below.  Each stub ignores its arguments and returns a
# fixed success flag or a fixture from tests.configtests.
#
# They are wrapped in @staticmethod so that, once patched onto the
# ToolsDynamo class, instance calls (self.client.create_table(...)) do
# not pass the instance as an unexpected first argument.
@staticmethod
def fake_create_table(tablename, keyschema, attributedefinitions, provisionedthroughput):
    return True
@staticmethod
def fake_delete_record(tablename, key):
    return True
@staticmethod
def fake_delete_table(tablename):
    return True
@staticmethod
def fake_get_record(tablename, key):
    return _config.DynamoTestRecord
@staticmethod
def fake_get_recordset(tablename, key, keyval):
    return _config.DynamoTestRecordset
@staticmethod
def fake_insert_record(tablename, item):
    return True
@staticmethod
def fake_insert_record_batch(tablename, itemlist):
    return True
@staticmethod
def fake_read_table(table):
    # Fixed timestamp string standing in for the real table payload.
    return '2010-11-01 15:31:10.123456-07:00'
@staticmethod
def fake_update_record(tablename, key, updateexpression, expressionattributevalues):
    return True
class TestDynamoUnit(TestCase):
    """Unit tests for ToolsDynamo with every DynamoDB call replaced by
    the fake_* stubs above, so no AWS endpoint is required."""
    def create_patch(self, name, fakemethod):
        """Patch *name* with *fakemethod* for the duration of one test.

        Starts the patcher, builds a fresh ToolsDynamo client (created
        after the patch so the instance sees the stub) and registers
        automatic unpatching via addCleanup.  Returns the started patch
        target.
        """
        self.patcher = patch(name, fakemethod)
        thing = self.patcher.start()
        self.client = ToolsDynamo()
        self.addCleanup(self.patcher.stop)
        return thing
    def test_convert_oracle_record_to_dynamo(self):
        # Pure conversion helper -- no patching needed.
        response = ToolsDynamo.convert_oracle_record_to_dynamo(_config.OracleTestRecord)
        self.assertEqual(_config.DynamoTestRecord, response)
    def test_create_table_mock(self):
        self.create_patch('sharedlibs.tools_dynamo.ToolsDynamo.create_table', fake_create_table)
        response = self.client.create_table(_config.DynamoTestTablename, _config.DynamoTestKeySchema,
                                            _config.DynamoTestAttributeDefinitions,
                                            _config.DynamoTestProvisionedThroughput)
        self.assertTrue(response)
    def test_delete_record_mock(self):
        self.create_patch('sharedlibs.tools_dynamo.ToolsDynamo.delete_record', fake_delete_record)
        response = self.client.delete_record(_config.DynamoTestTablename, _config.DynamoTestRecord)
        self.assertTrue(response)
    def test_delete_table_mock(self):
        self.create_patch('sharedlibs.tools_dynamo.ToolsDynamo.delete_table', fake_delete_table)
        response = self.client.delete_table(_config.DynamoTestTablename)
        self.assertTrue(response)
    def test_get_record_mock(self):
        self.create_patch('sharedlibs.tools_dynamo.ToolsDynamo.get_record', fake_get_record)
        response = self.client.get_record(_config.DynamoTestTablename, _config.DynamoTestRecord)
        self.assertIn('oid', response)
        self.assertEqual(response['oid'], _config.DynamoTestRecord['oid'])
        # NOTE(review): duplicated assertion -- likely copy-paste leftover.
        self.assertEqual(response['oid'], _config.DynamoTestRecord['oid'])
    def test_get_recordset_mock(self):
        self.create_patch('sharedlibs.tools_dynamo.ToolsDynamo.get_recordset', fake_get_recordset)
        response = self.client.get_recordset(_config.DynamoTestTablename, _config.DynamoTestKey,
                                             _config.DynamoTestKeyval)
        self.assertIn(_config.DynamoTestRecord, response)
        self.assertEqual(response[0]['oid'], _config.DynamoTestRecordset[0]['oid'])
        self.assertEqual(response[1]['oid'], _config.DynamoTestRecordset[1]['oid'])
        # NOTE(review): duplicated assertion -- likely copy-paste leftover.
        self.assertEqual(response[1]['oid'], _config.DynamoTestRecordset[1]['oid'])
    def test_insert_record_mock(self):
        self.create_patch('sharedlibs.tools_dynamo.ToolsDynamo.insert_record', fake_insert_record)
        response = self.client.insert_record(_config.DynamoTestTablename, _config.DynamoTestRecord)
        self.assertTrue(response)
    def test_insert_record_batch_mock(self):
        self.create_patch('sharedlibs.tools_dynamo.ToolsDynamo.insert_record_batch', fake_insert_record_batch)
        response = self.client.insert_record_batch(_config.DynamoTestTablename, _config.DynamoTestRecordset)
        self.assertTrue(response)
    def test_read_table_mock(self):
        self.create_patch('sharedlibs.tools_dynamo.ToolsDynamo.read_table', fake_read_table)
        response = self.client.read_table(_config.DynamoTestTablename)
        self.assertEqual(response, '2010-11-01 15:31:10.123456-07:00')
    def test_update_record_mock(self):
        self.create_patch('sharedlibs.tools_dynamo.ToolsDynamo.update_record', fake_update_record)
        updateexpression = _config.DynamoTestUpdateExpression
        expressionattributevalues = _config.DynamoTestUpdateExpressionAttributeValues
        response = self.client.update_record(_config.DynamoTestTablename, _config.DynamoTestRecord,
                                             updateexpression, expressionattributevalues)
        self.assertTrue(response)
| [
"sbowers@mdsol.com"
] | sbowers@mdsol.com |
736f75070055a99a4a7e584cf66a56af7e7459f7 | 66d176360e17249ab725b6c3dbb4ebcf9006b491 | /lab7/get_browser_history.py | 7a39ec5cd3b0b561a5b9406b6600bff99e4e851f | [] | no_license | otecdima/UCU | 843f9fba3891f106a52c450f19247aa2b749702e | 15ffafdc6831f199f59605ef3bd6698308615be2 | refs/heads/master | 2023-09-04T19:56:49.535973 | 2021-11-24T22:15:28 | 2021-11-24T22:15:28 | 424,571,423 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | import sqlite3
import datetime
def get_chrome(con: sqlite3.Connection) -> list:
    """Extract Chrome visit records from an open History database.

    Joins the ``urls`` and ``visits`` tables and converts Chrome's
    WebKit timestamps (microseconds since 1601-01-01) to local time.

    :param con: connection to a Chrome ``History`` SQLite file
    :return: list of tuples
        (url, title, date "yyyy-mm-dd", time "hh:mm:ss[.us]", visit_duration)
    """
    cursor = con.cursor()
    cursor.execute("select id, url, title from urls")
    urls_by_id = {row_id: (url, title) for row_id, url, title in cursor.fetchall()}
    cursor.execute("select url, visit_time, visit_duration from visits")
    visits = cursor.fetchall()
    cursor.close()
    records = []
    for url_key, visit_time, duration in visits:
        url, title = urls_by_id[url_key]
        # WebKit epoch (1601) -> Unix epoch (1970): 11644473600 s offset.
        moment = datetime.datetime.fromtimestamp(visit_time / 1000000 - 11644473600)
        day, clock = str(moment).split()
        records.append((url, title, day, clock, duration))
    return records
def get_chrome_os(user: str, os: str) -> list:
    """Open the Chrome History database for *user* on the given OS and
    return its visit list (see get_chrome for the tuple layout).

    Each tuple: (url, title, date "yyyy-mm-dd", time "hh:mm:ss.ms",
    visit_duration).

    :param user: account name on the machine
    :param os: "Linux" or "MacOS" ("Windows" support is not implemented)
    :raises ValueError: for any other OS name
    """
    history_paths = {
        "Linux": f'/home/{user}/.config/google-chrome/Default/History',
        "MacOS": f'/Users/{user}/Library/Application Support/Google/Chrome/Default/History',
    }
    if os not in history_paths:
        raise ValueError("Incorrect OS")
    return get_chrome(sqlite3.connect(history_paths[os]))
def write_data_to_file(history: list, filename: str) -> None:
    """Dump every history record to *filename*, one per line.

    :param history: browser visit records (any objects; written via str())
    :param filename: destination text file, overwritten if it exists
    """
    lines = [str(entry) + "\n" for entry in history]
    with open(filename, "w") as out:
        out.writelines(lines)
"batko.dima@gmail.com"
] | batko.dima@gmail.com |
647bcede9f2c180ce6a1c4b7aed02d5486d24efc | 2343dd93eca5a82f9d27f2a7f7934ba7ca490ce4 | /exercises/parenthization.py | 873bada00a97c8a6ae986adedff32dce8bd47188 | [] | no_license | AYamaui/Practice | 7ccbbc8ebd08567b8398c5a7f5d74d49f08a6156 | ce51ae7f555eaf9edebcea3a737b14f735c70de0 | refs/heads/master | 2020-05-09T10:32:14.032347 | 2019-04-12T16:50:14 | 2019-04-12T16:50:14 | 181,046,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py |
def parenthization_matrix_multiplication(matrices):
    """Work-in-progress sketch of matrix-chain parenthesization.

    NOTE(review): not functional as written -- `multiplication` is never
    defined or imported, the loop rebuilds `matrices` while indexing it,
    and the trailing `min(...)` call recurses on a single element while
    using the loop variable `i` (its original indentation is ambiguous).
    Treat this as a stub pending a proper dynamic-programming rewrite.
    """
    for i in range(len(matrices) -1):
        matrices = matrices[:i] + multiplication(matrices[i], matrices[i+1])
        if i + 1 < len(matrices):
            matrices += matrices[i+2:]
    min(parenthization_matrix_multiplication(matrices[i]))
"alexandra.yamaui@gmail.com"
] | alexandra.yamaui@gmail.com |
ac835b677a0872f50c7b0ef9002545f8d9ab07f7 | 01578617f46ffd937bd5a40012ccea304bd796ec | /chamber.py | e1525d0cc10567bb9e2e5f2544c7cd17f89a2f6f | [] | no_license | greyson-newton/tbma_simulator | 0ef6010a5472d193bbff916276f503cee6a253f3 | c2ae3da25a5cf5b79b8423df7932601813d77918 | refs/heads/main | 2023-07-10T06:57:11.685593 | 2021-08-16T19:05:49 | 2021-08-16T19:05:49 | 390,780,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,867 | py | from logging import RootLogger
from matplotlib.cbook import pts_to_midstep
from geometry import *
import math
import matplotlib.pyplot as plt
import time
from mpl_toolkits import mplot3d
class Chamber:
    """Rectangular detector chamber backed by a Square of Point3 corners
    (both types come from the project's `geometry` module).

    The chamber is built in the plane x = origin.x with half-extent w
    along z and h along y, then rotated into its initial orientation.
    """
    def __init__(self,dim,origin,rotation,actual=None):
        # `actual is None` marks the nominal (design) geometry; any other
        # value flags a measured chamber.  The flag is forwarded to
        # Square.translate() in translate() below.
        if actual==None:
            self.design=True
        else:
            self.design=False
        self.w,self.h = dim[0],dim[1]   # half-width (z) and half-height (y)
        self.origin=origin              # Point3 centre of the chamber
        self.rotation=rotation          # initial rotation, applied below
        self.current_rotation=rotation
        x,y,z,w,h = self.origin.x,self.origin.y,self.origin.z,self.w,self.h
        # Five corners (first repeated to close the loop) tracing a
        # rectangle in the plane x = origin.x.
        self.endpoints=[Point3([x,y+h,z+w]),Point3([x,y+h,z-w]),Point3([x,y-h,z-w]),Point3([x,y-h,z+w]),Point3([x,y+h,z+w])]
        # for pt in self.endpoints:
        #     print(pt.out())
        self.square=Square(self.endpoints,self.origin)
        self.rotate(rotation)
        # self.translate(origin)
    def rotate(self,rotation):
        """Rotate the underlying Square and refresh the cached endpoints."""
        # def translate_by(self,translations):
        #     self.square = [Vec3.__add__(pt,t) for pt,t in self.plane]
        # def rotate_by(self,rotations):
        self.current_rotation = self.square.rotate(rotation)
        self.endpoints=self.square.endpoints
    def translate(self,translation):
        """Translate the underlying Square (design flag forwarded) and
        refresh the cached endpoints and origin."""
        # print("BEFORE TRANSLATION")
        # # print(translation.out())
        # for pt in self.endpoints:
        #     print("chamber endpts ",pt.out())
        # for pt in self.square.endpoints:
        #     print("square endpoints",pt.out())
        self.square.translate(translation,self.design)
        # for pt in self.endpoints:
        #     print("chamber ",pt.out())
        #     pt.x+=translation.x
        #     pt.y+=translation.y
        #     pt.z+=translation.z
        # for pt in self.square.endpoints:
        #     print("square ",pt.out())
        self.endpoints=self.square.endpoints
        self.origin=self.square.origin
        # print("AFTER TRANSLATION")
        # for pt in self.endpoints:
        #     print("chamber endpts ",pt.out())
        # for pt in self.square.endpoints:
        #     print("square endpoints",pt.out())
    def plot_pts(self):
        """Return the corner coordinates as [xs, ys, zs] lists for plotting."""
        # print("plotting")
        x,y,z = [],[],[]
        for pt in self.endpoints:
            x.append(pt.x)
            y.append(pt.y)
            z.append(pt.z)
        return [x,y,z]
    def plot_vert(self):
        """Zip the first four corners into per-axis vertex tuples
        (presumably for Poly3DCollection-style plotting -- see the
        commented import further down the file)."""
        pts=self.endpoints
        verts=zip([pts[0].x,pts[0].y,pts[0].z],[pts[1].x,pts[1].y,pts[1].z],
        [pts[2].x,pts[2].y,pts[2].z],[pts[3].x,pts[3].y,pts[3].z])
        return verts
    def intersect(self,muon_vec):
        """Delegate to Square.intersect_with: intersection of the muon
        track with this chamber (semantics defined in geometry)."""
        return self.square.intersect_with(muon_vec,self.origin)
    def align(self):
        """Alignment placeholder: applies a fixed +5 shift along x and
        sleeps; the residual computation is still commented out below."""
        print(" Calculating Residuals")
        # residual = self.get_residuals(track_slope,hit)
        self.translate(Point3([5.,0.,0.]))
        # print(self.endpoints)
        time.sleep(1)
# def get_residuals(self,track_slope,des_hit,act_hit):
# residual_y=des_hit.y-act_hit.y
# residual_x=des_hit.x-act_hit.x
# x_steps = [-stepSizes[0], 0, stepSizes[0]]
# y_steps = [-stepSizes[1], 0, stepSizes[1]]
# z_steps = [-stepSizes[1], 0, stepSizes[1]]
# theta_steps = [-stepSizes[2], 0, stepSizes[2]]
# eta_steps = [-stepSizes[2], 0, stepSizes[2]]
# phi_steps = [-stepSizes[2], 0, stepSizes[2]]
# minValue = 1000
# lowesState = [0,0,0]
# noDisValue = 0
# xSTD = []
# for x_dis in x_steps:
# for y_dis in y_steps:
# for z_dis in z_steps:
# for t_dis in theta_steps:
# for e_dis in eta_steps:
# for p_dis in phi_steps:
# predictedResidual = y_dis - track_slope[1]*z_dis - act_hit.y*track_slope[1]*t_dis + act_hit.x*track_slope[1]*e_dis+act_hit.x*p_dis
# squaredDifference = np.power(predictedResidual - residual_y,2)
# squaredDifference = squaredDifference[~np.isnan(squaredDifference)]
# stdDev = np.mean(squaredDifference)
# if stdDev < minValue and not (x_dis == 0 and y_dis == 0 and t_dis ==0):
# lowesState = [x_dis,y_dis, t_dis]
# minValue = stdDev
# if x_dis == 0 and y_dis == 0 and t_dis ==0:
# noDisValue = stdDev
# def init(self,translations,rotations):
# self.rotate_by(rotations)
# self.translate_by(translations)
# self.bounds
# ax.plot([0, 0], [0, 0], [0, 10]) # extend in z direction
# ax.plot([0, 0], [0, 8], [0, 0]) # extend in y direction
# ax.plot([0, 9], [0, 0], [0, 0]) # extend in x direction
# def move(self,translations,rotations):
# self.rotate_by(rotations)
# # self.translate_by(translations)
# from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# fig, ax = plt.subplots()
# plt.rcParams["figure.figsize"] = [6, 10]
# ax = plt.axes(projection='3d') # set the axes for 3D plot
# ax.set_autoscale_on=True
# ax.set_xlim([-10, 10])
# ax.set_ylim([-10, 10])
# ax.set_zlim([-10, 10])
# dim = [5,5]
# origin=Point3()
# rotation=[0. ,0.,0.]
# translation=[5,0.,.0]
# chamber = Chamber(dim,origin,rotation)
# rotation=[0.,0.,1.]
# print("num lines")
# print(len(chamber.square.square_vec.lines))
# print(type(chamber.plot_pts()[0][0]))
# chamber.rotate(rotation)
# # ax.plot(chamber.plot_pts()[2],chamber.plot_pts()[1],chamber.plot_pts()[0])
# ax.plot(chamber.plot_pts()[0],chamber.plot_pts()[1],chamber.plot_pts()[2])
# chamber.translate(translation)
# ax.plot(chamber.plot_pts()[0],chamber.plot_pts()[1],chamber.plot_pts()[2])
# # print(chamber.square.out())
# # plt.plot(chamber.plot_pts())
# plt.show() | [
"greynewt@tamu.edu"
] | greynewt@tamu.edu |
27ca6fd5651d732db9013a6bd495e41fa5579fc9 | 467007783b28376929da1a379480c59f002a1d1c | /.env/bin/easy_install | e2d6af62ee9cc7bf5ccdc713e098c8c20a916d91 | [] | no_license | anotherjoshsmith/personal_website | 26bc0b68f87160a52669d2a043b142c6d776fc31 | c0e1a9a0c4a1fc9a44ba9c741515b2f55ba9ca8a | refs/heads/master | 2020-03-10T09:24:28.716941 | 2018-04-24T17:56:24 | 2018-04-24T17:56:24 | 129,308,493 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 417 | #!/Users/joshsmith/Git/personal_website/.env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools','console_scripts','easy_install'
__requires__ = 'setuptools'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools', 'console_scripts', 'easy_install')()
)
| [
"jsmith07@uw.edu"
] | jsmith07@uw.edu | |
3e3b442ac6a61efc6b2af0a7c01b53aa03dc42bd | ae574b5c99d90bd3e79716c822b9eac067563f8e | /firstpython.py | 1c3a745651ddbd2ff4b5261051aa8ee89734c829 | [] | no_license | salpekarashwin/FirstOne | aa8258b101eb1d126ae23b7ab90e90945d1c98e7 | 9b379812f30e1a6ddc7a1cad271980c16ee1e1b3 | refs/heads/main | 2023-02-22T08:47:34.088153 | 2023-02-19T15:21:59 | 2023-02-19T15:21:59 | 308,248,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | print("first python")
| [
"noreply@github.com"
] | salpekarashwin.noreply@github.com |
cdb111ebe746a8b9c8b63bf258b391cd0f925339 | d3febb493aaef14c5da6a15e52c04598eab4e411 | /main.py | 31d6cde7bb461db7693e0cd2e33dc7f9aed9abe9 | [] | no_license | Introduction-to-Programming-OSOWSKI/2-3-comparisons-Ryan-Sandhoefner | 491760ee475efbbd221302d188ab82e47db5e7c5 | 5b73870213238b9300a5b18ca8d94be0f4c180b3 | refs/heads/master | 2023-08-24T20:11:21.115386 | 2021-09-28T14:26:38 | 2021-09-28T14:26:38 | 411,311,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | #define function
def greaterThan(x,y):
if x>y:
return True
else:
return False
#define function
def lessThan(x,y):
if x<y:
return True
else:
return False
#define function
def equalTo(x,y):
if x==y:
return True
else:
return False
#define function
def greaterOrEqual(x,y):
if x>=y:
return True
else:
return False
#define function
def lessOrEqual(x,y):
if x<=y:
return True
else:
return False | [
"Mngamer813@gmail.com"
] | Mngamer813@gmail.com |
74b3a0fc7d4eb94c48d81175d59a052e621428e8 | bde5a2024923a025932719068e71f020eba1deed | /Areas.py | b4ace9d88300ed75957d59a4f7b6237c1638f886 | [] | no_license | RosanaR2017/PYTHON | d7b60ec6bf4722f6e1179b1e3cfd02ca934fa58e | eb68fdf3f4adcc580a6b52b6ecafdd7b85c96c4f | refs/heads/main | 2023-01-08T00:15:43.845957 | 2020-11-04T02:31:09 | 2020-11-04T02:31:09 | 306,211,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | from math import pi
import sys
def ErrorPrint():
    """Print an invalid-input message (Spanish) and terminate with exit code 1."""
    print("Entrada no vรกlida, finaliza el programa")
    sys.exit(1)
def num(n):
    """Convert n to int (preferred) or float; terminate the program on failure.

    Tries int() first, then float(). If neither conversion succeeds,
    delegates to ErrorPrint(), which exits the process.

    The original wrapped this in a pointless `while True` with an
    unreachable `break`; every path returns or exits on the first pass.
    """
    try:
        return int(n)
    except ValueError:
        try:
            return float(n)
        except ValueError:
            ErrorPrint()
def positivo(n):
    """Return n unchanged if it is strictly positive; exit the program otherwise."""
    # n > 0 already excludes zero; the original's extra `n != 0` check was redundant.
    if n > 0:
        return n
    ErrorPrint()
def AreaCircle(radius):
    """Return the area of a circle with the given radius, as a string.

    The radius is converted with num() and validated positive with
    positivo(); the validated value is actually used. (The original
    discarded the conversion result, so a numeric-string argument would
    crash on `radius ** 2` despite num() having converted it.)
    """
    radius = positivo(num(radius))
    return str(pi * (radius ** 2))
def AreaRect(base, height):
    """Return the area of a rectangle (base * height), as a string.

    Validates that the product is positive via positivo(num(...)) and
    returns the validated value instead of recomputing the product.
    NOTE(review): two negative inputs yield a positive product and slip
    through; callers in main() validate each factor individually.
    """
    area = positivo(num(base * height))
    return str(area)
def main():
    """Interactive entry point: ask which shape to compute and print its area.

    Accepts only "Circulo" or "Rectangulo" (case-insensitive, via casefold);
    any other answer terminates the program with exit code 1.
    """
    areaType=str(input("Ingrese el tipo de area a calcular:")) #Only "Circulo" or "Rectangulo"
    if areaType.casefold() == "Circulo".casefold():
        radio=positivo(num(input("Ingrese radio:")))
        print("\nArea Circulo:{}".format(AreaCircle(radio)))
    elif areaType.casefold() == "Rectangulo".casefold():
        # Expects both dimensions on one line, comma-separated.
        base, height= input("Ingrese base y altura separados por una coma:").split(",")
        print("\nArea Rectangulo:{}".format(AreaRect(positivo(num(base)),positivo(num(height)))))
    else:
        sys.exit(1)
    print("wESSaaaaa".upper())
    #ErrorPrint()
# Run the interactive area calculator only when executed as a script.
if __name__=='__main__':
    main()
| [
"noreply@github.com"
] | RosanaR2017.noreply@github.com |
2080ffdca8a74c982e7847b40e4f7a44d309aa6a | fb2668cb16b50e252225ed54d85e6dd531dba7e4 | /revip.py | 013b68d89f71fb6aba891b04357bf185b43e41f1 | [] | no_license | zxc2007/revip-1 | a39f387c87190e0fb2f9f40b8a67a78fd6bba71d | 5fdc263f6bf1a68e809c56ceab7f9a7ca789bf3e | refs/heads/main | 2023-06-27T09:24:35.387528 | 2021-07-14T15:19:55 | 2021-07-14T15:19:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,558 | py | #Author: r4sso
#Github: https://github.com/r4sso
banner = '''
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ By r4sso|github.com
'''
# ANSI terminal color escape codes (P=purple, C=cyan, G=green, W=white,
# R=red; suffix 0 = normal weight, 1 = bold).
P0 = "\033[0;35m"
C0 = "\033[0;36m"
C1 = "\033[1;36m"
G0 = "\033[0;32m"
G1 = "\033[1;32m"
W0 = "\033[0;37m"
W1 = "\033[1;37m"
R0 = "\033[0;31m"
R1 = "\033[1;31m"
# Best-effort imports: on failure, attempt to install `requests` via pip.
# NOTE(review): the bare `except` swallows any error, and the failed import
# is never retried after installation — confirm this is intentional.
try:
    import os
    import requests,json
    import os.path
except:
    os.system("pip3 install requests")
# Clear the terminal and show the ASCII banner at startup.
os.system('clear')
print(banner)
def main():
    """Show the lookup-method menu and dispatch on the user's choice.

    Only option "1" (hackertarget.com) is implemented; "2" and "3" print a
    coming-soon notice. The while condition only re-runs the menu while
    `choice` is still '0', i.e. the menu is effectively shown once.
    """
    choice ='0'
    while choice =='0':
        print("Choose Method:")
        print("1. hackertarget.com [LIMITED]")
        print("2. tools.hack.co.id [COMING SOON]")
        print("3. yougetsignal.com [COMING SOON]")
        choice = input ("\n\nPlease make a choice: ")
        if choice == "1":
            Hackertarget()
        elif choice == "2":
            print("Coming soon! use other else")
        elif choice == "3":
            print("Coming soon! use other else")
        else:
            print("I don't understand your choice.")
def Hackertarget():
    """Interactively query hackertarget.com's reverse-IP lookup API.

    Prompts for an IP, prints the hostnames sharing it, and optionally
    saves the raw API response to '<name>.txt' in the current directory.
    Fix over original: the output file is now opened with a context
    manager so it is flushed and closed (the original leaked the handle).
    """
    os.system("clear")
    print("""
    โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
    โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
    """)
    session = requests.session()
    inip = input('Enter IP: ')
    print("\n=========== Output ===============")
    api = "http://api.hackertarget.com/reverseiplookup/?q="
    apipun = api + inip
    output = session.get(apipun).text
    print(output)
    file = input("Save output to txt? [Y/n]").lower()
    if file == 'y':
        fila = input("\nFilename: ")
        filename = fila + ".txt"
        # Context manager guarantees the file is closed even on write errors.
        with open(filename, "w") as out_file:
            out_file.write(str(output))
    else:
        print("\nHAVE A GOOD DAY :)")
main() | [
"programmernet106@gmail.com"
] | programmernet106@gmail.com |
07c649a37bc085678f51d29d310f245125e33886 | c958c1d13579785b8eea19dccdd205fdc4d4f5d9 | /CODE/Python/Scale UP.py | ec539319055dba09aec0351eb1e90eed5824500e | [] | no_license | Qwerty0525/NOPE-Library | 8c7bce0e5c2e41c2ac1d244bfd0286fabae4b0e6 | 0f262603922e53cb24989cf6242ea0b7d2e65a29 | refs/heads/master | 2022-10-12T01:11:36.347647 | 2020-06-11T01:06:25 | 2020-06-11T01:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | import subprocess
import os, sys
# Paths: source skin images and the upscaled-output root.
PATH = "T:/git/CODE/SKIN/Camouflages"
OUTPUT_PATH = "T:/OUTPUT/"

# File count: tally the .png files under PATH up front so the progress
# bar later knows the total number of files to process.
filecount = 0
endfilecount = 0
for (path, dir, files) in os.walk(PATH):
    for filename in files:
        ext = os.path.splitext(filename)[-1]
        if ext == '.png':
            endfilecount = endfilecount + 1
def progressBar(value, endvalue, bar_length=20):
    """Render a single-line text progress bar on stdout.

    Uses a carriage return so successive calls overwrite the same line.
    Shows "(value / endvalue) [===>   ] NN%".
    """
    fraction = float(value) / endvalue
    filled = int(round(fraction * bar_length)) - 1
    bar = '=' * filled + '>'
    bar += ' ' * (bar_length - len(bar))
    line = "\r({0} / {1}) [{2}] {3}%".format(
        value, endvalue, bar, int(round(fraction * 100)))
    sys.stdout.write(line)
    sys.stdout.flush()
# Walk PATH again and upscale every .png 2x with waifu2x-caffe in CPU mode,
# mirroring the source directory layout under T:/OUTPUT/.
for (path, dir, files) in os.walk(PATH):
    for filename in files:
        ext = os.path.splitext(filename)[-1]
        print(ext)
        if ext == '.png':
            print("%s/%s" % (path, filename))
            # Relative path of the current directory under the SKIN root.
            Tmp = path.split("T:/git/CODE/SKIN/")[-1]
            if not os.path.isdir("T:/OUTPUT/" + Tmp):
                os.makedirs("T:/OUTPUT/" + Tmp)
            progressBar(filecount, endfilecount)
            # Blocking call: one external upscaling process per image.
            subprocess.call("T:/git/Util/Img-Scaling/waifu2x-caffe-cui.exe -p cpu -m scale -i " + path + "/" + filename + " --scale_ratio 2.0 -n 0 -o " + OUTPUT_PATH + Tmp + "/" + os.path.splitext(filename)[0] + ".png")
            filecount = filecount + 1
"36913634+ETCOM@users.noreply.github.com"
] | 36913634+ETCOM@users.noreply.github.com |
0ec45d6c192a7103a5a4759e57e7f5068b0498d4 | bff215028de146b87ef725eb36556dcfd7401c56 | /magenta/magenta-tensorflow/magenta/models/music_vae/data_gen.py | f94ec60b42052e6c3a3b701bbafcdd4cf07b586f | [
"Apache-2.0"
] | permissive | Leetre/Graduation-Project | 6dd99ccbb67d0015ac1d81e02316b87a5290a85d | f99da04cddd76140ede66bf35da110c6ebbd6d20 | refs/heads/master | 2022-08-20T09:40:09.075115 | 2020-05-26T07:48:20 | 2020-05-26T07:48:20 | 266,953,578 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,650 | py | import os
import concurrent
from tqdm import tqdm
from magenta.models.music_vae import data_hierarchical, data
from magenta.models.music_vae.configs import CONFIG_MAP
from magenta.models.music_vae.data import NoteSequenceAugmenter
from magenta.models.music_vae.data_utils import generate_files, shuffle_dataset, UNSHUFFLED_SUFFIX
from magenta.music import abc_parser
from magenta.music import midi_io
from magenta.music import musicxml_reader
from magenta.music import note_sequence_io
import tensorflow as tf
from magenta.protobuf import music_pb2
import magenta.music as mm
import numpy as np
# Command-line flags: input/output locations, directory recursion,
# the music_vae config used for conversion, and log verbosity.
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('input_dir', None,
                           'Directory containing files to convert.')
tf.app.flags.DEFINE_string('output_file', None,
                           'Path to output TFRecord file. Will be overwritten '
                           'if it already exists.')
tf.app.flags.DEFINE_bool('recursive', True,
                         'Whether or not to recurse into subdirectories.')
tf.app.flags.DEFINE_string('config', 'hier-multiperf_vel_1bar_med', '')
tf.app.flags.DEFINE_string('log', 'INFO',
                           'The threshold for what messages will be logged '
                           'DEBUG, INFO, WARN, ERROR, or FATAL.')
def get_midi_files(root_dir, sub_dir, recursive=False):
    """Collects paths of MIDI files (.mid/.midi) under a directory.

    Note: the original docstring was stale — it said "Converts files" and
    documented a nonexistent `writer` parameter; this function only lists
    file paths, recursing when requested.

    Args:
      root_dir: A string specifying a root directory.
      sub_dir: A string specifying a path to a directory under `root_dir` in
        which to look for MIDI files.
      recursive: A boolean specifying whether or not to recurse into
        subdirectories of the specified directory.

    Returns:
      A list of full paths to the MIDI files found.
    """
    dir_to_convert = os.path.join(root_dir, sub_dir)
    tf.logging.info("collect files in '%s'.", dir_to_convert)
    files_in_dir = tf.gfile.ListDirectory(os.path.join(dir_to_convert))
    recurse_sub_dirs = []
    midi_files = []
    for file_in_dir in files_in_dir:
        full_file_path = os.path.join(dir_to_convert, file_in_dir)
        if (full_file_path.lower().endswith('.mid') or
                full_file_path.lower().endswith('.midi')):
            midi_files.append(full_file_path)
        else:
            # Non-MIDI entries are either directories to recurse into or
            # files we cannot handle (warn and skip).
            if recursive and tf.gfile.IsDirectory(full_file_path):
                recurse_sub_dirs.append(os.path.join(sub_dir, file_in_dir))
            else:
                tf.logging.warning(
                    'Unable to find a converter for file %s', full_file_path)

    for recurse_sub_dir in recurse_sub_dirs:
        midi_files += get_midi_files(root_dir, recurse_sub_dir, recursive)

    return midi_files
def convert_midi(root_dir, sub_dir, full_file_path, output_file):
    """Converts one MIDI file to model tensors via the configured converter.

    Parses the MIDI into a NoteSequence, optionally augments it, converts
    each (augmented) sequence to tensors, and returns a list of dicts with
    flattened 'notes', 'chords', 'shape' and 'lengths'. The list is also
    dumped to '<output_file>_npy/<midi basename>.npy' when non-empty.
    Returns [] if the MIDI cannot be parsed.

    NOTE(review): the .npy path uses only the MIDI basename, so two files
    with the same name in different subdirectories overwrite each other.
    """
    data_converter = CONFIG_MAP[FLAGS.config].data_converter
    augmenter = CONFIG_MAP[FLAGS.config].note_sequence_augmenter
    ret = []
    try:
        sequence = midi_io.midi_to_sequence_proto(
            tf.gfile.GFile(full_file_path, 'rb').read())
    except midi_io.MIDIConversionError as e:
        tf.logging.warning(
            'Could not parse MIDI file %s. It will be skipped. Error was: %s',
            full_file_path, e)
        return []
    sequence.collection_name = os.path.basename(root_dir)
    sequence.filename = os.path.join(sub_dir, os.path.basename(full_file_path))
    sequence.id = note_sequence_io.generate_note_sequence_id(
        sequence.filename, sequence.collection_name, 'midi')
    # tf.logging.info('Converted MIDI file %s.', full_file_path)
    for s in (augmenter.get_all(sequence) if augmenter is not None else [sequence]):
        data = data_converter.to_tensors(s)
        for inp, c, l in zip(data.inputs, data.controls, data.lengths):
            # NOTE(review): `s` is rebound here from the sequence to the
            # tensor shape — shadowing, but harmless since the sequence is
            # no longer needed after to_tensors().
            s = list(inp.shape)
            inp = inp.reshape(-1).tolist()
            c = c.reshape(-1).tolist()
            # Empty controls are replaced by a [0] placeholder so every
            # example has a non-empty 'chords' field.
            if len(c) == 0:
                c = [0]
            if isinstance(l, int):
                l = [l]
            ret.append({
                'notes': inp,
                'chords': c,
                'shape': s,
                'lengths': l
            })
    if len(ret) > 0:
        np.save("{}_npy/{}".format(output_file, os.path.basename(full_file_path)), ret)
    return ret
def generator(root_dir, output_file, recursive=False):
    """Yields converted-example dicts for every MIDI file under `root_dir`.

    MIDI files are processed in chunks of STEPS files; within each chunk
    the conversion is fanned out over a process pool. Each yielded dict
    gets a globally unique segment 'id'. Per-file .npy dumps are written
    under '<output_file>_npy' by convert_midi.

    (Cleanup over original: removed large blocks of commented-out dead
    code — a sequential conversion loop and an alternate executor.map
    call — that duplicated the live implementation.)

    Args:
        root_dir: Directory to search for MIDI files.
        output_file: Base output path; '<output_file>_npy' holds dumps.
        recursive: Whether to recurse into subdirectories.

    Yields:
        Dicts with 'notes', 'chords', 'shape', 'lengths' and 'id' keys.
    """
    midi_files = get_midi_files(root_dir, '', recursive)
    STEPS = 10000
    seg_idx = 0
    os.makedirs('{}_npy'.format(output_file), exist_ok=True)
    for i in range(len(midi_files) // STEPS + 1):
        print(i)
        # Fan conversion out over ~1.5x CPU-count worker processes.
        with concurrent.futures.ProcessPoolExecutor(max_workers=int(os.cpu_count() * 1.5)) as executor:
            futures = [executor.submit(convert_midi, root_dir, '', full_file_path, output_file) for full_file_path in
                       midi_files[i * STEPS:(i + 1) * STEPS]]
            t = tqdm(concurrent.futures.as_completed(futures), total=len(midi_files), initial=STEPS * i, ncols=100)
            for future in t:
                for r in future.result():
                    r['id'] = [seg_idx]
                    yield r
                    seg_idx += 1
                t.set_description("total: {}".format(seg_idx))
# python magenta/models/music_vae/data_gen.py --input_dir=data/lmd/lmd_full --output_file=data/lmd/lmd_full2 --recursive --config=hier-multiperf_vel_1bar_med
# python magenta/models/music_vae/data_gen.py --input_dir=data/maestro/maestro-v2.0.0 --output_file=data/maestro/maestro --recursive
def main(unused_argv):
    """Converts MIDI under --input_dir into sharded, shuffled TFRecords.

    Validates the required flags, ensures the output directory exists,
    writes OUTPUT_SHARDS unshuffled shard files, then shuffles them.
    """
    tf.logging.set_verbosity(FLAGS.log)
    if not FLAGS.input_dir:
        tf.logging.fatal('--input_dir required')
        return
    if not FLAGS.output_file:
        tf.logging.fatal('--output_file required')
        return

    input_dir = os.path.expanduser(FLAGS.input_dir)
    output_file = os.path.expanduser(FLAGS.output_file)
    output_dir = os.path.dirname(output_file)

    if output_dir:
        tf.gfile.MakeDirs(output_dir)
    # Write unshuffled shards first (suffix marks them), then shuffle.
    OUTPUT_SHARDS = 10
    output_files = ["{}_{}.tfrecord{}".format(output_file, f, UNSHUFFLED_SUFFIX) for f in range(OUTPUT_SHARDS)]
    generate_files(generator(input_dir, output_file, FLAGS.recursive), output_files)
    shuffle_dataset(output_files)
def console_entry_point():
    """Console-script entry point; defers flag parsing to tf.app.run."""
    tf.app.run(main)

if __name__ == '__main__':
    console_entry_point()
"823287831@qq.com"
] | 823287831@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.