blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9f9a2cb5b744a7310455602ae1d5d7ab6d24938c | 52573a86f29adea75166cc2203618f2139abf5db | /RouterOS API/Set Item/Mengganti IP Address.py | 14f05f1bd32c2932f3f5395500e8d6e8d6499ec6 | [
"Apache-2.0"
] | permissive | HudaFiqri/Mikrotik_RouterOS_API_Learning_Indonesia | 4b1354ee09ec029b2a1461bdff7ba7f9c886f83a | cb9d32a42d19d5026ce620af7cd2d72b0b515f42 | refs/heads/main | 2023-03-07T23:07:02.299813 | 2021-02-23T05:20:40 | 2021-02-23T05:20:40 | 341,436,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | '''
Mengganti IP Address menggunakan API.
sumber referensi: - https://pypi.org/project/RouterOS-api/
- https://wiki.mikrotik.com/wiki/API_command_notes#Scripting_and_API
- https://github.com/socialwifi/RouterOS-api
ditulis pada: 23-02-2021
'''
# Import the RouterOS API client module.
import routeros_api
# Authenticate against the MikroTik router (plaintext login enabled for this API version).
api = routeros_api.RouterOsApiPool('192.168.1.1', username='user1', password='user1', plaintext_login=True)
api_connect = api.get_api()
# Grab the /ip/address resource so commands can be issued against it.
api_command = api_connect.get_resource('/ip/address')
'''
-> Mengubah value pada ip address
sama halnya dengan command line pada mikrotik '/ip address set numbers=1 address=192.168.1.1/24'
tapi yang untuk digunakan pada variabel panggilannya adalah variabel yang berasal dari command line.
'''
api_command.set(numbers='1', address='192.168.1.1/24') | [
"mhudafiqri06@gmail.com"
] | mhudafiqri06@gmail.com |
a5c76605cdf1daa2e1d5d4e2cc7027083214022c | a687a386d54a36840caa2c4e46e741ca5fd223f8 | /homework1/homework1_16.py | c8819e34b9d263d9c28db98f5fdf089dde9dc211 | [
"MIT"
] | permissive | whx1994hans/ml_foundation | 0b7931aad1b0972be9188448279f1eabbbd0a364 | f5e618f1e0dedc9fc8095be38b628b97e7a2fd45 | refs/heads/master | 2020-03-15T09:28:43.945888 | 2018-05-04T02:49:14 | 2018-05-04T02:49:14 | 132,075,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | import numpy as np
def load_train_data(file_name):
    """Load a whitespace-delimited dataset and prepend a bias column of ones.

    Each row becomes [1.0, x_1, ..., x_d, label]; the leading 1.0 is the
    constant x_0 feature used by the perceptron's bias weight.
    """
    dataset = np.loadtxt(file_name)
    bias_column = np.ones(dataset.shape[0])
    return np.insert(dataset, 0, bias_column, axis=1)
def split_features_and_labels(dataset):
    """Split a dataset matrix into its feature columns and the final label column."""
    last = dataset.shape[1] - 1
    return dataset[:, :last], dataset[:, last]
def pla_hypothesis_value(w, x):
    """Perceptron sign function: +1.0 when w.x > 0, otherwise -1.0."""
    return 1.0 if w.dot(x) > 0 else -1.0
def pla(features, labels, w):
    """Run the perceptron learning algorithm until a full error-free pass.

    Repeatedly cycles through the samples, correcting w on every
    misclassified one, and keeps cycling while the previous pass made a
    correction (or while w is still the all-zero initial vector).

    Returns:
        (update_times, w): number of corrections made and the final weights.
    """
    update_times = 0
    made_correction = False
    while made_correction or (w == 0).all():
        made_correction = False
        for feature, label in zip(features, labels):
            if pla_hypothesis_value(w, feature) == label:
                continue
            w = w + label * feature
            update_times += 1
            made_correction = True
    return update_times, w
def random_cycles_pla_n_times(filename, n_times):
    """Average PLA update count over n_times runs with shuffled sample order.

    Args:
        filename: path of the whitespace-delimited training file.
        n_times: number of independent PLA runs to average over.

    Returns:
        Mean number of weight updates per run.
    """
    # Bug fix: this previously read the module-level global `file_name`,
    # silently ignoring the `filename` argument passed by the caller.
    training_set = load_train_data(filename)
    update_times = 0
    for i in range(0, n_times):
        w = np.zeros(training_set.shape[1] - 1)
        # Shuffle in place so each run visits the samples in a fresh order.
        np.random.shuffle(training_set)
        features, labels = split_features_and_labels(training_set)
        update_time, w = pla(features, labels, w)
        update_times += update_time
    return update_times / n_times
# Entry point: report the average PLA update count over 2000 shuffled runs.
file_name = 'hw1_15_train.txt'
update_times = random_cycles_pla_n_times(file_name, 2000)
print(update_times)
| [
"whx1994hans@gmail.com"
] | whx1994hans@gmail.com |
af61ef01dae3b390af546bf09648723e26652b26 | 5a1b6b8ed4c06e7743c8e943b2c6f854decfd90b | /pyretrace/reader.py | c72a5c0341c021dc408a50c655f4eeda0b9de975 | [
"BSD-2-Clause-Views"
] | permissive | MaTriXy/pyretrace | 369ca9b9c14441c4f170930bd667fbd72e6ba430 | fe8afd2296f01fa949d744727cc3b9dc7e69aa6c | refs/heads/master | 2020-04-06T06:41:16.526008 | 2015-12-06T22:25:57 | 2015-12-06T22:25:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,226 | py | import sys
class MappingReader():
    """Reads a ProGuard mapping file and feeds every entry to a processor.

    The processor object must expose process_class_mapping,
    process_field_mapping and process_method_mapping callbacks.
    """

    def __init__(self, mapping_file):
        # Path of the ProGuard mapping file to parse.
        self.mapping_file = mapping_file

    def pump(self, mapping_processor):
        """Parse the mapping file, invoking mapping_processor per entry.

        Exits the process with status 1 if the file cannot be parsed.
        """
        reader = open(self.mapping_file, 'r')
        try:
            class_name = None
            # Read the subsequent class mappings and class member mappings.
            while True:
                line = reader.readline()
                if not line:
                    break
                line = line.strip()
                # A class mapping line ends with ':'; member mapping
                # lines are processed in the context of the last class.
                if line.endswith(':'):
                    # Process the class mapping and remember the class's
                    # old name.
                    class_name = self.process_class_mapping(line, mapping_processor)
                elif class_name is not None:
                    # Process the class member mapping, in the context of the
                    # current old class name.
                    self.process_class_member_mapping(class_name, line, mapping_processor)
        except Exception as ex:
            # Bug fix: this was a Python 2 `print` statement, which is a
            # SyntaxError under Python 3; the call form works on both.
            print('Can\'t process mapping file (%s)' % ex)
            sys.exit(1)
        finally:
            reader.close()

    @staticmethod
    def process_class_mapping(line, mapping_processor):
        """Parse one "old.Class -> new.Class:" line.

        Returns the original class name when the processor is interested
        in its members, otherwise None.
        """
        # See if we can parse "___ -> ___:", containing the original
        # class name and the new class name.
        arrow_index = line.find('->')
        if arrow_index < 0:
            return None
        colon_index = line.find(':', arrow_index + 2)
        if colon_index < 0:
            return None
        # Extract the elements.
        class_name = line[0: arrow_index].strip()
        new_class_name = line[arrow_index + 2: colon_index].strip()
        # Process this class name mapping.
        interested = mapping_processor.process_class_mapping(class_name, new_class_name)
        if interested:
            return class_name
        else:
            return None

    @staticmethod
    def process_class_member_mapping(class_name, line, mapping_processor):
        """Parse one member line: "type name -> new" for fields, or
        "first:last:type name(args) -> new" for methods (line numbers optional).
        """
        # See if we can parse "___:___:___ ___(___) -> ___",
        # containing the optional line numbers, the return type, the original
        # field/method name, optional arguments, and the new field/method name.
        colon_index1 = line.find(':')
        colon_index2 = -1 if colon_index1 < 0 else line.find(':', colon_index1 + 1)
        space_index = line.find(' ', colon_index2 + 2)
        argument_index1 = line.find('(', space_index + 1)
        argument_index2 = -1 if argument_index1 < 0 else line.find(')', argument_index1 + 1)
        arrow_index = line.find('->', max(space_index, argument_index2) + 1)
        if space_index < 0 or arrow_index < 0:
            return
        # Extract the elements. (`member_type` renamed from `type`, which
        # shadowed the builtin.)
        member_type = line[colon_index2 + 1: space_index].strip()
        name = line[space_index + 1: argument_index1 if argument_index1 >= 0 else arrow_index].strip()
        new_name = line[arrow_index + 2: len(line)].strip()
        # Process this class member mapping.
        if len(member_type) > 0 and \
           len(name) > 0 and \
           len(new_name) > 0:
            # A parenthesised argument list distinguishes methods from fields.
            if argument_index2 < 0:
                mapping_processor.process_field_mapping(class_name, member_type, name, new_name)
            else:
                first_line_number = 0
                last_line_number = 0
                if colon_index2 > 0:
                    first_line_number = int(line[0: colon_index1].strip())
                    last_line_number = int(line[colon_index1 + 1: colon_index2].strip())
                arguments = line[argument_index1 + 1: argument_index2].strip()
                mapping_processor.process_method_mapping(class_name,
                                                         first_line_number,
                                                         last_line_number,
                                                         member_type,
                                                         name,
                                                         arguments,
                                                         new_name)
"rotem@everything.me"
] | rotem@everything.me |
3ab80af6244436a1a7f2ad642c811678bd3e02b8 | bd0b760dfe03420bed3882c7c734e1553e923352 | /mysite/local_settings.py | 17dab683b2e6f6b488be3b0efd40abfaef8015b7 | [
"Apache-2.0"
] | permissive | bhagvank/slacknlp | c2bed55b28010b808f36131ec9a12e220f75b2dc | 9aabc8884cce657c0e6c2cec07fab6b609d1b0e9 | refs/heads/master | 2020-03-24T03:22:52.724487 | 2018-12-23T14:25:51 | 2018-12-23T14:25:51 | 142,416,967 | 3 | 1 | null | 2018-07-26T10:04:46 | 2018-07-26T09:09:30 | null | UTF-8 | Python | false | false | 926 | py | """
local Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from settings import PROJECT_ROOT, SITE_ROOT
import os
# Development-only flags; never run production with DEBUG enabled.
DEBUG = True
TEMPLATE_DEBUG = True
#DATABASES = {
#    "default": {
#        "ENGINE": "django.db.backends.postgresql_psycopg2",
#        "NAME": "django_deploy",
#        "USER": "foo",
#        "PASSWORD": "bar",
#        "HOST": "localhost",
#        "PORT": "5432",
#    }
#}
# Local PostgreSQL database used for development.
# NOTE(review): credentials are hardcoded here — acceptable only for a
# local dev box; keep real credentials out of version control.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'PORT' : '5432',
        'HOST' : 'localhost',
        'NAME' : 'django_development',
        'USER' : 'newuser',
        'PASSWORD' : 'newuser'
    }
} | [
"bhagvan@crystaldelta.com"
] | bhagvan@crystaldelta.com |
e57b454023b98bca370d58ed307d4323516892c2 | 27caa29053c9b6c7734c46f27ee3bc86d40bd9bd | /quspin/basis/_reshape_subsys.py | 3e24aeec44096863c013dd06e83fa890f4615729 | [] | no_license | wenya-r/ED | c121006418db6cff8c43aade957c3915865ecdb6 | 50f2c37465fdd6b4a0be5ce909a65edb94b9902b | refs/heads/master | 2020-04-08T13:21:30.034824 | 2019-03-14T21:20:24 | 2019-03-14T21:20:24 | 159,097,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,543 | py | import numpy as _np
import scipy.sparse as _sp
####################################################
# set of helper functions to implement the partial #
# trace of lattice density matrices. They do not #
# have any checks and states are assumed to be #
# in the non-symmetry reduced basis. #
####################################################
def _lattice_partial_trace_pure(psi,sub_sys_A,L,sps,return_rdm="A"):
"""
This function computes the partial trace of a dense pure state psi over set of sites sub_sys_A and returns
reduced DM. Vectorisation available.
"""
psi_v=_lattice_reshape_pure(psi,sub_sys_A,L,sps)
if return_rdm == "A":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),None
elif return_rdm == "B":
return None,_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
elif return_rdm == "both":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
def _lattice_partial_trace_mixed(rho,sub_sys_A,L,sps,return_rdm="A"):
"""
This function computes the partial trace of a set of dense mixed states rho over set of sites sub_sys_A
and returns reduced DM. Vectorisation available.
"""
rho_v=_lattice_reshape_mixed(rho,sub_sys_A,L,sps)
if return_rdm == "A":
return _np.einsum("...jlkl->...jk",rho_v),None
elif return_rdm == "B":
return None,_np.einsum("...ljlk->...jk",rho_v.conj())
elif return_rdm == "both":
return _np.einsum("...jlkl->...jk",rho_v),_np.einsum("...ljlk->...jk",rho_v.conj())
def _lattice_partial_trace_sparse_pure(psi,sub_sys_A,L,sps,return_rdm="A"):
"""
This function computes the partial trace of a sparse pure state psi over set of sites sub_sys_A and returns
reduced DM.
"""
psi=_lattice_reshape_sparse_pure(psi,sub_sys_A,L,sps)
if return_rdm == "A":
return psi.dot(psi.H),None
elif return_rdm == "B":
return None,psi.H.dot(psi)
elif return_rdm == "both":
return psi.dot(psi.H),psi.H.dot(psi)
def _lattice_reshape_pure(psi,sub_sys_A,L,sps):
"""
This function reshapes the dense pure state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = psi.shape[:-1]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(range(n_dims)) + tuple(n_dims + s for s in T_tup)
R_tup = extra_dims + tuple(sps for i in range(L))
psi_v = psi.reshape(R_tup) # DM where index is given per site as rho_v[i_1,...,i_L,j_1,...j_L]
psi_v = psi_v.transpose(T_tup) # take transpose to reshuffle indices
psi_v = psi_v.reshape(extra_dims+(Ns_A,Ns_B))
return psi_v
def _lattice_reshape_mixed(rho,sub_sys_A,L,sps):
"""
This function reshapes the dense mixed state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = rho.shape[:-2]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
# T_tup tells numpy how to reshuffle the indices such that when I reshape the array to the
# 4-_tensor rho_{ik,jl} i,j are for sub_sys_A and k,l are for sub_sys_B
# which means I need (sub_sys_A,sub_sys_B,sub_sys_A+L,sub_sys_B+L)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(range(n_dims)) + tuple(s+n_dims for s in T_tup) + tuple(L+n_dims+s for s in T_tup)
R_tup = extra_dims + tuple(sps for i in range(2*L))
rho_v = rho.reshape(R_tup) # DM where index is given per site as rho_v[i_1,...,i_L,j_1,...j_L]
rho_v = rho_v.transpose(T_tup) # take transpose to reshuffle indices
return rho_v.reshape(extra_dims+(Ns_A,Ns_B,Ns_A,Ns_B))
def _lattice_reshape_sparse_pure(psi,sub_sys_A,L,sps):
"""
This function reshapes the sparse pure state psi over the Hilbert space defined by sub_sys_A and its complement.
"""
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
psi = psi.tocoo()
T_tup = sub_sys_A+sub_sys_B
# reshuffle indices for the sub-systems.
# j = sum( j[i]*(sps**i) for i in range(L))
# this reshuffles the j[i] similar to the transpose operation
# on the dense arrays psi_v.transpose(T_tup)
if T_tup != tuple(range(L)):
indx = _np.zeros(psi.col.shape,dtype=psi.col.dtype)
for i_old,i_new in enumerate(T_tup):
indx += ((psi.col//(sps**(L-i_new-1))) % sps)*(sps**(L-i_old-1))
else:
indx = psi.col
# A = _np.array([0,1,2,3,4,5,6,7,8,9,10,11])
# print("make shift way of reshaping array")
# print("A = {}".format(A))
# print("A.reshape((3,4)): \n {}".format(A.reshape((3,4))))
# print("rows: A.reshape((3,4))/4: \n {}".format(A.reshape((3,4))/4))
# print("cols: A.reshape((3,4))%4: \n {}".format(A.reshape((3,4))%4))
psi._shape = (Ns_A,Ns_B)
psi.row[:] = indx / Ns_B
psi.col[:] = indx % Ns_B
return psi.tocsr()
def _tensor_reshape_pure(psi,sub_sys_A,Ns_l,Ns_r):
extra_dims = psi.shape[:-1]
if sub_sys_A == "left":
return psi.reshape(extra_dims+(Ns_l,Ns_r))
else:
n_dims = len(extra_dims)
T_tup = tuple(range(n_dims))+(n_dims+1,n_dims)
psi_v = psi.reshape(extra_dims+(Ns_l,Ns_r))
return psi_v.transpose(T_tup)
def _tensor_reshape_sparse_pure(psi,sub_sys_A,Ns_l,Ns_r):
psi = psi.tocoo()
# make shift way of reshaping array
# j = j_l + Ns_r * j_l
# j_l = j / Ns_r
# j_r = j % Ns_r
if sub_sys_A == "left":
psi._shape = (Ns_l,Ns_r)
psi.row[:] = psi.col / Ns_r
psi.col[:] = psi.col % Ns_r
return psi.tocsr()
else:
psi._shape = (Ns_l,Ns_r)
psi.row[:] = psi.col / Ns_r
psi.col[:] = psi.col % Ns_r
return psi.T.tocsr()
def _tensor_reshape_mixed(rho,sub_sys_A,Ns_l,Ns_r):
extra_dims = rho.shape[:-2]
if sub_sys_A == "left":
return rho.reshape(extra_dims+(Ns_l,Ns_r,Ns_l,Ns_r))
else:
n_dims = len(extra_dims)
T_tup = tuple(range(n_dims))+(n_dims+1,n_dims)+(n_dims+3,n_dims+2)
rho_v = rho.reshape(extra_dims+(Ns_l,Ns_r,Ns_l,Ns_r))
return rho_v.transpose(T_tup)
def _tensor_partial_trace_pure(psi,sub_sys_A,Ns_l,Ns_r,return_rdm="A"):
psi_v = _tensor_reshape_pure(psi,sub_sys_A,Ns_l,Ns_r)
if return_rdm == "A":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),None
elif return_rdm == "B":
return None,_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
elif return_rdm == "both":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
def _tensor_partial_trace_sparse_pure(psi,sub_sys_A,Ns_l,Ns_r,return_rdm="A"):
psi = _tensor_reshape_sparse_pure(psi,sub_sys_A,Ns_l,Ns_r)
if return_rdm == "A":
return psi.dot(psi.H),None
elif return_rdm == "B":
return None,psi.H.dot(psi)
elif return_rdm == "both":
return psi.dot(psi.H),psi.H.dot(psi)
def _tensor_partial_trace_mixed(rho,sub_sys_A,Ns_l,Ns_r,return_rdm="A"):
rho_v = _tensor_reshape_mixed(rho,sub_sys_A,Ns_l,Ns_r)
if return_rdm == "A":
return _np.squeeze(_np.einsum("...ijkj->...ik",rho_v)),None
elif return_rdm == "B":
return None,_np.squeeze(_np.einsum("...jijk->...ik",rho_v.conj()))
elif return_rdm == "both":
return _np.squeeze(_np.einsum("...ijkj->...ik",rho_v)),_np.squeeze(_np.einsum("...jijk->...ik",rho_v.conj()))
| [
"wwrowe@gmail.com"
] | wwrowe@gmail.com |
d917343c58d4c5987adedca27f89df889a8c8f6a | 57513acbee10889f6803499cd4c20cfdcdbd2576 | /calificacion.py | e435ed8c0a97b7da2ce2b9406a6c493795f5fffe | [] | no_license | alanespinozaz/UNEMI-ESTRUCTURA-DE-DATOS | c0c1a5420033deaf0f54079befbbc6776cb2c3ce | 694633924c56370bf2a142b167cef54152000f7e | refs/heads/main | 2023-06-16T02:05:02.902495 | 2021-07-18T04:39:49 | 2021-07-18T04:39:49 | 375,894,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | "" "Dado como dato la calificación de un alumno en un examen escriba “aprobado” si su calificación es mayor o igual que 7 y “Reprobado” en caso contrario. "" "
clase Ejemplo4 :
def __init__ ( yo ):
aprobar
def aporepro ( yo ):
cal = float ( input ( "Ingrese su Nota final:" ))
si cal > = 7 :
print ( "Felicitaciones usted ha aprobado el curso {}" . formato ( cal ))
otra cosa :
print ( "Usted ha Reprobado el curso {}" . formato ( cal ))
eje1=Ejemplo4 ()
eje1.aporepro () | [
"noreply@github.com"
] | noreply@github.com |
baf7bb5505b041499763a04a9301d9ceb25d6184 | 04642962ef1d6ab7c3a3c8b65f1de2566fe366da | /venv/bin/flask | 7a8608814aee5dfb5c39d4115dd0017d068a1c28 | [] | no_license | chrismulc/cse5ipr | 3e2c6166d42481233ed415216b279f9769bcc132 | 287ef8bd43bc042f7ee05ea9cf2e5eae31d4dfb3 | refs/heads/main | 2023-01-01T02:37:22.905213 | 2020-10-19T04:00:52 | 2020-10-19T04:00:52 | 305,257,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | #!/home/chris/Desktop/shit/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"chrismulcair@gmail.com"
] | chrismulcair@gmail.com | |
289a348b02b1dcd1fbc6f33b305c4fa1d8313271 | f253f06b816503837744620befad8aed554ec72f | /bin/models/adaboost.py | 976ab2ce1c6476294602669e7153e061cc9f5221 | [] | no_license | julienbrosseau/IFT712-Projet | edc67ad4cd2437b57f9bdcb6b2fd1cc391e4c3d5 | 911090fc4a81e2a4f8c38d41543001cb1cc2da33 | refs/heads/master | 2020-09-11T04:11:27.170890 | 2019-12-11T22:13:06 | 2019-12-11T22:13:06 | 221,934,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | # Classification par Adaboost
# Source du module "AdaBoostClassifier"
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html
# Source du module "GridSearchCV"
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
import numpy as np
class AdaBoost():
    """Thin wrapper around a cross-validated, grid-searched AdaBoost classifier."""

    def __init__(self):
        # Hyperparameter grid explored by the 15-fold cross-validated search.
        param_grid = {
            'n_estimators': range(5, 15, 1),
            'learning_rate': np.arange(0.8, 1.8, 0.1),
        }
        self.adaboost = GridSearchCV(
            AdaBoostClassifier(random_state=0),
            param_grid,
            cv=15,
            iid=False,
        )

    def fit(self, x_tab, t_tab):
        """Fit the grid-searched model on the given samples and targets."""
        return self.adaboost.fit(x_tab, t_tab)

    def predict(self, data):
        """Predict class labels for the given samples."""
        return self.adaboost.predict(data)

    def score(self, x_tab, t_tab):
        """Return the mean score of the model on the given samples and targets."""
        return self.adaboost.score(x_tab, t_tab)

    def get_best_param(self):
        """Return the best hyperparameter combination found by the search."""
        return self.adaboost.best_params_
| [
"jbrosseau85@gmail.com"
] | jbrosseau85@gmail.com |
50bd5fb3ecb34a150b5e00992b4dbb77b833d894 | b476148624d6a92b2be59adb856decf1b64e8f2b | /dmitri_melnik/wsgi.py | 167ecc8ac506557efc14bb89da4acea0e3fbf873 | [] | no_license | Anthlon/dmitri | 1e79e2aaf57c6ed06bf4526dd501da312d897f51 | a613f4fb2534e32bb52112557187c4b88aaaed57 | refs/heads/dev | 2022-12-13T07:16:12.168442 | 2019-01-23T18:40:04 | 2019-01-23T18:40:04 | 166,687,721 | 0 | 1 | null | 2022-11-22T02:55:20 | 2019-01-20T17:15:07 | Python | UTF-8 | Python | false | false | 404 | py | """
WSGI config for dmitri_melnik project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before creating the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dmitri_melnik.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| [
"taran2jl@gmail.com"
] | taran2jl@gmail.com |
0e8bb6dc3894b148e34ded7def1b41d06923923c | 9bce12bce7a08307d0894455f428bde13c49b0ad | /Lists.py | 9f0dc1bac4a0f1c1c747f9daf976cc41813feaa2 | [] | no_license | RAAD07/Lists-Python-Hackerrank | 30bbb74a88ca2d36bade053762143dd7fb47a828 | 6758f33631d1fdb1931a9b819a748a1e9b9b7bdd | refs/heads/master | 2022-11-19T09:51:58.840647 | 2020-07-06T17:47:35 | 2020-07-06T17:47:35 | 277,609,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | if __name__ == '__main__':
    # Number of commands to process.
    N = int(input())
    arr=[]
    # Each command line: an operation name, optionally followed by integer args.
    for i in range(N):
        s=input()
        s=s.split()
        if s[0]=="insert":
            arr.insert(int(s[1]),int(s[2]))
        elif s[0]=="print":
            print(arr)
        elif s[0]=="remove":
            # Removes the first occurrence of the value.
            arr.remove(int(s[1]))
        elif s[0]=="append":
            arr.append(int(s[1]))
        elif s[0]=="sort":
            arr.sort()
        elif s[0]=="pop":
            arr.pop()
        elif s[0]=="reverse":
            arr.reverse()
| [
"noreply@github.com"
] | noreply@github.com |
3d37da637d2574b7f62ac05306ea06e985dab24c | ae10b60cb92a69146bfb05ef5dde735a0aa45d4b | /examples/Extended Application/sklearn/examples/manifold/plot_swissroll.py | 7c79aa7f21f247e89add004d85ca228061582301 | [
"MIT"
] | permissive | kantel/nodebox-pyobjc | 471cea4c5d7f1c239c490323186458a74edcc214 | 068ba64c87d607522a240ab60c3ba14f869f6222 | refs/heads/master | 2021-08-14T18:32:57.995445 | 2017-11-16T13:42:23 | 2017-11-16T13:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,103 | py | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
# plt.show()
pltshow(plt)
| [
"karstenwo@web.de"
] | karstenwo@web.de |
6d183a2f763ca7810329b07ebda8c350fcdc4a62 | a5ba7f324febb0fa2938ad2ee7b7b4c9941f4894 | /4.py | 4090a3800817c632917fb33c97da7f3c4c0418a4 | [] | no_license | AlenaGB/hw4 | 12f7789b54d3afc25165c2df221e84a0a1b15f93 | b922c2db36361eaddcf86807aab4f65676b641d4 | refs/heads/add_first_file | 2023-02-13T17:05:07.274647 | 2021-01-13T21:09:14 | 2021-01-13T21:09:14 | 329,387,395 | 0 | 0 | null | 2021-01-13T21:09:15 | 2021-01-13T17:47:20 | Python | UTF-8 | Python | false | false | 134 | py | myList = input('Enter numbers by a space: ').split()
# Keep only the elements that occur exactly once in the input list.
newList = [el for el in myList if myList.count(el) == 1]
print(' '.join(newList)) | [
"olyonapopova@gmail.com"
] | olyonapopova@gmail.com |
7a7ff52519684c15858e9d701cbe94a12417569d | 66ba4c3af5746fcb4dc0f0b73249c54ee68c92aa | /insta_post/migrations/0002_auto_20201016_1836.py | 5b827e4fb0dd3de03fc94d341678192648d679fe | [] | no_license | pokeyjess/Insta-Car | 9aaef1dbdf91c35e0f81522785b559ab51d23c51 | 31604d534d08204f16ed67965e32aeffb792a92f | refs/heads/master | 2022-12-20T03:08:49.184552 | 2020-10-20T03:28:04 | 2020-10-20T03:28:04 | 301,603,196 | 0 | 4 | null | 2020-10-20T03:28:05 | 2020-10-06T03:26:19 | JavaScript | UTF-8 | Python | false | false | 405 | py | # Generated by Django 3.1.2 on 2020-10-16 18:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the Comment.content field."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('insta_post', '0001_initial'),
    ]

    # Redefine Comment.content as a 280-character field labelled "comment".
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='content',
            field=models.CharField(max_length=280, verbose_name='comment'),
        ),
    ]
| [
"pokeyjess72@gmail.com"
] | pokeyjess72@gmail.com |
bdc4fe683809a077cf124332f5f64f688336229e | 72b37e71fe10893bbd469a09bcbd20aa495c713b | /media1.py | dc9517e4d20de6b47cd337062ca51b62ad0bfb53 | [] | no_license | AndreLucasrs/UriJudge-Python | b177b008c9fea76aff75684e05ba810c13aae141 | f4fed41511281725bf07b2d95d51d44acdc3425f | refs/heads/master | 2021-01-11T21:32:41.886823 | 2019-08-11T18:14:51 | 2019-08-11T18:14:51 | 78,802,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | # -*- coding: utf-8 -*-
# Read the two exam scores from stdin.
a = float(input())
b = float(input())
# Weighted average: the first score weighs 3.5, the second 7.5 (total 11).
media = float(((a * 3.5) + (b * 7.5)) / 11)
# Bug fix: was a Python 2 `print` statement (a SyntaxError under Python 3);
# the call form prints the same text on both interpreters.
print("MEDIA = %0.5f" % media)
"andrelrs.v@outlook.com"
] | andrelrs.v@outlook.com |
d393dfab85f711e3577849ec7cf8e6c50b16d399 | bfdab27f224d9cac02e319fe55b53172fbf8d1a2 | /motion_editor_core/data/atlas_old/positions/arm/calib_12.py | 0953601e8df0af370b4565871e3c978535f8a6ca | [] | no_license | tu-darmstadt-ros-pkg/motion_editor | c18294b4f035f737ff33d1dcbdfa87d4bb4e6f71 | 178a7564b18420748e1ca4413849a44965823655 | refs/heads/master | 2020-04-06T12:37:30.763325 | 2016-09-15T14:11:48 | 2016-09-15T14:11:48 | 35,028,245 | 2 | 3 | null | 2015-05-05T13:20:27 | 2015-05-04T10:18:22 | Python | UTF-8 | Python | false | false | 67 | py | { 'calib_12': [-1.0046, 1.2497, 1.8048, -0.9239, 3.1416, -1.1781]}
| [
"martin.sven.oehler@gmail.com"
] | martin.sven.oehler@gmail.com |
fcb46ec4ee5b14754866da2d979be4e0e85d1724 | db2f1a5c498338093c7df2082f7517a5e00b756a | /1.문제 해결 패러다임/동적 계획/200924_baekjoon_1003/byounggoun/main.py | b954435c3a86bb8a8a4f8d34fe05515073146f2a | [] | no_license | kimByoungGoun/codetest | 95b0fbf36a9ffbd630ffffc2605a2c8b04de9679 | d491b7db0e4c82445fe4f530f8139144d06982a5 | refs/heads/master | 2022-06-17T15:05:44.162961 | 2022-05-31T07:36:50 | 2022-05-31T07:36:50 | 231,875,897 | 1 | 0 | null | 2020-01-19T06:28:46 | 2020-01-05T06:12:27 | null | UTF-8 | Python | false | false | 637 | py | import sys
zero = 0
one = 0
def fibonacci(n):
    """Return the n-th Fibonacci number iteratively (fib(0) = 0, fib(1) = 1)."""
    if n == 0:
        return 0
    if n == 1:
        return 1
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
if __name__ == "__main__":
    # First stdin line: number of test cases; each following line holds n.
    case_count = int(sys.stdin.readline())
    for _ in range(case_count):
        n = int(sys.stdin.readline())
        if n >= 1:
            # fib(n-1) / fib(n) count how many times fib(0) and fib(1)
            # would be reached by the naive recursion (Baekjoon 1003).
            zero = fibonacci(n - 1)
            one = fibonacci(n)
        else:
            # n == 0: fib(0) is reached once, fib(1) never.
            zero = 1
            one = 0
        print(str(zero) + " " + str(one))
| [
"kgun610@naver.com"
] | kgun610@naver.com |
3479e119a928a44bfb4b30588b082226216cfa06 | 97bf09cf62ddd060ec436bc0abdda8a1a78e57f9 | /scripts/test/run_tests.py | 4392a92d799968c527d36b1bb61228cda313639e | [
"BSD-3-Clause"
] | permissive | Hiwatts/facebook360_dep | 1911848900d6be6eabe72a088bab9cf7eae6ef02 | 3ecbe7f64f88b8a7b50bfa3deef6daad61a30443 | refs/heads/master | 2023-07-24T05:48:44.705469 | 2021-05-05T15:57:32 | 2021-05-05T16:04:43 | 396,123,779 | 0 | 0 | NOASSERTION | 2021-08-18T11:48:09 | 2021-08-14T20:35:58 | null | UTF-8 | Python | false | false | 5,371 | py | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Runs all the unit tests defined in res/test/translator.json.
This is the main entrypoint for running the comprehensive test suite defined across
our applications. All the scripts desired by the specified "type" CLI argument will be run from
the test/ directory. If only a certain subset of the tests are desired, this can be specified in
a separate .json file and passed using the --static CLI flag.
Example:
For running all the CPU tests, use:
$ python run_tests.py \
--type=cpu
--binary_dir=/path/to/facebook360_dep/build/bin \
--dataset_root=s3://example/dataset
For running a statically-defined subset of the GPU tests, use:
$ python run_tests.py \
--type=gpu \
--static=/path/to/facebook360_dep/static.json \
--binary_dir=/path/to/facebook360_dep/build/bin \
--dataset_root=s3://example/dataset
"""
import json
import os
import sys
from pathlib import Path
from .test_align_colors import AlignColorsTest
from .test_calibration import CalibrationTest
from .test_calibration_lib_main import CalibrationLibMainTest
from .test_convert_to_binary import ConvertToBinaryTest
from .test_derp_cli import DerpCLITest
from .test_export_point_cloud import ExportPointCloudTest
from .test_generate_camera_overlaps import GenerateCameraOverlapsTest
from .test_generate_foreground_masks import GenerateForegroundMasksTest
from .test_import_point_cloud import ImportPointCloudTest
from .test_layer_disparities import LayerDisparitiesTest
from .test_master_class import generic_main, parser
from .test_project_equirects_to_cameras import ProjectEquirectsToCamerasTest
from .test_raw_to_rgb import RawToRgbTest
from .test_rig_aligner import RigAlignerTest
from .test_rig_analyzer import RigAnalyzerTest
from .test_rig_compare import RigCompareTest
from .test_rig_simulator import RigSimulatorTest
from .test_simple_mesh_renderer import SimpleMeshRendererTest
from .test_upsample_disparity import UpsampleDisparityTest
# networkx is required only for automatic test ordering; when it is not
# installed, fall back to requiring a statically-defined test list (--static).
try:
    import networkx as nx
    load_static = False
except Exception:
    load_static = True
def get_ordered_tests(tests_setup, test_type):
    """Determines the order of tests to be run, filtered to only return the specified type.

    Builds a dataset-dependency DAG (dataset -> produced truth/placeholder
    node, edges labeled with the producing test app) and orders the apps by
    a topological sort of that graph.

    Args:
        tests_setup (dict): Map of test name to its configuration (see: res/test/translator.json).
        test_type (str): Which apps are to be tested. Must be one of "cpu", "gpu", or "both".

    Returns:
        list[str]: Names of the applications in the order they are to be run.
    """
    test_graph = nx.DiGraph()
    for test_app in tests_setup:
        tests = tests_setup[test_app]
        for test in tests:
            if "truth" in test:
                output_node = test["truth"]
            else:
                # Tests without a ground-truth output still need a sink node.
                output_node = f"placeholder_{test_app}"
            test_graph.add_nodes_from(test["datasets"])
            test_graph.add_nodes_from([output_node])
            for dataset in test["datasets"]:
                if test_type == "both" or test["type"] == test_type:
                    # Bug fix: removed a leftover debug print of
                    # (dataset, output_node) that polluted stdout.
                    test_graph.add_edge(dataset, output_node, name=test_app)

    # Walk the topological order and emit each edge's app the first time
    # it is encountered, preserving dependency order without duplicates.
    ordered_nodes = list(nx.topological_sort(test_graph))
    ordered_tests = []
    for node in ordered_nodes:
        for neighbor in test_graph.neighbors(node):
            test_app = test_graph.get_edge_data(node, neighbor)["name"]
            if test_app not in ordered_tests:
                ordered_tests.append(test_app)
    return ordered_tests
def run_tests(loader=None, res_dir=None):
    """Runs tests of the variant specified by CLI arguments. If "cpu" is specified,
    CPU-only tests will be run and similarly for "gpu." Both are run if "both" is
    passed in. If "static" is specified, the tests are run per their order in the
    given static json file. Otherwise, the test order is automatically determined.

    Args:
        loader: Optional test loader forwarded to generic_main.
        res_dir (str): Directory with test resources; defaults to <repo>/res/test.
    """
    parser.add_argument(
        "--type", help="Type of tests to run (one of: cpu, gpu, both)", required=True
    )
    parser.add_argument(
        "--static",
        help="Static json w/ list of tests (use ONLY if NetworkX unavailable)",
    )
    args = parser.parse_args()
    if not res_dir:
        res_dir = os.path.join(
            Path(os.path.abspath(__file__)).parents[2], "res", "test"
        )
    translator_path = os.path.join(res_dir, "translator.json")
    with open(translator_path) as f:
        tests_setup = json.load(f)
    if load_static or args.static:
        if not args.static:
            # Without NetworkX the order cannot be computed, so a static
            # ordering file is mandatory; fail with a clear message instead
            # of crashing on open(None) with an opaque TypeError.
            parser.error("--static is required when NetworkX is not installed")
        with open(args.static, "r") as f:
            ordered_json = json.load(f)
        ordered_tests = []
        if (args.type == "both" or args.type == "cpu") and "cpu" in ordered_json:
            ordered_tests += ordered_json["cpu"]
        if (args.type == "both" or args.type == "gpu") and "gpu" in ordered_json:
            ordered_tests += ordered_json["gpu"]
    else:
        ordered_tests = get_ordered_tests(tests_setup, args.type)
    test_classes = []
    for test in ordered_tests:
        # Test classes are imported at module level; resolve each by name.
        test_classes.append(getattr(sys.modules[__name__], test))
    generic_main(test_classes, loader, res_dir)
# Entry point when this module is executed directly as a script.
if __name__ == "__main__":
    run_tests()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e96fdcdd6acf50f222c5a01b5789d2decb76fea0 | 97171b6ddc7f3ba93d6abf6df375a9e87c223f78 | /config.py | 68db0675dcdc96215df2cd547fb41e7af33fe6a4 | [
"MIT"
] | permissive | tuanthng/MemTrack | be3520f12b0f121f2673b7678ab7a694faaeca4d | 20cb359d4e90b7beac15eeaeebd8ef8b7b0e9609 | refs/heads/master | 2022-03-26T05:57:49.719585 | 2020-01-02T07:22:17 | 2020-01-02T07:22:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | # ------------------------------------------------------------------
# Tensorflow implementation of
# "Learning Dynamic Memory Networks for Object Tracking", ECCV,2018
# Licensed under The MIT License [see LICENSE for details]
# Written by Tianyu Yang (tianyu-yang.com)
# ------------------------------------------------------------------
import os
import socket
#================= data preprocessing ==========================
# Machine-specific dataset locations (ILSVRC-VID source data, its TFRecords
# conversion and the OTB benchmark); edit these paths before running elsewhere.
home_path = '/home/tianyu'
root_path = home_path+'/Data/ILSVRC'
tfrecords_path = home_path+'/Data/ILSVRC-TF'
otb_data_dir = home_path+'/Data/Benchmark/OTB'
data_path_t = os.path.join(root_path, 'Data/VID/train')
data_path_v = os.path.join(root_path, 'Data/VID/val')
anno_path_t = os.path.join(root_path, 'Annotations/VID/train/')
anno_path_v = os.path.join(root_path, 'Annotations/VID/val/')
vid_info_t = './VID_Info/vid_info_train.txt'
vid_info_v = './VID_Info/vid_info_val.txt'
vidb_t = './VID_Info/vidb_train.pk'
vidb_v = './VID_Info/vidb_val.pk'
max_trackid = 50
min_frames = 50
num_threads_t = 16
num_threads_v = 2
# Crop size: 255-pixel search region plus 8 pixels of padding on each side.
patch_size = 255+2*8
fix_aspect = True
enlarge_patch = True
# Only one of the two branch variables below is defined at import time,
# depending on fix_aspect — consumers must check fix_aspect first.
if fix_aspect:
    context_amount = 0.5
else:
    z_scale = 2
#========================== data input ============================
min_queue_examples = 500
num_readers = 2
num_preprocess_threads = 8
z_exemplar_size = 127
x_instance_size = 255
is_limit_search = False
max_search_range = 200
is_augment = True
max_strech_x = 0.05
max_translate_x = 4
max_strech_z = 0.1
max_translate_z = 8
label_type= 0 # 0: overlap: 1 dist
overlap_thres = 0.7
dist_thre = 2
#========================== Memnet ===============================
hidden_size = 512
memory_size = 8
slot_size = [6, 6, 256]
usage_decay = 0.99
clip_gradients = 20.0
keep_prob = 0.8
weight_decay = 0.0001
use_attention_read = False
use_fc_key = False
key_dim = 256
#========================== train =================================
batch_size = 8
time_step = 16
decay_circles = 10000
lr_decay = 0.8
learning_rate = 0.0001
use_focal_loss = False
summaries_dir = 'output/summary/'
checkpoint_dir = 'output/models/'
summary_save_step = 500
model_save_step = 5000
validate_step = 5000
max_iterations = 100000
summary_display_step = 8
#========================== evaluation ==================================
batch_size_eval = 2
time_step_eval = 48
num_example_epoch_eval = 1073
max_iterations_eval = num_example_epoch_eval//batch_size_eval
#========================== tracking ====================================
num_scale = 3
scale_multipler = 1.05
scale_penalty = 0.97
scale_damp = 0.6
response_up = 16
response_size = 17
window = 'cosine'
win_weights = 0.15
stride = 8
avg_num = 1
is_save = False
save_path = './tracking/snapshots'
| [
"baiyangtianyu_i@didichuxing.com"
] | baiyangtianyu_i@didichuxing.com |
88c67709c5279531a3ad7800795ea2de506f1926 | 74749c296c8bff8a05ae7b2f70814055735403fd | /c117.py | 8896150b0180da607b1e6e1f01991836e231813c | [] | no_license | jzhang14/python_data_structures | 8c44d3ed96b7c07ab4974e2593bde1b6791506da | 08bb2668aff714855244ec40e96af31030de1365 | refs/heads/master | 2021-01-17T14:31:58.375567 | 2015-05-12T06:42:38 | 2015-05-12T06:42:38 | 35,389,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | l = []
for x in range(1,11):
l.append((x-1)*x)
print l | [
"jzhang14@nd.edu"
] | jzhang14@nd.edu |
a7e8bdea7fc8b9862ce19d43506ea55a39cd5fca | 8ba4e3e9f9fdcfdb9a62cd578bd9f0757234068a | /App/migrations/0005_cart_stores_substores.py | 23585af32df5ec0f52b63ee8d72d0d8ff6ac8bc6 | [] | no_license | Oriya-Berlin/I-Frayer | 8ba30f546064ffbe42816884f7c1f27ce2e1bc6d | dc5595fefecc3973b36f3e540d43fc9a94f6125b | refs/heads/master | 2022-11-20T09:51:06.690378 | 2020-07-26T22:41:59 | 2020-07-26T22:41:59 | 282,739,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | # Generated by Django 3.0.4 on 2020-04-03 09:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the SubStores, Stores and Cart
    # tables (Stores holds an optional FK to SubStores; Cart links one-to-one
    # to App.User). Do not hand-edit field definitions.
    dependencies = [
        ('App', '0004_auto_20200402_0922'),
    ]
    operations = [
        migrations.CreateModel(
            name='SubStores',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('chain_id', models.CharField(max_length=60)),
                ('sub_chain_id', models.CharField(max_length=60)),
                ('store_id', models.CharField(max_length=60)),
                ('store_name', models.CharField(max_length=60)),
                ('address', models.CharField(max_length=60)),
                ('city', models.CharField(max_length=60)),
            ],
        ),
        migrations.CreateModel(
            name='Stores',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('sub_stores', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='App.SubStores')),
            ],
        ),
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='App.User')),
            ],
        ),
    ]
| [
"oriya2699@gmail.com"
] | oriya2699@gmail.com |
5e649b53f7f3cdd1d2c9cdcc93b245cade480147 | cf98e26c4da607df3b4ac3a64e3bc3267506691d | /q2_neural.py | fa5f67a6febf17fdad05c14d1fee91bc06592cb6 | [] | no_license | DeepNLP/pset1-matthieu-r | 6f0f322415d41c4f608bc03a2817416cfb633e23 | beb1ac504330f8b6377737a00e7d798ff782ce89 | refs/heads/master | 2021-01-12T09:32:20.294190 | 2016-12-30T17:34:41 | 2016-12-30T17:34:41 | 76,189,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,000 | py | import numpy as np
import random
from q1_softmax import softmax
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_gradcheck import gradcheck_naive
def forward_backward_prop(data, labels, params, dimensions):
    """
    Forward and backward propagation for a two-layer sigmoidal network

    Compute the forward propagation and for the cross entropy cost,
    and backward propagation for the gradients for all parameters.

    Arguments:
    data -- (N, Dx) array, one training example per row
    labels -- (N, Dy) label matrix; assumed one-hot (TODO confirm against caller)
    params -- flat vector holding W1, b1, W2, b2 concatenated in that order
    dimensions -- [Dx, H, Dy] layer sizes

    Returns:
    cost -- average cross-entropy loss over the N examples
    grad -- flat gradient vector packed in the same order as params
    """
    ### Unpack network parameters (do not modify)
    ofs = 0
    Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])
    N = labels.shape[0]
    W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))
    ofs += Dx * H
    b1 = np.reshape(params[ofs:ofs + H], (1, H))
    ofs += H
    W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))
    ofs += H * Dy
    b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))
    ### YOUR CODE HERE: forward propagation
    x = data
    a1 = x
    # NOTE(review): z1 is never used afterwards — candidate for removal.
    z1 = x
    # Hidden layer: affine transform followed by the sigmoid nonlinearity.
    z2 = np.dot(a1,W1) + b1
    a2 = sigmoid(z2)
    # Output layer: affine transform followed by softmax (row-normalized).
    z3 = np.dot(a2, W2) + b2
    a3 = softmax(z3) # normalized into per-row probabilities
    # Cross-entropy: labels * a3 keeps only the predicted probability of the
    # true class in each row (labels being one-hot), averaged over N examples.
    cost = - np.sum(np.log(np.sum(labels * a3, axis=1))) / N
    ### END YOUR CODE
    ### YOUR CODE HERE: backward propagation
    # Gradient of cross-entropy w.r.t. z3 for a softmax output: (a3 - labels)/N.
    error = (a3 - labels) / N
    gradW2 = np.dot(np.transpose(a2), error)
    gradb2 = np.sum(error, axis=0)
    # Backpropagate through W2 and the sigmoid derivative of the hidden layer.
    delta2 = sigmoid_grad(a2) * np.dot(error, np.transpose(W2))
    gradW1 = np.dot( np.transpose(x), delta2)
    gradb1 = np.sum(delta2, axis=0)
    ### END YOUR CODE
    ### Stack gradients (do not modify)
    grad = np.concatenate((gradW1.flatten(), gradb1.flatten(),
        gradW2.flatten(), gradb2.flatten()))
    return cost, grad
def sanity_check():
    """
    Set up fake data and parameters for the neural network, and test using
    gradcheck.
    """
    print "Running sanity check..."
    N = 20 # number of training examples (windows) to classify
    dimensions = [10, 5, 10] # [input dim Dx, hidden dim H, output dim Dy]
    data = np.random.randn(N, dimensions[0])    # each row will be a datum
    labels = np.zeros((N, dimensions[2]))
    for i in xrange(N):
        # One-hot labels: assign probability 1 to one random class per row.
        labels[i,random.randint(0,dimensions[2]-1)] = 1
    # Parameter count: (Dx+1)*H weights+biases for layer 1 plus (H+1)*Dy
    # for layer 2, drawn at random into a single flat vector.
    params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (
        dimensions[1] + 1) * dimensions[2], )
    gradcheck_naive(lambda params: forward_backward_prop(data, labels, params,
        dimensions), params)
def your_sanity_checks():
    """
    Use this space add any additional sanity checks by running:
    python q2_neural.py
    This function will not be called by the autograder, nor will
    your additional tests be graded.
    """
    print "Running your sanity checks..."
    ### YOUR CODE HERE
    # Intentionally unimplemented placeholder until extra checks are added.
    raise NotImplementedError
    ### END YOUR CODE
if __name__ == "__main__":
    # Run the provided gradient check first, then any user-added checks.
    sanity_check()
    your_sanity_checks()
| [
"matthieu@stupeflix.com"
] | matthieu@stupeflix.com |
06838957e3b93d87cffa17bbe9417780a957f49a | 1e60b1b311e4e1ced836f43ef055c65f5e78f7ef | /test/functional/test_framework/bignum.py | 841d5adf660e80f4959836b088423f3d556936c8 | [
"MIT"
] | permissive | liufile/BlackHatWallet | 529bd4b492dbf672aa3d7b1f7dd456e53508fdc4 | 0e6b310fb6cb9bdb3b51a81ab55e606efed891f2 | refs/heads/master | 2023-04-24T13:49:07.117712 | 2021-05-01T12:34:50 | 2021-05-01T12:34:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | #!/usr/bin/env python3
#
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Big number routines.
This file is copied from python-bitcoinlib.
"""
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
    """Number of bytes needed for the big-endian magnitude of *v*, plus one
    optional sign-extension byte when *have_ext* is true."""
    nbytes = (v.bit_length() + 7) // 8
    if have_ext:
        nbytes += 1
    return nbytes
def bn2bin(v):
    """Serialize a non-negative integer to a big-endian bytearray with no
    leading zero padding (empty for v == 0)."""
    out = bytearray()
    # Walk byte positions from most- to least-significant.
    for pos in range((v.bit_length() + 7) // 8, 0, -1):
        out.append((v >> ((pos - 1) * 8)) & 0xff)
    return out
def bin2bn(s):
    """Deserialize a big-endian byte sequence into an unsigned integer."""
    value = 0
    for byte in s:
        value = (value << 8) | byte
    return value
def bn2mpi(v):
    """Serialize *v* in big-endian MPI format: a 4-byte big-endian length
    prefix followed by the magnitude, with the payload's top bit carrying
    the sign (an extra leading byte is inserted when the magnitude already
    fills a whole number of bytes)."""
    neg = v < 0
    mag = -v if neg else v
    bits = mag.bit_length()
    # A full multiple of 8 bits leaves no spare top bit for the sign,
    # so an extension byte must be prepended.
    have_ext = bits > 0 and (bits & 0x07) == 0
    payload = bytearray(mag.to_bytes((bits + 7) // 8, "big"))
    if have_ext:
        payload = bytearray(b"\x00") + payload
    if neg:
        payload[0] |= 0x80
    return struct.pack(b">I", len(payload)) + payload
def mpi2bn(s):
    """Parse big-endian MPI bytes back into a signed integer.

    Returns None when the buffer is malformed: shorter than the 4-byte
    length prefix, or with a prefix inconsistent with the payload size."""
    if len(s) < 4:
        return None
    v_len = struct.unpack(b">I", bytes(s[:4]))[0]
    if len(s) != v_len + 4:
        return None
    if v_len == 0:
        return 0
    payload = bytearray(s[4:])
    # The top bit of the first payload byte is the sign flag.
    neg = bool(payload[0] & 0x80)
    if neg:
        payload[0] &= 0x7f
    value = int.from_bytes(payload, "big")
    return -value if neg else value
# BlackHat-specific little endian format, with implicit size
def mpi2vch(s):
    """Drop the 4-byte MPI size prefix and reverse the payload, converting
    big-endian MPI to the little-endian, implicit-size 'vch' format."""
    payload = s[4:]          # strip size
    return payload[::-1]     # BE -> LE
def bn2vch(v):
    """Serialize *v* to the little-endian, implicit-size 'vch' format."""
    mpi = bn2mpi(v)
    return bytes(mpi2vch(mpi))
def vch2mpi(s):
    """Prepend a 4-byte big-endian size and reverse the payload, converting
    little-endian 'vch' data back to MPI format."""
    return struct.pack(b">I", len(s)) + s[::-1]
def vch2bn(s):
    """Parse little-endian 'vch' bytes into a signed integer."""
    mpi = vch2mpi(s)
    return mpi2bn(mpi)
| [
"contact@blackhatco.in"
] | contact@blackhatco.in |
8647727004a3d439ecb6faca531cbf64967346c9 | f7c72d9994b59c4023aaa811533e59f117384256 | /polls/models.py | f37a8e136945f5bfe52c56ef83fd08083911a01d | [] | no_license | Aparth27newtonix/poll | f3005e9d13e2a7aa34cda425b4f56872f41cdc85 | ba852f7af24c170f3b247d3ce24bb76a7c07e9a4 | refs/heads/master | 2021-05-12T07:01:44.237552 | 2018-01-12T12:13:11 | 2018-01-12T12:13:11 | 117,235,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | from django.db import models
# Create your models here.
from datetime import date
# Create your models here.
class Question(models.Model):
    # A poll question and the timestamp it was published.
    question_text=models.CharField(max_length=200)
    pub_date=models.DateTimeField('date published')
    def __str__(self):
        # Human-readable representation used by the admin and the shell.
        return self.question_text
class Choice(models.Model):
    # One selectable answer belonging to a Question, with its vote tally.
    question=models.ForeignKey(Question,on_delete=models.CASCADE)
    choice_text=models.CharField(max_length=200)
    votes=models.IntegerField(default=0)
    def __str__(self):
        return self.choice_text
| [
"27newtonix@gmail.com"
] | 27newtonix@gmail.com |
7c13814393f17f7fedc5cf35d8f27da995845f2a | 02445d052bbd8e84dbdf1f14897d60adb8815491 | /exercicios-python/curso-python/ex054.py | 76f04261d6086ff7e308999ab37c981520fb8a6d | [
"MIT"
] | permissive | PabloLanza/curso-python3 | 68b1e5b46eb2be7eebe273f6514174ff24433a1e | 34cf44a2467fa239ba4019e085833002ad9b76a1 | refs/heads/main | 2023-08-31T21:09:52.170695 | 2021-10-19T11:47:20 | 2021-10-19T11:47:20 | 403,309,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | n= int(input('Digite quantos termos você quer mostrar: '))
t1 = 0
t2 = 1
print('=====' * 15)
print('{} - {}'.format(t1, t2), end='')
cont = 3
while cont <= n:
t3 = t1 + t2
print(' - {} '.format(t3), end='')
t1 = t2
t2 = t3
cont = cont + 1
print(' - FIM')
| [
"pablolanza75@gmail.com"
] | pablolanza75@gmail.com |
cdb32f20cec92bbc765111f2f965ebc0bc9da52f | 35962c88d15874825d8dd025e3ea62cb90d666f9 | /blog/migrations/0006_remove_comment_email.py | 73d81c35678baf001caa6e945e64c07ff86a8c4c | [] | no_license | rajatyadav8540/dscweb | 4a05bd3900125fa38064a893e28f8e42c405803d | 20dec5fa2d28cce5e31e7dedd003ed58cce17f37 | refs/heads/master | 2023-05-06T20:29:19.302869 | 2020-10-13T15:15:12 | 2020-10-13T15:15:12 | 254,567,699 | 1 | 0 | null | 2020-04-10T07:06:36 | 2020-04-10T07:06:36 | null | UTF-8 | Python | false | false | 314 | py | # Generated by Django 3.0.3 on 2020-08-19 18:22
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: drops the `email` field from the blog Comment model.
    dependencies = [
        ('blog', '0005_comment'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='comment',
            name='email',
        ),
    ]
| [
"kumar@mx"
] | kumar@mx |
e902ead2af28166e202cd5752aec78116afeb097 | 5fe79656bb0319944f5088366edbe9a364724778 | /blog/models.py | 172de166397dde5478e74e8dcccbc96b6fd6c37d | [] | no_license | isheikh8492/FakeBlog | 716c7199c9e80b9b96212efbfd82fb14e59d9c98 | 6017cdfefaa53f49ea9ee63d507d6c6c948348af | refs/heads/master | 2023-08-22T21:21:58.595666 | 2021-10-10T06:35:07 | 2021-10-10T06:35:07 | 380,857,015 | 1 | 0 | null | 2021-07-01T18:27:45 | 2021-06-27T23:28:17 | Python | UTF-8 | Python | false | false | 599 | py | from datetime import time
from django.db import models
from django.conf import settings
from django.utils import timezone
# Create your models here.
class Post(models.Model):
    # A blog post; `published_date` stays empty until publish() is called.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        # Mark the post as published right now and persist the change.
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
"isheikh8492@gmail.com"
] | isheikh8492@gmail.com |
c2e877332cdc877c229478f76b49e11504e3a4e0 | e7fb42dd080504609719a84e7f0c82d8b428d9dc | /setup.py | c9f0522702ecbfb91d2a986fbd62de91c5720af6 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | aleksas/frappe_ftp_backup_integration | be50dfbaf6e040f7a1196bf6028b43bc2d62ed88 | 9f977dd127629a278d9ce46bfa98152f59ea6f06 | refs/heads/version-11 | 2020-06-18T17:53:51.428134 | 2019-11-11T08:37:21 | 2019-11-11T08:37:21 | 196,389,775 | 1 | 0 | NOASSERTION | 2019-11-11T08:37:23 | 2019-07-11T12:26:00 | Python | UTF-8 | Python | false | false | 719 | py | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re, ast
# Runtime dependencies are listed one per line in requirements.txt.
with open('requirements.txt') as f:
    install_requires = f.read().strip().split('\n')
# get version from __version__ variable in intergation_ftp_backup/__init__.py
# (parsed textually with a regex + ast.literal_eval instead of importing the
# package, so setup.py works before dependencies are installed).
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('intergation_ftp_backup/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))
setup(
    name='intergation_ftp_backup',
    version=version,
    description='Add FTP backup functionality',
    author='Frappe',
    author_email='ant.kampo@gmail.com',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires
)
| [
"ant.kampo@gmail.com"
] | ant.kampo@gmail.com |
a2c3f4916e3e50a309e4c9cfd13d000d7ea065ae | 19951b217e22ed6510350ecf4e5dbc1cfe7abdd4 | /formatting_datasets/digitalise_timetable.py | c5f57ddc26dc0ba76e78631c0374ebd2ea7b9dcc | [] | no_license | fedix/MSA_Project | e36645e4e5b7f8e518e95ccb20ad4cafb0939d66 | bdbf4e44268cfeeb2d4e079e48738860ebbf19d9 | refs/heads/master | 2021-05-18T05:50:49.427633 | 2020-09-01T15:07:51 | 2020-09-01T15:07:51 | 251,143,992 | 1 | 1 | null | 2020-06-09T12:41:17 | 2020-03-29T22:03:49 | Jupyter Notebook | UTF-8 | Python | false | false | 4,860 | py | """
Transforms 'timetable' in KudaGo dataset (str):
'timetable': 'вт, чт 10:30–18:00, ср 10:30–21:00'
to the format of FIFA (ITMO) dataset:
dict of lists[int_1(opening time), int_2(closing time)]
'open_hours': {"1":[1030,1800],"4":[1030,1800], ...}
"""
import json
from json import JSONDecodeError
import re
# 1 - functions for digitalisation of KudaGo timetable
# Convert time from 'str' to 'list[int_1, int_2]'
# Example: ('9:00-18:00') to [900, 1800]
def convert_time(text_time):
    """Convert a matched time range such as '9:00-18:00' (element 0 of
    *text_time*) into [opening, closing] integers, e.g. [900, 1800]."""
    # First split on the separator between the two times, then strip the
    # colon out of each "H:MM" piece.
    parts = re.split(r'[^\d:]', text_time[0])
    start = re.split(r'[^\d]', parts[0])
    end = re.split(r'[^\d]', parts[1])
    if start and end:
        return [int(start[0] + start[1]), int(end[0] + end[1])]
    return None
def define_days(string):
    """Extract working-day indices ('0' = Monday ... '6' = Sunday) from a
    Russian timetable fragment.

    Recognizes day ranges ('пн–ср'), day enumerations ('пн, вт'),
    'ежедневно' (daily) and 'будни' (weekdays). The result may contain
    duplicate indices when several notations overlap."""
    week = [r'\bпн\b', r'\bвт\b', r'\bср\b', r'\bчт\b', r'\bпт\b',
            r'\bсб\b', r'\bвс\b']
    dash = r'–'  # en dash used as the day-range delimiter
    days = []
    # Range of days, e.g. "пн–ср":
    rng = re.search(dash, string)
    if rng:
        first = last = 0
        head, tail = string[:rng.end()], string[rng.end():]
        for idx, patt in enumerate(week):
            # Start day must be immediately followed by the dash; end day is
            # searched after the dash. Later matches overwrite earlier ones.
            if re.search(patt + dash, head):
                first = idx
            if re.search(patt, tail):
                last = idx
        days.extend(str(d) for d in range(first, last + 1))
    # Explicitly enumerated days, e.g. "пн, вт":
    days.extend(str(idx) for idx, patt in enumerate(week)
                if re.search(patt, string))
    # "ежедневно" -> every day of the week:
    if re.search(r'[Ее]жедневно', string):
        days.extend(str(d) for d in range(7))
    # "будни" -> Monday..Friday:
    if re.search(r'[Бб]удни', string):
        days.extend(str(d) for d in range(5))
    return days
def digitalise(string):
    """Convert a free-form Russian timetable string into a dict mapping day
    index ('0' = Monday ... '6' = Sunday) to [open, close] integers (e.g.
    1030 for 10:30); [0, 0] encodes open all day / around the clock.
    The first hours found for a day win; later mentions do not overwrite."""
    # Time range, example: "10:00-0:10"
    time_patt = r'\d{1,2}:\d\d[^,]\d{1,2}:\d\d'
    # Other time-notations:
    all_day = r'[Вв]есь день'
    always = r'[Кк]руглосуточно'
    last_visitor = r'[Дд]о последн'
    timetable = {}
    shift = 0
    # Work until last visitor case
    if re.search(last_visitor, string):
        # To allow time_range pattern below to work with this case
        # 'last_visitor' is changed to until '21:00' (assumption).
        string = re.sub(last_visitor, '21:00', string, count=0)
    # Standard time pattern case: scan left to right; the text before each
    # time range names the days it applies to.
    time = re.search(time_patt, string[shift:])
    while time:
        hours = convert_time(time)
        for day in define_days(string[shift:
                                      shift + time.start()]):
            if day not in timetable:
                timetable[day] = hours
        shift += time.end()
        time = re.search(time_patt, string[shift:])
    # All day open case:
    time = re.search(all_day, string)
    if time:
        hours = [0,0]
        for day in define_days(string[:time.start()]):
            if day not in timetable:
                timetable[day] = hours
    # Always open case (overrides everything for all seven days):
    time = re.search(always, string)
    if time:
        timetable = {str(day):[0,0] for day in range(7)}
    return timetable
# 2 - output results:
if __name__ == "__main__":
    from collections import Counter
    # In / out files and directories:
    direct = "D:/Work/Data_files/working_dir/"
    in_file_1 = "places_2_expand_spb"
    in_file_2 = "places_2_expand_msk"
    with open(direct + in_file_1 + ".json",
              'r', encoding = "utf-8") as inf_1, \
         open(direct + in_file_2 + ".json",
              'r', encoding = "utf-8") as inf_2:
        # Count how often each distinct timetable string occurs across both
        # city dumps (St. Petersburg and Moscow).
        count = Counter()
        try:
            for inf in (inf_1, inf_2):
                places = json.load(inf)
                for place in places:
                    count[place["timetable"]] += 1
        except JSONDecodeError:
            print("Input file cannot be read")
        # `freq` instead of reusing the name `count` for the loop variable,
        # which previously shadowed the Counter itself; also dropped the
        # unused `timetables` list.
        for word, freq in count.most_common():
            print(word)
            print(digitalise(word))
| [
"bestulive@gmail.com"
] | bestulive@gmail.com |
f1c7d551c9cad864ccf277d23e1d472625b48095 | cbc86dbda2d9c3f3286bee1139884e18bb51f210 | /patienttracker/patients/migrations/0004_auto_20190316_0904.py | cb19cfc417a9ad92cf5601d88080da535bb282f8 | [
"MIT"
] | permissive | Raywire/patientTracker | 94bc7e5635098d51b8eab1b1e22911441781ec2c | ced60d57eaa373a5bd87b44b9a95c0d38a0db838 | refs/heads/develop | 2022-12-10T15:08:16.171806 | 2019-03-21T18:25:07 | 2019-03-21T18:25:07 | 174,653,720 | 2 | 2 | MIT | 2022-12-08T04:54:23 | 2019-03-09T05:43:56 | Python | UTF-8 | Python | false | false | 392 | py | # Generated by Django 2.1.7 on 2019-03-16 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes Appointment.date unique so two appointments
    # cannot share the same timestamp.
    dependencies = [
        ('patients', '0003_auto_20190316_0721'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='date',
            field=models.DateTimeField(unique=True),
        ),
    ]
| [
"ryanwire@Ryans-MacBook-Pro.local"
] | ryanwire@Ryans-MacBook-Pro.local |
97abd96db2f49c4bb203461ed7b742f7c56aeb3f | 54a3024bd27756d2e658cc06ca3b2cd851884b22 | /parking/chargerule/migrations/0022_card_car_number.py | 76154ea403097de30161a6a43ce61f0bcac287b9 | [] | no_license | codefish-yu/parking | 0f9568be8650f859d629a1107a035123f1330ed7 | 7752b9fb2ff58958d21b39f8ac2aa4402646574c | refs/heads/master | 2021-06-28T02:37:10.787609 | 2020-01-08T08:42:05 | 2020-01-08T08:42:05 | 232,518,396 | 0 | 0 | null | 2021-06-10T22:28:22 | 2020-01-08T08:44:00 | HTML | UTF-8 | Python | false | false | 426 | py | # Generated by Django 2.2.3 on 2019-12-04 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the optional `car_number` (license plate) column
    # to the Card model.
    dependencies = [
        ('chargerule', '0021_auto_20191204_1133'),
    ]
    operations = [
        migrations.AddField(
            model_name='card',
            name='car_number',
            field=models.CharField(max_length=30, null=True, verbose_name='车牌号'),
        ),
    ]
| [
"cherryxie@xiaodeMacBook-Pro.local"
] | cherryxie@xiaodeMacBook-Pro.local |
5255c172d866d6bd3cb6425bb59854eea12e7b6a | aeb69456c4e6f2238c947ae426d346aad033d598 | /剑指/面试题21. 调整数组顺序使奇数位于偶数前面.py | bcda68b2b8d8d1f5c3b58e16b701995610a9f8d4 | [] | no_license | ElonXie/LeetCode-Practice | f2c345cadce8d60515343ee94f52de5f34477d81 | 7a54fc8f85e3e7f937bb504a8f4c6de6dd7da3e2 | refs/heads/master | 2021-05-16T21:09:11.231951 | 2020-06-21T03:39:12 | 2020-06-21T03:39:12 | 250,470,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | '''
输入一个整数数组,实现一个函数来调整该数组中数字的顺序,使得所有奇数位于数组的前半部分,所有偶数位于数组的后半部分。
示例:
输入:nums = [1,2,3,4]
输出:[1,3,2,4]
注:[3,1,2,4] 也是正确的答案之一。
提示:
1 <= nums.length <= 50000
1 <= nums[i] <= 10000
'''
from typing import List
class Solution:
    def exchange(self, nums: List[int]) -> List[int]:
        """Reorder *nums* in place so that every odd number precedes every
        even number, using converging head/tail pointers; returns the same
        list object."""
        left, right = 0, len(nums)
        while left < right:
            if nums[left] % 2:
                # Odd: already belongs to the front partition.
                left += 1
            else:
                # Even: swap it toward the back and shrink the tail.
                right -= 1
                nums[left], nums[right] = nums[right], nums[left]
        return nums
if __name__ == '__main__':
    # Quick manual check: odd numbers should end up before even ones.
    s = Solution()
    nums = [3,4,1,2]
    print(s.exchange(nums))
"sdaxdh@163.com"
] | sdaxdh@163.com |
9bad6a345a6bdffa026f01429de7892977c34495 | fd64e364368bcb2cdcf77ab1e0fc234a6b698f69 | /Python/Easy/CHEFSETC.py | 48d298e8c44654557290eaf4fd4a08a2753e58f3 | [] | no_license | Parizval/CodeChefCodes | 57712069f3d56cc42282f9e35c6ddd9398e4a5bf | cfd2876816be806882650b6ea51431b1f8d6bec5 | refs/heads/master | 2021-07-16T13:10:15.668713 | 2020-07-06T21:40:09 | 2020-07-06T21:40:09 | 188,693,667 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import itertools
def findsubsets(s, n):
    """Return all size-n subsets of *s* as a list of sets."""
    return list(map(set, itertools.combinations(s, n)))
# CodeChef CHEFSETC: for each test case, read a line of integers and report
# whether any non-empty subset of size 1..4 sums to zero.
for a in range(int(input())):
    elements = set(map(int,input().split()))
    check = False
    for i in range(1,5,1):
        array = findsubsets(elements,i)
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # subset-size index above; harmless as written, but worth renaming.
        for i in array:
            if sum(i) == 0 :
                # print(i)
                check = True
                break
        if check:
            break
    if check:
        print("Yes")
    else:
        print("No")
"anmolgoyal@gmail.com"
] | anmolgoyal@gmail.com |
d12a97b3a0c6ab6156cb469bab5fef501e6be3ac | f9fb37ad45159a33b5adcc54ce29e8f9b0200402 | /bin/mnservice.py | 0663b74f01652f5078ec66f7c8b6850c68f62ad0 | [] | no_license | bojos/mnservice | e16f3d89753d73b2e7db425cb99049e9845235e6 | 7c827212831a2a0892363b5358f4d777d530b404 | refs/heads/master | 2021-01-21T01:02:31.472308 | 2016-03-02T23:58:07 | 2016-03-02T23:58:07 | 52,519,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,561 | py | import os
import sys
import time
from subprocess import check_output
import daemon
def get_dashd_pid():
    """Return the PID of a running dashd process, or 0 when dashd is not
    running (or `pidof` is unavailable / fails)."""
    try:
        pids = check_output(["pidof", "dashd"]).split()
        return int(pids[0])
    except Exception:
        return 0
def is_executed_pid(name, pid):
    """Return True when a process called *name* is currently running with
    the given *pid*, False otherwise (including when `pidof` fails or is
    not installed).

    Parameters
    ----------
    name : str
        Process name passed to `pidof`.
    pid : int
        PID to look for among the processes with that name.
    """
    try:
        pids = check_output(["pidof", name]).split()
        # pidof prints every PID of the named process; match the tracked one.
        # (Previously iterated a needless copy `ll[:]` and shadowed the
        # builtin `id`.)
        return any(int(p) == pid for p in pids)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behavior
        # without masking interpreter shutdown.
        return False
def write_log(msg):
    # Append a timestamped line to the module-level `logfile` path (assigned
    # in the __main__ block); logging is best-effort, so I/O errors are
    # deliberately ignored.
    try:
        with open(logfile, 'a+') as f:
            f.write(time.strftime("%Y-%m-%d %H:%M:%S ",time.localtime()) + msg + '\n')
    except IOError:
        pass
class DaemonDashd(daemon.daemon_base) :
    """Daemon that keeps a `dashd` process alive: every 10 minutes it checks
    whether the tracked PID is still running and, if not, respawns dashd
    from the configured work path."""
    def __init__(self, pdfile, workpath):
        daemon.daemon_base.__init__(self, pdfile, workpath)
        self.dashd_pid = get_dashd_pid()  # 0 when dashd is not running yet
    def run(self):
        write_log("................................")
        write_log(">> mnservice run - pid = {0}".format(self.pid))
        write_log("................................")
        if self.dashd_pid :
            write_log("-> dashd find - pid = {0}".format(self.dashd_pid))
        while True:
            if not is_executed_pid('dashd', self.dashd_pid):
                write_log("** dashd not find - pid = {0}".format(self.dashd_pid))
                pp = self.workpath + '/' + 'dashd'
                try:
                    # Spawn dashd, give it a moment to daemonize, then
                    # re-read its PID; failures are best-effort ignored.
                    os.spawnl(os.P_WAIT, pp, '')
                    time.sleep(2)
                    self.dashd_pid = get_dashd_pid()
                    write_log("-> dashd restart - pid = {0}".format(self.dashd_pid))
                except:
                    pass
            time.sleep(10*60)  # re-check every 10 minutes
    def stop(self):
        if self.dashd_pid:
            write_log("<< mnservice stop - pid = {0}".format(self.pid))
if __name__ == '__main__':
    """Daemon test logic.
    This logic must be called as seperate executable (i.e. python3
    daemon.py start/stop/restart). See test_daemon.py for
    implementation.
    """
    usage = 'Missing parameter, usage of test logic:\n' + \
        '  % python3 mnservice.py start|stop dash_daemon_directory\n'
    if len(sys.argv) < 3:
        sys.stderr.write(usage)
        sys.exit(2)
    # Fixed locations for the daemon's pidfile and its append-only log
    # (write_log() reads the module-level `logfile`).
    pidfile = '/tmp/mnservice.pid'
    logfile = '/tmp/mnservice.log'
    # dashd_pidfile = sys.argv[3]
    dc = daemon.daemon_ctl(DaemonDashd, pidfile, sys.argv[2])
    if sys.argv[1] == 'start':
        dc.start()
    elif sys.argv[1] == 'stop':
        dc.stop()
| [
"m.prik@email.cz"
] | m.prik@email.cz |
d9bee4ad7b23f641753a2fe3e8fa91e75064ef95 | 4f41601218f7c270a9b1bb0b02a45522dfb586df | /miform/structure.py | 74b867359fe58dc888aa44f38c23b1ce2fe4bec6 | [] | no_license | cr1901/miform | 3e7372cf5f5d2ece04b5df17f8582ed2795ada1f | 23abfbe16a5064cbd267719ebbb66e08f594b689 | refs/heads/master | 2021-07-13T08:58:18.657665 | 2017-10-15T18:01:03 | 2017-10-15T18:01:03 | 106,760,269 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,706 | py | from migen.fhdl.structure import _Statement, wrap, _check_statement
from migen.fhdl.specials import Special
from migen.fhdl.verilog import _AT_BLOCKING, _printexpr as verilog_printexpr
from migen.fhdl.module import _flat_list, _cd_append
import miform.verilog
class _FormalStatement:
    # Marker base class: tags Assert/Assume statements so Formal.add() can
    # distinguish them from ordinary Migen statements.
    pass
class _FormalTask:
    # Base class for SystemVerilog formal tasks; subclasses must render
    # themselves via to_system_verilog().
    def __init__(self):
        pass
    def to_system_verilog(self):
        raise NotImplementedError
class Formal(Special):
    """
    The Migen Special for formal verification. This is mainly required to
    place all formal statements in their own block.

    Statements are bucketed by how they are checked: `init` (first cycle
    only), `conc` (concurrent, checked at all times), `imm` (immediate,
    emitted inside an always @(*) block), `glob` (checked on the
    SystemVerilog $global_clock) and `sync` (per named clock domain).
    """
    def __init__(self):
        Special.__init__(self)
        self.init = list()
        self.imm = list()
        self.conc = list()
        self.glob = list()
        self.sync = dict()

    def add(self, statement):
        """Add an assertion or assumption for formal verification purposes.

        (This and the other method docstrings were previously bare
        class-level string literals placed before the `def`s, so they were
        discarded at runtime; they are now proper docstrings.)

        Parameters
        ----------
        statement : _Statement(), in
            A Migen Statement that contains a _FormalStatement such as Assume or Assert;
            such statements are tested only when the conditions for the Assume/Assert
            are met.
            The statement itself can also be a _FormalStatement; these statements
            are continously assumed to be true or tested to be true, at all clock ticks.
        """
        if not _check_statement(statement):
            raise TypeError("Input to Formal specials must be Migen statements")
        if isinstance(statement, _FormalStatement):
            if statement.initial:
                # Initial asserts/assumes look similar to concurrent, though
                # the initial "block" is considered an event (I think?).
                self.init.append(statement)
            else:
                # Top-level formal asserts/assumes not bound by other events- i.e.
                # checked for all time- are by definition concurrent.
                self.conc.append(statement)
        else:
            # TODO: ensure at least one statement in list is a _FormalStatement.
            self.imm += _flat_list(statement)

    def add_global(self, statement):
        """Add an assertion using the SystemVerilog $globalclock task. This is the implied clock
        during formal verification; in `yosys`, if the `clk2dfflogic` pass
        is executed, all other Migen clock domains, including the default "sys"
        clock domain, become synchronous inputs relative to the $global_clock.

        Parameters
        ----------
        statement : _Statement(), in
            A Migen Statement that is asserted/assumed each tick of the $global_clock.
        """
        self.glob += _flat_list(statement)

    def add_sync(self, cd, statement):
        """Add an assertion that is checked on the positive-edge of the input
        clock domain.

        Parameters
        ----------
        cd : str, in
            Name of the clock-domain for which the assertion/assumption is checked.
        statement : _Statement(), in
            A Migen Statement that is asserted/assumed each positive-edge of the named `cd`.
        """
        _cd_append(self.sync, cd, statement)

    @staticmethod
    def emit_verilog(formal, ns, add_data_file):
        """Render all collected formal statements as a single
        `ifdef FORMAL ... `endif SystemVerilog block and return it."""
        def pe(e):
            return verilog_printexpr(ns, e)[0]
        r = "`ifdef FORMAL\n"
        for i in formal.init:
            if isinstance(i, Assert):
                r += "initial assert (" + pe(i.cond) + ");\n"
            elif isinstance(i, Assume):
                r += "initial assume (" + pe(i.cond) + ");\n"
        r += "\n"
        for c in formal.conc:
            if isinstance(c, Assert):
                r += "assert property (" + pe(c.cond) + ");\n"
            elif isinstance(c, Assume):
                r += "assume property (" + pe(c.cond) + ");\n"
            else:
                # BUG FIX: this exception was previously constructed but
                # never raised, silently dropping unsupported statements.
                raise TypeError("Only Assume and Assert supported for concurrent assertions.")
        r += "\n"
        for i in formal.imm:
            r += "always @(*) begin\n"
            r += miform.verilog._formalprintnode(ns, _AT_BLOCKING, 1, i)
            r += "end\n"
        r += "\n"
        r += miform.verilog._formalprintsync(formal, ns)
        r += "\n"
        for g in formal.glob:
            r += "always @($global_clock) begin\n"
            r += miform.verilog._formalprintnode(ns, _AT_BLOCKING, 1, g)
            r += "end\n"
        r += "`endif\n"
        return r
class Assert(_Statement, _FormalStatement):
    """A formal assertion over a single-bit condition.

    Parameters
    ----------
    cond : _Value(1), in
        The condition that must hold.
    initial : bool, in
        When True, the assertion is only tested on the first cycle;
        ignored if the assert is not continuous. Defaults to False.

    Examples
    --------
    >>> a = Signal()
    >>> b = Signal()
    >>> c = Signal()
    >>> If(c,
    ...     Assert(a == b)
    ... )
    """
    def __init__(self, cond, initial=False):
        self.initial = initial
        self.cond = wrap(cond)
class Assume(_Statement, _FormalStatement):
    """A formal assumption constraining a single-bit condition to hold.

    Parameters
    ----------
    cond : _Value(1), in
        Condition the solver may take as given.
    initial : bool, in
        When True, `cond` is only assumed on the first cycle; ignored if
        the assume is not continuous. Defaults to False.

    Examples
    --------
    >>> a = Signal()
    >>> Assume(a == 0)
    """
    def __init__(self, cond, initial=False):
        self.initial = initial
        self.cond = wrap(cond)
# class GlobalClock(_Statement, _FormalStatement, _FormalTask):
# """The SystemVerilog $globalclock task. This is the implied clock
# during formal verification; in `yosys`, if the `clk2dfflogic` pass
# is executed, all clock domains become synchronous relative to the
# global clock."""
# def __init__(self):
# pass
#
# def to_system_verilog(self):
# return "$"
| [
"thor0505@comcast.net"
] | thor0505@comcast.net |
a8b53a582053bf011063b816014242b9cc4b3276 | a34dc024004dded61c9a5612e047fc4537534ddb | /scripts/utils.py | ca1dc8ab17fc6a2132b90e53bb34b9903dcce3b4 | [] | no_license | tehZevo/aegis-scripts | 29ca28998f3fb2c6c8f7960ef56df8bff5e9970d | 280435890fc7661e73aff65ef28bd9b2a5b24055 | refs/heads/master | 2020-07-21T12:23:55.913579 | 2020-03-06T01:26:40 | 2020-03-06T01:26:40 | 206,863,972 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,183 | py | from tensorflow.keras import optimizers as O
import retro
import gym
from aegis_core.callbacks import TensorboardFieldCallback, TensorboardCallback
from aegis_core.callbacks import TensorboardActions, TensorboardPGETReward
from aegis_core.callbacks import TensorboardPGETWeights, TensorboardPGETTraces
class DummyEnv(gym.Env):
    """Minimal gym environment shell that only carries its two spaces."""

    def __init__(self, obs_space, action_space):
        """Record the observation/action spaces on the instance."""
        self.action_space = action_space
        self.observation_space = obs_space
def list_retro_games(filter=None):
    """Print the installed gym-retro game names, optionally filtered.

    A game is printed when `filter` is None or occurs (case-insensitively)
    as a substring of the game name.
    """
    for title in retro.data.list_games():
        if filter is not None and filter not in title.lower():
            continue
        print(title)
# Maps a lowercase optimizer name (as given on the CLI/config) to the
# Keras optimizer class it selects.
optis = {
    "sgd": O.SGD,
    "rmsprop": O.RMSprop,
    "adagrad": O.Adagrad,
    "adadelta": O.Adadelta,
    "adam": O.Adam,
    "adamax": O.Adamax,
    "nadam": O.Nadam
}
def create_optimizer(args):
    """Instantiate the Keras optimizer selected by `args.optimizer`.

    Falls back to the plain string "sgd" (which Keras resolves by name)
    when no optimizer was configured.
    """
    if args.optimizer is None:
        return "sgd"  # sigh
    return optis[args.optimizer](args.learning_rate, clipnorm=args.clipnorm)
def env_callbacks(summary_writer, env_name, interval="done"):
    """Build the per-environment Tensorboard callbacks for `env_name`."""
    # Sum of rewards, logged once per episode by default.
    reward_cb = TensorboardFieldCallback(
        summary_writer, "reward", name_format="{}/" + env_name,
        reduce="sum", interval=interval, step_for_step=False)
    # Distribution of the actions taken in the environment.
    action_cb = TensorboardActions(
        summary_writer, env_name=env_name, interval=interval,
        step_for_step=False)
    return [reward_cb, action_cb]
def pget_callbacks(summary_writer, name, interval=100, outlier_z=2):
    """Build the Tensorboard callbacks that monitor a PGET learner."""
    return [
        # Weight histograms for the learner.
        TensorboardPGETWeights(summary_writer, name, interval=interval,
            combine=False, step_for_step=True),
        # Eligibility traces, with outliers beyond `outlier_z` filtered.
        TensorboardPGETTraces(summary_writer, name, interval=interval,
            combine=False, step_for_step=True, outlier_z=outlier_z),
        # Reward signal seen by the learner.
        TensorboardPGETReward(summary_writer, name, interval=interval,
            step_for_step=True),
    ]
def curiosity_callbacks(summary_writer, name, interval=100):
    """Build the Tensorboard callbacks that monitor a curiosity module."""
    common = dict(reduce="mean", interval=interval, step_for_step=True)
    fmt = name + " curiosity/{}"
    return [
        TensorboardFieldCallback(summary_writer, "loss", name_format=fmt, **common),
        TensorboardFieldCallback(summary_writer, "surprise", name_format=fmt, **common),
    ]
| [
"tehzevo@users.noreply.github.com"
] | tehzevo@users.noreply.github.com |
2932bd24c28cc788ce61d53644013d602e8afd7b | d67df8d1c5a4bced3989301566ff2ab5bcd0010d | /practices/practice2/higher_order_func.py | d85bc41a6fb50a93de5f32d579849b77bd4e928c | [] | no_license | Jindae/seoultech-PL-2020 | dbdf912c3f1860a643f673cb95a03ab8cc62a87e | f16a41045b33f128c8137706d2aba8a4101575a5 | refs/heads/master | 2023-01-04T21:02:20.558391 | 2020-11-04T01:30:34 | 2020-11-04T01:30:34 | 288,345,682 | 19 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | x = 1
def f(y):
    # `x` resolves lexically to the module-level binding (the global
    # `x = 1` above), never to a caller's local `x`.
    return x+y
def g(h):
    """Call `h` with 3, then add g's local x (= 2) to the result."""
    x = 2
    intermediate = h(3)
    return intermediate + x
def s():
    # Reads the module-level `x` at call time (lexical scoping), so the
    # result depends on the global value, not on any caller's local `x`.
    return x+1
def calling_s():
    # Returns the function object itself; the caller invokes it later.
    return s
def main():
    # This local `x` does NOT affect f/g/s: Python scoping is lexical,
    # so those functions keep reading the module-level `x` (= 1).
    x = 4
    # g(f) == f(3) + 2 == (1 + 3) + 2 == 6.
    z = g(f)
    print("z = g(f):", z)
    # k is the function s; k() == global x + 1 == 2.
    k = calling_s()
    print("k = calling_s():", k())
main() | [
"jindae.kim@seoultech.ac.kr"
] | jindae.kim@seoultech.ac.kr |
5d0a8f3a60c16444192fed17496184eb9d3ed0f5 | 9d364887316a654cda4eff7a774239d1d26b50e6 | /code_archive/video_server_1.py | 9c370791ec1ffbebeb2bac499650037b044bf1f1 | [] | no_license | cobit-git/conshell | a34995b9e93ee613ef99750567065f0f017f04b7 | 08d00b39fa764727139260dbce05846ad4c38a24 | refs/heads/master | 2023-04-10T14:05:05.155637 | 2021-04-30T12:48:49 | 2021-04-30T12:48:49 | 349,293,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | import socket, cv2, pickle, struct
# Socket Create
# Stream webcam frames to one TCP client at a time: each frame is pickled
# and length-prefixed with a native-order 8-byte "Q" header so the
# receiver can re-frame the byte stream.
server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#host_name = socket.gethostname()
#host_ip = socket.gethostbyname(host_name)
#print('HOST IP:',host_ip)
port = 9999
socket_address = ('192.168.254.51',port)
# Socket Bind
server_socket.bind(socket_address)
# Socket Listen
server_socket.listen(5)
print("LISTENING AT:",socket_address)
# Socket Accept
while True:
    client_socket,addr = server_socket.accept()
    print('GOT CONNECTION FROM:',addr)
    if client_socket:
        vid = cv2.VideoCapture(0)
        while(vid.isOpened()):
            # First return value is the read-success flag (misnamed `img`
            # here); it is never checked -- TODO confirm the camera read.
            img,frame = vid.read()
            a = pickle.dumps(frame)
            # Prefix the pickled frame with its byte length.
            message = struct.pack("Q",len(a))+a
            client_socket.sendall(message)
            cv2.imshow('TRANSMITTING VIDEO',frame)
            key = cv2.waitKey(1) & 0xFF
            # NOTE(review): after client_socket.close() below, the loop
            # keeps running and the next sendall() will raise on the
            # closed socket -- a `break` appears to be missing here.
            if key ==ord('q'):
client_socket.close() | [
"connercobit@gmail.com"
] | connercobit@gmail.com |
42b03dcc9562188ff8a81630422edb51674a221c | 2c54320b0bebb4351d6056d117796c11b6fb1441 | /test_endpoints.py | 5511b26bb32d7594a6d5ed3116bbdfc142a99fdb | [] | no_license | Amertz08/flask-ci | f728ca59c67d24b5d437de8acd469d2460151f01 | a67c0417345b6b67f86d7f18d509f1f192cf862a | refs/heads/master | 2020-03-23T08:53:14.389682 | 2018-07-18T02:18:29 | 2018-07-18T02:18:29 | 141,353,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | import unittest
from flask import url_for
from flask_testing import TestCase
from app import create_app
class TestApp(TestCase):
    """Flask-Testing suite for the application's public endpoints."""

    def create_app(self):
        """Flask-Testing hook: build the application under test."""
        return create_app()

    def test_index(self):
        """The index page responds 200 with a greeting plus the version hash."""
        try:
            with open('version.txt', 'r') as f:
                _hash = f.read()
        except FileNotFoundError:
            _hash = 'version.txt not found'
        resp = self.client.get(url_for('main.index'))
        self.assert200(resp)
        self.assertEqual(resp.data, f'Hello {_hash}'.encode())
if __name__ == '__main__':
    # Allow running this module directly: python test_endpoints.py
    unittest.main()
| [
"adammertz@gmail.com"
] | adammertz@gmail.com |
c792d006de065de7e771b46020fb5e748967d975 | 3f97f4cea120dffbdad54ce03ee1a5e80abf9464 | /Course4/quiz1.py | a3461bf30136bee1a7d9b121d59dd984a84f0c52 | [] | no_license | aishwaryasarath/Coursera | 9b5297289e34d180d914c870806df968be3b26bf | 80604ea0117a35fa985269c80583fcdb636b227e | refs/heads/master | 2022-09-11T05:21:25.392320 | 2020-04-01T16:01:51 | 2020-04-01T16:01:51 | 252,346,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | import re
def compare_strings(string1, string2):
    """Return True when the two strings match ignoring case, surrounding
    whitespace and common punctuation (. ? ! , ; : ' -)."""
    # Normalize case and trim surrounding whitespace.
    string1 = string1.lower().strip()
    string2 = string2.lower().strip()
    # Strip common punctuation before comparing.
    # (BUG FIX: removed leftover debug print() calls that polluted stdout.)
    punctuation = r"[.?!,;:'-]"
    string1 = re.sub(punctuation, r"", string1)
    string2 = re.sub(punctuation, r"", string2)
    return string1 == string2
print(compare_strings("Have a Great Day!", "Have a great day?")) # True
print(compare_strings("It's raining again.", "its raining, again")) # True
print(compare_strings("Learn to count: 1, 2, 3.", "Learn to count: one, two, three."))
print(compare_strings("They found some body.", "They found somebody.")) # False
| [
"aishwaryasarath@gmail.com"
] | aishwaryasarath@gmail.com |
68be4a145f5591cd39cece0984dc6931714f5716 | 44c65c93549aa06b01ef9114817cd45e645da6f7 | /tests/test_observable/test_concat.py | 7b28c6f1c9fd20302717129da1983eaa60fd0f1c | [
"Apache-2.0"
] | permissive | Affirm/RxPY | 692b6a0089f4e79b92c0c683f11427c558eefd06 | 7c23939ea497761c85b382257f9f0954998ab91e | refs/heads/master | 2023-07-06T11:34:28.229747 | 2017-10-22T16:25:56 | 2017-10-22T16:25:56 | 108,198,347 | 0 | 2 | Apache-2.0 | 2023-03-20T20:28:56 | 2017-10-25T00:18:37 | Python | UTF-8 | Python | false | false | 7,099 | py | import unittest
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest
# Short module-level aliases for the ReactiveTest message/subscription
# factories used throughout the tests below.
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
    """Marker exception raised from inside test lambdas."""
# Lambdas cannot contain a `raise` statement, so tests call this helper.
def _raise(ex):
    """Immediately raise an RxException wrapping `ex`."""
    raise RxException(ex)
class TestConcat(unittest.TestCase):
    """Virtual-time tests for Observable.concat.

    Each test records messages onto one or two hot observables, concatenates
    them under the TestScheduler, and asserts on the messages received.
    Messages stamped at virtual time 150 fire before the default
    subscription time, so they never appear in the expected results.
    """
    def test_concat_empty_empty(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        msgs2 = [on_next(150, 1), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_completed(250))
    def test_concat_empty_never(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = Observable.never()
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal()
    def test_concat_never_empty(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = Observable.never()
        def create():
            # never() first: e1 is never reached, so nothing is observed.
            return e2.concat(e1)
        results = scheduler.start(create)
        results.messages.assert_equal()
    def test_concat_never_never(self):
        scheduler = TestScheduler()
        e1 = Observable.never()
        e2 = Observable.never()
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal()
    def test_concat_empty_throw(self):
        ex = 'ex'
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        msgs2 = [on_next(150, 1), on_error(250, ex)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_error(250, ex))
    def test_concat_throw_empty(self):
        ex = 'ex'
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_error(230, ex)]
        msgs2 = [on_next(150, 1), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_error(230, ex))
    def test_concat_throw_throw(self):
        ex = 'ex'
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_error(230, ex)]
        msgs2 = [on_next(150, 1), on_error(250, 'ex2')]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        # Only the first error is observed; e2 is never subscribed.
        results.messages.assert_equal(on_error(230, ex))
    def test_concat_return_empty(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
        msgs2 = [on_next(150, 1), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_next(210, 2), on_completed(250))
    def test_concat_empty_return(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        msgs2 = [on_next(150, 1), on_next(240, 2), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_next(240, 2), on_completed(250))
    def test_concat_return_never(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = Observable.never()
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_next(210, 2))
    def test_concat_never_return(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = Observable.never()
        def create():
            return e2.concat(e1)
        results = scheduler.start(create)
        results.messages.assert_equal()
    def test_concat_return_return(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(220, 2), on_completed(230)]
        msgs2 = [on_next(150, 1), on_next(240, 3), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_next(220, 2), on_next(240, 3), on_completed(250))
    def test_concat_throw_return(self):
        ex = 'ex'
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_error(230, ex)]
        msgs2 = [on_next(150, 1), on_next(240, 2), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_error(230, ex))
    def test_concat_return_throw(self):
        ex = 'ex'
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(220, 2), on_completed(230)]
        msgs2 = [on_next(150, 1), on_error(250, ex)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_next(220, 2), on_error(250, ex))
    def test_concat_some_data_some_data(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(210, 2), on_next(220, 3), on_completed(225)]
        msgs2 = [on_next(150, 1), on_next(230, 4), on_next(240, 5), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)
        def create():
            return e1.concat(e2)
        results = scheduler.start(create)
        results.messages.assert_equal(on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))
| [
"dag@brattli.net"
] | dag@brattli.net |
b7cd1f9a1aea1fcef7d9de69a39850cb6d63dafc | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /log-20190927/132.230.102.123-10.21.12.20/1569574219.py | 4a95238224a8297f24bd861789148dd266556edb | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def divisors(n) -> list:
    """Return all positive divisors of n, in ascending order.

    For n < 1 there are no positive divisors, so an empty list is
    returned.

    BUG FIXES vs. the original:
    * it returned the divisor list wrapped in another list
      (``[teiler]``), so its own test ``divisors(1) == [1]`` failed;
    * it returned a German error *string* for negative input, violating
      the declared ``-> list`` return type.
    """
    if n < 1:
        return []
    return [i for i in range(1, n + 1) if n % i == 0]
######################################################################
## hidden code
def mk_coverage():
    # Build a coverage-instrumenting decorator for the student's
    # divisors(): `target` enumerates the six input/result categories
    # tracked below.
    covered = set()
    target = set(range(6))
    count = 0
    def coverage(func):
        nonlocal covered, target, count
        def wrapper(n):
            nonlocal covered, count
            # Input categories: non-positive n, and n == 1.
            if n <= 0:
                covered.add(0)
            if n == 1:
                covered.add(1)
            r = func (n)
            lenr = len (r)
            # Result-size categories: length 1, length 2, even > 2, odd > 2.
            if lenr == 1:
                covered.add(2)
            if lenr == 2:
                covered.add(3)
            if (lenr > 2) and ( lenr % 2 == 0):
                covered.add(4)
            if lenr > 2 and lenr % 2 == 1:
                covered.add(5)
            count += 1
            return r
        # The decorator doubles as a query interface when handed a string
        # instead of a function.
        if func == "achieved": return len(covered)
        if func == "required": return len(target)
        if func == "count" : return count
        if func.__doc__:
            wrapper.__doc__ = func.__doc__
            # NOTE(review): __hints__ is only attached when a docstring is
            # present, yet the graders below always read it -- confirm
            # this conditional is intentional.
            wrapper.__hints__ = typing.get_type_hints (func)
        return wrapper
    return coverage
coverage = mk_coverage()
try:
    # Instrument the student's divisors() so the graders below can query
    # branch coverage; swallow errors so a broken solution still imports.
    divisors = coverage(divisors)
except:
    pass
## Lösung Teil 2. (Tests)
def test_divisors():
    """Spot-check divisors() against hand-computed divisor lists.

    BUG FIX: the original expectations ([1] for 5, [1, 2, 5] for 10)
    omitted n itself, contradicting this file's own divisors_oracle
    (`range(1, n + 1)`), so the checks could never all pass.
    """
    assert divisors(1) == [1]
    assert divisors(5) == [1, 5]
    assert divisors (10) == [1, 2, 5, 10]
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
    # Grading: the solution must define divisors() with a parameter `n`.
    def test_divisors (self):
        assert divisors
        assert 'n' in getfullargspec(divisors).args
class TestGrades:
    # Grading: docstring, type hints, full branch coverage, and agreement
    # with the reference oracle below.
    def test_docstring_present(self):
        assert divisors.__doc__ is not None
    def test_typing_present(self):
        assert divisors.__hints__ == typing.get_type_hints(self.divisors_oracle)
    def test_coverage(self):
        assert coverage("achieved") == coverage("required")
    def divisors_oracle(self, n:int)->list:
        # Reference implementation: every d in 1..n dividing n.
        return [ d for d in range (1, n + 1) if n % d == 0 ]
    def check_divisors (self, x):
        # Compare as sets so ordering differences are ignored.
        assert set(divisors (x)) == set(self.divisors_oracle (x))
    def test_correctness(self):
        for i in range (100):
            self.check_divisors (i)
        n = random.randrange (10000)
        self.check_divisors (n)
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
ef7c8bd937b31ddb510f10cb61ad543cd824fbcd | c8e2f350b54acb24b599e37d012696d8a97f7d08 | /env/env/lib/python3.8/site-packages/dimagi/utils/decorators/profile.py | 8084b3d295422d49be9339c14eb5bd280746bdac | [] | no_license | nargo0o/geekshop | 9c2dc00e2d91dd3671975a61ea21b9246015f311 | c30837ea4d2ff699a633bff8f5f2f55f03bdde6f | refs/heads/master | 2023-06-22T10:44:53.514629 | 2021-07-18T14:38:14 | 2021-07-18T14:38:14 | 379,321,818 | 1 | 0 | null | 2021-07-13T14:56:22 | 2021-06-22T15:43:50 | Python | UTF-8 | Python | false | false | 4,098 | py | import hotshot
import os
from datetime import datetime
from django.conf import settings
try:
    PROFILE_LOG_BASE = settings.PROFILE_LOG_BASE
except Exception:
    # Fall back when Django settings are absent or unconfigured.
    PROFILE_LOG_BASE = "/tmp"
# Source: http://code.djangoproject.com/wiki/ProfilingDjango
def profile(log_file):
    """Profile some callable.
    This decorator uses the hotshot profiler to profile some callable (like
    a view function or method) and dumps the profile data somewhere sensible
    for later processing and examination.
    It takes one argument, the profile log name. If it's a relative path, it
    places it under the PROFILE_LOG_BASE. It also inserts a time stamp into the
    file name, such that 'my_view.prof' become 'my_view-20100211T170321.prof',
    where the time stamp is in UTC. This makes it easy to run and compare
    multiple trials.
    """
    # NOTE(review): the hotshot module only exists on Python 2; this
    # decorator cannot run on Python 3.
    if not os.path.isabs(log_file):
        # Resolve relative log paths under PROFILE_LOG_BASE.
        log_file = os.path.join(PROFILE_LOG_BASE, log_file)
    def _outer(f):
        def _inner(*args, **kwargs):
            # Add a timestamp to the profile output when the callable
            # is actually called.
            (base, ext) = os.path.splitext(log_file)
            base = base + "-" + datetime.now().strftime("%Y%m%dT%H%M%S%f")
            final_log_file = base + ext
            prof = hotshot.Profile(final_log_file)
            try:
                ret = prof.runcall(f, *args, **kwargs)
            finally:
                # Always flush/close the profile, even if f raised.
                prof.close()
            return ret
        return _inner
    return _outer
try:
    from line_profiler import LineProfiler
    def line_profile(follow=[]):
        """
        Perform line profiling of a function.
        Will output the profile stats per line of each function included in the profiler.
        Output will be printed once per function call so take care not to use this on
        functions that get called many times.
        :param follow: list of additional functions that should be profiled
        Example output
        --------------
        File: demo.py
        Function: demo_follow at line 67
        Total time: 1.00391 s
        Line #      Hits         Time  Per Hit   % Time  Line Contents
        ==============================================================
        67                                           def demo_follow():
        68         1           34     34.0      0.0       r = random.randint(5, 10)
        69        11           81      7.4      0.0       for i in xrange(0, r):
        70        10      1003800 100380.0    100.0           time.sleep(0.1)
        File: demo.py
        Function: demo_profiler at line 72
        Total time: 1.80702 s
        Line #      Hits         Time  Per Hit   % Time  Line Contents
        ==============================================================
        72                                           @line_profile(follow=[demo_follow])
        73                                           def demo_profiler():
        74         1           17     17.0      0.0      r = random.randint(5, 10)
        75         9           66      7.3      0.0      for i in xrange(0, r):
        76         8       802921 100365.1     44.4          time.sleep(0.1)
        77
        78         1      1004013 1004013.0     55.6      demo_follow()
        """
        # The mutable default `follow=[]` is safe here: it is only
        # iterated, never mutated.
        def inner(func):
            def profiled_func(*args, **kwargs):
                try:
                    profiler = LineProfiler()
                    profiler.add_function(func)
                    for f in follow:
                        profiler.add_function(f)
                    profiler.enable_by_count()
                    return func(*args, **kwargs)
                finally:
                    # Print stats even when the wrapped call raises.
                    profiler.print_stats()
            return profiled_func
        return inner
except ImportError:
    # line_profiler is not installed: degrade to a no-op decorator so
    # decorated code keeps working unchanged.
    def line_profile(follow=[]):
        "Helpful if you accidentally leave in production!"
        def inner(func):
            def nothing(*args, **kwargs):
                return func(*args, **kwargs)
            return nothing
        return inner
| [
"nargo4535@gmail.com"
] | nargo4535@gmail.com |
cdeeb508ec19d1afec9224b988cd226e64e9ffe0 | df6e5c950216855df180c97e764406a3fb27adc2 | /ams/urls.py | 0faa29bf102b3585abec4ea57cec5655a8bc30a9 | [] | no_license | shameem21/gitf | 3155d098f5b114fb77dd9a1da043b34560767861 | a21e6e18f01c7c605fd50a0c2cd7b9e3911408d9 | refs/heads/master | 2022-12-09T03:02:40.229971 | 2020-09-02T15:48:00 | 2020-09-02T15:48:00 | 292,328,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py |
from django.urls import path,include
from . import views
from django.conf.urls import url
from django.conf import settings
urlpatterns = [
    # Panel sub-apps, each mounted under its own URL prefix.
    url(r'^admin_panel/', include('admin_panel.urls')),
    url(r'^teacher_panel/', include('teacher_panel.urls')),
    # Login page is served at the site root; logout is a plain path route.
    url(r'^$', views.login),
    path('logout',views.logout)
]
"pshameem@icloud.com"
] | pshameem@icloud.com |
8d0cb408640954bbf211313a2d61f528012d489b | 8533aeeaeb12248f3f4d8e7bf3ab092bee8eb881 | /tests/console/test_master.py | a7782c01dff1809595d144bd7f0c69294efd38db | [] | no_license | Jiakun/all_autotests | 6f5d977590b4a0cc5cef7c3df21758f4f660c987 | cde85988fa008c083afbeb980fa66960dbe3cb23 | refs/heads/master | 2021-01-18T16:47:40.916399 | 2017-04-05T04:14:26 | 2017-04-05T04:14:26 | 86,769,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,637 | py | from unittest2 import TestCase
import sys
import os
from sedna.common import NodeResult
from sedna.common import Result
from sedna.common import Service
from sedna.console.master import FormatedResult
from sedna.console.master import main
from sedna.master import Master
from tests.console.test_sampler import get_package_abpath
from tests.console.test_sampler import MockedSednaConfigParser
TEST_IP = "127.0.0.1"
TEST_SERVICE_NAME = "$$test_name$$"
TEST_METHOD = "$$test_method$$"
TEST_GROUP = "$$test_group$$"
TEST_STATUS = "$$test_status$$"
TEST_ANALYSIS = "$$test_analysis$$"
class FormatedResultTest(TestCase):
    """
    The test cases to test the output of Sedna result
    """
    def setUp(self):
        """
        Set up fake results from two nodes (three service results total).
        """
        service = Service(name="$$test_service_a$$", ip=TEST_IP)
        service_result_a =\
            Result(service=service,
                   status=TEST_STATUS,
                   analysis=TEST_ANALYSIS)
        service_result_b = \
            Result(service=service,
                   status="active",
                   analysis=TEST_ANALYSIS)
        service_result_c = \
            Result(service=service,
                   status="inactive",
                   analysis=TEST_ANALYSIS)
        # Node A holds two service results, node B holds one.
        result_a = [service_result_a, service_result_b]
        result_b = [service_result_c]
        node_result_a =\
            NodeResult("$$test_group_a$$", "$$test_ip_a$$", result_a)
        node_result_b =\
            NodeResult("$$test_group_b$$", "$$test_ip_b$$", result_b)
        self.node_results = [node_result_a, node_result_b]
        self.formated_result = FormatedResult()
    def test_format_output_log(self):
        """
        The test case to test the log to display results from all nodes.
        The result of the test case should be checked in
        sedna/tests/log/sedna.log
        """
        self.formated_result.format_output_log(node_results=self.node_results)
class MasterMainTest(TestCase):
    """
    The test case to test the main function of the master console.
    """
    def setUp(self):
        # Preserve and reset argv; main() parses command-line flags itself.
        self.sys_argv = sys.argv
        sys.argv = []
        os.environ['SEDNA_CONF'] = "fake_path_sedna.conf"
        os.environ['SEDNA_LOG_CONF'] = get_package_abpath("./logging.conf")
    def test_command_line_arguments_default(self):
        # No --config_path: main() should use the SEDNA_CONF default.
        sys.argv.append("sedna.console.master")
        main(config_paser_class=MockedSednaConfigParser,
             master_class=MockedMaster,
             format_output_class=MockedFormatedResult)
        self.assertEquals(MockedSednaConfigParser.config_path_stub,
                          "fake_path_sedna.conf")
    def test_command_line_arguments_config_path_defined(self):
        # An explicit --config_path must override the environment default.
        sys.argv.append("sedna.console.master")
        sys.argv.append("--config_path")
        sys.argv.append("defined_fake_path_sedna.conf")
        main(config_paser_class=MockedSednaConfigParser,
             master_class=MockedMaster,
             format_output_class=MockedFormatedResult)
        self.assertEquals(MockedSednaConfigParser.config_path_stub,
                          "defined_fake_path_sedna.conf")
    def tearDown(self):
        # Restore the original argv so other tests are unaffected.
        sys.argv = self.sys_argv
class MockedMaster(Master):
    """Stub Master for the console `main` tests; every hook is a no-op."""

    def __init__(self):
        """Deliberately skip the real Master initialisation."""

    def verify_nodes(self, sedna_config_value):
        """Pretend to verify nodes; intentionally does nothing."""
class MockedFormatedResult(FormatedResult):
    """Stub FormatedResult for the console `main` tests; logging is a no-op."""

    def format_output_log(self, node_results):
        """Swallow the results instead of writing the log."""
"fjkmail@163.com"
] | fjkmail@163.com |
57113a9c90aa74a3498f093d1107ddbae4fee161 | 6bfc12951180f207bb5d86ce1ddff4323dd9f223 | /mysql_scripts/user.py | ee6b27c3198ed93f852a78ff3637be91e130e649 | [] | no_license | easyfun/trade | 966afedab4753c5d93a38e8678e38abf3eb113ac | b9ee016c8d6783ae8948877d36d1e10880457808 | refs/heads/master | 2021-01-12T08:18:24.688834 | 2018-03-17T11:47:30 | 2018-03-17T11:47:30 | 76,536,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,769 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import mysql.connector
def create_user_table():
    """(Re)create the `user` database and its sharded user/flow tables.

    Drops any existing `user` database, then creates `t_users` plus 100
    sharded table pairs `t_user_NN` / `t_user_flow_NN` (NN = 00..99).
    """
    # NOTE(review): credentials are hard-coded; move them to configuration
    # or the environment before wider use. The connection/cursor are also
    # never closed (DDL auto-commits in MySQL, but an explicit close would
    # be cleaner).
    conn=mysql.connector.connect(
        host='127.0.0.1',
        user='root',
        password='easyfun',
        pool_size=2)
    cursor=conn.cursor()
    # Start from a clean slate: drop and recreate the database.
    sql='drop database if exists user'
    cursor.execute(sql)
    sql='create database user'
    cursor.execute(sql)
    sql='use user'
    cursor.execute(sql)
    # Global user table.
    sql="create table t_users(\
        user_id bigint NOT NULL COMMENT '用户id,末两位与mobile末两位相同',\
        real_name char(64) NOT NULL DEFAULT '' COMMENT '用户姓名',\
        nick_name char(128) NOT NULL COMMENT '昵称',\
        login_password char(128) NOT NULL COMMENT '密码',\
        withdrawal_password char(128) NOT NULL DEFAULT '' COMMENT '取款密码',\
        mobile char(24) NOT NULL COMMENT '手机号,末两位与user_id末两位相同',\
        status int NOT NULL DEFAULT '0' COMMENT '状态 0正常 1锁定 2黑名单',\
        from_type int NOT NULL COMMENT '来源类型 -1:未知 0:PC 1:IOS 2:Andriod',\
        user_type int NOT NULL DEFAULT '0' COMMENT '用户类型 0投资用户 1借款用户 2平台用户',\
        register_date datetime NOT NULL COMMENT '注册日期',\
        referee_uid bigint NOT NULL COMMENT '推荐人uid',\
        referee_name char(64) NOT NULL COMMENT '推荐人姓名',\
        referee_mobile char(24) NOT NULL COMMENT '推荐人手机号',\
        update_time datetime DEFAULT NULL COMMENT '最后一次修改时间',\
        create_time datetime DEFAULT NULL COMMENT '创建时间',\
        head_portrait_url varchar(256) DEFAULT '' COMMENT '头像地址url',\
        PRIMARY KEY (user_id),\
        UNIQUE KEY (mobile)\
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='用户表';"
    cursor.execute(sql)
    # 100 shard pairs: a user table plus its registration flow table.
    for i in range(100):
        sql="create table t_user_%02d(\
            user_id bigint NOT NULL COMMENT '用户id,末两位与mobile末两位相同',\
            real_name char(64) NOT NULL DEFAULT '' COMMENT '用户姓名',\
            nick_name char(128) NOT NULL COMMENT '昵称',\
            login_password char(128) NOT NULL COMMENT '密码',\
            withdrawal_password char(128) NOT NULL DEFAULT '' COMMENT '取款密码',\
            mobile char(24) NOT NULL COMMENT '手机号,末两位与user_id末两位相同',\
            status int NOT NULL DEFAULT '0' COMMENT '状态 0正常 1锁定 2黑名单',\
            from_type int NOT NULL COMMENT '来源类型 -1:未知 0:PC 1:IOS 2:Andriod',\
            user_type int NOT NULL DEFAULT '0' COMMENT '用户类型 0投资用户 1借款用户 2平台用户',\
            register_date datetime NOT NULL COMMENT '注册日期',\
            referee_uid bigint NOT NULL COMMENT '推荐人uid',\
            referee_name char(64) NOT NULL COMMENT '推荐人姓名',\
            referee_mobile char(24) NOT NULL COMMENT '推荐人手机号',\
            update_time datetime DEFAULT NULL COMMENT '最后一次修改时间',\
            create_time datetime DEFAULT NULL COMMENT '创建时间',\
            head_portrait_url varchar(256) DEFAULT '' COMMENT '头像地址url',\
            PRIMARY KEY (user_id),\
            UNIQUE KEY (mobile)\
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='用户表';" % (i, )
        cursor.execute(sql)
        sql="create table t_user_flow_%02d(\
            flow_id bigint NOT NULL COMMENT '流水id',\
            real_name char(64) NOT NULL DEFAULT '' COMMENT '用户姓名',\
            nick_name char(128) NOT NULL COMMENT '昵称',\
            login_password char(128) NOT NULL COMMENT '密码',\
            withdrawal_password char(128) NOT NULL DEFAULT '' COMMENT '取款密码',\
            mobile char(24) NOT NULL COMMENT '手机号',\
            from_type int NOT NULL COMMENT '来源类型 -1:未知 0:PC 1:IOS 2:Andriod',\
            user_type int NOT NULL DEFAULT '0' COMMENT '用户类型 0投资用户 1借款用户 2平台用户',\
            register_date datetime NOT NULL COMMENT '注册日期',\
            referee_uid bigint NOT NULL COMMENT '推荐人uid',\
            referee_name char(64) NOT NULL COMMENT '推荐人姓名',\
            referee_mobile char(24) NOT NULL COMMENT '推荐人手机号',\
            operation int NOT NULL DEFAULT 0 COMMENT '操作类型 0注册用户请求 1注册用户成功 2注册用户失败',\
            remark char(128) NOT NULL COMMENT '备注',\
            create_time datetime DEFAULT NULL COMMENT '创建时间',\
            PRIMARY KEY (flow_id),\
            UNIQUE KEY (mobile)\
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='用户表流水';" % (i, )
        cursor.execute(sql)
if '__main__'==__name__:
create_user_table() | [
"easyfun"
] | easyfun |
0b3bad287052752c8ab7a55ebf45a3caa0462c08 | 8e754b630a78148b3ee0bc851f56d8860e88abaf | /cnn_generic.py | df7655b1d1694a38f7591bf1551cc836d06bec4c | [
"MIT"
] | permissive | agenthimzz/Classification-between-3-students | 9a33c9ec677e67d0335aa25d88762311b7631d38 | 649d1e7a81ae1cf40df85beb13a7d7bd94a55d91 | refs/heads/master | 2020-04-26T20:29:30.023652 | 2019-03-05T05:23:55 | 2019-03-05T05:23:55 | 173,811,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,160 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb  8 13:14:15 2019
@author: himanshu.ghadigaonkar
"""
# Importing the Keras libraries and packages
# NOTE(review): the argument style below (Convolution2D positional kernel
# dims, output_dim, samples_per_epoch, nb_epoch, nb_val_samples) is the
# legacy Keras 1 API and emits deprecation warnings on Keras 2.
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import numpy as np
import matplotlib.pyplot as plt
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution: 32 3x3 filters over 256x256 RGB input.
classifier.add(Convolution2D(32, 3, 3, input_shape = (256, 256, 3),
                             activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Convolution2D(32, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a third convolutional layer
classifier.add(Convolution2D(64, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a fourth convolutional layer
classifier.add(Convolution2D(128, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection: 128-unit hidden layer, 2-unit softmax output.
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 2, activation = 'softmax'))
# Compiling the CNN
# NOTE(review): the generators below use class_mode='categorical' and the
# output layer is a 2-unit softmax; 'categorical_crossentropy' is the
# matching loss -- 'binary_crossentropy' here skews the reported accuracy.
# Confirm before changing.
classifier.compile(optimizer = 'adam',
                   loss = 'binary_crossentropy',
                   metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
# Training-time augmentation: rescale pixels plus shear/zoom/flip.
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset_mine_generic/train',
                                                 target_size = (256, 256),
                                                 batch_size = 10,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('dataset_mine_generic/test',
                                            target_size = (256, 256),
                                            batch_size = 5,
                                            class_mode = 'categorical')
history = classifier.fit_generator(training_set,
                                   samples_per_epoch = 29,
                                   nb_epoch = 40,
                                   validation_data = test_set,
                                   nb_val_samples = 30)
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show() | [
"himanshu.ghadigaonkar@gmail.com"
] | himanshu.ghadigaonkar@gmail.com |
2dc3d5860b8a87315f0aa0f33b13cdaec15d63a4 | 3670f67e055d376e5c229540f39c335cb632f7c0 | /project/model/navigation.py | 957b99941d7568c9e49cf575e7284c16de616922 | [
"MIT"
] | permissive | pengjinfu/kispower | e5e243aae9418ae2bdfaf27d9be22f0d5b5d85b7 | 38d88c4c5a983a90009cb8c7012cb4295b1aec06 | refs/heads/master | 2022-04-10T04:43:48.308017 | 2020-04-02T02:08:55 | 2020-04-02T02:08:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # -*- coding: utf-8 -*-
# @Time : 2019-12-23
# @Author : mizxc
# @Email : xiangxianjiao@163.com
from mongoengine import *
class Web(EmbeddedDocument):
    """One website entry, embedded inside a Navigation document."""
    # Display name of the site (required, at most 60 chars).
    webName = StringField(max_length=60, required=True)
    # Link target (required).
    url = StringField(max_length=1000, required=True)
    # Optional icon URL/path.
    icon = StringField(max_length=1000)
    # Optional short description of the site.
    introduction = StringField(max_length=1000)
class Navigation(Document):
    """A navigation column (category) holding an ordered set of Web entries."""
    # Column/category title (required, at most 60 chars).
    column = StringField(max_length=60, required=True)
    # Sort/order number of this column (required).
    number = IntField(required=True)
    # Optional description of the column.
    introduction = StringField(max_length=1000)
    # Embedded list of the websites shown under this column.
    webs = ListField(EmbeddedDocumentField(Web))
| [
"xiangxianjiao@163.com"
] | xiangxianjiao@163.com |
fea954915f25e108ef1837d4bda6ee52dc8c7314 | 2550c41d04289410d73118dd2d8008c63e23f021 | /mathcalm/mathcalm/settings/defaults.py | 733bca3d6f29e11cb22e74ddf50382beafea7d3a | [
"MIT"
] | permissive | dspetrenko/mathcalm | de7bbd95b613f2c5def9fef2e7ba28b733917977 | 40553f2a34464479538154f5d3f5d993fc4e6b42 | refs/heads/master | 2020-03-13T14:47:33.733281 | 2018-06-14T06:41:53 | 2018-06-14T06:41:53 | 131,165,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,199 | py | """
Django settings for mathcalm project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# ds: add one more os.path.dirname because settings in other project level than without separation settings
# (three dirname() calls because this module lives in mathcalm/settings/, one
# level deeper than a stock single-file settings.py)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): placeholder value -- presumably replaced by an
# environment-specific settings module; confirm before deploying.
SECRET_KEY = 'secret_key'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG on, Django only serves localhost-style hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'competition',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mathcalm.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mathcalm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Development default: SQLite file in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"denis.petrenko.dp@gmail.com"
] | denis.petrenko.dp@gmail.com |
1a7f5760894f9c6ff001c18e56bc124a53c55fa9 | a3a0f22305ef052ed6e0b275261b2a14dbf56899 | /listbox.py | a698c7e2edc45687f5319aaf821dd43a0173ff0e | [] | no_license | VictorAlexandre1986/Tkinter | 6add2d8793368cd3dc71e5739ea2c0b453fbf574 | 28d747be13c9a28a0622c36a13e90cda0ad3a54f | refs/heads/main | 2023-07-12T12:19:28.537791 | 2021-08-16T18:06:29 | 2021-08-16T18:06:29 | 394,052,482 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | from tkinter import *
#from tkinter import ttk
import os
def imprimir():
    """Print the currently highlighted listbox entry to stdout."""
    # `lb` is the module-level Listbox created below; ACTIVE = highlighted item.
    esp=str(lb.get(ACTIVE))
    print(esp)
def adicionar():
    """Append the text from the entry widget to the end of the listbox."""
    # `entrada` is the module-level Entry created below.
    esp=str(entrada.get())
    lb.insert(END,esp)
app = Tk()
app.title('')
app.geometry('300x500')
app.configure(background='#404040')
# Window dimensions
largura = 300
altura = 500
# Screen resolution
largura_tela = app.winfo_screenwidth()
altura_tela = app.winfo_screenheight()
# Window position (top-left corner so the window is centered)
posx = largura_tela/2 - largura/2
posy = altura_tela/2 - altura/2
# Center the window on the monitor
app.geometry('%dx%d+%d+%d'% (largura,altura,posx,posy))
lbl = Label(app,text='ListBox',bg='#404040',fg='#fff',font='Times 20 bold italic')
lbl.place(x=10,y=10, width=280, height=30)
# Seed the listbox with a few sports entries.
esportes = ['Futebol','Volei','Basquete']
lb=Listbox(app)
for esporte in esportes:
    lb.insert(END,esporte)
lb.place(x=10,y=50, width=280,height=100)
entrada = Entry(app)
entrada.place(x=10,y=160,width=280,height=30)
botao = Button(app,text="Imprimir", command=imprimir)
botao.place(x=10,y=200,width=280,height=30)
# NOTE: `botao` is rebound to the second button; harmless, since each widget
# is placed before the name is reused.
botao = Button(app,text="Inserir", command=adicionar)
botao.place(x=10,y=240,width=280,height=30)
app.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
867d3e987905c99db30fe4889288df35f8111057 | ee060ff94272b8f424030b9543a17e2343db915d | /Devices/IDS_ueye_3250CP2.py | 30d72a047a0da8abd83782496aaac16c75e2540b | [] | no_license | jhardin4/APE | 73d686849b853f0afdc23025e3bdd2ca7b1aecb1 | 88560e46f9e90bdae4ab175d589de36e8dc9d464 | refs/heads/master | 2022-05-09T03:08:58.373601 | 2022-01-31T15:11:39 | 2022-01-31T15:11:39 | 194,868,007 | 4 | 3 | null | 2020-06-19T14:00:59 | 2019-07-02T13:25:01 | OpenSCAD | UTF-8 | Python | false | false | 3,579 | py | # blank
# Only import 'basic' python packages up here. All others should be imported
# within the methods.
# Handle the different relative locations for independently running and
#
from Devices import Sensor
class IDS_ueye_3250CP2(Sensor):
    """APE device wrapper for an IDS uEye 3250CP2 camera.

    Wraps the Devices.Drivers.camera_3250CP2 driver behind the Sensor
    interface; every public method appends to the device log and returns it
    via ``returnlog()``.  When ``self.simulation`` is true, hardware calls
    are skipped but logging still happens.
    """
    def __init__(self, name):
        Sensor.__init__(self, name)
        # Advertise what this device is so it can be discovered by type.
        self.descriptors.append('ueye')
        self.descriptors.append('camera')
        # Driver handle; set to a camera_3250CP2.UEye instance by fConnect().
        self.handle = ''
        # Declare the arguments each public action accepts.
        self.requirements['Measure'] = {}
        self.requirements['Measure']['file'] = {
            'value': '',
            'source': 'direct',
            'address': '',
            'desc': 'filename to store image at',
        }
        self.requirements['Configure'] = {}
        self.requirements['Configure']['gain'] = {
            'value': '',
            'source': 'direct',
            'address': '',
            'desc': 'values for master and RGB gains (0-100)',
        }
        self.requirements['Configure']['black_level'] = {
            'value': '',
            'source': 'direct',
            'address': '',
            'desc': 'black level',
        }
        self.requirements['Configure']['gamma'] = {
            'value': '',
            'source': 'direct',
            'address': '',
            'desc': 'gamma value',
        }
        self.requirements['Configure']['exposure'] = {
            'value': '',
            'source': 'direct',
            'address': '',
            'desc': 'exposure time',
        }
    def Connect(self):
        """Connect to the camera and return the log."""
        self.fConnect()
        # NOTE: 'availible' [sic] -- typo kept because log text is runtime output.
        self.addlog(self.name + ' is availible.')
        return self.returnlog()
    def fConnect(self):
        """Open the driver handle (one interactive retry on failure)."""
        if not self.simulation:
            # Imported lazily so simulation runs do not need the driver stack.
            from Devices.Drivers import camera_3250CP2
            try:
                self.handle = camera_3250CP2.UEye()
            except Exception:
                # Ask the operator whether to retry once (empty answer = yes).
                temp = input('Do you want to try to connect again?([y],n)')
                if temp in ['', 'y', 'yes']:
                    self.handle = camera_3250CP2.UEye()
        self.addlog(self.name + ' is connected.')
    def Disconnect(self):
        """Disconnect from the camera and return the log."""
        self.fDisconnect()
        return self.returnlog()
    def fDisconnect(self):
        """Close the driver handle."""
        if not self.simulation:
            self.handle.close()
        self.addlog(self.name + ' is disconnected.')
    def Measure(self, file):
        """Capture an image and save it to *file*; returns the log."""
        if not self.simulation:
            self.handle.save_image(file)
        self.addlog(self.name + ' took image and saved at ' + str(file))
        return self.returnlog()
    def Configure(self, **kwargs):
        """Apply any of gain / black_level / gamma / exposure settings.

        ``gain`` is a 4-sequence (master, red, green, blue); the other
        settings are scalars.  Unrecognized kwargs are only logged.
        """
        # Defaults recorded for readability; only overwritten when supplied.
        gain = 'default'
        black_level = 'default'
        gamma = 'default'
        exposure = 'default'
        if not self.simulation:
            if 'gain' in kwargs:
                gain = kwargs['gain']
                self.handle.set_gain(
                    master=gain[0], red=gain[1], green=gain[2], blue=gain[3]
                )
            if 'black_level' in kwargs:
                black_level = kwargs['black_level']
                self.handle.set_black_level(
                    black_level
                )
            if 'gamma' in kwargs:
                gamma = kwargs['gamma']
                self.handle.set_gamma(
                    gamma
                )
            if 'exposure' in kwargs:
                exposure = kwargs['exposure']
                self.handle.set_exposure(
                    exposure
                )
        # Log whatever was requested (even in simulation mode).
        logstr = f'{self.name} configured the following settings:\n'
        for key in kwargs:
            logstr += f'{key} was set to {kwargs[key]}'
        self.addlog(logstr)
        return self.returnlog()
| [
"johardiniv@gmail.com"
] | johardiniv@gmail.com |
653b96c34a53f5ef76223016d07dc363f51d9a46 | 11236c9b6513ee06ec3f7095d3f0c935bf6b4369 | /cookie_stands/apps.py | 0f1ef90a3a52cfdbf8a85edb1633a75fbe11a88e | [] | no_license | RoaaMustafa/cookie-stand-api | a11848f6ab37518fcab2f1aa471a552fd2ca7748 | 12b72581fbd4308b3602e8363873c16c603aeebf | refs/heads/main | 2023-08-03T01:36:16.507519 | 2021-09-08T14:05:50 | 2021-09-08T14:05:50 | 404,302,828 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from django.apps import AppConfig
class CookieStandConfig(AppConfig):
name = 'cookie_stands'
| [
"labushanab14@gmail.com"
] | labushanab14@gmail.com |
8ac2409f5cb6f10f638f4a529a2f5abd608a6613 | aacec9c81c1f015ac3f76d6e37d798e08b59d150 | /sample/sample/settings.py | 00ed6e4b6be213ff1107b7f54da769cb0c0096b0 | [] | no_license | meghalrag/djangoprgm | 25ae32b04789dc9cdeda5ac64833e6e138234349 | 6a802a6b7a0c2044af24f4e0e90e034c0ba0d9ec | refs/heads/master | 2020-05-19T22:20:51.979077 | 2019-05-06T18:05:42 | 2019-05-06T18:05:42 | 185,244,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,230 | py | """
Django settings for sample project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY: this key is committed to version control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = '+qn4gbqf!s-1qc_)bvtccf3n5x8*atnhkghn#99#-6yo*6b)(_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG on, Django only serves localhost-style hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'myapp',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sample.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'sample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Development default: SQLite file in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"noreply@github.com"
] | noreply@github.com |
b8afa808079a06eabab4fd45a09362784a68602d | 529722cec22259e788b3ad64e377c127c01d93b3 | /circle.py | dd28eaf577e4aeec006bf2fa177c50e599009b3f | [] | no_license | kushajha/graph-Plotter | 03320b39f3000410eb8bd1671115e9269bb6c546 | 3b5d30253cfc198a56b68318079dd6e315dd9cc4 | refs/heads/master | 2021-03-13T02:08:42.413286 | 2017-06-11T17:28:30 | 2017-06-11T17:28:30 | 91,478,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | import matplotlib.pyplot as plt
import numpy as np
import math
def axes():
    """Draw the x and y coordinate axes through the origin."""
    plt.axhline(0,alpha=1)
    plt.axvline(0,alpha=1)
print("\nEquation is of the form (x-a)^2+(y-b)^2=r^2")
# Read the circle parameters interactively (Python 2 script: raw_input).
a1,a2 = raw_input('Enter Center of the Circle: ').split()
a=float(a1)
b=float(a2)
r=float(raw_input('Enter Radius of the Circle: '))
plt.clf()
plt.plot(a,b,'.')  # mark the center
x1,x2 = raw_input('Enter Minimum and Maximum limit of x for the graph: ').split()
xf1=float(x1)
xf2=float(x2)
# BUG FIX: the original fed the user's x-range directly into cos/sin as the
# *angle* parameter, so the circle was only complete if the user happened to
# type [0, 6.28].  Sweep the full angle range instead, and apply the x-range
# as the view limits it was asked for.
theta = np.linspace(0, 2 * math.pi, 200)
axes()
plt.plot(a+r*np.cos(theta),b+r*np.sin(theta))
plt.xlim(xf1, xf2)
plt.title("Circle")
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
e78ae30bf9795fa2ed715629f4873ab1b7cebfcf | 35e28f5c6d3df8c3478f883795a3643881ad71ba | /USG/settings.py | e2fec5923345595b3048e0215b01814e8ea41c72 | [] | no_license | ajayvjn/USG-WebScrapper | 628cc30e8b6dbac33b9ab36228c1bd72629e98e0 | 093b3e4a1ca8f04f4ce0ca5f574e1275fe2d8a05 | refs/heads/master | 2020-05-20T12:26:08.521352 | 2017-01-30T21:09:25 | 2017-01-30T21:09:25 | 80,463,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | # -*- coding: utf-8 -*-
# Scrapy settings for USG project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Active (uncommented) settings for the USG scraper.
BOT_NAME = 'USG'
SPIDER_MODULES = ['USG.spiders']
NEWSPIDER_MODULE = 'USG.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'USG (+http://www.yourdomain.com)'
# Obey robots.txt rules
#ROBOTSTXT_OBEY = True
# Deliberately overrides the template default above: this crawler IGNORES
# robots.txt.  Make sure the target sites permit this before running.
ROBOTSTXT_OBEY=False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'USG.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'USG.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'USG.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"ajay.vjn17@gmail.com"
] | ajay.vjn17@gmail.com |
3a0f2b78a917ebbb93b31b448eff17706496fcb4 | 55d6de252e61c4b60688ebd8b1f637807acc1e7c | /custom_customer_payment_approval/models/models.py | 83b0d97f1108f7bbd53d5c29fe68c31b90a5209d | [] | no_license | mosadiqit/eerna_erp_uslbd | b707a1d49a4fce7c1543b63e0120e8f9b77b26ce | 73e3994a9e32df7809d244eb6592513162ab7853 | refs/heads/main | 2023-06-30T14:53:04.837197 | 2021-08-04T11:30:46 | 2021-08-04T11:30:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | # -*- coding: utf-8 -*-
# from odoo import models, fields, api
# class custom_customer_payment_approval(models.Model):
# _name = 'custom_customer_payment_approval.custom_customer_payment_approval'
# _description = 'custom_customer_payment_approval.custom_customer_payment_approval'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# for record in self:
# record.value2 = float(record.value) / 100
| [
"ibrahimalazhar264@gmail.com"
] | ibrahimalazhar264@gmail.com |
c568ede767628da5fc788549f85640d9c731bb84 | 682e9f278b3c044785fa3f28e7b4294fe4e78fb5 | /Firstproject/urls.py | 35ccf93f48600eb749112671c52d1dd61be96cd8 | [] | no_license | suyeonshin/firstproject | 705cd2c35fe63a02b11425e7ef16ea60295b5433 | e4b16a2d8521f9368ca1e5fc5004db99273f77bd | refs/heads/master | 2022-05-30T20:20:04.530085 | 2019-04-07T15:39:29 | 2019-04-07T15:39:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | """Firstproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
#from django.conf.urls import url
from django.contrib import admin
from django.urls import path
import wordcount.views
# URL routing: admin site plus the three wordcount pages.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', wordcount.views.home, name="home"),
    path('about/', wordcount.views.about, name="about"),
    path('result/', wordcount.views.result, name="result"),
]
| [
"tlstndus2560@naver.com"
] | tlstndus2560@naver.com |
40e0d876ae67bf5605c51f19798985a857354031 | bda51111c9b9176e037e386466c6ff90cdd7a423 | /python-flask/app.py | 4c89fce73cf35ea8e0805f932b07d90ea3e06c61 | [
"Apache-2.0"
] | permissive | alekhyapopuri/Parkinsons-disease-prediction | 781d0772b22b4c660dd40971cfffebbc7e7e2209 | 7f23c70539031606769790b90bdeb6d11ce9d865 | refs/heads/master | 2023-03-14T13:34:50.710781 | 2021-03-06T10:09:39 | 2021-03-06T10:09:39 | 345,058,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 11:32:24 2020
@author: ALEKHYA
"""
import numpy as np
import pandas as pd
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
# Load the trained classifier once at import time.  The context manager closes
# the file handle -- the original `pickle.load(open('model.pkl', 'rb'))`
# leaked it.
with open('model.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
@app.route('/')
def home():
    """Serve the input form."""
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Classify the submitted voice measurements and render the verdict.

    Reads all 22 form fields (order must match the inputs in index.html),
    coerces each to float, builds a single-row DataFrame with the feature
    names the model was trained on, and renders the prediction text.
    """
    input_features = [float(x) for x in request.form.values()]
    features_value = [np.array(input_features)]
    features_name = ['MDVP:Fo(Hz)', 'MDVP:Fhi(Hz)', 'MDVP:Flo(Hz)', 'MDVP:Jitter(%)',
                     'MDVP:Jitter(Abs)', 'MDVP:RAP', 'MDVP:PPQ', 'Jitter:DDP',
                     'MDVP:Shimmer', 'MDVP:Shimmer(dB)', 'Shimmer:APQ3', 'Shimmer:APQ5',
                     'MDVP:APQ', 'Shimmer:DDA', 'NHR', 'HNR', 'RPDE', 'DFA',
                     'spread1', 'spread2', 'D2', 'PPE']
    df = pd.DataFrame(features_value, columns=features_name)
    output = model.predict(df)
    print(output)
    # NOTE(review): assumes a single-row prediction (one-element array); a
    # multi-row result would make this truth test ambiguous.
    if output == 0:
        res_val = "no parkinsons disease "
    else:
        res_val = "** parkinsons disease **"
    return render_template('index.html', prediction_text='Patient has {}'.format(res_val))
if __name__ == "__main__":
    app.run(debug=True)
| [
"alekhyapopuri44@gmail.com"
] | alekhyapopuri44@gmail.com |
9ae4655febb6b0686d8bf6a9c63e007579bba638 | 245a1b017c29c1ebf5a1cb76a378609adb21e238 | /pcap2http/pcapparsermod/printer.py | b010c1e7ba1bb92cfe5e7555150cac9b7e89955b | [] | no_license | wsniper/pcap2http | 32f62e1e7fbbb5fbc88c7fe6638f483a7fbbff0c | 3a2f8df97c9b4bb6265b9163533f5ae977e087fc | refs/heads/master | 2021-01-13T07:20:05.284505 | 2016-05-29T18:06:37 | 2016-05-29T18:06:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,799 | py | from __future__ import unicode_literals, print_function, division
from io import StringIO
import sys
from pcapparsermod.config import OutputLevel
# print http req/resp
from pcapparsermod import utils, six
from pcapparsermod import config
import threading
from pcapparsermod.constant import Compress
printer_lock = threading.Lock()
def _get_full_url(uri, host):
if uri.startswith(b'http://') or uri.startswith(b'https://'):
return uri
else:
return b'http://' + host + uri
class HttpPrinter(object):
    """Accumulates one TCP connection's HTTP traffic as text and prints it
    (per message, or grouped per connection when config group mode is on)."""
    def __init__(self, client_host, remote_host):
        # Snapshot of the global parser configuration (output level, encoding, ...).
        self.parse_config = config.get_config()
        # Per-connection text buffer; flushed to config.out by _do_output().
        self.buf = StringIO()
        # (host, port) pairs of the two endpoints, printed as a banner line.
        self.client_host = client_host
        self.remote_host = remote_host
    def on_http_req(self, req_header, req_body):
        """Render one HTTP request into the connection buffer.

        Output depth follows the configured level: URL only, headers only,
        or headers plus the (possibly decompressed) textual body.

        :type req_header: HttpRequestHeader
        :type req_body: bytes
        """
        if self.parse_config.level == OutputLevel.ONLY_URL:
            self._println(req_header.method + b" " + _get_full_url(req_header.uri, req_header.host))
        elif self.parse_config.level == OutputLevel.HEADER:
            self._println(req_header.raw_data)
            self._println()
        elif self.parse_config.level >= OutputLevel.TEXT_BODY:
            self._println(req_header.raw_data)
            self._println()
            mime, charset = utils.parse_content_type(req_header.content_type)
            # usually charset is not set in http post
            output_body = self._if_output(mime)
            # fall back to the user-configured encoding when the header has none
            if self.parse_config.encoding and not charset:
                charset = self.parse_config.encoding
            if req_header.compress == Compress.IDENTITY:
                # if is gzip by content magic header
                # someone missed the content-encoding header
                if utils.gzipped(req_body):
                    req_header.compress = Compress.GZIP
            if output_body:
                self._print_body(req_body, req_header.compress, mime, charset)
        self._println('')
    def on_http_resp(self, resp_header, resp_body):
        """Render one HTTP response into the connection buffer.

        Mirrors on_http_req; additionally flushes the buffer immediately
        when connection-grouped output is disabled.

        :type resp_header: HttpResponseHeader
        :type resp_body: bytes
        """
        if self.parse_config.level == OutputLevel.ONLY_URL:
            self._println(resp_header.status_line)
        elif self.parse_config.level == OutputLevel.HEADER:
            self._println(resp_header.raw_data)
            self._println()
        elif self.parse_config.level >= OutputLevel.TEXT_BODY:
            self._println(resp_header.raw_data)
            self._println()
            mime, charset = utils.parse_content_type(resp_header.content_type)
            # usually charset is not set in http post
            output_body = self._if_output(mime)
            # fall back to the user-configured encoding when the header has none
            if self.parse_config.encoding and not charset:
                charset = self.parse_config.encoding
            if resp_header.compress == Compress.IDENTITY:
                # if is gzip by content magic header
                # someone missed the content-encoding header
                if utils.gzipped(resp_body):
                    resp_header.compress = Compress.GZIP
            if output_body:
                self._print_body(resp_body, resp_header.compress, mime, charset)
        self._println()
        # In non-grouped mode, emit after every request/response pair.
        if not config.get_config().group:
            self._do_output()
    def finish(self):
        """called when this connection finished"""
        self._do_output()
    def _do_output(self):
        """Flush the buffered text to config.out under the global printer lock."""
        printer_lock.acquire()
        try:
            # Take the buffered text and reset the buffer for the next batch.
            value = self.buf.getvalue()
            self.buf = StringIO()
            if value:
                # Banner line: client endpoint --> remote endpoint.
                print("[%s:%d] -- -- --> [%s:%d] " % (self.client_host[0], self.client_host[1],
                                                      self.remote_host[0], self.remote_host[1]),
                      file=config.out)
                if six.is_python2:
                    print(value.encode('utf8'), file=config.out)
                else:
                    print(value, file=config.out)
            config.out.flush()
        except IOError as e:
            if e.errno == 32:
                # may be pipe closed
                # (errno 32 == EPIPE, e.g. when piping into `head`)
                sys.exit(0)
            else:
                print(e, file=sys.stderr)
                sys.exit(-1)
        finally:
            printer_lock.release()
    def _if_output(self, mime):
        """Whether a body of this mime type is printed at the current level."""
        return self.parse_config.level >= OutputLevel.ALL_BODY and not utils.is_binary(mime) \
               or self.parse_config.level >= OutputLevel.TEXT_BODY and utils.is_text(mime)
    def _println(self, line=''):
        # Coerce to unicode first so mixed bytes/str input cannot corrupt the buffer.
        line = six.ensure_unicode(line)
        self.buf.write(line)
        self.buf.write('\n')
    def _println_if(self, level, line):
        """Print *line* only when the configured output level reaches *level*."""
        if self.parse_config.level >= level:
            self._println(line)
    def _print_body(self, body, compress, mime, charset):
        """Decompress, decode and append one message body to the buffer."""
        if compress == Compress.GZIP:
            body = utils.ungzip(body)
        elif compress == Compress.DEFLATE:
            body = utils.decode_deflate(body)
        content = utils.decode_body(body, charset)
        if content:
            if not mime:
                # guess mime...
                # (content that looks like a JSON object/array is treated as JSON)
                if content.startswith('{') and content.endswith('}') or content.startswith('[') \
                        and content.endswith(']'):
                    mime = b'application/json'
            if mime is None:
                mime = b''
            if self.parse_config.pretty:
                # Pretty-print JSON and form-encoded bodies; others verbatim.
                if b'json' in mime:
                    utils.try_print_json(content, self.buf)
                elif b'www-form-urlencoded' in mime:
                    utils.try_decoded_print(content, self.buf)
                else:
                    self.buf.write(content)
            else:
                self.buf.write(content)
self.buf.write('\n') | [
"diman@example.com"
] | diman@example.com |
3eea2bcd2377a53c6ec3a0fac375d30816303266 | ede10f744f89dcc7c81a73e922cfd41c8c415b3f | /setoperation.py | 2dc31b509a25cf4a50900a49bc5f21b382e8ff7e | [] | no_license | Techsrijan/Python11 | 6d76ac9aaa1fe30a1a31f7dbe898927b439ac64b | c1080fcc027044137859e4e55ef6a8d3cb740c2a | refs/heads/master | 2020-06-16T07:02:38.150544 | 2019-08-04T03:29:15 | 2019-08-04T03:29:15 | 195,508,033 | 0 | 35 | null | 2019-07-28T07:48:25 | 2019-07-06T06:52:21 | Python | UTF-8 | Python | false | false | 618 | py | marks={11,12,15,66,12}
print(marks)
# Sets are mutable: add/remove individual elements.
marks.add(555)
print(marks)
marks.remove(12)
print(marks)
# frozenset is the immutable variant -- no add/remove.
fruit=frozenset(['apple','mango'])
print(fruit)
#fruit.add('ss')  # would raise AttributeError: frozenset is immutable
# {} creates a dictionary, not a set
a={}
print(a)
print(type(a))
# the set() function creates an empty set
b=set()
print(b)
print(type(b))
b= set(marks) #copy of marks (rebinds b to a new set)
print(b)
p={1,2,3,4}
q={4,5,61,1}
# union: elements in either set
print(p|q)
# intersection: elements common to both sets
print(p&q)
# difference
print(p-q) #elements which are in p but not in q
# symmetric difference: elements in exactly one of the two sets
print(p^q)
print(p)
# set.clear() empties in place and returns None, so this prints None
print(p.clear())
print(p)
# membership operators
print(15 in marks)
print(15 not in marks)
x= q.copy()
print(x)
| [
"aswanibtech@gmail.com"
] | aswanibtech@gmail.com |
8e4ac45e01675c7b4f520a4a23d18060dc8c7369 | cab678a44ecef2fcb9102588006e3080d4529481 | /microsoft/store/partnercenterservices/models/microsoft_partner_sdk_contracts_v1_support_topic.py | df2acd2a2202668d37743f77e8790c7ac41b5299 | [] | no_license | eduardomourar/partner-center-python | 410f61f1ff0dfa8fe34414b1012edba983c289dc | 85e9617d58347fb6c3b8d50b728f9a10201e2f10 | refs/heads/master | 2020-04-19T19:21:16.543501 | 2020-01-28T12:10:33 | 2020-01-28T12:10:33 | 168,386,194 | 2 | 0 | null | 2020-01-28T12:10:35 | 2019-01-30T17:38:16 | Python | UTF-8 | Python | false | false | 1,722 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftPartnerSdkContractsV1SupportTopic(Model):
    """Describes a support topic. Service requests specify a support topic to
    ensure that they are processed quickly and effectively.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param name: Gets or sets the name of the support topic.
    :type name: str
    :param description: Gets or sets the description of the support topic.
    :type description: str
    :param id: Gets or sets the unique identifier of the support topic.
    :type id: int
    :ivar attributes: Gets the attributes.
    :vartype attributes:
     ~microsoft.store.partnercenterservices.models.MicrosoftPartnerSdkContractsV1CommonResourceAttributes
    """

    # AutoRest-generated msrest metadata: read-only fields and wire mapping.
    _validation = {
        'attributes': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'id': {'key': 'id', 'type': 'int'},
        'attributes': {'key': 'attributes', 'type': 'MicrosoftPartnerSdkContractsV1CommonResourceAttributes'},
    }

    def __init__(self, name=None, description=None, id=None):
        super(MicrosoftPartnerSdkContractsV1SupportTopic, self).__init__()
        self.name = name
        self.description = description
        self.id = id
        # Server-populated; always None on outgoing requests.
        self.attributes = None
| [
"eduardo.rodrigues@sentia.com"
] | eduardo.rodrigues@sentia.com |
915e671d8227fadd82aff81cd3cfa49c70ed068d | fb435fcd279a4a7c7014e0dbd65965c2f36a38bf | /scripts/task.py | caf2a1e19e00532138b373f13e35af78e20c99ee | [] | no_license | himanshu219/pypro | ab825f24592701f9f97286552ed1c96a924c2abe | fb241bfdaac8eb517add2211a251f470141b1e33 | refs/heads/master | 2020-04-06T07:02:16.855609 | 2016-08-14T10:24:21 | 2016-08-14T10:24:21 | 59,350,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | TASK_DIR = '~/.tasks/'
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-a", dest="add", help="adding task", action="store_true", default=False)
parser.add_option("-e", dest="edit", help="editing task", action="store", default=0)
parser.add_option("-l", dest="list", help="list all tasks", action="store_true", default=False)
parser.add_option("-d", dest="delete", help="deletes task", action="store_true", default=False)
parser.add_option("-f", dest="finish", help="done task", action="store_true", default=False)
parser.add_option("-m", dest="message", type='str', help="adds description", action="store", default='')
parser.add_option("-n", dest="name", type='str', help="adds name", action="store", default='')
parser.add_option("-p", dest="priority", type='int', help="adds priority", action="store", default=0)
parser.add_option("-m", dest="message", type='str', help="adds description", action="store", default='')
(options, args) = parser.parse_args(sys.argv[1:])
| [
"hp.iiita@gmail.com"
] | hp.iiita@gmail.com |
137ba7136d89e5ece45a8d4271fd13561c4b608f | 13f25be5c1f9d4023fdc188af20699370bbc896d | /billy/commands/update_external_ids.py | 3e37a7f86791dc9da57d8eb46cbb0c11f1901cb7 | [] | no_license | JT5D/billy | d303ca408527e122faebdd1c1047233cf0231d8c | de1586fddd30d354d80d6b6b2c7932e16bc02991 | refs/heads/master | 2020-12-25T15:51:22.750561 | 2012-09-14T16:23:18 | 2012-09-14T16:23:18 | 5,826,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | import json
import urllib
import urllib2
import time
import sys
from billy import db
from billy.conf import settings
from billy.commands import BaseCommand
class UpdateMissingIds(BaseCommand):
name = 'update-ext-ids'
help = 'update TransparencyData ids'
def add_args(self):
self.add_argument('abbrs', metavar='ABBR', type=str, nargs='+',
help='abbreviations for data to update')
self.add_argument('--apikey', help='the API key to use',
dest='API_KEY')
def handle(self, args):
for abbr in args.abbrs:
meta = db.metadata.find_one({'_id': abbr.lower()})
if not meta:
print "'{0}' does not exist in the database.".format(abbr)
sys.exit(1)
else:
print "Updating ids for {0}".format(abbr)
print "Updating TransparencyData ids..."
current_term = meta['terms'][-1]['name']
query = {'roles': {'$elemMatch':
{'type': 'member',
settings.LEVEL_FIELD: meta['abbreviation'],
'term': current_term},
},
'transparencydata_id': None,
'active': True,
}
updated = 0
initial_count = db.legislators.find(query).count()
abbrev = meta['_id'].upper()
for leg in db.legislators.find(query):
query = urllib.urlencode({'apikey': settings.API_KEY,
'search': leg['full_name'].encode('utf8')})
url = ('http://transparencydata.com/api/1.0/entities.json?' +
query)
data = urllib2.urlopen(url).read()
results = json.loads(data)
matches = []
for result in results:
if (result['state'] == abbrev and
result['seat'][6:] == leg['chamber'] and
result['type'] == 'politician'):
matches.append(result)
if len(matches) == 1:
leg['transparencydata_id'] = matches[0]['id']
db.legislators.save(leg, safe=True)
updated += 1
print 'Updated %s of %s missing transparencydata ids' % (updated,
initial_count)
time.sleep(30)
| [
"james.p.turk@gmail.com"
] | james.p.turk@gmail.com |
4c7ba819b5140bd2232359cb05c21ef6b7cf4108 | 8c6cf9a96f53894c2aa14bbbe21b898bc5dd7541 | /Lesson7-ensemble_methods/ensemnle-methods/venv/bin/easy_install | f49b71ecf15e9967b542cc234f929317ce18676a | [
"MIT"
] | permissive | raafatzahran/Udacity-DataScience | f351a4d37816eb3695419e729d772087475ae3c9 | a27eb164d840fb72fb9ab5f021e43856e60cf243 | refs/heads/master | 2020-03-21T14:05:06.343572 | 2018-07-29T12:34:25 | 2018-07-29T12:34:25 | 138,640,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | #!/Users/raafatzahran/Udacity-ND/Lesson7-ensemble_methods/ensemnle-methods/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"raafat@telemagic.no"
] | raafat@telemagic.no | |
2e393d7356fa2a2021a3160b981dba37058040fc | eabf86b6e381ab13d08c60003437946857fefcad | /FormattedStrings.py | dc475152173587b657f9e83636192f59c6b9b6fd | [
"MIT"
] | permissive | ejrach/exercises-mosh | 4a1d471e513a1c0f7ff78b25c0bff5b0ba699f23 | baaa02dff58652a9910d654be9bdd3e76dece9b7 | refs/heads/master | 2023-03-20T12:10:13.836764 | 2019-10-01T14:39:35 | 2019-10-01T14:39:35 | 198,443,046 | 0 | 0 | MIT | 2023-03-03T07:12:07 | 2019-07-23T14:04:41 | JavaScript | UTF-8 | Python | false | false | 283 | py | first_name = 'John'
last_name = 'Smith'
#the output can be written like this, a concatenated string
message = first_name + ' [' + last_name + '] is a coder'
print(message)
#or like this formatted string - prefixed with an f
msg = f'{first_name} [{last_name}] is a coder'
print(msg) | [
"eric.rach@gmail.com"
] | eric.rach@gmail.com |
49fdee19a2749ce313384605cac5604eb9846fa3 | ef3f05684f83a1a2ff8fc1e405858dc21bb037ce | /play_midi.py | 1a04092540be9a482a740e55e4e26a2954e87c5f | [] | no_license | scott929/musical-neural-net-new | 517581ff6728597d5ec37a98ba7fe65da369281b | a98eac24985374c574d14880dbc4143c90e48b61 | refs/heads/main | 2023-04-03T17:26:33.830780 | 2021-04-15T16:10:56 | 2021-04-15T16:10:56 | 358,308,322 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | import pygame as pg
def play_music(music_file):
clock = pg.time.Clock()
pg.mixer.music.load(music_file)
print("Music file {} loaded!".format(music_file))
pg.mixer.music.play()
# check if playback has finished
while pg.mixer.music.get_busy():
clock.tick(30)
music_file = "sample-output.mid"
freq = 44100 # audio CD quality
bitsize = -16 # unsigned 16 bit
channels = 2 # 1 is mono, 2 is stereo
buffer = 2048 # number of samples (experiment to get right sound)
pg.mixer.init(freq, bitsize, channels, buffer)
# optional volume 0 to 1.0
pg.mixer.music.set_volume(0.8)
try:
play_music(music_file)
except KeyboardInterrupt:
# if user hits Ctrl/C then exit
# (works only in console mode)
pg.mixer.music.fadeout(1000)
pg.mixer.music.stop()
raise SystemExit
| [
"scottcao929@gmail.com"
] | scottcao929@gmail.com |
808312317df6d0dfeca66bc7bba065770d48e142 | eb8d605fb98e32facdfef9c2b1ee154d72a9d944 | /venv/Lib/site-packages/icalendar/tests/test_recurrence.py | fc8b1be0554f5607062da3a0386c2132fa00df43 | [
"MIT"
] | permissive | Dhhoyt/Physballs | d07472864a6c342eef7cf210d45e51007647bd88 | 2225f5d88c7e16ac2b9aa59eb6e312eb62750955 | refs/heads/master | 2022-11-26T21:30:41.023974 | 2020-07-22T01:35:42 | 2020-07-22T01:35:42 | 281,017,914 | 0 | 1 | MIT | 2020-07-22T01:35:44 | 2020-07-20T05:01:05 | Python | UTF-8 | Python | false | false | 2,066 | py | # -*- coding: utf-8 -*-
from icalendar.caselessdict import CaselessDict
import unittest
import datetime
import icalendar
import os
import pytz
class TestRecurrence(unittest.TestCase):
def setUp(self):
directory = os.path.dirname(__file__)
with open(os.path.join(directory, 'recurrence.ics'), 'rb') as fp:
data = fp.read()
self.cal = icalendar.Calendar.from_ical(data)
def test_recurrence_exdates_one_line(self):
first_event = self.cal.walk('vevent')[0]
self.assertIsInstance(first_event, CaselessDict)
self.assertEqual(
first_event['rrule'], {'COUNT': [100], 'FREQ': ['DAILY']}
)
self.assertEqual(
first_event['exdate'].to_ical(),
b'19960402T010000Z,19960403T010000Z,19960404T010000Z'
)
self.assertEqual(
first_event['exdate'].dts[0].dt,
pytz.utc.localize(datetime.datetime(1996, 4, 2, 1, 0))
)
self.assertEqual(
first_event['exdate'].dts[1].dt,
pytz.utc.localize(datetime.datetime(1996, 4, 3, 1, 0))
)
self.assertEqual(
first_event['exdate'].dts[2].dt,
pytz.utc.localize(datetime.datetime(1996, 4, 4, 1, 0))
)
def test_recurrence_exdates_multiple_lines(self):
event = self.cal.walk('vevent')[1]
exdate = event['exdate']
# TODO: DOCUMENT BETTER!
# In this case we have multiple EXDATE definitions, one per line.
# Icalendar makes a list out of this instead of zipping it into one
# vDDDLists object. Actually, this feels correct for me, as it also
# allows to define different timezones per exdate line - but client
# code has to handle this as list and not blindly expecting to be able
# to call event['EXDATE'].to_ical() on it:
self.assertEqual(isinstance(exdate, list), True) # multiple EXDATE
self.assertEqual(exdate[0].to_ical(), b'20120529T100000')
# TODO: test for embedded timezone information!
| [
"mewsiof@gmail.com"
] | mewsiof@gmail.com |
d51070832466bbae325f25e103f2e00e81030c3e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02717/s273928982.py | 4e8c5d3b6646e3a0f9abc51aba12f955647e3323 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | A,B,C=[i for i in input().split(" ")]
print(" ".join([C,A,B]))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a9239f4187b6f66c91675499cc7ba3940726ddc5 | 9367ff3cc1ad4128e8c7e3559185b5ad86ab562e | /scripts/figures/plot_pg_dist.py | 3d31a1c637133fecf22cfefb975be7bc3796231d | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | mitdbg/treeline | b829b2e4dd5b114c03e59235391fd048a5244047 | 6a7c35fb38f5de93026d451a0ed2d3199de8ad7a | refs/heads/master | 2023-07-23T17:12:13.313496 | 2023-07-18T15:08:05 | 2023-07-18T15:08:05 | 373,646,092 | 94 | 8 | MIT | 2023-07-18T15:08:07 | 2021-06-03T21:30:18 | C++ | UTF-8 | Python | false | false | 2,041 | py | import conductor.lib as cond
import matplotlib.pyplot as plt
import pandas as pd
from plot_common import DATASET_COLORS, DATASET_MAP
plt.rcParams["font.size"] = 14
def normalize_data(data):
norm = data.copy()
norm["num_pages"] = norm["num_segments"] * norm["segment_page_count"]
norm["norm"] = norm["num_pages"] / norm["num_pages"].sum()
return norm
def load_data(data_dir):
results = {}
for file in data_dir.iterdir():
if file.suffix != ".csv":
continue
df = pd.read_csv(file)
norm = normalize_data(df)
results[file.stem] = norm
return results
def plot_dist(all_data, config, show_legend=False):
datasets = ["amzn", "osm", "synth"]
fig, axs = plt.subplots(1, 3, figsize=(6.5, 2.25), tight_layout=True, sharey=True)
handles = []
for dataset, ax in zip(datasets, axs):
norm = all_data["{}-{}-segs".format(dataset, config)]
xlabels = norm["segment_page_count"]
xpos = list(range(len(xlabels)))
h = ax.bar(xpos, norm["norm"] * 100, color=DATASET_COLORS[dataset])
handles.append(h)
ax.set_xticks(xpos, xlabels)
ax.set_ylim((0, 110))
ax.set_xlabel("Segment Size")
if dataset == "amzn":
ax.set_ylabel("Proportion (%)")
if show_legend:
fig.legend(
handles,
map(lambda d: DATASET_MAP[d], datasets),
fancybox=False,
edgecolor="#000",
fontsize="small",
loc="upper left",
bbox_to_anchor=(0.14, 0.92),
)
return fig
def main():
deps = cond.get_deps_paths()
assert len(deps) == 1
in_dir = deps[0]
out_dir = cond.get_output_path()
seg_dists = load_data(in_dir / "seg_dist")
fig = plot_dist(seg_dists, "64B", show_legend=True)
fig.savefig(out_dir / "pg_dist_64B.pdf")
plt.close(fig)
fig = plot_dist(seg_dists, "1024B")
fig.savefig(out_dir / "pg_dist_1024B.pdf")
plt.close(fig)
if __name__ == "__main__":
main()
| [
"geoffxy@mit.edu"
] | geoffxy@mit.edu |
6a7f38fa9a2ac069c47ae663559c52d12a956478 | 1a3732420facd1644cb61fc662b7646901e2f358 | /Django_Rest_Tutorial/env/bin/django-admin.py | 91cb77acbf62b80fa57541f3c5621d2ba8488dc9 | [] | no_license | Amanda-Wakefield/Projects | a46d911b431beedbbda987b86e2177dfe970ee1a | 7d267391520c5ea5fef1ba6e4a1da52f65866e56 | refs/heads/master | 2016-09-05T10:13:51.084236 | 2015-07-22T22:58:47 | 2015-07-22T22:58:47 | 39,464,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/home/amanda/Django_Rest_Tutorial/env/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"tud16919@temple.edu"
] | tud16919@temple.edu |
586e8552ba0e6b911410a52c8989eb48b31506c7 | 9eb7dd8ef7fbe22c435ce2856906fe45736e8cef | /Day03/ digit_letter_counter.py | 9ee12de0853b5a2d39d4231d779ddf0a7e3c1a15 | [] | no_license | iabdullahism/python-programming | a0bc3fe590790d2d389a747481069bd18f645ce4 | 4129d3f9e997e0f3c0d56e552dc49e8241e29182 | refs/heads/master | 2020-08-11T17:07:24.577278 | 2019-07-26T14:34:00 | 2019-07-26T14:34:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | """
Code Challenge
Name:
Digit Letter Counter
Filename:
digit_letter_counter.py
Problem Statement:
Write a Python program that accepts a string from User and calculate the number of digits and letters.
Hint:
Store the letters and Digits as keys in the dictionary
Input:
Python 3.2
Output:
Digits 2
Letters 6
"""
my_dict ={}
while(True):
user_input = input("Enter Keys and Inputs: ")
value = user_input.split(" ")
#split all the contents in one string
if user_input:
if ' '.join(value[:-1]) not in my_dict:
#connect two charactor into one and remove last values using[:-1]
my_dict[' '.join(value[:-1])]=float(value[-1])
#add into dictonary key and values
if not user_input:
break
for key,value in my_dict.items():
count1=0
count2=0
count3=0
for i in key:
if(i.isdigit()):
pass
count2=count2+1
for j in str(value):
if(j.isdigit()):
count1=count1+1
pass
print(key,value)
print("Letters is:",count2)
print("Digits is:=",count1)
print("\n")
| [
"rahulpandit151197@gmail.com"
] | rahulpandit151197@gmail.com |
a6daad4330bb0a531e150f6cbc030decbbccf430 | fae09433ae0985fd13940cb4efb105b9782197f3 | /Working Demonstration/RetinalModeling-Starburst-Growth/RetinalModeling-Mike-Starburst-Growth/StarburstMorphology.py | f885beda9fa590edba6be96bb621aefbe369d8c4 | [] | no_license | mikewesthad/RetinalModeling | 6b55504044587bfa886203041746ce9dedf632cc | a5c2d854d5716cba6d1a6232c66401e45943b8b3 | refs/heads/master | 2020-04-08T08:43:15.012285 | 2013-05-10T19:35:06 | 2013-05-10T19:35:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,287 | py | from random import uniform, randint, shuffle, choice
from igraph import Graph, ADJ_UNDIRECTED
import matplotlib.pyplot as plt
import numpy as np
import pygame
from pygame.locals import *
from Vector2D import Vector2D
from StarburstDendrite import DendriteSegment
from Compartment import Compartment
from Constants import *
class StarburstMorphology(object):
def __init__(self, retina, location=Vector2D(0.0, 0.0), average_wirelength=150*UM_TO_M,
radius_deviation=.1, min_branches=6, max_branches=6, heading_deviation=10,
step_size=10*UM_TO_M, max_segment_length=35*UM_TO_M, children_deviation=20,
dendrite_vision_radius=30*UM_TO_M, diffusion_width=0.5,
decay_rate=0.1, input_strength=0.0, color_palette=GOLDFISH,
draw_location=Vector2D(0.0,0.0), visualize_growth=True, scale=1.0,
display=None):
# General neuron variables
self.retina = retina
self.display = display
self.location = location
# Visualization variables
self.visualize_growth = visualize_growth
self.display = display
self.background_color = color_palette[0]
self.draw_location = draw_location
self.scale = scale
grid_size = retina.grid_size
# Wirelength variables
average_wirelength = average_wirelength / grid_size
max_wirelength = average_wirelength * (1.0+radius_deviation)
min_wirelength = average_wirelength * (1.0-radius_deviation)
self.bounding_radius = max_wirelength
# Dendrite variables
self.heading_deviation = heading_deviation
self.step_size = step_size / grid_size
self.max_segment_length = max_segment_length / grid_size
self.dendrite_vision_radius = dendrite_vision_radius / grid_size
# Initialize the first branches
number_dendrites = randint(min_branches, max_branches)
heading_spacing = 360.0 / number_dendrites
heading = 0.0
self.dendrites = []
for i in range(number_dendrites):
wirelength = uniform(min_wirelength, max_wirelength)
dendrite = DendriteSegment(self, self.location, heading, wirelength, wirelength,
children_deviation, self.dendrite_vision_radius)
self.dendrites.append(dendrite)
heading += heading_spacing
# Slicing needed to force a copy of the elements (instead of creating a reference to a list)
# Note: this only works if the lists are not nested (if they are, use deepcopy)
self.master_dendrites = self.dendrites[:]
self.grow()
self.colorDendrites(color_palette[1:])
self.discretize(1.0)
self.createPoints()
self.establishPointSynapses()
self.compartmentalize(30)
self.colorCompartments(color_palette[1:])
self.establishCompartmentSynapses()
self.buildCompartmentBoundingPolygons()
self.decay_rate = decay_rate
self.input_strength = input_strength
self.diffusion_width = diffusion_width #Units
self.establisthDiffusionWeights()
def grow(self):
active_dendrites = self.master_dendrites[:]
running = True
i = 0
clock = pygame.time.Clock()
while running and active_dendrites != []:
# Grab a dendrite and update it
dendrite = active_dendrites[i]
is_growing, children = dendrite.grow()
# If it isn't growing, delete it and adjust index
if not(is_growing):
del active_dendrites[i]
i -= 1
# If it had children, add them to the active list
if children != []:
for child in children:
active_dendrites.insert(0, child)
self.dendrites.append(child)
# Increment index
i += 1
if i >= len(active_dendrites): i=0
if self.visualize_growth:
self.display.fill(self.background_color)
self.draw(self.display, new_location=self.draw_location,
draw_segments=True, scale=self.scale)
pygame.display.update()
clock.tick(30)
# Check for close button signal from pygame window
for event in pygame.event.get():
if event.type == QUIT: running = False
def colorCompartments(self, palette):
colors = palette
index = 0
for compartment in self.master_compartments:
compartment.colorCompartments(colors, index)
index += 1
if index >= len(colors): index = 0
def colorDendrites(self, palette):
colors = palette
index = 0
for dendrite in self.master_dendrites:
dendrite.colorDendrites(colors, index)
index += 1
if index >= len(colors): index = 0
def establisthDiffusionWeights(self):
self.buildGraph()
self.distances = self.findShortestPathes()
# Perform e^(-distance**2/width) on each element in the distance matrix
sigma = self.diffusion_width
self.diffusion_weights = np.exp(-(self.distances)**2/(2.0*sigma**2.0))
# Get the sum of each row
row_sum = np.sum(self.diffusion_weights, 1)
# Reshape the rowSum into a column vector since sum removes a dimension
row_sum.shape = (len(self.compartments), 1)
# Normalize the weight matrix
self.diffusion_weights = self.diffusion_weights / row_sum
def findShortestPathes(self):
shortest_pathes = np.array(self.graph.shortest_paths())
# Directly connect each compartment with itself (0 distance)
for i in range(len(self.compartments)):
shortest_pathes[i, i] = 0
return shortest_pathes
def buildGraph(self):
adjacency = []
for compartment in self.compartments:
row = []
for other_compartment in self.compartments:
proximal_neighbors = (compartment in other_compartment.proximal_neighbors)
distal_neighbors = (compartment in other_compartment.distal_neighbors)
if proximal_neighbors or distal_neighbors:
row.append(1)
else:
row.append(0)
adjacency.append(row)
self.adjacency = adjacency
self.graph = Graph.Adjacency(adjacency, mode=ADJ_UNDIRECTED)
def buildCompartmentBoundingPolygons(self):
for compartment in self.compartments:
compartment.buildBoundingPolgyon()
def establishCompartmentSynapses(self):
for compartment in self.compartments:
compartment.buildNeurotransmitterWeights()
def establishPointSynapses(self):
inputs = set([GLU])
outputs = set([GABA, ACH])
output_threshold_wirelength = 2.0/3.0 * self.bounding_radius
for point in self.points:
if point.wirelength >= output_threshold_wirelength:
point.neurotransmitters_released = outputs.copy() # SHALLOW COPY!
point.neurotransmitters_accepted = inputs.copy() # SHALLOW COPY!
def compartmentalize(self, compartment_size):
self.compartments = []
# Build the master compartments recursively
self.master_compartments = []
for dendrite in self.master_dendrites:
compartment = Compartment(self)
self.master_compartments.append(compartment)
# Recursively compartmentalize starting from the master compartments
for index in range(len(self.master_compartments)):
compartment = self.master_compartments[index]
proximal_compartments = []
for other_compartment in self.master_compartments:
if compartment != other_compartment:
proximal_compartments.append(other_compartment)
dendrite = self.master_dendrites[index]
dendrite.compartmentalize(compartment, compartment_size, compartment_size,
prior_compartments=proximal_compartments)
def discretize(self, delta):
for dendrite in self.master_dendrites:
dendrite.discretize(delta=delta)
def createPoints(self):
self.points = []
for dendrite in self.master_dendrites:
dendrite.createPoints(self.location, 0.0)
def draw(self, surface, scale=1.0, new_location=None, draw_segments=False,
draw_compartments=False, draw_points=False):
# Shift the cell's location
if new_location == None:
new_location = self.location
old_location = self.location
self.location = new_location
if draw_segments:
for dendrite in self.dendrites:
dendrite.draw(surface, scale=scale)
elif draw_compartments:
for compartment in self.compartments:
compartment.draw(surface, scale=scale)
elif draw_points:
for point in self.points:
point.draw(surface, scale=scale)
# Shift the cell's location back to the original
self.location = old_location
def plotBranchProbability(self):
xs = np.arange(0, self.max_segment_length, 0.1)
ys = [self.branchProbability(x) for x in xs]
plt.plot(xs,ys)
plt.title("Branching as a Function of Wirelength")
plt.xlabel("Fraction of Max Wirelength")
plt.ylabel("Branch Probability")
plt.grid(True)
plt.show()
def branchProbability(self, segment_length):
return 1.05**(segment_length-self.max_segment_length)
# .............................................................................
# Old functions that may not be used?
# .............................................................................
def rescale(self, scale_factor):
for dendrite in self.dendrites:
dendrite.rescale(scale_factor)
def findCentroid(self):
average_location = Vector2D(0.0, 0.0)
number_locations = 0.0
for dendrite in self.dendrites:
for location in dendrite.locations:
average_location += location
number_locations += 1.0
average_location /= number_locations
soma_to_average = average_location.distanceTo(Vector2D(0.0,0.0))
soma_to_average_fraction = soma_to_average / self.bounding_radius
print "Cell Centroid:\t\t\t\t\t", average_location
print "Number of Dendrite Points (before discretization):\t\t{0:,.0f}".format(number_locations)
print "Linear Distance from Soma to Centroid:\t\t\t{0:.3f}".format(soma_to_average)
print "Linear Distance from Soma to Centroid Normalized by Radius:\t{0:.3%}".format(soma_to_average_fraction)
print
def animateCompartments(self, surface):
compartments_to_draw = self.master_compartments[:]
# compartments_to_draw = [choice(self.compartments)]
running = True
next_iteration = False
while running:
surface.fill((255,255,255))
for event in pygame.event.get():
if event.type == QUIT:
running = False
if event.type == KEYDOWN:
next_iteration = True
for c in self.compartments:
old_color = c.color
c.color = (0,0,0)
c.draw(surface)
c.color = old_color
for c in compartments_to_draw:
c.draw(surface)
pygame.display.update()
if next_iteration:
print "<<<<<<<<<<<<<<<<<<<"
for c in compartments_to_draw:
print c.index,
print
distal_neighbors = []
for c in compartments_to_draw:
distal_neighbors.extend(c.distal_neighbors)
# distal_neighbors.extend(c.proximal_neighbors)
compartments_to_draw = distal_neighbors
print "==================="
for c in compartments_to_draw:
print c.index,
print
print ">>>>>>>>>>>>>>>>>>>"
next_iteration = False
| [
"mikewesthad@gmail.com"
] | mikewesthad@gmail.com |
e20d78120e7e8e868e2796fbd0ad91445e24f16a | eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e | /atcoder/abc/abc101-200/abc170/d.py | 6f7baffcc4792acd400908f7b89a52f59bfb752e | [] | no_license | reo11/AtCoder | 4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9 | 69c6d67f05cb9190d8fb07204488cd7ce4d0bed2 | refs/heads/master | 2023-08-28T10:54:50.859288 | 2023-08-22T18:52:47 | 2023-08-22T18:52:47 | 162,085,118 | 4 | 0 | null | 2023-07-01T14:17:28 | 2018-12-17T06:31:10 | Python | UTF-8 | Python | false | false | 357 | py | import sys
from collections import Counter
input = sys.stdin.readline
MAX = 1000001
dp = [True for _ in range(MAX)]
n = int(input())
a = list(map(int, input().split()))
cnt = Counter(a)
a = sorted(list(set(a)))
ans = 0
for v in a:
if cnt[v] <= 1 and dp[v]:
ans += 1
m = v
while m < MAX:
dp[m] = False
m += v
print(ans)
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
5c1825d0e3f94426431d89cffacf3ceb9ed7a923 | 3fca0f24fe91abfaeea1992453fae917e84d460d | /airflow/providers/presto/hooks/presto.py | 709a378a8d89f49fc5ae4494cb97628e9cc272bf | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | john-bodley/incubator-airflow | eba853453b59e7902737f73bb6c9a56fdec050a4 | d7777bb910d9d5eac40849cc27993676ed0da7f6 | refs/heads/main | 2022-08-09T19:38:25.069559 | 2022-07-22T22:43:27 | 2022-07-22T22:43:27 | 59,783,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,882 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import warnings
from typing import Any, Callable, Iterable, List, Mapping, Optional, Union, overload
import prestodb
from prestodb.exceptions import DatabaseError
from prestodb.transaction import IsolationLevel
from airflow import AirflowException
from airflow.configuration import conf
from airflow.models import Connection
from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
try:
from airflow.utils.operator_helpers import DEFAULT_FORMAT_PREFIX
except ImportError:
# This is from airflow.utils.operator_helpers,
# For the sake of provider backward compatibility, this is hardcoded if import fails
# https://github.com/apache/airflow/pull/22416#issuecomment-1075531290
DEFAULT_FORMAT_PREFIX = 'airflow.ctx.'
def generate_presto_client_info() -> str:
"""Return json string with dag_id, task_id, execution_date and try_number"""
context_var = {
format_map['default'].replace(DEFAULT_FORMAT_PREFIX, ''): os.environ.get(
format_map['env_var_format'], ''
)
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()
}
# try_number isn't available in context for airflow < 2.2.5
# https://github.com/apache/airflow/issues/23059
try_number = context_var.get('try_number', '')
task_info = {
'dag_id': context_var['dag_id'],
'task_id': context_var['task_id'],
'execution_date': context_var['execution_date'],
'try_number': try_number,
'dag_run_id': context_var['dag_run_id'],
'dag_owner': context_var['dag_owner'],
}
return json.dumps(task_info, sort_keys=True)
class PrestoException(Exception):
"""Presto exception"""
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == 'false':
return False
elif value.lower() == 'true':
return True
return value
class PrestoHook(DbApiHook):
"""
Interact with Presto through prestodb.
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
conn_type = 'presto'
hook_name = 'Presto'
def get_conn(self) -> Connection:
"""Returns a connection object"""
db = self.get_connection(self.presto_conn_id) # type: ignore[attr-defined]
extra = db.extra_dejson
auth = None
if db.password and extra.get('auth') == 'kerberos':
raise AirflowException("Kerberos authorization doesn't support password.")
elif db.password:
auth = prestodb.auth.BasicAuthentication(db.login, db.password)
elif extra.get('auth') == 'kerberos':
auth = prestodb.auth.KerberosAuthentication(
config=extra.get('kerberos__config', os.environ.get('KRB5_CONFIG')),
service_name=extra.get('kerberos__service_name'),
mutual_authentication=_boolify(extra.get('kerberos__mutual_authentication', False)),
force_preemptive=_boolify(extra.get('kerberos__force_preemptive', False)),
hostname_override=extra.get('kerberos__hostname_override'),
sanitize_mutual_error_response=_boolify(
extra.get('kerberos__sanitize_mutual_error_response', True)
),
principal=extra.get('kerberos__principal', conf.get('kerberos', 'principal')),
delegate=_boolify(extra.get('kerberos__delegate', False)),
ca_bundle=extra.get('kerberos__ca_bundle'),
)
http_headers = {"X-Presto-Client-Info": generate_presto_client_info()}
presto_conn = prestodb.dbapi.connect(
host=db.host,
port=db.port,
user=db.login,
source=db.extra_dejson.get('source', 'airflow'),
http_headers=http_headers,
http_scheme=db.extra_dejson.get('protocol', 'http'),
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema,
auth=auth,
isolation_level=self.get_isolation_level(), # type: ignore[func-returns-value]
)
if extra.get('verify') is not None:
# Unfortunately verify parameter is available via public API.
# The PR is merged in the presto library, but has not been released.
# See: https://github.com/prestosql/presto-python-client/pull/31
presto_conn._http_session.verify = _boolify(extra['verify'])
return presto_conn
def get_isolation_level(self) -> Any:
"""Returns an isolation level"""
db = self.get_connection(self.presto_conn_id) # type: ignore[attr-defined]
isolation_level = db.extra_dejson.get('isolation_level', 'AUTOCOMMIT').upper()
return getattr(IsolationLevel, isolation_level, IsolationLevel.AUTOCOMMIT)
@overload
def get_records(self, sql: str = "", parameters: Optional[dict] = None):
"""Get a set of records from Presto
:param sql: SQL statement to be executed.
:param parameters: The parameters to render the SQL query with.
"""
@overload
def get_records(self, sql: str = "", parameters: Optional[dict] = None, hql: str = ""):
""":sphinx-autoapi-skip:"""
def get_records(self, sql: str = "", parameters: Optional[dict] = None, hql: str = ""):
""":sphinx-autoapi-skip:"""
if hql:
warnings.warn(
"The hql parameter has been deprecated. You should pass the sql parameter.",
DeprecationWarning,
stacklevel=2,
)
sql = hql
try:
return super().get_records(self.strip_sql_string(sql), parameters)
except DatabaseError as e:
raise PrestoException(e)
@overload
def get_first(self, sql: str = "", parameters: Optional[dict] = None) -> Any:
"""Returns only the first row, regardless of how many rows the query returns.
:param sql: SQL statement to be executed.
:param parameters: The parameters to render the SQL query with.
"""
@overload
def get_first(self, sql: str = "", parameters: Optional[dict] = None, hql: str = "") -> Any:
""":sphinx-autoapi-skip:"""
def get_first(self, sql: str = "", parameters: Optional[dict] = None, hql: str = "") -> Any:
""":sphinx-autoapi-skip:"""
if hql:
warnings.warn(
"The hql parameter has been deprecated. You should pass the sql parameter.",
DeprecationWarning,
stacklevel=2,
)
sql = hql
try:
return super().get_first(self.strip_sql_string(sql), parameters)
except DatabaseError as e:
raise PrestoException(e)
@overload
def get_pandas_df(self, sql: str = "", parameters=None, **kwargs):
"""Get a pandas dataframe from a sql query.
:param sql: SQL statement to be executed.
:param parameters: The parameters to render the SQL query with.
"""
@overload
def get_pandas_df(self, sql: str = "", parameters=None, hql: str = "", **kwargs):
""":sphinx-autoapi-skip:"""
def get_pandas_df(self, sql: str = "", parameters=None, hql: str = "", **kwargs):
""":sphinx-autoapi-skip:"""
if hql:
warnings.warn(
"The hql parameter has been deprecated. You should pass the sql parameter.",
DeprecationWarning,
stacklevel=2,
)
sql = hql
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self.strip_sql_string(sql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(e)
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data, **kwargs)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame(**kwargs)
return df
    @overload
    def run(
        self,
        sql: Union[str, Iterable[str]],
        autocommit: bool = False,
        parameters: Optional[Union[Iterable, Mapping]] = None,
        handler: Optional[Callable] = None,
        split_statements: bool = False,
        return_last: bool = True,
    ) -> Optional[Union[Any, List[Any]]]:
        """Execute the statement against Presto. Can be used to create views."""
    @overload
    def run(
        self,
        sql: Union[str, Iterable[str]],
        autocommit: bool = False,
        parameters: Optional[Union[Iterable, Mapping]] = None,
        handler: Optional[Callable] = None,
        split_statements: bool = False,
        return_last: bool = True,
        hql: str = "",
    ) -> Optional[Union[Any, List[Any]]]:
        """:sphinx-autoapi-skip:"""
    def run(
        self,
        sql: Union[str, Iterable[str]],
        autocommit: bool = False,
        parameters: Optional[Union[Iterable, Mapping]] = None,
        handler: Optional[Callable] = None,
        split_statements: bool = False,
        return_last: bool = True,
        hql: str = "",
    ) -> Optional[Union[Any, List[Any]]]:
        """:sphinx-autoapi-skip:"""
        # Deprecated `hql` argument overrides `sql`, with a warning.
        if hql:
            warnings.warn(
                "The hql parameter has been deprecated. You should pass the sql parameter.",
                DeprecationWarning,
                stacklevel=2,
            )
            sql = hql
        # All real work (statement splitting, autocommit handling, result
        # collection) is delegated to the base DB-API hook.
        return super().run(
            sql=sql,
            autocommit=autocommit,
            parameters=parameters,
            handler=handler,
            split_statements=split_statements,
            return_last=return_last,
        )
def insert_rows(
self,
table: str,
rows: Iterable[tuple],
target_fields: Optional[Iterable[str]] = None,
commit_every: int = 0,
replace: bool = False,
**kwargs,
) -> None:
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:param rows: The rows to insert into the table
:param target_fields: The names of the columns to fill in the table
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:param replace: Whether to replace instead of insert
"""
if self.get_isolation_level() == IsolationLevel.AUTOCOMMIT:
self.log.info(
'Transactions are not enable in presto connection. '
'Please use the isolation_level property to enable it. '
'Falling back to insert all rows in one transaction.'
)
commit_every = 0
super().insert_rows(table, rows, target_fields, commit_every)
| [
"noreply@github.com"
] | noreply@github.com |
de713e5c295ae8c0bb8e2d6b4f55ac6239d2cecc | d980ba52910c9ccf77a97026f4b9af4078c6b822 | /mysql.py | 57519d680bef3bd2bc37f3f224dc91a875399544 | [
"MIT"
] | permissive | KirieHaruna/web_scanner | bee0f18e17da3be6ca6b10771d5e7836beb570a3 | 4ff69f74c79c2a97b30f58b5879654f6db1aaa42 | refs/heads/master | 2022-05-27T08:18:57.317237 | 2020-04-28T08:34:27 | 2020-04-28T08:34:27 | 255,586,272 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | import pymysql
def creat():
    """(Re)create the BUGINFO results table in the local `sakila` database.

    Drops any existing BUGINFO table first, so previous scan results are lost.
    NOTE(review): credentials are hard-coded; move them to config/env.
    """
    db = pymysql.connect("localhost", "root", "123456", "sakila")
    try:
        cursor = db.cursor()
        cursor.execute("DROP TABLE IF EXISTS BUGINFO")
        sql = """CREATE TABLE BUGINFO (
             URL  CHAR(100) NOT NULL,
             PAYLOAD  CHAR(50),
             INJECTABLE CHAR(5),
             CVSS CHAR(10),
             PARAMETER CHAR(20) )"""
        cursor.execute(sql)
    finally:
        # Bug fix: the connection used to leak if table creation raised.
        db.close()
def insert(url, payload, injectable, cvss, parameter):
    """Insert one scan finding into BUGINFO.

    Security fix: the original built the INSERT by string concatenation with
    hand-rolled quote escaping (and accidentally inserted a leading space
    into URL). A parameterized query lets the driver quote every value,
    which is injection-safe regardless of the payload's contents.
    NOTE(review): credentials are hard-coded; move them to config/env.
    """
    db = pymysql.connect("localhost", "root", "123456", "sakila")
    try:
        cursor = db.cursor()
        sql = ("INSERT INTO BUGINFO(URL, PAYLOAD, INJECTABLE, CVSS, PARAMETER) "
               "VALUES(%s, %s, %s, %s, %s)")
        try:
            cursor.execute(sql, (url, payload, injectable, cvss, parameter))
            db.commit()
        except Exception as err:
            # Best-effort logging, then undo the partial transaction.
            print(err)
            print(sql)
            db.rollback()
    finally:
        # Bug fix: the connection used to leak if commit/rollback raised.
        db.close()
| [
"noreply@github.com"
] | noreply@github.com |
b35168230eab80e6c3898fbb27a2e8ba27fcc281 | c3a46ea8c3b726c136ce3032686b010c711276a9 | /proto_parse.py | 26d32a9d865ba2e618c941b56ad82f5b21767385 | [] | no_license | silicontrip/bpmdj-tools | b7623a97165337718c2c0cd9e2c64ea1db51bda6 | 6a890b7d0e4f07ada0f51cce3b0fa89580d26cff | refs/heads/master | 2020-04-08T17:37:25.343215 | 2018-11-29T02:02:06 | 2018-11-29T02:02:06 | 159,574,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,920 | py | #!/usr/bin/python
import sys
import struct
# Helper functions ------------------------------------------------------------
# this comes largely straight out of the google protocol-buffers code for DecodeVarint(internal\decoder.py)
# with a few tweaks to make it work for me
def readVarInt(buffer, pos):
    """Decode a protobuf base-128 varint from `buffer` starting at `pos`.

    The result is sign-extended from 64 bits, matching protobuf int64
    semantics (e.g. ten bytes ff..ff 01 decode to -1).

    Returns (value, new position, number of bytes consumed).
    Raises ValueError if the varint would exceed 64 bits.
    """
    mask = (1 << 64) - 1
    result = 0
    shift = 0
    startPos = pos
    while 1:
        b = ord(buffer[pos])
        result |= ((b & 0x7f) << shift)
        pos += 1
        if not (b & 0x80):  # high bit clear: this was the final byte
            if result > 0x7fffffffffffffff:
                # Bit 63 set: reinterpret as negative two's-complement int64.
                result -= (1 << 64)
                result |= ~mask
            else:
                result &= mask
            return (result, pos, pos - startPos)
        shift += 7
        if shift >= 64:
            # Bug fix: previously raised the undefined name `Error`, which
            # itself crashed with NameError on malformed input.
            raise ValueError('Too many bytes when decoding varint.')
def readQWORD(d, pos):
    """Read a little-endian unsigned 64-bit integer at `pos` in `d`.

    Returns (value, pos + 8), or (None, pos) unchanged when the buffer is
    too short (best-effort: the error is printed, not raised).
    """
    try:
        v = struct.unpack("<Q", d[pos:pos+8])[0]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.  print() calls are valid in Python 2 and 3.
        print("Exception in readQWORD")
        print(sys.exc_info())
        return (None, pos)
    pos += 8
    return (v, pos)
def readDWORD(d, pos):
    """Read a little-endian unsigned 32-bit integer at `pos` in `d`.

    Returns (value, pos + 4), or (None, pos) unchanged when the buffer is
    too short (best-effort: the error is printed, not raised).
    """
    try:
        v = struct.unpack("<L", d[pos:pos+4])[0]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.  print() calls are valid in Python 2 and 3.
        print("Exception in readDWORD")
        print(sys.exc_info())
        return (None, pos)
    pos += 4
    return (v, pos)
def readBYTE(d, pos):
    """Read one unsigned byte at `pos` in `d`.

    Returns (value, pos + 1), or (None, pos) unchanged when the buffer is
    too short (best-effort: the error is printed, not raised).
    """
    try:
        v = struct.unpack("<B", d[pos:pos+1])[0]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.  print() calls are valid in Python 2 and 3.
        print("Exception in readBYTE")
        print(sys.exc_info())
        return (None, pos)
    pos += 1
    return (v, pos)
# returns (value, new position, data type, field ID, and value's length)
def readField(d, pos):
    """Decode one protobuf key/value pair starting at `pos` in `d`.

    Returns (value, new position, wire type, field number, value length).
    Wire types: 0 varint, 1 fixed64, 2 length-delimited, 5 fixed32.
    """
    # The field key is itself a varint packing the wire type in its low
    # 3 bits and the field number above them.  Fix: read it as a varint --
    # the old single-byte read broke for field numbers > 15.
    (key, p, _keylen) = readVarInt(d, pos)
    datatype = key & 7
    fieldnum = key >> 3
    if datatype == 0:  # varint
        (v, p, l) = readVarInt(d, p)
        return (v, p, datatype, fieldnum, l)
    elif datatype == 1:  # 64-bit little-endian
        (v, p) = readQWORD(d, p)
        return (v, p, datatype, fieldnum, 8)
    elif datatype == 2:  # length-delimited string/blob: length varint first
        (v, p, l) = readVarInt(d, p)
        return (d[p:p+v], p+v, datatype, fieldnum, v)
    elif datatype == 5:  # 32-bit little-endian
        (v, p) = readDWORD(d, p)
        return (v, p, datatype, fieldnum, 4)
    else:
        # Wire types 3/4 (groups) and anything else are unsupported.
        print("Unknown type: %d [%x]\n" % (datatype, pos))
        return (None, p, datatype, fieldnum, 1)
# PARSERS ---------------------------------------------------------------------
# Parse DescriptorProto field
def PrintDescriptorProto(data, size, prefix):
    """Print a serialized DescriptorProto (a `message`) as .proto source.

    `data` is the raw message bytes, `size` its length, `prefix` the
    indentation string for this nesting level.
    """
    pos = 0
    while pos < size:
        (d, p, t, fid, l) = readField(data, pos);
        pos = p
        if fid == 1: print "%smessage %s {" % (prefix,d)
        elif fid == 2: PrintFieldDescriptorProto(d, l, prefix+"\t") # FieldDescriptorProto
        elif fid == 3: PrintDescriptorProto(d, l, prefix+"\t") # DescriptorProto
        elif fid == 4: PrintEnumDescriptorProto(d, l, prefix+"\t") # EnumDescriptorProto
        elif fid == 5:
            print "%sextension_range:" % (prefix)
            # NOTE(review): recurses with the generic message printer; an
            # ExtensionRange-specific printer (PrintExtensionRange) exists.
            PrintDescriptorProto(d, l, prefix+"\t") # ExtensionRange
        elif fid == 6: print "%sextension: %s" % (prefix,d) # FieldDescriptorProto
        elif fid == 7: print "%soptions: %s" % (prefix,d) # MessageOptions
        else: print "***UNKNOWN fid in PrintDescriptorProto %d" % fid
    print "%s}" % prefix
# Parse EnumDescriptorProto
def PrintEnumDescriptorProto(data, size, prefix):
    """Print a serialized EnumDescriptorProto (an `enum`) as .proto source."""
    pos = 0
    while pos < size:
        (d, p, t, fid, l) = readField(data, pos);
        pos = p
        if fid == 1: print "%senum %s {" % (prefix,d)
        elif fid == 2: PrintEnumValueDescriptorProto(d, l, prefix+"\t") # EnumValueDescriptorProto
        elif fid == 3: # EnumOptions
            print "%soptions" % prefix
        # NOTE(review): error message names the wrong function (copy/paste).
        else: print "***UNKNOWN fid in PrintDescriptorProto %d" % fid
    print "%s};" % prefix
# Parse EnumValueDescriptorProto
def PrintEnumValueDescriptorProto(data, size, prefix):
    """Print one serialized enum value as `NAME = NUMBER;`."""
    pos = 0
    enum = {"name": None, "number": None}
    while pos < size:
        (d, p, t, fid, l) = readField(data, pos);
        pos = p
        if fid == 1: enum['name'] = d
        elif fid == 2: enum['number'] = d
        elif fid == 3: # EnumValueOptions
            print "%soptions: %s" % (prefix,d)
        # NOTE(review): error message names the wrong function (copy/paste).
        else: print "***UNKNOWN fid in PrintDescriptorProto %d" % fid
    print "%s%s = %s;" % (prefix, enum['name'], enum['number'])
# Parse FieldDescriptorProto
def PrintFieldDescriptorProto(data, size, prefix):
    """Print one serialized FieldDescriptorProto as a .proto field line,
    e.g. `<prefix> optional int32 foo = 1 [DEFAULT = 0];`."""
    pos = 0
    field = {"name": None, "extendee": None, "number": None, "label": None, "type": None, "type_name": None, "default_value": None, "options": None}
    while pos < size:
        (d, p, t, fid, l) = readField(data, pos)
        pos = p
        if fid == 1: field['name'] = d
        elif fid == 2: field['extendee'] = d
        elif fid == 3: field['number'] = d
        elif fid == 4:
            # Label enum: 1=optional, 2=required, 3=repeated.
            if d == 1: field['label'] = "optional"
            elif d == 2: field['label'] = "required"
            elif d == 3: field['label'] = "repeated"
            # Bug fix: the format string was missing the %s for `prefix`,
            # so this branch raised TypeError instead of printing.
            else: print("%s{{Label: UNKNOWN (%d)}}" % (prefix, d))
        elif fid == 5:
            # FieldDescriptorProto.Type enum -> .proto type keyword.
            types = {1: "double",
                     2: "float",
                     3: "int64",
                     4: "uint64",
                     5: "int32",
                     6: "fixed64",
                     7: "fixed32",
                     8: "bool",
                     9: "string",
                     10: "group",
                     11: "message",
                     12: "bytes",
                     13: "uint32",
                     14: "enum",
                     15: "sfixed32",
                     16: "sfixed64",
                     17: "sint32",
                     18: "sint64"}
            if d not in types:
                print("%sType: UNKNOWN(%d)" % (prefix, d))
            else:
                field['type'] = types[d]
        elif fid == 6: field["type_name"] = d
        elif fid == 7: field["default_value"] = d
        elif fid == 8: field["options"] = d
        else: print("***UNKNOWN fid in PrintFieldDescriptorProto %d" % fid)
    # Assemble the declaration from whichever pieces were present.
    output = prefix
    if field['label'] is not None: output += " %s" % field['label']
    output += " %s" % field['type']
    output += " %s" % field['name']
    output += " = %d" % field['number']
    if field['default_value']: output += " [DEFAULT = %s]" % field['default_value']
    output += ";"
    print(output)
# Parse ExtensionRange field
def PrintExtensionRange(data, size, prefix):
    """Print a DescriptorProto.ExtensionRange (start/end field numbers)."""
    pos = 0
    while pos < size:
        (d, p, t, fid, l) = readField(data, pos);
        pos = p
        # Debug trace of every decoded field in the range message.
        print "%stype %d, field %d, length %d" % (prefix, t, fid, l)
        if fid == 1: print "%sstart: %d" % (prefix,d)
        elif fid == 2: print "%send: %d" % (prefix,d)
        else: print "***UNKNOWN fid in PrintExtensionRange %d" % fid
def PrintFileOptions(data, size, prefix):
    """Print a serialized FileOptions message as .proto `option ...` lines."""
    pos = 0
    while pos < size:
        (d, p, t, fid, l) = readField(data, pos);
        pos = p
        if fid == 1: print "%soption java_package = \"%s\";" % (prefix,d)
        elif fid == 8: print "%soption java_outer_classname = \"%s\"" % (prefix,d)
        elif fid == 10: print "%soption java_multiple_files = %d" % (prefix,d)
        elif fid == 20: print "%soption java_generate_equals_and_hash = %d" % (prefix,d)
        elif fid == 9: print "%soption optimize_for = %d" % (prefix,d)
        elif fid == 16: print "%soption cc_generic_services = %d" % (prefix,d)
        elif fid == 17: print "%soption java_generic_services = %d" % (prefix,d)
        elif fid == 18: print "%soption py_generic_services = %d" % (prefix,d)
        elif fid == 999: print "%soption uninterpreted_option = \"%s\"" % (prefix,d) # UninterpretedOption
        else: print "***UNKNOWN fid in PrintFileOptions %d" % fid
# -----------------------------------------------------------------------------
# Main function.
def ParseProto(filename, offset, size):
    """Pretty-print a serialized FileDescriptorProto embedded in a binary.

    Reads `size` bytes at `offset` in `filename` and prints .proto-style
    source reconstructed from the descriptor.  Returns an empty dict.
    """
    f = open(filename, "rb").read()
    data = f[offset:offset+size]
    pos = 0
    while pos < size:
        (d, p, t, fid, l) = readField(data, pos);
        pos = p
        #print "type %d, field %d, length %d" % (t, fid, l)
        if fid == 1: print "// source filename: %s" % d
        elif fid == 2: print "package %s;" % d
        elif fid == 3: print "import \"%s\"" % d
        elif fid == 4: PrintDescriptorProto(d, l, "")
        elif fid == 5: print "EnumDescriptorProto: %s" % d
        elif fid == 6: print "ServiceDescriptorProto: %s" % d
        elif fid == 7: print "FieldDescriptorProto: %s" % d
        elif fid == 8: PrintFileOptions(d, l, "")
        else: print "***UNKNOWN fid in ParseProto %d" % fid
    return {}
# main
if __name__ == "__main__":
    # Usage: proto_parse.py <binaryfile> <offset> <size>
    if len(sys.argv) != 4:
        print "Usage: %s binaryfile offset size" % sys.argv[0]
        sys.exit(0)
    ParseProto(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
| [
"mjpeg0@silicontrip.org"
] | mjpeg0@silicontrip.org |
bbee8a3bfa82f347f64cd2ead29a20eb61dd68ba | 5df2ab174c9ffa966376e04d04648bdff2b564cb | /Python_Book/part2_data/scatter_squares.py | d9346fe2b318b9d2e8f2b9db3803f85022c87880 | [] | no_license | ingoglia/python_work | 6c518c795030ec05ce6124547d6eb6dca5c31973 | c99963c701d50d6cf0d52b9affb11092dec97a0e | refs/heads/master | 2021-01-01T15:34:06.689019 | 2017-09-02T18:27:26 | 2017-09-02T18:27:26 | 97,644,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | import matplotlib.pyplot as plt
x_values = list(range(1, 1001))
y_values = [x**2 for x in x_values]
plt.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Blues,
edgecolor='none', s=40)
# Set chart title and label axes.
plt.title("Square Numbers", fontsize=24)
plt.xlabel("Value", fontsize=14)
plt.ylabel("Square of Value", fontsize=14)
# Set size of tick labels.
plt.tick_params(axis='both', which='major', labelsize=14)
# Set the range for each axis.
plt.axis([0, 1100, 0, 1100000])
plt.savefig('squares_plot.png', bbox_inches='tight')
| [
"mrnobody@nowhere.special"
] | mrnobody@nowhere.special |
3002d3ecdd09c0ed835fd4a309d107f444c02bda | 29a00bb16146dde9c8512cc6479d2eabae8d6357 | /treehacks/lib/python3.6/stat.py | bb198db2c5dd9d4d3ce6192c0665f0669e089eb4 | [] | no_license | KatieMishra/Treehacks | 0ffe2ca82fbca792f7010831763d148e93701e43 | e49996109b09bfbaf9ef0caeea9c39e0beb37952 | refs/heads/master | 2020-04-23T06:08:13.212775 | 2019-02-17T12:25:51 | 2019-02-17T12:25:51 | 170,963,196 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | /Users/JenniferHe/anaconda/lib/python3.6/stat.py | [
"jenhe2000@yahoo.com"
] | jenhe2000@yahoo.com |
8daa0d6edb6393d5d6418711516b72ba4cda6421 | cea0e1d9e39b0be9ad699bc8e6b537226616c6a3 | /day7/day7.py | d236b41df2d397ce0c45e6ad83bb33e1fd965c5f | [] | no_license | leonmbauer/Advent-Of-Code-2020 | c51cffffe06ea627c5e798aaaa6c5623c474d85b | 77595563da71177a9814f5dd31e68b7a6ced10ba | refs/heads/main | 2023-01-30T13:37:12.743900 | 2020-12-15T16:47:43 | 2020-12-15T16:47:43 | 317,654,635 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | # Puzzle link: https://adventofcode.com/2020/day/7
inputfile = open("D:\coding\Advent of Code 2020\day7\day7input.txt", "r")
lines = inputfile.readlines()
rules = []
totalbags = []
for value in lines:
rules.append(value.strip("\n"))
def day7(rules, color):
acceptsbag = []
splitrule = ""
for rule in rules:
splitrule = rule.split("bags")[1:]
for split in splitrule:
if color in split:
if rule.split("bags")[0].rstrip(" ") not in acceptsbag:
acceptsbag.append(rule.split("bags")[0].rstrip(" "))
for bag in acceptsbag:
totalbags.append(bag)
day7(rules, bag)
day7(rules, "shiny gold")
print(len(set(totalbags))) | [
"lbauer822@gmail.com"
] | lbauer822@gmail.com |
e6e308bdd1e7dd283f4399a15d8fb8554aaab6dd | 8d97abead3d1e7fb1a77cf77da2c237c507e1599 | /pureFreshProj/pureFreshProj/asgi.py | 668ad6ea6a347828e0a8f8785e87894b6845b505 | [] | no_license | techiesoul8/pureFreshRepo | f8b5d5828149835593408cf3c9c6d904cabcc96e | 9b41710c8ed22c9ab87617dc0e6e0df2bd731aa7 | refs/heads/master | 2021-09-24T22:14:33.675072 | 2020-02-01T20:51:08 | 2020-02-01T20:51:08 | 236,665,625 | 0 | 0 | null | 2021-09-22T18:28:47 | 2020-01-28T05:32:06 | HTML | UTF-8 | Python | false | false | 403 | py | """
ASGI config for pureFreshProj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pureFreshProj.settings')

# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| [
"techiesoul8@gmail.com"
] | techiesoul8@gmail.com |
8e2c0be6c7d23cf0d22524fbf1a53d0d962939ac | 72d4cf6ef8b4d0b1ca50b969e673bb494055deb4 | /LinearSystem.py | 480ea85c3caf1c34bf9154ba9cc275ff8588a90b | [] | no_license | tourist1029/notebook_python | 3bfc2b8abc078391433fc2087d7947fe636786da | 68f73dd4079f48923a76487f3a10901a3c938635 | refs/heads/master | 2020-06-21T00:31:26.143647 | 2019-07-17T10:47:15 | 2019-07-17T10:47:15 | 197,299,631 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | from Matrix import Matrix
from Vector import Vector
class LinearSystem:
    """Square linear system A x = b solved by Gauss-Jordan elimination.

    The augmented matrix [A | b] is stored as a list of row Vectors in
    `self.Ab`.  `b` may be a Vector (a single right-hand side) or a Matrix
    (multiple right-hand sides, e.g. for computing an inverse).
    """

    def __init__(self, A, b):
        assert A.row_num() == len(b), "row number of A must be equal to the length of b"
        self._m = A.row_num()
        self._n = A.col_num()
        assert self._m == self._n  # square systems only; restriction may be lifted later
        # Build the augmented matrix [A | b], handling Vector vs Matrix b.
        if isinstance(b, Vector):
            self.Ab = [Vector(A.row_vector(i).underlying_list() + [b[i]])
                       for i in range(self._m)]
        if isinstance(b, Matrix):
            self.Ab = [Vector(A.row_vector(i).underlying_list() + b.row_vector(i).underlying_list())
                       for i in range(self._m)]

    def _max_row(self, index, n):
        """Return the row in [index, n) with the largest |entry| in column `index`.

        Bug fix: partial pivoting must select the largest absolute pivot; the
        original compared raw values with `<`, returning the row with the
        smallest (possibly zero or negative) pivot instead.
        """
        best, ret = abs(self.Ab[index][index]), index
        for i in range(index + 1, n):
            if abs(self.Ab[i][index]) > best:
                best, ret = abs(self.Ab[i][index]), i
        return ret

    def _forward(self):
        """Forward pass: reduce Ab to row-echelon form with unit pivots."""
        n = self._m
        for i in range(n):
            # Swap the best pivot row (largest |Ab[r][i]|) into position i.
            max_row = self._max_row(i, n)
            self.Ab[i], self.Ab[max_row] = self.Ab[max_row], self.Ab[i]
            # Normalize the pivot row so the pivot equals 1.
            self.Ab[i] = self.Ab[i] / self.Ab[i][i]  # TODO: handle singular system (pivot == 0)
            # Eliminate the pivot column from every row below.
            for j in range(i + 1, n):
                self.Ab[j] = self.Ab[j] - self.Ab[j][i] * self.Ab[i]

    def _backward(self):
        """Backward pass: eliminate above each pivot, leaving [I | x]."""
        n = self._m
        for i in range(n - 1, -1, -1):
            for j in range(i - 1, -1, -1):
                self.Ab[j] = self.Ab[j] - self.Ab[j][i] * self.Ab[i]

    def gauss_jordan_elimination(self):
        """Solve in place; afterwards the last column(s) of Ab hold x."""
        self._forward()
        self._backward()

    def fancy_print(self):
        """Print the augmented matrix with '|' separating A from b."""
        for i in range(self._m):
            print(' '.join(str(self.Ab[i][j]) for j in range(self._n)), end=' ')
            print('|', self.Ab[i][-1])
# if __name__ == '__main__':
# A = Matrix([[1,2,4], [3,7,2], [2,3,3]])
# b = Vector([7, -11, 1])
# ls = LinearSystem(A, b)
# ls.gauss_jordan_elimination()
# ls.fancy_print()
| [
"enricyx@outlook.com"
] | enricyx@outlook.com |
61800924239c8fb6f1f53e3f2ac06aeed078d177 | 350c96dc9322722f146e618f6fda8b53012e8cfa | /python3/thinkcs-python3/13.11.1.py | 6a0bae6ef2cda9feb9d13cb40be60d0f65efea67 | [] | no_license | jhn--/sample-python-scripts | a5f896b0ad78eee6fe6ea51b4d5f695275a7cc5f | 980724da60372a31e23aff902dcead3a03efcc6f | refs/heads/master | 2021-01-25T06:06:12.717869 | 2020-01-03T19:55:40 | 2020-01-03T19:55:40 | 22,878,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | '''
13.11.1
Write a program that reads a file and writes out a new file with the lines in reversed order (i.e. the first line in the old file becomes the last one in the new file.)
'''
# Input file to reverse and the path the reversed copy is written to
# (both relative to the current working directory).
oldfile = "text.txt"
newfile = "rev_text.txt"
def reverse_file(old, new):
    """Copy file `old` to file `new` with the line order reversed.

    Bug fix: the original loop `range(len(xs)-1, 0, -1)` stopped at index 1
    and silently dropped the input's first line; `reversed(xs)` covers every
    line.  `with` blocks guarantee both files are closed even on error.
    """
    with open(old, "r") as f:
        xs = f.readlines()
    with open(new, "w") as g:
        for line in reversed(xs):
            g.write(line)
# (A commented-out variant that reversed whitespace-separated tokens rather
# than lines was removed here: it was dead code with different semantics.)
# Run the reversal once at script/import time.
reverse_file(oldfile, newfile)
"john.hoe@gmail.com"
] | john.hoe@gmail.com |
da1424b954c6ea7946bf5c4b741adee5647928ce | 2e69d2f140bb653938dc1b7238b85a4af4754123 | /metanic/settings/development.py | 24903ea9e622fd8f43c25fe974be2913277bb0c6 | [
"BSD-3-Clause"
] | permissive | metanic/services | f866d78e7207624cf4b420929d987b6005394d1d | a00b99f9b697864a078e2cb886be4d75c10458a9 | refs/heads/master | 2021-06-06T22:33:56.823827 | 2018-08-14T08:05:00 | 2018-08-14T08:05:00 | 115,375,318 | 0 | 0 | NOASSERTION | 2020-02-11T21:34:25 | 2017-12-26T01:57:09 | Python | UTF-8 | Python | false | false | 2,287 | py | from metanic.settings.defaults import INSTALLED_APPS
from metanic.settings.defaults import MIDDLEWARE
from metanic.settings.defaults import REST_FRAMEWORK
from metanic.settings.defaults import cache_url
from metanic.settings.defaults import env_value
from metanic.settings.defaults import project_path
# We specifically allow `import *` in this case to pull in expected settings
from metanic.settings.defaults import * # noqa
# ---------------------------------------------------------------------------
# Development-only Django settings: DEBUG enabled, local hosts/origins,
# throttling effectively disabled, SQLite database and local Redis cache.
# ---------------------------------------------------------------------------
DEBUG = True
DEFAULT_FROM_EMAIL = 'services@metanic.local'
FRONTEND_URL = env_value('frontend_url', 'http://localhost:3030/')
MEDIA_ROOT = project_path('media')
MEDIA_URL = '/media/'
METANIC_REDIRECT_URL = 'http://localhost:3030/'
ROOT_URLCONF = 'metanic.core.urls.development'
STATIC_ROOT = project_path('static')
STATIC_URL = '/static/'

# 'TEST' keeps Anymail importable without a real Mailgun credential.
MAILGUN_API_KEY = env_value('mailgun_api_key', default='TEST')
ANYMAIL['MAILGUN_API_KEY'] = MAILGUN_API_KEY

# Hard-coded fallback is acceptable only because this module is dev-only.
SECRET_KEY = env_value(
    'secret_key',
    'diagonal stunning powder ledge employ dealer',
)

# NOTE: a short ACCESS_CONTROL_ALLOW_ORIGINS assignment used to sit here; it
# was dead code, unconditionally overwritten by the fuller list below, and
# has been removed.

# Extremely generous rates so manual testing is never throttled.
REST_FRAMEWORK['DEFAULT_THROTTLE_CLASSES'] = []
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] += (
    'rest_framework.authentication.SessionAuthentication',
)
REST_FRAMEWORK['DEFAULT_THROTTLE_RATES'] = {
    'anon': env_value('anon_throttle_rate', default='100/second'),
    'sensitive': env_value('sensitive_throttle_rate', default='100/second'),
    'user': env_value('user_throttle_rate', default='100/second'),
}

# Development conveniences layered onto the default app/middleware lists.
INSTALLED_APPS += [
    'debug_toolbar',
    'django_extensions',
]
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
MIDDLEWARE += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]

CACHES = {
    'default': cache_url('redis://localhost:6379/0'),
}
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': project_path(
            env_value('DATABASE_FILENAME', 'metanic.sqlite3')
        ),
    },
}

ALLOWED_HOSTS = [
    'localhost',
    'metanic.local',
]
ACCESS_CONTROL_ALLOW_ORIGINS = [
    '::1:',
    '127.0.0.1',
    '127.0.0.1:*',
    'localhost',
    'localhost:*',
    'metanic.local',
    'metanic.local:*',
]
# debug_toolbar only renders for these client IPs.
INTERNAL_IPS = [
    '127.0.0.1',
]
| [
"monokrome@monokro.me"
] | monokrome@monokro.me |
6ac9b4b2e0a9cabb8fefd64438d1e78820449696 | cefb51a0833609caae3c721b39a43754137fad4a | /PaperSubmission/ActiveLearner/InteractiveLabeling.py | 89162ff30b323423b7072ecdf8914deab6a40222 | [] | no_license | emmableu/CuratingExamples | b116fbdf9f56d955a777b51acdd77df3ab57bdd6 | 6c1f0aa0a91153f46657aa03e0cb06f30f28faec | refs/heads/master | 2021-02-13T17:15:43.712643 | 2020-07-09T23:49:53 | 2020-07-09T23:49:53 | 244,715,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,789 | py | import sys
sys.path.append('/Users/wwang33/Documents/IJAIED20/CuratingExamples/PaperSubmission/PassiveLearner')
from PatternMining import *
sys.setrecursionlimit(10**8)
from ActiveLearnActionData import *
import warnings
warnings.filterwarnings('ignore')
# from pattern_mining_util import *
def interactive_label(label_name, code_shape_p_q_list):
x_train, y_train, pattern_orig = get_data(code_shape_p_q_list, digit01= True, action_name = label_name, datafolder = "xy_0heldout")
total_data = len(y_train)
all_simulation = {}
all_simulation["y"] = y_train
for repetition in range(10):
new_row = {}
read = ActiveLearnActionData(x_train, y_train)
print("read.session: ", read.session)
total = total_data
for id in read.start_as_1_pos():
read.code([id])
candidate = read.random(read.step)
read.code(candidate)
read.get_numbers()
input_x = np.insert(x_train[read.train_ids1], 0, 1, axis=1)
perf_dict = svm_linear.model_cross_val_predict(input_x, y_train[read.train_ids1])
new_row_value = (perf_dict['f1'])
new_row[read.step + 1] = new_row_value
for j in (range(1, (total - 1) // read.step)):
new_row_key = min((j) * read.step + 11, total)
new_row_key = new_row_key
read.get_numbers()
if len(read.poses) == 1:
candidate= read.random(read.step)
model = read.passive_train()
else:
model, candidate = read.active_model_selection_train(all_uncertainty = True)
if len(candidate) > 0:
read.code(candidate)
input_x = np.insert(x_train, 0, 1, axis=1)
perf_dict = model.model_cross_val_predict(input_x, y_train)
new_row_value = (perf_dict['f1'])
new_row[new_row_key] = new_row_value
print("new_row: ", new_row)
save_dir = base_dir + "/Simulation/PatternMining/SessionTable/ActiveLearning/F1Curve/ModelSelectionAndUncertainty/" + label_name
if repetition == 0:
atomic_save_performance_for_one_repetition(new_row, save_dir, code_shape_p_q_list,repetition, dpm = False)
else:
save_performance_for_one_repetition(new_row, save_dir,code_shape_p_q_list, repetition, dpm = False)
def encapsulated_simulate():
label_name_s = ['keymove', 'jump', 'cochangescore', 'movetomouse','moveanimate']
# label_name_s = ['moveanimate']
code_shape_p_q_list_s = [[[1, 0], [1, 1], [1, 2], [1, 3], [2, 3]]]
code_shape_p_q_list_s = [[[1, 0]]]
for label_name in label_name_s:
for code_shape_p_q_list in code_shape_p_q_list_s:
interactive_label(label_name, code_shape_p_q_list)
| [
"wwang33@ncsu.edu"
] | wwang33@ncsu.edu |
24a0eef4f1af79a957cf9029213903e7a4999b2b | 3899dd3debab668ef0c4b91c12127e714bdf3d6d | /venv/Lib/site-packages/tensorflow/python/estimator/canned/linear.py | f20177bc113e7d69e4c24f89d653ca26be594025 | [] | no_license | SphericalPotatoInVacuum/CNNDDDD | b2f79521581a15d522d8bb52f81b731a3c6a4db4 | 03c5c0e7cb922f53f31025b7dd78287a19392824 | refs/heads/master | 2020-04-21T16:10:25.909319 | 2019-02-08T06:04:42 | 2019-02-08T06:04:42 | 169,691,960 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 22,582 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variable_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import ftrl
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import estimator_export
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.2
def _get_default_optimizer(feature_columns):
  """Returns an FTRL optimizer whose learning rate shrinks with column count."""
  # Scale the historical default rate down as 1/sqrt(#columns), capped at
  # _LEARNING_RATE.
  scaled_rate = 1.0 / math.sqrt(len(feature_columns))
  return ftrl.FtrlOptimizer(learning_rate=min(_LEARNING_RATE, scaled_rate))
def _get_expanded_variable_list(var_list):
  """Given a list of variables, expands them if they are partitioned.

  Args:
    var_list: A list of variables.

  Returns:
    A list of variables where each partitioned variable is expanded to its
    components.
  """
  expanded = []
  for candidate in var_list:
    if (isinstance(candidate, variable_ops.Variable) or
        resource_variable_ops.is_resource_variable(candidate)):
      # Plain or resource variable: keep as-is.
      expanded.append(candidate)
    else:
      # Must be a PartitionedVariable; iterating it yields its components.
      expanded.extend(candidate)
  return expanded
# TODO(rohanj): Consider making this a public utility method.
def _compute_fraction_of_zero(variables):
  """Given a linear variables list, compute the fraction of zero weights.

  Args:
    variables: A list or list of list of variables

  Returns:
    The fraction of zeros (sparsity) in the linear model.
  """
  flattened_weights = []
  for entry in variables:
    # Each entry may be a variable or a (possibly empty) nested list of
    # variables; empty entries -- columns that created no Variables --
    # contribute nothing.
    for var in nest.flatten(entry):
      flattened_weights.append(array_ops.reshape(var, [-1]))
  return nn.zero_fraction(array_ops.concat(flattened_weights, axis=0))
def _linear_logit_fn_builder(units, feature_columns, sparse_combiner='sum'):
  """Function builder for a linear logit_fn.

  Args:
    units: An int indicating the dimension of the logit layer.
    feature_columns: An iterable containing all the feature columns used by
      the model.
    sparse_combiner: A string specifying how to reduce if a categorical column
      is multivalent. One of "mean", "sqrtn", and "sum".

  Returns:
    A logit_fn (see below).
  """

  def linear_logit_fn(features):
    """Linear model logit_fn.

    Args:
      features: This is the first item returned from the `input_fn`
        passed to `train`, `evaluate`, and `predict`. This should be a
        single `Tensor` or `dict` of same.

    Returns:
      A `Tensor` representing the logits.
    """
    if feature_column_v2.is_feature_column_v2(feature_columns):
      # New-style (v2) feature columns: the v2 LinearModel tracks its own
      # variables, including shared embeddings via the state manager.
      shared_state_manager = feature_column_v2.SharedEmbeddingStateManager()
      linear_model = feature_column_v2.LinearModel(
          feature_columns=feature_columns,
          units=units,
          sparse_combiner=sparse_combiner,
          shared_state_manager=shared_state_manager)
      logits = linear_model(features)
      bias = linear_model.bias_variable

      # We'd like to get all the non-bias variables associated with this
      # LinearModel. This includes the shared embedding variables as well.
      variables = linear_model.variables
      variables.remove(bias)
      variables.extend(shared_state_manager.variables)

      # Expand (potential) Partitioned variables
      bias = _get_expanded_variable_list([bias])
      variables = _get_expanded_variable_list(variables)
    else:
      # Legacy (v1) feature columns: variables are reported via cols_to_vars.
      linear_model = feature_column._LinearModel(  # pylint: disable=protected-access
          feature_columns=feature_columns,
          units=units,
          sparse_combiner=sparse_combiner,
          name='linear_model')
      logits = linear_model(features)
      cols_to_vars = linear_model.cols_to_vars()
      bias = cols_to_vars.pop('bias')
      variables = cols_to_vars.values()

    if units > 1:
      summary.histogram('bias', bias)
    else:
      # If units == 1, the bias value is a length-1 list of a scalar Tensor,
      # so we should provide a scalar summary.
      summary.scalar('bias', bias[0][0])
    # Sparsity summary over all non-bias weights.
    summary.scalar('fraction_of_zero_weights',
                   _compute_fraction_of_zero(variables))
    return logits

  return linear_logit_fn
def _linear_model_fn(features, labels, mode, head, feature_columns, optimizer,
                     partitioner, config, sparse_combiner='sum'):
  """A model_fn for linear models that use a gradient-based optimizer.

  Args:
    features: dict of `Tensor`.
    labels: `Tensor` of shape `[batch_size, logits_dimension]`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    head: A `Head` instance.
    feature_columns: An iterable containing all the feature columns used by
      the model.
    optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training. If `None`, will use a FTRL optimizer.
    partitioner: Partitioner for variables.
    config: `RunConfig` object to configure the runtime settings.
    sparse_combiner: A string specifying how to reduce if a categorical column
      is multivalent. One of "mean", "sqrtn", and "sum".

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: mode or params are invalid, or features has the wrong type.
  """
  if not isinstance(features, dict):
    raise ValueError('features should be a dictionary of `Tensor`s. '
                     'Given type: {}'.format(type(features)))

  # Resolve string/callable optimizer specs; default to FTRL with the
  # column-count-scaled learning rate.
  optimizer = optimizers.get_optimizer_instance(
      optimizer or _get_default_optimizer(feature_columns),
      learning_rate=_LEARNING_RATE)
  num_ps_replicas = config.num_ps_replicas if config else 0

  # Default partitioner spreads large variables over the parameter servers
  # in slices of at least 64MB.
  partitioner = partitioner or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas,
          min_slice_size=64 << 20))

  with variable_scope.variable_scope(
      'linear',
      values=tuple(six.itervalues(features)),
      partitioner=partitioner):

    logit_fn = _linear_logit_fn_builder(
        units=head.logits_dimension, feature_columns=feature_columns,
        sparse_combiner=sparse_combiner)
    logits = logit_fn(features=features)

    # The head owns loss construction, the train_op, and eval metrics.
    return head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        optimizer=optimizer,
        logits=logits)
@estimator_export('estimator.LinearClassifier')
class LinearClassifier(estimator.Estimator):
  """Linear classifier model.

  Trains a linear model that assigns each example to one of `n_classes`
  classes; with `n_classes=2` this is plain binary classification.

  Example:

  ```python
  categorical_column_a = categorical_column_with_hash_bucket(...)
  categorical_column_b = categorical_column_with_hash_bucket(...)
  categorical_feature_a_x_categorical_feature_b = crossed_column(...)

  # Estimator using the default optimizer.
  estimator = LinearClassifier(
      feature_columns=[categorical_column_a,
                       categorical_feature_a_x_categorical_feature_b])

  estimator.train(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(input_fn=input_fn_predict)
  ```

  The `optimizer` argument also accepts an explicit optimizer instance (for
  example an FTRL optimizer with regularization) or a callable returning one
  (useful for learning-rate decay), and `warm_start_from` may point at a
  previous checkpoint directory.

  Input of `train` and `evaluate` should have the following features,
  otherwise there will be a `KeyError`:

  * if `weight_column` is not `None`, a feature with `key=weight_column`
    whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column
      name; both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.

  Loss is calculated by using softmax cross entropy.

  @compatibility(eager)
  Estimators can be used while eager execution is enabled. Note that
  `input_fn` and all hooks are executed inside a graph context, so they have
  to be written to be compatible with graph mode. Note that `input_fn` code
  using `tf.data` generally works in both graph and eager modes.
  @end_compatibility
  """

  def __init__(self,
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column=None,
               label_vocabulary=None,
               optimizer='Ftrl',
               config=None,
               partitioner=None,
               warm_start_from=None,
               loss_reduction=losses.Reduction.SUM,
               sparse_combiner='sum'):
    """Constructs a `LinearClassifier` estimator object.

    Args:
      feature_columns: Iterable of `FeatureColumn` instances used by the
        model.
      model_dir: Directory for saving model parameters, graph etc.; also used
        to load checkpoints to continue training a previously saved model.
      n_classes: Number of label classes; labels must be class indices in
        `[0, n_classes)` (convert arbitrary label values to indices first).
        Defaults to binary classification.
      weight_column: String key or `_NumericColumn` defining per-example
        weights that multiply each example's loss. For a `_NumericColumn`,
        the raw tensor is fetched by key `weight_column.key` and
        `weight_column.normalizer_fn` is applied to it.
      label_vocabulary: List of possible string label values. Required when
        labels are strings; otherwise labels must already be encoded as
        integers (or floats in `[0, 1]` for `n_classes=2`).
      optimizer: `tf.Optimizer` instance, a callable returning one, or one of
        the strings 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'. Defaults to
        the FTRL optimizer.
      config: `RunConfig` object to configure the runtime settings.
      partitioner: Optional partitioner for the input layer.
      warm_start_from: Checkpoint path or `WarmStartSettings` for
        warm-starting. A bare path warm-starts all weights and biases and
        assumes vocabularies and Tensor names are unchanged.
      loss_reduction: One of `tf.losses.Reduction` except `NONE`, describing
        how to reduce training loss over a batch. Defaults to `SUM`.
      sparse_combiner: How to reduce a multivalent categorical column; one of
        "mean", "sqrtn" or "sum" (see `tf.feature_column.linear_model`).

    Returns:
      A `LinearClassifier` estimator.

    Raises:
      ValueError: if n_classes < 2.
    """
    # Pick the head matching the classification arity; both variants share
    # the same weighting/vocabulary/reduction configuration.
    if n_classes == 2:
      head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(  # pylint: disable=protected-access
          weight_column=weight_column,
          label_vocabulary=label_vocabulary,
          loss_reduction=loss_reduction)
    else:
      head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint: disable=protected-access
          n_classes,
          weight_column=weight_column,
          label_vocabulary=label_vocabulary,
          loss_reduction=loss_reduction)

    def _model_fn(features, labels, mode, config):
      """Forwards to the shared `_linear_model_fn` with this head."""
      return _linear_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          feature_columns=tuple(feature_columns or []),
          optimizer=optimizer,
          partitioner=partitioner,
          config=config,
          sparse_combiner=sparse_combiner)

    super(LinearClassifier, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        warm_start_from=warm_start_from)
@estimator_export('estimator.LinearRegressor')
class LinearRegressor(estimator.Estimator):
  """An estimator for TensorFlow Linear regression problems.

  Trains a linear regression model to predict a (possibly multi-dimensional)
  label value given observations of feature values.

  Example:

  ```python
  categorical_column_a = categorical_column_with_hash_bucket(...)
  categorical_column_b = categorical_column_with_hash_bucket(...)
  categorical_feature_a_x_categorical_feature_b = crossed_column(...)

  # Estimator using the default optimizer.
  estimator = LinearRegressor(
      feature_columns=[categorical_column_a,
                       categorical_feature_a_x_categorical_feature_b])

  estimator.train(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(input_fn=input_fn_predict)
  ```

  The `optimizer` argument also accepts an explicit optimizer instance (for
  example an FTRL optimizer with regularization) or a callable returning one
  (useful for learning-rate decay), and `warm_start_from` may point at a
  previous checkpoint directory.

  Input of `train` and `evaluate` should have the following features,
  otherwise there will be a KeyError:

  * if `weight_column` is not `None`:
    key=weight_column, value=a `Tensor`
  * for column in `feature_columns`:
    - if isinstance(column, `SparseColumn`):
      key=column.name, value=a `SparseTensor`
    - if isinstance(column, `WeightedSparseColumn`):
      {key=id column name, value=a `SparseTensor`,
       key=weight column name, value=a `SparseTensor`}
    - if isinstance(column, `RealValuedColumn`):
      key=column.name, value=a `Tensor`

  Loss is calculated by using mean squared error.

  @compatibility(eager)
  Estimators can be used while eager execution is enabled. Note that
  `input_fn` and all hooks are executed inside a graph context, so they have
  to be written to be compatible with graph mode. Note that `input_fn` code
  using `tf.data` generally works in both graph and eager modes.
  @end_compatibility
  """

  def __init__(self,
               feature_columns,
               model_dir=None,
               label_dimension=1,
               weight_column=None,
               optimizer='Ftrl',
               config=None,
               partitioner=None,
               warm_start_from=None,
               loss_reduction=losses.Reduction.SUM,
               sparse_combiner='sum'):
    """Initializes a `LinearRegressor` instance.

    Args:
      feature_columns: Iterable of `FeatureColumn` instances used by the
        model.
      model_dir: Directory for saving model parameters, graph etc.; also used
        to load checkpoints to continue training a previously saved model.
      label_dimension: Number of regression targets per example, i.e. the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically shape `[batch_size, label_dimension]`).
      weight_column: String key or `_NumericColumn` defining per-example
        weights that multiply each example's loss. For a `_NumericColumn`,
        the raw tensor is fetched by key `weight_column.key` and
        `weight_column.normalizer_fn` is applied to it.
      optimizer: `tf.Optimizer` instance, a callable returning one, or one of
        the strings 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'. Defaults to
        the FTRL optimizer.
      config: `RunConfig` object to configure the runtime settings.
      partitioner: Optional partitioner for the input layer.
      warm_start_from: Checkpoint path or `WarmStartSettings` for
        warm-starting. A bare path warm-starts all weights and biases and
        assumes vocabularies and Tensor names are unchanged.
      loss_reduction: One of `tf.losses.Reduction` except `NONE`, describing
        how to reduce training loss over a batch. Defaults to `SUM`.
      sparse_combiner: How to reduce a multivalent categorical column; one of
        "mean", "sqrtn" or "sum" (see `tf.feature_column.linear_model`).
    """
    head = head_lib._regression_head(  # pylint: disable=protected-access
        label_dimension=label_dimension,
        weight_column=weight_column,
        loss_reduction=loss_reduction)

    def _model_fn(features, labels, mode, config):
      """Forwards to the shared `_linear_model_fn` with this head."""
      return _linear_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          feature_columns=tuple(feature_columns or []),
          optimizer=optimizer,
          partitioner=partitioner,
          config=config,
          sparse_combiner=sparse_combiner)

    super(LinearRegressor, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        warm_start_from=warm_start_from)
| [
"a@bogdanov.co"
] | a@bogdanov.co |
afe9008e6a9e13d645434deab28633691d4014b5 | 4be6e0e23505165a4fa42e50bd2b62b005d6fb2b | /main.py | 1b51b83dd5f36e0b9e332151751ef0aa91edd650 | [] | no_license | curiousboey/Salesman-Problem | e1d6e4c858bf95e0a6e77e94e746571daffb100a | 53e3c4e42b78e7a95be8dc74d63eea8f29e83e8e | refs/heads/master | 2023-07-29T09:06:31.274486 | 2021-09-11T12:05:02 | 2021-09-11T12:05:02 | 405,368,420 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | import numpy as np
import matplotlib.pyplot as plt
import math
from itertools import permutations
# Brute-force travelling-salesman demo: generate random points, evaluate the
# open-path length of every visiting order, and plot the longest and shortest
# tours side by side.  O(no_of_points!) -- keep no_of_points small.
no_of_points = 8
x_value = np.random.randint(1, 100, no_of_points)
y_value = np.random.randint(1, 100, no_of_points)

# permutations() reorders positions in the same sequence for both calls, so
# x_per[i] and y_per[i] describe the same visiting order of the points.
x_per = list(permutations(x_value))
y_per = list(permutations(y_value))

distance_sum = []
for tour in range(math.factorial(no_of_points)):
    xs, ys = x_per[tour], y_per[tour]
    # Sum the Euclidean lengths of the no_of_points - 1 legs of this tour.
    total = sum(
        np.sqrt((ys[leg] - ys[leg + 1]) ** 2 + (xs[leg] - xs[leg + 1]) ** 2)
        for leg in range(no_of_points - 1)
    )
    distance_sum.append(total)

print('The maximum distance = ', max(distance_sum))
print('The minimum distance = ', min(distance_sum))

max_pos = distance_sum.index(max(distance_sum))
min_pos = distance_sum.index(min(distance_sum))

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(x_per[max_pos], y_per[max_pos], color='red', linestyle='dashed', linewidth=2,
         marker='o', markerfacecolor='blue', markersize=10)
ax2.plot(x_per[min_pos], y_per[min_pos], color='green', linestyle='dashed', linewidth=2,
         marker='o', markerfacecolor='blue', markersize=10)
# BUG FIX: plt.show was referenced but never called (missing parentheses),
# so the figure window was never displayed.
plt.show()
| [
"bhupendrabc71@gmail.com"
] | bhupendrabc71@gmail.com |
6c3db8b53bd8c75510c300d3ababa12ae4c34617 | b65177afe65a16bb51bfd5e25e5f753b09ecaed6 | /scripts/build_model.py | de8bd5c88a104849e97759fa58285977a7536cdd | [] | no_license | maxguillaume20/Structure-Prediction-Assignment | 1813f25af0fd1db9b9307c777050531b8a6eca2b | 10e3e6d5fcdab05a29373647e24cbcff1024e7cd | refs/heads/master | 2021-01-17T21:49:00.683738 | 2016-02-29T16:38:02 | 2016-02-29T16:38:02 | 52,432,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | import argparse
import os
from modeller import *
from modeller.automodel import *
def parse_arguments():
    """Parse the command-line arguments for a MODELLER modelling run."""

    def _is_valid_file_(x):
        """Argparse type hook: fail fast when *x* is not an existing file."""
        if not os.path.isfile(x):
            raise argparse.ArgumentTypeError(x + " is not a valid file")
        return x

    parser = argparse.ArgumentParser(description="Make models with MODELLER")
    parser.add_argument("alignment", help="Alignment file in PIR format")
    parser.add_argument("sequence", help="Sequence to be modelled")
    parser.add_argument("template",
                        help="Name of template (should also have a PDB file in the same directory)")
    parser.add_argument("-n", "--n_models", type=int, default=5,
                        help="Integer specifying the number of models")
    # NOTE(review): _is_valid_file_ is currently unused -- the template PDB
    # check below was disabled upstream; confirm whether validation should
    # be re-enabled.
    #_is_valid_file_(args.template + ".pdb")
    return parser.parse_args()
def main():
    """Build homology models with MODELLER from the parsed CLI arguments."""
    args = parse_arguments()
    env = environ()
    model = automodel(env,
                      alnfile=args.alignment,
                      knowns=args.template,
                      sequence=args.sequence,
                      assess_methods=(assess.DOPE, assess.GA341))
    # Generate models numbered 1 .. n_models.
    model.starting_model = 1
    model.ending_model = args.n_models
    model.make()


if __name__ == "__main__":
    main()
| [
"olivmrtn@gmail.com"
] | olivmrtn@gmail.com |
c2128ecbf70539ca24c18357cbb9cc5599012840 | f0bd81fa7ac9a58e358b685dede3e1316eac882d | /ebsTagginInheritance.py | 24ef9a2a1206905c1ef04f5a76f1c3b02d79d49c | [] | no_license | justwes2/aws_cloud_mgmt_scripts | 64fb7dac58cdf936e0ea929097812cd838644de3 | f6981bc71607d5448ef50e1dfe87cde2c25ac7cb | refs/heads/master | 2021-04-30T08:46:49.960406 | 2018-06-25T15:36:33 | 2018-06-25T15:36:33 | 121,383,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | import boto3
# Module-level AWS EC2 handles, created once per process.
# NOTE(review): ec2Client is only referenced from commented-out code below --
# presumably kept for a describe_instances variant; confirm it is needed.
ec2Client = boto3.client('ec2')
ec2Resource = boto3.resource('ec2')
def get_instance_id(event):
    """Return the id of the EC2 instance whose volumes should be tagged.

    Args:
        event: Lambda trigger payload; currently ignored (see note below).

    Returns:
        str: the instance id to process.

    NOTE(review): hard-wired to a development instance for testing; a real
    CloudWatch-event deployment should read event["detail"]["instance-id"]
    instead -- confirm before shipping.  The previous revision also carried
    a commented-out describe_instances call, removed here as dead code.
    """
    # TODO: switch to event["detail"]["instance-id"] once testing is done.
    return 'i-0e9d4b20432516e9f'
def make_vol_tags(vm_tags, vol_tags):
    """Merge instance tags into existing volume tags.

    Existing volume tags win on key conflicts; instance tags are only added
    for keys the volume does not already carry.

    Args:
        vm_tags: list of {"Key": ..., "Value": ...} dicts from the instance.
        vol_tags: list of {"Key": ..., "Value": ...} dicts from the volume.

    Returns:
        list of {"Key": ..., "Value": ...} dicts with the merged tag set.

    NOTE: ported from Python 2 (print statements, dict.iteritems) to
    Python 3 syntax; boto3 no longer supports Python 2.
    """
    # Seed with the volume's own tags so they take precedence.
    final_tags = {pair["Key"]: pair["Value"] for pair in vol_tags}
    for vm_tag_pair in vm_tags:
        if vm_tag_pair["Key"] in final_tags:
            print('{} already exists'.format(vm_tag_pair["Key"]))
        else:
            final_tags[vm_tag_pair["Key"]] = vm_tag_pair["Value"]
    print(final_tags)
    final_tags_list = []
    for key, value in final_tags.items():
        print(key, value)
        final_tags_list.append({"Key": key, "Value": value})
    return final_tags_list
def get_tags(instance):
    """Copy an instance's tags onto each of its attached EBS volumes.

    Untagged volumes receive the instance tags verbatim; already-tagged
    volumes get the merge produced by make_vol_tags (volume tags win).
    """
    matched = ec2Resource.instances.filter(
        Filters=[
            {
                'Name': 'instance-id',
                'Values': [instance],
            }
        ]
    )
    # Renamed the loop variable: the original reused the name "instance",
    # shadowing the parameter after the filter was built.
    for inst in matched:
        instance_tags = inst.tags
        for vol in inst.volumes.all():
            existing = vol.tags
            if existing is None:
                vol.create_tags(Tags=instance_tags)
            else:
                vol.create_tags(Tags=make_vol_tags(instance_tags, existing))
def lambda_handler(event, context):
    """AWS Lambda entry point: copy instance tags onto its EBS volumes."""
    # get_instance_id currently ignores its argument and returns a
    # hard-coded development instance id.
    get_tags(get_instance_id('event'))
# NOTE(review): module-level call runs the handler at import time with
# placeholder string arguments -- presumably local-testing leftovers;
# confirm it should be removed before deploying as a Lambda.
lambda_handler('event', 'context')
| [
"wes.coffay@gmail.com"
] | wes.coffay@gmail.com |
e66b011408adab3c02dfcec3760cc9db05c95c59 | 54a759a213e65ddb3a64eb5ec7892f3b34ce7e09 | /venv/bin/easy_install-3.6 | e0809e2d2ec532f78dfd3d1d09dc5f6643d62da0 | [] | no_license | nishantpa/graphql-python | a30b50a13b9c07f5256901e9aa6d23c98c4d860a | c9e018079a0a8895d3169f6b9180f18f67a5c9df | refs/heads/master | 2021-09-04T12:56:22.958181 | 2018-01-18T22:22:41 | 2018-01-18T22:22:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | 6 | #!/Users/nishantpatel/Development/graphql/venv/bin/python3.6
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script wrapper for easy_install.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" suffix the Windows launcher appends
    # to argv[0] so easy_install sees its canonical program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"nishantpatel@Nishants-MBP.home"
] | nishantpatel@Nishants-MBP.home |
ceea7101bd7a8fbb6a69d2bd09009feb8a85da09 | fdf6b431dbb8f6a5af7af586eae16fce758026e7 | /accounts/urls.py | 6b56120905a0a874a29a0aeb503fe9a68b5f3d85 | [] | no_license | ghaywatc/cghaywat | 4fa9c81c319e9d8a909ab13160fc5ae9fd349b1a | 67f404f74b197c4a913c6f51819aef990ac11991 | refs/heads/master | 2023-03-13T23:57:45.886682 | 2021-02-23T20:48:21 | 2021-02-23T20:48:21 | 341,685,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
# URL namespace: reverse these routes as "accounts:login" etc.
app_name= 'accounts'
# Login/logout use Django's built-in auth views (login with a custom
# template); signup is this app's own class-based view.
urlpatterns = [
    path('login/',auth_views.LoginView.as_view(template_name='accounts/login.html'),name='login'),
    path('logout/',auth_views.LogoutView.as_view(),name='logout'),
    # NOTE(review): path is capitalized ("Signup/") unlike the other routes
    # -- confirm the casing is intentional before changing it.
    path('Signup/',views.Signup.as_view(),name='signup')
]
"ghaywatc@gmail.com"
] | ghaywatc@gmail.com |
26192e0ebc0d1cd4c5f4c4daacc29ba82a8324dc | 5fc4767f01833e7dd3d20a133f597123191c1431 | /Maxima and minima of functions/Minima.py | 2bfec9739085163b8c7c7c548f57562fd96f5a94 | [] | no_license | PayalS1999/Genetic-Algorithm | a6ea63e2a3a4501b3945f957b56fe175419b59e8 | 9a85ba891cf8994c643fe64c244999b797ac99e2 | refs/heads/main | 2023-04-20T11:48:34.269907 | 2021-05-01T18:17:37 | 2021-05-01T18:17:37 | 363,345,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,206 | py | import numpy as np
import math
import random
import matplotlib.pyplot as plt
def func(x):
    """Objective being minimised: f(x) = x^2 + 48x - 385."""
    return (x + 48) * x - 385
def plotting(n, x, y):
    """Plot per-generation population average vs. best fitness."""
    plt.figure()
    plt.plot(n, x, '-', color='red', alpha=0.6, label='Population average')
    plt.plot(n, y, '--', color='green', label='Population maximum')
    plt.legend()
    plt.show()
def roulette_wheel(fx, gen_num):
    """Fitness-proportionate parent selection for a minimisation problem.

    Objective values are shifted and inverted so that *smaller* values get
    proportionally larger slices of the wheel, then one index is drawn with
    a single uniform sample.  ``gen_num`` is unused but kept for interface
    compatibility with callers.
    """
    inverted = fx - min(fx)
    inverted = max(inverted) - inverted
    wheel_size = sum(inverted)
    threshold = random.uniform(0, wheel_size)
    cumulative = 0
    for idx, slice_weight in enumerate(inverted):
        cumulative += slice_weight
        if cumulative >= threshold:
            break
    return idx
def next_gen(gen,x,fx,gen_num,chrom_len,lower):
    """Produce the next GA generation via selection, crossover and mutation.

    Parents are picked with roulette-wheel selection, recombined with
    two-point crossover, bit-flip mutated, decoded back to integers, and
    finally the combined parent+child pool is sorted by fitness so the
    ``gen_num`` lowest-objective individuals survive (elitism).

    Args:
        gen: (gen_num, chrom_len) int array of binary chromosomes (mutated
            in place).
        x: decoded integer value of each chromosome (mutated in place).
        fx: objective value of each chromosome (mutated in place).
        gen_num: population size (assumed even).
        chrom_len: bits per chromosome.
        lower: lower bound of the search interval (decoding offset).

    Returns:
        The updated (gen, fx, x), each sorted ascending by fitness.
    """
    crossover_prob=0.7
    mutation_prob=0.3
    child_gen= np.zeros((gen_num,chrom_len),dtype=int)
    row=0
    for iteration in range(int(gen_num/2)):
        # SELECTION OF PARENTS (fitness-proportionate; duplicate parents are
        # allowed -- the uniqueness loop below was deliberately disabled).
        ind1= roulette_wheel(fx,gen_num)
        ind2= roulette_wheel(fx,gen_num)
        #while(ind1==ind2):
        #    ind2= roulette_wheel(fx,gen_num)
        child_gen[row]=gen[ind1]
        child_gen[row+1]=gen[ind2]
        # 2 POINT CROSSOVER: swap the bit slice [cr1, cr2) between the pair.
        prob= random.random()
        if prob<= crossover_prob:
            cr1= random.randint(1,chrom_len-1)
            cr2=random.randint(1,chrom_len-1)
            while(cr2==cr1):
                cr2=random.randint(1,chrom_len-1)
            if cr2<cr1:
                cr2,cr1=cr1,cr2
            for i in range(cr1,cr2):
                child_gen[row][i],child_gen[row+1][i]=child_gen[row+1][i],child_gen[row][i]
        # MUTATION: with probability mutation_prob, flip one random bit of
        # each child.
        for i in range(row,row+2):
            prob= random.random()
            if prob<= mutation_prob:
                mu= random.randint(0,chrom_len-1)
                if child_gen[i][mu]==0:
                    child_gen[i][mu]=1
                else:
                    child_gen[i][mu]=0
        row+=2
    # Decode every child chromosome (big-endian bit weights) back to an
    # integer offset by `lower`, then evaluate the objective.
    ch_x= np.zeros(gen_num,dtype=int)
    ch_fx= np.zeros(gen_num,dtype=float)
    bit=np.zeros(chrom_len,dtype=int)
    for i in range(chrom_len):
        bit[chrom_len-i-1]=math.pow(2,i)
    for i in range(gen_num):
        for j in range(chrom_len):
            ch_x[i]+=bit[j]*child_gen[i][j]
        ch_x[i]+=lower
    ch_fx=func(ch_x)
    # Elitist survivor selection: stack parents and children, append the
    # decoded value and fitness as two extra columns, sort the pool by the
    # fitness column (index chrom_len+1), and keep the first gen_num rows
    # (lowest objective values).
    dummy= np.vstack([gen,child_gen])
    dx= np.vstack([x.reshape((gen_num,1)),ch_x.reshape((gen_num,1))])
    dfx= np.vstack([fx.reshape((gen_num,1)),ch_fx.reshape((gen_num,1))])
    dummy= np.hstack([dummy,dx])
    dummy= np.hstack([dummy,dfx])
    dummy= dummy[dummy[:,chrom_len+1].argsort()]
    for i in range(gen_num):
        x[i]=dummy[i][chrom_len]
        fx[i]=dummy[i][chrom_len+1]
    # Strip the two bookkeeping columns and copy the surviving chromosomes
    # back into gen (in place).
    dummy=np.delete(dummy,np.s_[-2:],1)
    for i in range(gen_num):
        gen[i]=dummy[i]
    return gen,fx,x
def ga(lower,upper):
    """Run the genetic algorithm to minimise func over [lower, upper].

    A population of 10 binary chromosomes is evolved for at most 50
    generations, stopping early once the population-mean fitness changes by
    less than 0.01 between consecutive generations.  Progress is printed
    each generation and finally plotted via plotting().

    Args:
        lower: inclusive lower bound of the integer search interval.
        upper: upper bound of the search interval.
    """
    # Work on the shifted range [0, upper - lower] so chromosomes encode
    # non-negative integers; `lower` is added back when decoding.
    x2=upper-lower
    x1=0
    gen_num= 10
    # Smallest chromosome length (up to 10 bits) whose range 2**chrom_len
    # exceeds the interval width.
    for chrom_len in range(11):
        if math.pow(2,chrom_len)>x2:
            break
    gen= np.zeros((gen_num,chrom_len),dtype=int)
    x= np.zeros(gen_num,dtype=int)
    fx= np.zeros(gen_num)
    # GENERATE INITIAL POPULATION: random integers encoded as fixed-width
    # binary strings.
    for i in range(gen_num):
        r= random.randint(x1,x2)
        bi= np.binary_repr(r, width=chrom_len)
        x[i]=r+lower
        fx[i]=func(x[i])
        for j in range(chrom_len):
            gen[i][j]= bi[j]
    print(x,fx)
    avg=[]
    maxi=[]
    n=[]
    for i in range(50):
        # FOR PLOTTING: record the generation index, mean fitness and fx[0]
        # (after the first next_gen call fx is sorted, so fx[0] is the best
        # individual of the previous generation).
        n.append(i)
        avg.append(fx.mean())
        maxi.append(fx[0])
        avg1= fx.mean()
        gen,fx,x= next_gen(gen,x,fx,gen_num,chrom_len,lower)
        print("iteration ",i+1)
        print("population mean : ", fx.mean())
        print("population max : ", fx[0])
        print("optimal value: ",x[0])
        print()
        avg2= fx.mean()
        # Convergence test: stop when the mean fitness has stabilised.
        if(abs(avg1-avg2)<0.01):
            break
    print("population mean converges to: ", fx.mean())
    print("population max converges to: ", fx[0])
    print("optimal value: ",x[0])
    plotting(n,avg,maxi)
| [
"noreply@github.com"
] | noreply@github.com |
eb399de506fea74d5ed8b71968924f9c1bec5da9 | 5ef2fc5fa0c2605d17353a77d2279bd7bbc22034 | /utilities/Customlogger.py | 66c74e2644e7cbecd214b48662e668b9a4350151 | [] | no_license | makpowerful/DSNAP | 2fd40ff3c8cc34974cde98e2931a4b3d7ea187f5 | 8cc26ec1210693087161b305abc7e7922587a78b | refs/heads/master | 2023-01-24T13:13:39.220663 | 2020-12-10T09:12:02 | 2020-12-10T09:12:02 | 316,496,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | import logging
# Default log destination; kept as the historical hard-coded path so the
# zero-argument LogGen.loggen() calls used elsewhere in the suite keep
# writing to the same file.
DEFAULT_LOG_FILE = "C://Users//mkalamshabaz//PycharmProjects//DSNAP//Logs//automation.log"


class LogGen:
    """Factory for the suite-wide file logger.

    Replaces the earlier commented-out draft implementations, which have
    been removed as dead code.
    """

    @staticmethod
    def loggen(log_file=DEFAULT_LOG_FILE):
        """Return the root logger configured to write INFO+ records to *log_file*.

        Args:
            log_file: Destination path for the log file; defaults to the
                original hard-coded automation.log location, so existing
                ``LogGen.loggen()`` callers are unaffected.

        Returns:
            logging.Logger: the root logger.  The log file is truncated on
            every call (filemode='w').
        """
        # basicConfig is a no-op when the root logger already has handlers,
        # so drop any existing ones first to guarantee a fresh file handler.
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        logging.basicConfig(filename=log_file, filemode='w',
                            format='%(asctime)s: %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p')
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        return logger
| [
"mak_powerful@yahoo.co.in"
] | mak_powerful@yahoo.co.in |
84b75d96249ce2d145a4267e417b242d359e1cae | 41e3e64f1487e6061729a6ef9aaab1062ca33f67 | /DiscreteCosineTransform.py | fc723b24379549a41d671e6b94a391d548708f33 | [] | no_license | Wilmir/DiscreteCosineTransform | b0ec8aee282c38b8b9544df233f50bcebc2480bc | 222cc9799d99983cd7f9ca3d87a8f91b38fbb806 | refs/heads/master | 2023-01-30T22:09:03.781538 | 2020-12-08T12:20:19 | 2020-12-08T12:20:19 | 319,414,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | import numpy as np
import math
# Transform size (and its float twin used in the normalisation factors).
N = 8
n = 8.0
# Forward DCT matrix and a placeholder for its transpose.
T = np.zeros((N, N), dtype=float)
Tt = np.zeros((N, N), dtype=float)


def printMatrix(npArray):
    """Print the top-left N x N block of *npArray*, three decimals per cell,
    tab-separated."""
    for r in range(N):
        cells = ["{0:0.3f}".format(npArray[r][c]) for c in range(N)]
        print("\t".join(cells) + "\t")
def Generate_T():
    """Fill the global matrix T in place with the orthonormal DCT-II basis.

    Row 0 holds the constant 1/sqrt(n); every other entry (r, c) is
    sqrt(2/n) * cos(r*pi*(2c+1)/(2N)).  The transpose is computed later in
    main() via np.transpose.
    """
    scale = math.sqrt(2 / n)
    for r in range(N):
        for c in range(N):
            if r == 0:
                T[r][c] = 1 / math.sqrt(n)
            else:
                T[r][c] = scale * math.cos(r * math.pi * (2 * c + 1) / (2 * N))
def main():
    """Build the DCT matrix T and display it together with its transpose."""
    print("This script calculates the matrices T and T' as employed in DCT's")
    Generate_T()
    print("Matrix T")
    printMatrix(T)
    transposed = np.transpose(T)
    print("Matrix Tt - T Transposed")
    printMatrix(transposed)


if __name__ == "__main__":
    main()
"a00278899@student.ait.ie"
] | a00278899@student.ait.ie |
d40061d5b471c876bfd9807d919cd5353053d42a | 2fad06f78a76c4fc15bf0c7a9736dcf7244240d0 | /node_modules/fsevents/build/config.gypi | d4b7b3d2cd4fc96079828ea6bc1c41ac22268ffd | [
"MIT"
] | permissive | Pstarsss/ts_snake | f028883bfddc15a8467ec597623af5dd59adaedf | 14559254e989848afbf384ee6e97b6f508bf3808 | refs/heads/master | 2023-04-04T06:35:10.433253 | 2021-04-14T11:27:59 | 2021-04-14T11:27:59 | 357,879,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,743 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"llvm_version": "0",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "64.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/fintopia/.node-gyp/10.16.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/Users/fintopia/.npm-global/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npm.taobao.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"registry": "https://registry.npm.taobao.org/",
"home": "https://npm.taobao.org",
"fetch_retries": "2",
"noproxy": "",
"key": "",
"message": "%s",
"versions": "",
"phantomjs_cdnurl": "https://npm.taobao.org/mirrors/phantomjs/",
"globalconfig": "/Users/fintopia/.npm-global/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/fintopia/.npm-init.js",
"userconfig": "/Users/fintopia/.npmrc",
"cidr": "",
"node_version": "10.16.0",
"user": "501",
"auth_type": "legacy",
"editor": "/usr/bin/vim",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/Users/fintopia/.nvm/versions/node/v10.16.0/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"sass_binary_site": "https://npm.taobao.org/mirrors/node-sass/",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/fintopia/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.9.0 node/v10.16.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/1s/jbffxcyd4pdgz1gpry2llyl80000gn/T",
"unsafe_perm": "true",
"prefix": "/Users/fintopia/.npm-global",
"link": ""
}
}
| [
"xingpan@yangqianguan.com"
] | xingpan@yangqianguan.com |
f1ca9eed522212d6934ddb637b30f8665ffb3113 | b1a2cd489ed0baafc44f1d406952c2792bbb9661 | /code/pwc/rho_vis.py | 8b1bb69b85e513f152b6c22806f0bc181993454a | [] | no_license | fsoest/ba | df5f26726f4db761853efd2e8d5c4252e49577a9 | 40f08044848fa805776eccb16f3237ee234f8840 | refs/heads/master | 2023-03-03T09:31:06.066888 | 2021-02-10T11:31:42 | 2021-02-10T11:31:42 | 297,967,729 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | from multiproc.pwc_helpers import rho_path
import numpy as np
import matplotlib.pyplot as plt
import qutip as qt
def data_wrapper(data, dt, steps):
    """Unpack one stored sample and forward it to rho_path.

    data[0] and data[1] each hold [theta..., phi...] halves of equal length
    (drive and transfer angles respectively -- see rho_path for semantics);
    data[3] is the initial state rho_0.
    """
    half = len(data[0]) // 2
    theta_drive, phi_drive = data[0][:half], data[0][half:]
    theta_trans, phi_trans = data[1][:half], data[1][half:]
    rho_init = data[3]
    return rho_path(theta_drive, phi_drive, theta_trans, phi_trans,
                    dt, rho_init, half, steps)
def exp_xyz(rhos):
    """Return the Pauli expectation values <sigma_x>, <sigma_y>, <sigma_z>.

    Args:
        rhos: sequence of 2x2 density matrices (plain arrays).

    Returns:
        np.ndarray of shape (len(rhos), 3): real parts of tr(sigma_k @ rho)
        for k in (x, y, z), one row per input state.
    """
    # Hoisted out of the loop: the original rebuilt each qutip Pauli object
    # (qt.sigmax().full() etc.) on every single iteration.
    paulis = (qt.sigmax().full(), qt.sigmay().full(), qt.sigmaz().full())
    exp = np.zeros((len(rhos), 3))
    for i, rho in enumerate(rhos):
        for k, sigma in enumerate(paulis):
            exp[i, k] = np.real(np.trace(sigma @ rho))
    return exp
# %%
if __name__ == '__main__':
    # Exploratory script: load three pickled sample sets generated with
    # different time steps dt (1, 5, 10) and compare one sample from each.
    dt1 = np.load('multi_train_data/N_5/dt_1_eigen_sobol_10_run_0.npy', allow_pickle=True)
    dt5 = np.load('multi_train_data/N_5/dt_5_0_sobol_10_run_0.npy', allow_pickle=True)
    dt10 = np.load('multi_train_data/N_5/dt_10_eigen_sobol_10_run_0.npy', allow_pickle=True)
    # %%
    # Evolve sample index 20 of each set over 50 steps; each call returns
    # (rho path, stepwise rhos, e) where e is plotted (negated) below --
    # presumably an energy/work series; confirm against rho_path.
    inx = 20
    path1, step1, e1 = data_wrapper(dt1[inx], 1, 50)
    path5, step5, e5 = data_wrapper(dt5[inx], 5, 50)
    path10, step10, e10 = data_wrapper(dt10[inx], 10, 50)
    # Pauli expectation values along each trajectory.
    exp_1 = exp_xyz(path1)
    exp_5 = exp_xyz(path5)
    exp_10 = exp_xyz(path10)
    exp5 = exp_xyz(step5)
    # %%
    # Compare -Re(e) across the three time steps.
    plt.plot(range(len(e1)), -np.real(e1), label='1')
    plt.plot(range(len(e5)-1), -np.real(e5)[:-1], label='5')
    plt.plot(range(len(e10)), -np.real(e10), label='10')
    plt.legend(title='$\Delta T$')
    # %%
    # Plot <sigma_x/y/z> for the dt=5 path; the other data sets and the
    # time-axis variants below were left disabled during exploration.
    a = ['x', 'y', 'z']
    for i, dir in enumerate(a):
        # plt.plot(range(len(exp_1)), exp_1[:, i], label='1 {0}'.format(dir))
        plt.plot(range(len(exp_5)), exp_5[:, i], label='5 {0}'.format(dir))
    # for i, dir in enumerate(a):
    #     plt.plot(range(len(exp_10)), exp_10[:, i], label='10 {0}'.format(dir))
    # plt.plot(np.linspace(0, 5, 201), exp_1[:, 2])
    # plt.plot(np.linspace(0, 5, 201), exp_5[:, 2])
    # plt.plot(np.linspace(0, 5, ))
    plt.legend()
    plt.xlabel('t')
    plt.ylabel('$<\sigma_i>$')
"felix@soest-net.de"
] | felix@soest-net.de |
1c1031d5eadebb75a4a6bd7db5d0f83a4e500726 | e66dc121195ce4464d8958cb65baf909c3b8bb19 | /survey.py | 2c761ffbef662f476843c041cfe86e039459d3d1 | [] | no_license | youurt/Python | a524c126ac30836be013600931232173e1ce793b | 41ff30c0d2b316d16fb51b8290176dffad4f92a1 | refs/heads/master | 2020-09-10T07:27:46.792183 | 2019-11-14T11:43:17 | 2019-11-14T11:43:17 | 221,684,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py |
class AnonymousSurvey():
    """Collect anonymous answers to a single survey question."""

    def __init__(self, question):
        """Remember the question text and start with no responses."""
        self.question = question
        self.responses = []

    def show_question(self):
        """Print the survey question."""
        print(self.question)

    def store_response(self, new_response):
        """Record one participant's answer."""
        self.responses.append(new_response)

    def show_results(self):
        """Print every response collected so far, one per line."""
        print("Survey results:")
        for answer in self.responses:
            print(f'- {answer}')
| [
"ugurtigu@gmail.com"
] | ugurtigu@gmail.com |
197ada5990972489d11e42d1f750c1109aa8419a | 93e4d67c01e31ec74cf9e4cd210fa4a018317b6e | /src/DATest/LotteryDA/WelfareLottery/3D/DataDownload.py | b7e553e75cc25fd1dbb182a5edd5bee39e927afd | [] | no_license | Sammion/DAStudy | d0c1d485d1dc01555d69da59098e5170d3ec3c0a | 23852b3ccb830a5baf9a990a609a5c2586f80f8e | refs/heads/master | 2021-09-26T10:30:38.585465 | 2018-10-29T09:57:50 | 2018-10-29T09:57:50 | 115,117,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,655 | py | # -*- coding: utf-8 -*-
"""
Created on 8/10/2018
@author: Samuel
@Desc:
@dependence: Noting
"""
import requests
from bs4 import BeautifulSoup
import xlwt
import time
# Fetch one listing page of lottery results.
def get_one_page(url, timeout=10):
    """Download *url* and return the body text, or None on a non-200 status.

    A browser User-Agent is sent because the site rejects default clients.
    `timeout` (seconds, default 10) prevents the crawler from hanging forever
    on a dead connection — requests has no timeout unless one is given.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    if response.status_code == 200:
        return response.text
    return None
# Parse one listing page into structured draw records.
def parse_one_page(html):
    """Yield one dict per draw row found in *html* (parsed with lxml)."""
    soup = BeautifulSoup(html, 'lxml')
    # The first two rows and the last one are skipped — presumably
    # header/footer rows rather than draw data.
    for row in soup.select('tr')[2:-1]:
        cells = row.select('td')
        winning = row.select('td em')  # the three drawn digits
        yield {
            'time': cells[0].text,
            'issue': cells[1].text,
            'digits': winning[0].text,
            'ten_digits': winning[1].text,
            'hundred_digits': winning[2].text,
            'single_selection': cells[3].text,
            'group_selection_3': cells[4].text,
            'group_selection_6': cells[5].text,
            'sales': cells[6].text,
            'return_rates': cells[7].text
        }
# Write the scraped draw data into an Excel workbook.
def write_to_excel():
    """Scrape every result listing page and save all draws to '3D.xls'.

    NOTE(review): the page count (246) is hard-coded — confirm it still
    matches the site's pagination before relying on a full export.
    """
    f = xlwt.Workbook()
    sheet1 = f.add_sheet('3D', cell_overwrite_ok=True)
    # Header row (Chinese column titles: draw date, issue, units/tens/hundreds
    # digit, single selection, group-3, group-6, sales, return rate).
    row0 = ["开奖日期", "期号", "个位数", "十位数", "百位数", "单数", "组选3", "组选6", "销售额", "返奖比例"]
    # Write the header row.
    for j in range(0, len(row0)):
        sheet1.write(0, j, row0[j])
    # Crawl each listing page and append every draw to the sheet;
    # `i` is the running row index across all pages (row 0 is the header).
    i = 0
    for k in range(1, 247):
        url = 'http://kaijiang.zhcw.com/zhcw/html/3d/list_%s.html' % (str(k))
        html = get_one_page(url)
        print('正在保存第%d页。' % k)
        # Write one spreadsheet row per draw on this page.
        for item in parse_one_page(html):
            sheet1.write(i + 1, 0, item['time'])
            sheet1.write(i + 1, 1, item['issue'])
            sheet1.write(i + 1, 2, item['digits'])
            sheet1.write(i + 1, 3, item['ten_digits'])
            sheet1.write(i + 1, 4, item['hundred_digits'])
            sheet1.write(i + 1, 5, item['single_selection'])
            sheet1.write(i + 1, 6, item['group_selection_3'])
            sheet1.write(i + 1, 7, item['group_selection_6'])
            sheet1.write(i + 1, 8, item['sales'])
            sheet1.write(i + 1, 9, item['return_rates'])
            i += 1
    f.save('3D.xls')
def main():
    """Entry point: run the full scrape-and-export job."""
    write_to_excel()
if __name__ == '__main__':
main() | [
"33392372+Sammion@users.noreply.github.com"
] | 33392372+Sammion@users.noreply.github.com |
cc5944b4c83a4775a2bb0eb3623f8812a6f2e3ac | c4a2c5d2ee3bb946333bec267c337858c2eaa87c | /tests/bhive/test_asciichart.py | 33f157a84494dea26f1582b0203b3aa21810eb53 | [
"MIT"
] | permissive | TheCrazyGM/bhive | 93b237140def25a8cb4de0160678db116b45d4e0 | 1494e90a99123ecfc5efbd927258f9ba59443e2e | refs/heads/master | 2021-04-10T20:15:59.966431 | 2020-03-22T23:50:52 | 2020-03-22T23:50:52 | 248,962,200 | 3 | 1 | NOASSERTION | 2020-10-27T22:24:53 | 2020-03-21T11:29:02 | Python | UTF-8 | Python | false | false | 1,211 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
from builtins import range
from builtins import super
import string
import random
import unittest
import base64
from pprint import pprint
from bhive.asciichart import AsciiChart
class Testcases(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.curve = [1.2, 4.3, 2.0, -1.3, 6.4, 0.]
def test_plot(self):
ac = AsciiChart(height=3, width=3)
self.assertEqual(len(ac.canvas), 0)
ret = ac.plot(self.curve, return_str=True)
ac.plot(self.curve, return_str=False)
self.assertTrue(len(ret) > 0)
ac.clear_data()
self.assertEqual(len(ac.canvas), 0)
def test_plot2(self):
ac = AsciiChart(height=3, width=3)
ac.clear_data()
ac.adapt_on_series(self.curve)
self.assertEqual(ac.maximum, max(self.curve))
self.assertEqual(ac.minimum, min(self.curve))
self.assertEqual(ac.n, len(self.curve))
ac.new_chart()
ac.add_axis()
ac.add_curve(self.curve)
| [
"thecrazygm@gmail.com"
] | thecrazygm@gmail.com |
94f9719ea3bafb52fb5ea71541380aa245912c33 | afdeedfb743fbb149d36c14cfad65feaf03acd21 | /code/1253-reconstruct-a-2-row-binary-matrix.py | bd9cbd79b960e9959b96f36d73f49227eb0ec9fe | [] | no_license | linhx13/leetcode-code | f16cd4a0d35be34c41b86715fc9f3e8ec4b0a577 | c71574acfc68174a091c1751f10985b8f5737a1f | refs/heads/master | 2021-07-04T03:45:20.030275 | 2021-06-09T13:55:18 | 2021-06-09T13:55:18 | 70,423,464 | 0 | 1 | null | 2019-08-01T09:37:49 | 2016-10-09T18:48:33 | null | UTF-8 | Python | false | false | 870 | py | from typing import List
class Solution:
    """LeetCode 1253: greedily rebuild a 2-row binary matrix from column sums."""

    def reconstructMatrix(
        self, upper: int, lower: int, colsum: List[int]
    ) -> List[List[int]]:
        """Return [top_row, bottom_row] satisfying the row/column sums.

        Columns with sum 2 must fill both rows; columns with sum 1 go to
        whichever row still needs more ones. Returns [] when no valid
        matrix exists (some row budget left over or overdrawn).
        """
        width = len(colsum)
        top = [0] * width
        bottom = [0] * width
        for idx, total in enumerate(colsum):
            if total == 2:
                top[idx] = bottom[idx] = 1
            elif total == 1:
                # Prefer the bottom row on ties (lower >= upper), the top
                # row only while the top budget strictly exceeds the bottom.
                if lower < upper:
                    top[idx] = 1
                else:
                    bottom[idx] = 1
            upper -= top[idx]
            lower -= bottom[idx]
        return [top, bottom] if upper == 0 == lower else []
if __name__ == "__main__":
    # Ad-hoc manual check; the commented pairs below are alternative
    # sample inputs for the same problem.
    # upper = 2
    # lower = 3
    # colsum = [2, 2, 1, 1]
    upper = 5
    lower = 5
    colsum = [2, 1, 2, 0, 1, 0, 1, 2, 0, 1]
    # upper = 1
    # lower = 4
    # colsum = [2, 1, 2, 0, 0, 2]
    print(Solution().reconstructMatrix(upper, lower, colsum))
"mylhx288@gmail.com"
] | mylhx288@gmail.com |
b023e3022a68f36d0c3aac3e56334c0f705e2383 | 010840eb9317b3e4f10d499c96d01a2427e83c31 | /DiaryPlanet/DiaryPlanet/settings.py | 7586dfd65060294fab54c915be90a60303b5b0b2 | [] | no_license | ASTARCHEN/PlanetDiary | 7c5241770fe5139bf97f67c6f78263038f60da67 | 29f793ac683baa998cbf3b0505e4be6fbc1f94de | refs/heads/master | 2020-07-30T05:34:28.936973 | 2019-04-03T01:33:08 | 2019-04-03T01:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,308 | py | """
Django settings for DiaryPlanet project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Make the bundled app packages under ./apps importable by bare name.
sys.path.insert(0,os.path.join(BASE_DIR,'apps'))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'yj7ug_&rflcrfecfly6mjc+ts!4-10dk-j8oky1bx$3$#=43%i'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    # Project apps (importable thanks to the ./apps path inserted above).
    'BloodClub',
    'DiaryVideo',
    "users",
    # Django contrib apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Use the custom user model from the `users` app instead of auth.User.
AUTH_USER_MODEL="users.userProfile"
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DiaryPlanet.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'DiaryPlanet.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): root/root credentials are committed here — move them to
# environment variables or a local settings override.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'DiaryPlanet',
        'USER':'root',
        'PASSWORD':'root',
        'HOST':'127.0.0.1'
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"m1582422856@163.com"
] | m1582422856@163.com |
b8b6b730b3d1e9345cd8228e34aab0f42a31aa8c | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /19_数学/数论/BSGS/G - 222.py | 6cdc9f5606b73ac8778ecd183c8606680b877f35 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # 形如2,22,222,...的数列
# !这个数列第一个k的倍数的项是否存在, 若存在是第几项
# k<=1e8
# !等价于 2*(10^x-1)/9 ≡ 0 (mod k)
# !即 10^x ≡ 1 (mod k*9/gcd(k,2))
from math import gcd
from bsgs import exbsgs
# 即为扩展exbsgs
# TODO 有问题
import sys
sys.setrecursionlimit(int(1e9))
# Fast line-based stdin reader (intentionally shadows the builtin `input`).
input = lambda: sys.stdin.readline().rstrip("\r\n")
MOD = 998244353  # contest-template constant; not used in the visible code
INF = int(4e18)  # contest-template constant; not used in the visible code
def find(k: int) -> int:
    """Index of the first term of 2, 22, 222, ... divisible by k.

    Uses the reduction from the header comments:
    2*(10^x - 1)/9 ≡ 0 (mod k)  <=>  10^x ≡ 1 (mod 9k / gcd(k, 2)),
    solved as a discrete log with extended BSGS.
    NOTE(review): the file's own TODO says this may be buggy — verify
    exbsgs's no-solution return convention before relying on it.
    """
    return exbsgs(10, 1, k * 9 // gcd(k, 2))
if __name__ == "__main__":
    # Answer T independent queries, one k per input line.
    T = int(input())
    for _ in range(T):
        k = int(input())
        print(find(k))
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.