blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6644f5d39c16c8085f33054bbbdde0e525062265 | 3c2323929499a4d81adada6f60ee64bde1e86cb2 | /Simple_Backpropagation_Program/pytorch/views.py | ad9420f44e05bb17b3ef53f819f0390a0c1d09d5 | [] | no_license | GeonwooVincentKim/Backpropagation_Pytorch_Django | 8ba22bb065aca35fed114420b749bb9f0a383688 | 41df659956e5e4e8126b272bd4f5053cdeb30663 | refs/heads/master | 2022-11-22T06:44:27.901139 | 2020-07-16T14:01:06 | 2020-07-16T14:01:06 | 273,230,382 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the landing page."""
    context = {}
    return render(request, 'index.html', context)
def sub(request):
    """Render the sub page.

    TODO: handle POSTed numbers and persist them to a database so the
    simple backpropagation demo can reuse the values users enter.
    """
    context = {}
    return render(request, "sub/sub.html", context)
def input(request):
    """Render the input page (currently reuses sub/index.html).

    NOTE: the view name shadows the builtin ``input``; kept for URLconf
    compatibility.
    """
    return render(request, "sub/index.html", {})
| [
"kdsnop@gmail.com"
] | kdsnop@gmail.com |
d785c9b583818c160e60f5526e855a49a12bea8d | ae240e884e9d79243d7a9e338e77e5ee70a8bfd1 | /secuity/bijective-function.py | 79356c9da633665f8a04cdc78641990f3284f58d | [] | no_license | roshnet/HackerRank | b35bbab3f03ff37cd29b257822b00b8c2e81a189 | 9a90adac9cd6000139ca4801445c4fd3c640d545 | refs/heads/master | 2020-03-29T06:10:43.496703 | 2018-09-20T15:54:31 | 2018-09-20T15:54:31 | 149,612,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py |
def all_equal(arr, value):
    """Return True iff *arr* is non-empty and every element equals *value*.

    The previous implementation walked adjacent pairs while juggling a
    ``passes`` flag; for a single-element list it fell off the end and
    returned a falsy ``None`` even when the element matched, which made
    ``is_bijective`` reject valid one-element functions.  This version
    fixes that edge case and handles empty input explicitly (False).
    """
    return len(arr) > 0 and all(x == value for x in arr)
def is_bijective(x, y):
    """Return "YES" if the mapping x[i] -> y[i] is a bijection, else "NO".

    Injective: no image value may repeat (checked in O(n) with a set
    instead of the original O(n^2) pairwise scan).
    Surjective: as in the original design, approximated by requiring the
    domain and image lists to have equal length; the original appended to
    ``passes`` in a way that made this check a no-op, and its use of
    ``all_equal`` rejected every single-element function.  Both defects
    are fixed here.
    """
    # one-one: every image must be distinct
    if len(set(y)) != len(y):
        return "NO"
    # onto (size check): domain and image lists must be the same length
    if len(x) == len(y):
        return "YES"
    return "NO"
def main():
    """Read n and n image values from stdin, then report bijectivity."""
    n = abs(int(input()))
    domain = list(range(1, n + 1))
    images = input().split()
    if len(images) < n:
        print('Too few values as images. Function cannot be defined.')
        exit()
    print(is_bijective(domain, images))
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | roshnet.noreply@github.com |
07db860fc6de84b931e4b270036c770e99f84c94 | 89b6997b24e404c176358073626a8bfad7bcdb8e | /.history/chat/consumers_20210427011737.py | 8bc854916d684ee4e430ad24a1f2c472b16dc6f0 | [] | no_license | mohamedhawas123/Education-platform-django | 513e64ac112880385402ce609077796578b4e9ee | 7b83e66bba66b8b2b1a007f5818a534653e6abfb | refs/heads/main | 2023-07-18T16:19:52.177886 | 2021-09-24T12:04:09 | 2021-09-24T12:04:09 | 352,306,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | import json
from channels.generic.websocket import WebsocketConsumer
| [
"mohamedhawas123@gmail.com"
] | mohamedhawas123@gmail.com |
d30dbb2ecc229ae57a7dff9c32a62233067b88a5 | 89ca99b517c10494f769cbda756bf0c8e00167fc | /src/sim/jvision_pb/keyvalue_pb2.py | 38d5cfa3bc751374df88e9bf27d9deb2b4a0eab2 | [] | no_license | nkumar43212/jvsim | fad56e25ae5eb8e8d0ad1272dacd8a25cb7e88ec | 7663c493cefcb7ffb04321226d0534be5711194e | refs/heads/master | 2021-01-17T11:49:10.052784 | 2016-07-28T22:32:56 | 2016-07-28T22:32:56 | 51,335,614 | 4 | 12 | null | 2018-08-07T02:11:14 | 2016-02-08T23:42:49 | C++ | UTF-8 | Python | false | true | 4,680 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: keyvalue.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import telemetry_top_pb2 as telemetry__top__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='keyvalue.proto',
package='',
syntax='proto2',
serialized_pb=b'\n\x0ekeyvalue.proto\x1a\x13telemetry_top.proto\"R\n\x08keyvalue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x11\n\tint32_val\x18\x02 \x01(\x05\x12\x12\n\nuint64_val\x18\x03 \x01(\x04\x12\x12\n\nstring_val\x18\x04 \x01(\t\"(\n\nkvresponse\x12\x1a\n\x07kvpairs\x18\x01 \x03(\x0b\x32\t.keyvalue:<\n\rkvresponseExt\x12\x17.JuniperNetworksSensors\x18\xd0\x0f \x01(\x0b\x32\x0b.kvresponse'
,
dependencies=[telemetry__top__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
KVRESPONSEEXT_FIELD_NUMBER = 2000
kvresponseExt = _descriptor.FieldDescriptor(
name='kvresponseExt', full_name='kvresponseExt', index=0,
number=2000, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_KEYVALUE = _descriptor.Descriptor(
name='keyvalue',
full_name='keyvalue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='keyvalue.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='int32_val', full_name='keyvalue.int32_val', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uint64_val', full_name='keyvalue.uint64_val', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='string_val', full_name='keyvalue.string_val', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=121,
)
_KVRESPONSE = _descriptor.Descriptor(
name='kvresponse',
full_name='kvresponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='kvpairs', full_name='kvresponse.kvpairs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=163,
)
_KVRESPONSE.fields_by_name['kvpairs'].message_type = _KEYVALUE
DESCRIPTOR.message_types_by_name['keyvalue'] = _KEYVALUE
DESCRIPTOR.message_types_by_name['kvresponse'] = _KVRESPONSE
DESCRIPTOR.extensions_by_name['kvresponseExt'] = kvresponseExt
keyvalue = _reflection.GeneratedProtocolMessageType('keyvalue', (_message.Message,), dict(
DESCRIPTOR = _KEYVALUE,
__module__ = 'keyvalue_pb2'
# @@protoc_insertion_point(class_scope:keyvalue)
))
_sym_db.RegisterMessage(keyvalue)
kvresponse = _reflection.GeneratedProtocolMessageType('kvresponse', (_message.Message,), dict(
DESCRIPTOR = _KVRESPONSE,
__module__ = 'keyvalue_pb2'
# @@protoc_insertion_point(class_scope:kvresponse)
))
_sym_db.RegisterMessage(kvresponse)
kvresponseExt.message_type = _KVRESPONSE
telemetry__top__pb2.JuniperNetworksSensors.RegisterExtension(kvresponseExt)
# @@protoc_insertion_point(module_scope)
| [
"kumarn@juniper.net"
] | kumarn@juniper.net |
68e429904fe72245794c1b21b63e11df67f9ce97 | cb13037cdbd3e0ab6108670108e9497cc1e2a5a7 | /0.leetcode/0.基本的算法/4.排序/1.冒泡排序Bubblesort.py | 87ddbad13767a3782c1a06daaf71a3b8bf67122c | [] | no_license | GMwang550146647/network | 390fe0d1c72dcaca8b6d6dd1307adca0d56b55ce | 576de9b993f7763789d25a995702b40c9bc6fa57 | refs/heads/master | 2023-06-15T04:42:54.306077 | 2021-07-12T06:06:02 | 2021-07-12T06:06:02 | 315,488,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | '''
1.冒泡排序:
把最大的数一个一个地丢到最前面(期间对比的时候,见到比自己小的就交换相邻两个)
优点:
在非顺序链表都可以用
'''
def bubbleSort(arr):
    """In-place ascending bubble sort; returns the same (now sorted) list."""
    for unsorted_end in range(len(arr) - 1, 0, -1):
        # each pass bubbles the largest remaining value to position unsorted_end
        for idx in range(unsorted_end):
            if arr[idx] > arr[idx + 1]:
                arr[idx], arr[idx + 1] = arr[idx + 1], arr[idx]
    return arr
def bubbleSortModified(arr):
    """Bubble sort with early exit: stops as soon as a pass makes no swap."""
    for unsorted_end in range(len(arr) - 1, 0, -1):
        swapped = False
        for idx in range(unsorted_end):
            if arr[idx] > arr[idx + 1]:
                arr[idx], arr[idx + 1] = arr[idx + 1], arr[idx]
                swapped = True
        if not swapped:
            # already sorted -- no need for further passes
            break
    return arr
# Quick smoke test: sort a reverse-ordered list with both variants.
arr=[9,8,7,6,5,4,3,2,1]
print(bubbleSort(arr.copy()))
print(bubbleSortModified(arr.copy()))
"gmwang_global@qq.com"
] | gmwang_global@qq.com |
349ea6ce098e264d8c03d7b91b59e71dad2c0350 | d15eb2285895469a452867f76b033d0d64a4af5c | /Old_scripts_delete_20220804/Scripts/measurements/vna_autler_townes.py | 7d51bf373377dba1857bae3f809c5d6dc426d33d | [] | no_license | MRitter95/Kollar-Lab | 45ac62ed7805ad9faeeb33b54be50f39950f3b2c | c905725c43af6a49fe5bb2a994d5180f2ba469c2 | refs/heads/master | 2023-08-19T03:38:43.761313 | 2023-08-10T17:49:00 | 2023-08-10T17:49:00 | 236,054,959 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,356 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 18:31:45 2020
@author: Kollarlab
"""
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import userfuncs
import plotting_tools as plots
def get_default_settings():
    """Return the default settings dict for an Autler-Townes VNA scan."""
    return {
        # Save location
        'scanname': 'scanname',
        'meas_type': 'Autler_Townes',
        'project_dir': r'Z:\Data\defaultdir',
        # Sweep parameters
        'CAV_Attenuation': 30,
        'Qbit_Attenuation': 10,
        'Autler_Attenuation': 10,
        'ext_flux': 0,
        'autler_power': -20,
        'start_autler_freq': 3.5e9,
        'stop_autler_freq': 4.5e9,
        'autler_points': 31,
        # VNA settings
        'channel': 1,
        'avg_time': 30,
        'measurement': 'S21',
        'start_freq': 3.5e9,
        'stop_freq': 4.5e9,
        'freq_points': 501,
        'RFpower': -25,
        'RFport': 3,
        'Mport': 2,
        'CAVport': 1,
        'CAVpower': -55,
        'CAVfreq': 8.12555e9,
        'ifBW': 1e3,
    }
def vna_autler_townes(instruments, settings):
    """Run an Autler-Townes scan: step an RF 'autler' tone across a frequency
    range, taking a VNA spectroscopy trace at each point.

    instruments: dict with keys 'VNA', 'RFsource' (autler tone generator)
        and 'SRS' (voltage source used for the external flux bias).
    settings: dict shaped like get_default_settings(), plus 'reverse' and
        'random' flags that control the sweep order.

    Side effects: drives the instruments, mutates *settings* in place
    (adds the attenuations onto the recorded powers), and saves partial
    data plus a PNG after each sweep point (or once at the end when the
    sweep order is randomized).
    """
    #Instruments used
    vna = instruments['VNA']
    autlergen = instruments['RFsource']
    SRS = instruments['SRS']
    vna.reset()
    #Data saving and naming
    saveDir = userfuncs.saveDir(settings['project_dir'], settings['meas_type'])
    stamp = userfuncs.timestamp()
    filename = settings['scanname'] + '_' + stamp
    scanname = settings['scanname']
    CAV_Attenuation = settings['CAV_Attenuation']
    Qbit_Attenuation = settings['Qbit_Attenuation']
    Autler_Attenuation = settings['Autler_Attenuation']
    # record powers at the source by adding back the line attenuations
    settings['CAVpower'] = settings['CAVpower'] + CAV_Attenuation
    settings['RFpower'] = settings['RFpower'] + Qbit_Attenuation
    settings['autler_power'] = settings['autler_power'] + Autler_Attenuation
    autlergen.power = settings['autler_power']
    autlergen.output = 'On'
    SRS.output = 'On'
    SRS.voltage_ramp(settings['ext_flux'])
    start_autler_freq = settings['start_autler_freq']
    stop_autler_freq = settings['stop_autler_freq']
    autler_points = settings['autler_points']
    # round to kHz so the generator gets clean setpoints
    autler_freqs = np.round(np.linspace(start_autler_freq, stop_autler_freq, autler_points),-3)
    findices = np.array(list(range(len(autler_freqs))))
    if settings['reverse']:
        findices = np.flipud(findices)
    if settings['random']:
        np.random.shuffle(findices)
    mags = np.zeros((len(autler_freqs), settings['freq_points']))
    phases = np.zeros((len(autler_freqs), settings['freq_points']))
    tstart = time.time()
    for freqind in findices:
        autler_freq = autler_freqs[freqind]
        print('Freq: {}, final freq: {}'.format(autler_freq, autler_freqs[-1]))
        autlergen.freq = autler_freq
        data = vna.spec_meas(settings)
        vna.autoscale()
        mags[freqind] = data['mag']
        phases[freqind] = data['phase']
        # NOTE(review): the time estimate only prints when index 0 happens to
        # be processed; with 'reverse'/'random' that may be late or never.
        if freqind==0:
            tstop = time.time()
            singlePointTime = tstop-tstart
            estimatedTime = singlePointTime*len(autler_freqs)
            print('    ')
            print('estimated time for this scan : ' + str(np.round(estimatedTime/60, 1)) + ' minutes')
            print('estimated time for this scan : ' + str(np.round(estimatedTime/60/60, 2)) + ' hours')
            print('    ')
        freqs = data['xaxis']
        labels = ['Freq (GHz)', 'Autler freq (GHz)']
        full_data = {}
        single_data = {}
        # incremental plotting/saving only makes sense for ordered sweeps
        if not settings['random']:
            if settings['reverse']:
                full_data = {}
                full_data['xaxis'] = freqs
                full_data['mags'] = mags[freqind:]
                full_data['phases'] = phases[freqind:]
                single_data = data
                yaxis = autler_freqs[freqind:]
            else:
                full_data = {}
                full_data['xaxis'] = freqs
                full_data['mags'] = mags[0:freqind+1]
                full_data['phases'] = phases[0:freqind+1]
                single_data = data
                yaxis = autler_freqs[0:freqind+1]
            plots.simplescan_plot(full_data, single_data, yaxis, filename, labels, identifier='', fig_num=1)
            userfuncs.SaveFull(saveDir, filename, ['full_data', 'single_data', 'autler_freqs', 'labels', 'filename'], locals(), expsettings=settings)
            plt.savefig(os.path.join(saveDir, filename+'.png'), dpi = 150)
        t2 = time.time()
        print('Elapsed time: {}'.format(t2-tstart))
    # randomized sweeps are plotted once, after all points are in
    if settings['random']:
        full_data = {}
        full_data['xaxis'] = freqs
        full_data['mags'] = mags
        full_data['phases'] = phases
        single_data = data
        yaxis = autler_freqs
        plots.simplescan_plot(full_data, single_data, yaxis, filename, labels, identifier='', fig_num=1)
    # SRS.voltage_ramp(0.)
    # SRS.output = 'Off'
    autlergen.output = 'Off'
    userfuncs.SaveFull(saveDir, filename, ['full_data', 'single_data', 'autler_freqs', 'labels', 'filename'], locals(), expsettings=settings)
    plt.savefig(os.path.join(saveDir, filename+'.png'), dpi = 150)
"maritter@umd.edu"
] | maritter@umd.edu |
72f9a8aa6ffa67f07d79e64e1d54d6e39df82cf3 | 682f374456fc241d90d11b098ffa5920bde1e04d | /imgs/test_geometry/test_extrapolated_intersection/ah_test_line_line_intr_later.py | f7c4c2b02a2202e909806bb3a38caf918fcd56a8 | [
"MIT"
] | permissive | kgashok/pygorithm | eab6582126aeb561da1880787a44dd58598bee63 | e9afd6805e5b3adc5f362bc5ff1bf01e6ff88dd7 | refs/heads/master | 2021-09-09T23:39:45.935231 | 2021-09-08T15:34:01 | 2021-09-08T15:34:01 | 290,754,365 | 2 | 0 | MIT | 2020-08-27T11:12:32 | 2020-08-27T11:12:30 | null | UTF-8 | Python | false | false | 1,410 | py | from utils import create_newfig, create_moving_line, create_still_segment, run_or_export
func_code = 'ah'
func_name = 'test_line_line_intr_later'
def setup_fig01():
    """Case ah01: diagonal mover heading down-left toward a vertical segment."""
    fig, ax, renderer = create_newfig('{}01'.format(func_code))
    start, end, velocity = (5, 4), (6, 3), (-2, -2)
    create_moving_line(fig, ax, renderer, start, end, velocity, 'topright')
    create_still_segment(fig, ax, renderer, (3.5, 1.5), (3.5, 0), 'botleft', 'bot')
    return fig, ax, '{}01_{}'.format(func_code, func_name)
def setup_fig02():
    """Case ah02: vertical mover translating down-left toward a vertical wall."""
    fig, ax, renderer = create_newfig('{}02'.format(func_code))
    start, end = (5, 4), (5, 3)
    create_moving_line(fig, ax, renderer, start, end, (-2, -2), 'topright')
    create_still_segment(fig, ax, renderer, (3, 3), (3, 0), 'left')
    return fig, ax, '{}02_{}'.format(func_code, func_name)
def setup_fig03():
    """Case ah03: horizontal motion toward a slanted still segment."""
    fig, ax, renderer = create_newfig('{}03'.format(func_code))
    velocity = (-2, 0)
    create_moving_line(fig, ax, renderer, (5, 4), (5, 3), velocity, 'right')
    create_still_segment(fig, ax, renderer, (1, 1), (3, 3.5), 'left')
    return fig, ax, '{}03_{}'.format(func_code, func_name)
def setup_fig04():
    """Case ah04: short mover travelling up-right into a vertical segment."""
    fig, ax, renderer = create_newfig('{}04'.format(func_code))
    seg_a, seg_b = (2, 1), (2, 4)
    create_moving_line(fig, ax, renderer, (0, 1), (1, 0), (1, 2), 'topright')
    create_still_segment(fig, ax, renderer, seg_a, seg_b, 'right')
    return fig, ax, '{}04_{}'.format(func_code, func_name)
run_or_export(setup_fig01, setup_fig02, setup_fig03, setup_fig04) | [
"mtimothy983@gmail.com"
] | mtimothy983@gmail.com |
3a907c358ef16c6b25e6cfc3950549464ea7c1b6 | 9965e8dffe094e5c82e0fb7656491f7a659bed28 | /hsvhist/extract_features_and_Indexing.py | f7bd280713054c9b115c952d26ade4334c2eb55a | [] | no_license | shukesu/XP | 1f59747376fb28e6403393fdfeb0c43bfb6c1cb1 | f58bc6ecae80212c6cc401c56855bcd6416fbc88 | refs/heads/master | 2020-03-06T18:31:15.349363 | 2018-04-24T14:24:01 | 2018-04-24T14:24:01 | 127,008,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | "CBIR(Content-Base Image Retrieval)--Extract Features and Indexing"
import color_descriptor
import argparse
import glob
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="Path to the directory that cntains the images to be indexed")
ap.add_argument("-i", "--index", required=True, help="Path to where the computed index will be stored")
args = vars(ap.parse_args())
cd = color_descriptor.ColorDescriptor((8,12,3))
#Open the output index file for writing
output = open(args["index"],"w")
# use glob to grab the image paths and loop over them
for imagePath in glob.glob(args["dataset"]+"/*.jpg"):
# extract the image ID from the image
imageID = imagePath[imagePath.rfind("/")+1:]
image = cv2.imread(imagePath)
# describe the image
features = cd.describe(image)
# write feature to file
features = [str(f) for f in features]
output.write("%s,%s\n" %(imageID,",".join(features)))
# close index file
output.close()
| [
"noreply@github.com"
] | shukesu.noreply@github.com |
72aa9c1bf64f5f4f9047020c38dd4282c7cf1b7c | 5cea0a771736a5018876591edb48d4815475364d | /Projects/migrations/0009_auto_20200818_1658.py | db238817c370a47afc65232c9b3101d0f6ad892b | [] | no_license | Jose-Velasco/cs-club-django | b6585446a0147471a261af29b58c63bfe473a3a2 | ff5562027762e46d40c04ca3ead53ccc2ae35291 | refs/heads/master | 2023-05-26T19:37:19.629594 | 2020-08-19T00:26:56 | 2020-08-19T00:26:56 | 195,765,212 | 0 | 1 | null | 2023-05-22T23:34:22 | 2019-07-08T08:05:03 | Python | UTF-8 | Python | false | false | 374 | py | # Generated by Django 2.2.4 on 2020-08-18 23:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Projects', '0008_auto_20200818_1655'),
]
operations = [
migrations.RenameField(
model_name='contactform',
old_name='hasBeenRead',
new_name='unread',
),
]
| [
"jose.juarez.velasco@gmail.com"
] | jose.juarez.velasco@gmail.com |
ff65e1e0c8f907b827c9cd8712f3c7cfe02e904c | c926ef1ad709ca98309186709a18898bf3701688 | /MIT_P10/server.py | 04e85801ebb61a7bd27857b2afc06ab5685a9522 | [
"MIT"
] | permissive | JialinC/xv6 | f877c8fe7f3b1a2b42ab9df0334abdfa89373ead | 5bd7294ec30d19c44c813b418c3ca1e97224628e | refs/heads/master | 2022-11-21T18:41:11.075118 | 2020-07-28T06:19:35 | 2020-07-28T06:19:35 | 268,158,669 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import socket
import sys
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
addr = ('localhost', int(sys.argv[1]))
print >>sys.stderr, 'listening on %s port %s' % addr
sock.bind(addr)
while True:
buf, raddr = sock.recvfrom(4096)
print >>sys.stderr, buf
if buf:
sent = sock.sendto(buf, raddr)
| [
"jialin@system76-pc.localdomain"
] | jialin@system76-pc.localdomain |
0c2dc8e32a64509a557a00e987cfc8a78b918cf1 | 69295fa0064b1496f5770c851d5add989a5bb8bc | /bin/checked-fftc-accuracy | 6df47cfd09742ac9fca6e0525b5331a36d47de6b | [
"MIT"
] | permissive | huonw/sisfft-py | 3ebaf6b40b753caac4827abf790855f665258ba2 | 3cf048022c6641a9c5d60b6db01734fa414e6d7b | refs/heads/master | 2023-01-06T14:41:20.958019 | 2016-05-31T05:44:05 | 2016-05-31T05:44:05 | 60,028,580 | 5 | 1 | MIT | 2022-12-26T20:04:16 | 2016-05-30T17:46:58 | Python | UTF-8 | Python | false | false | 3,332 | #!/usr/bin/env python
from __future__ import print_function
import sisfft
import argparse
import numpy
np = numpy
import sys
DEFAULT_REPEAT = 10
DEFAULT_LENGTH = '(2**x for x in range(1, 14))'
DEFAULT_ALPHA = 1e3
DEFAULT_LIMIT = 1e-10
def rel(x, y):
    """Relative error between two log-space values: |exp(x - y) - 1|."""
    log_ratio = x - y
    return np.abs(np.expm1(log_ratio))
def ranges(v):
    """Return (span from the max down to the smallest positive entry,
    full log dynamic range) of log-space vector *v*."""
    peak = v.max()
    return peak - sisfft._utils.log_min_pos(v), sisfft._utils.log_dynamic_range(v)
def main():
    """CLI driver: benchmark checked_fftc accuracy against naive convolution.

    For each vector length and each (limit1, limit2) pair, generates random
    log-space PMFs, convolves them both exactly and with checked_fftc, and
    counts how many "flagged" entries actually exceed the 1/alpha relative
    error bound.  One CSV row per trial goes to stdout; a per-combination
    summary goes to stderr.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--repeats', type=int, default=DEFAULT_REPEAT,
                        help = 'number of PMFs to test')
    parser.add_argument('-n', '--lengths', default=DEFAULT_LENGTH,
                        help = 'expression generating the lengths to test with')
    parser.add_argument('-a', '--alpha', default=DEFAULT_ALPHA, type=float,
                        help = 'the alpha to use')
    parser.add_argument('-l', '--limit', type=float, action='append',
                        help = 'the limit to generate values to')
    args = parser.parse_args()
    # SECURITY NOTE: --lengths is eval'd as a Python expression from the
    # command line; acceptable for a local research tool, never for
    # untrusted input.
    lengths = list(eval(args.lengths))
    alpha = args.alpha
    limits = args.limit or [DEFAULT_LIMIT]
    # fixed seed for reproducible benchmark runs
    np.random.seed(1)
    print('L,dynrange1,dynrange2,dynrange,R1,R2,R,bad,actual_bad')
    for n in lengths:
        for limit1 in limits:
            for limit2 in limits:
                count_bad = 0
                count_actual_bad = 0
                count_had_bad = 0
                count_had_actual_bad = 0
                for _ in range(args.repeats):
                    # random log-space PMFs, normalised to sum to 1
                    v1 = np.random.random(n) * np.log(limit1)
                    v1 -= sisfft._utils.log_sum(v1)
                    v2 = np.random.random(n) * np.log(limit2)
                    v2 -= sisfft._utils.log_sum(v2)
                    exact = sisfft._naive.convolve_naive(v1, v2)
                    conv, bad_places = sisfft._afftc.checked_fftc(v1, v2, alpha)
                    this_bad = 0
                    this_actual_bad = 0
                    for i in bad_places:
                        this_bad += 1
                        # "actually bad": flagged AND outside the error bound
                        if rel(conv[i], exact[i]) > 1.0 / alpha:
                            this_actual_bad += 1
                    dynrange1, R1 = ranges(v1)
                    dynrange2, R2 = ranges(v2)
                    dynrange, R = ranges(exact)
                    count_bad += this_bad
                    count_actual_bad += this_actual_bad
                    count_had_bad += int(this_bad > 0)
                    count_had_actual_bad += int(this_actual_bad > 0)
                    print('%d,%f,%f,%f,%f,%f,%f,%d,%d' % (
                        n,
                        dynrange1, dynrange2, dynrange,
                        R1, R2, R,
                        this_bad, this_actual_bad))
                def summary(x, y):
                    # "x/y (pct%)"; 100% when both are zero, NaN when only y is
                    if y == 0:
                        pct = 100 if x == 0 else float('nan')
                    else:
                        pct = float(x) / y * 100
                    return '%d/%d (%.0f%%)' % (x, y, pct)
                print('length %s, limit1 %e, limit2 %e, elementwise %s, whole vector %s' % (
                    n, limit1, limit2,
                    summary(count_actual_bad, count_bad),
                    summary(count_had_actual_bad, count_had_bad)),
                      file = sys.stderr)
if __name__ == '__main__':
main()
| [
"huonw@maths.usyd.edu.au"
] | huonw@maths.usyd.edu.au | |
5b31b66f750123829300588af08c4e00e0432925 | 22697cc13a40546ca67d4f30d106d88274804bbb | /cluster_for_weibo_data.py | ed2f36bdb43fdf243c943e6d133bdfd44ae46b5c | [] | no_license | vincenthuang1229/cluster_for_weibo_data | 7d92b18483114480441ea48a2730308d45eb8431 | 5c5376c5a69302e64f37cbf41e35e36e6e4f0818 | refs/heads/master | 2020-03-07T01:16:07.157090 | 2016-04-11T13:05:40 | 2016-04-11T13:05:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,208 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
from numpy import *
import math
import jieba
def loadWeiboData(fileName):
    """Load weibo records from a comma-separated export file.

    Each returned row is ``[1-based line number, decoded text column]``.
    Which column holds the message text depends on how many fields the
    line has (the three export formats seen in practice):
    15 fields -> column 7, 22 -> column 10, 16 -> column 5.  Lines with
    any other field count are skipped.

    Replaces the original triplicated if-branches (plus dead commented
    variants) with a single field-count -> column lookup table.
    NOTE: ``.decode('utf-8')`` keeps the original Python 2 semantics.
    """
    # field-count -> index of the text column for that export format
    text_column = {15: 7, 22: 10, 16: 5}
    weiboData = []
    with open(fileName) as f:
        for lineno, line in enumerate(f, 1):
            fields = line.strip().split(',')
            col = text_column.get(len(fields))
            if col is not None:
                weiboData.append([lineno, fields[col].strip().decode('utf-8')])
    return array(weiboData)
def getStopWords():
    """Read stop words (one per line, UTF-8) from ./stopwords.txt.

    Uses a context manager so the file handle is closed; the original
    left it open for the interpreter to reclaim.
    NOTE: ``.decode('utf-8')`` keeps the original Python 2 semantics.
    """
    with open("stopwords.txt", "r") as f:
        return [word.decode('utf-8').strip() for word in f]
def cutContent(content, stopwords):
    """Tokenise *content* with jieba, dropping spaces and stop words.

    Returns the surviving tokens in their original order.  Builds a set
    from *stopwords* once so membership tests are O(1) instead of the
    original O(len(stopwords)) list scan per token.
    """
    stop = set(stopwords)
    cutWords = []
    for word in jieba.cut(content):
        if word != u' ' and word not in stop:
            cutWords.append(word)
    return cutWords
def getTfid(word, recordContent):
    """Term frequency: number of occurrences of *word* in the token list."""
    count = 0
    for token in recordContent:
        if token == word:
            count += 1
    return count
dictData = {}  # memo: word -> number of documents containing it


def getNi(word, documents):
    """Document frequency of *word*, memoised in the module-level dictData.

    *documents* is an object array whose rows are [record id, token list].
    """
    if word in dictData:
        return dictData[word]
    count = 0
    for row in documents:
        if word in row[1]:
            count += 1
    dictData[word] = count
    return count
'''
w_i = tf_i(d) * log(N / n_i)                                   (1)
where tf_i(d) is the frequency of term t_i in document d, N is the
total number of documents, and n_i is the number of documents that
contain term t_i.
'''
def VSMdocument(i, documents):
    """Build the vector-space model of document *i*.

    Returns an object array of [term, weight] rows where
    weight = tf * log(N / n_i)  -- tf: term frequency in this document,
    N: total number of documents, n_i: documents containing the term.
    NOTE: iterates over a set, so the row order is not deterministic.
    """
    N = documents.shape[0]
    recordContent = documents[i][1] # tokenised word list of this document
    #compute the term's weights
    VSM = []
    for word in set(recordContent):
        termWeight = []
        wi = 0
        tfid = getTfid(word, recordContent)
        n = getNi(word, documents)
        wi = tfid*log(float(N)/n)
        #print wi
        termWeight.append(word)
        termWeight.append(wi)
        VSM.append(termWeight)
    return array(VSM, dtype = object)
def simcos(vecA, vecB):
    """Cosine similarity over the leading weight entries of two VSM arrays.

    Only the first min(len(vecA), len(vecB)) rows are compared; row[1]
    holds the term weight.  Raises ZeroDivisionError when either weight
    prefix is all zero, matching the original behaviour.
    """
    k = min(vecA.shape[0], vecB.shape[0])
    dot = 0
    norm_a = 0
    norm_b = 0
    for idx in range(k):
        wa = vecA[idx][1]
        wb = vecB[idx][1]
        dot += wa * wb
        norm_a += wa * wa
        norm_b += wb * wb
    return dot / sqrt(norm_a * norm_b)
def jaccardCoeff(vecA, vecB):
    """Jaccard similarity of the term sets (column 0) of two VSM arrays.

    Returns a small epsilon (1e-8) instead of 0 for empty inputs or
    disjoint term sets, so callers can safely take reciprocals.
    """
    EPSILON = 0.00000001
    if len(vecA) == 0 or len(vecB) == 0:
        return EPSILON
    termsA = set(vecA[:, 0])
    termsB = set(vecB[:, 0])
    answer = float(len(termsA & termsB)) / len(termsA | termsB)
    return answer if answer >= 0.0000000001 else EPSILON
def getMaxSimilarity(Vec, v):
    """Best Jaccard similarity between *Vec* and any member of topic list *v*."""
    scores = [jaccardCoeff(Vec, member) for member in v]
    return max(scores)
dictTopic = {}  # topic id -> list of document VSMs assigned to that topic
numTopic = 0    # number of topics created so far
def single_pass(Vec, TC):
    """One step of single-pass clustering.

    Assigns *Vec* to the most similar existing topic when its best
    Jaccard similarity exceeds threshold *TC*; otherwise starts a new
    topic.  Mutates the module-level dictTopic/numTopic.
    """
    #find the old topic
    if len(Vec) == 0: return
    global dictTopic
    global numTopic
    allSimilarity = []
    # (alternative idea: compare only against the first member of each topic)
    #oneSimilarity = []
    if numTopic == 0:
        # first document seen: it founds topic 0
        dictTopic[numTopic] = []
        dictTopic[numTopic].append(Vec)
        numTopic += 1
    else:
        maxValue = 0
        maxIndex = -1
        # NOTE(review): .iteritems() is Python 2 only
        for k,v in dictTopic.iteritems():
            oneSimilarity = getMaxSimilarity(Vec, v)#jaccardCoeff(Vec, v[0])
            if oneSimilarity > maxValue:
                maxValue = oneSimilarity
                maxIndex = k
            #allSimilarity.append(oneSimilarity)
        #if the similarity is bigger than TC
        #join the most similar topic
        if maxValue > TC:
            dictTopic[maxIndex].append(Vec)
        #else create the new topic
        else:
            dictTopic[numTopic] = []
            dictTopic[numTopic].append(Vec)
            numTopic += 1
def loadDataSet(fileName):
    """Load a comma-separated numeric matrix from *fileName*.

    Returns a 2-D numpy array of floats.  ``list(map(...))`` preserves
    the original Python 2 semantics under Python 3 (where bare ``map``
    yields an iterator and the array would otherwise hold map objects),
    and the file handle is now closed via a context manager.
    """
    dataMat = []
    with open(fileName) as fr:
        for line in fr.readlines():
            curLine = line.strip().split(',')
            fltLine = list(map(float, curLine))
            dataMat.append(fltLine)
    return array(dataMat)
def distEclud(vecA, vecB):
    """Euclidean distance between two numeric vectors."""
    diff = vecA - vecB
    return sqrt(sum(power(diff, 2)))
def randWeiboCent2(dataSet, k):
    """Build k random centroids, each feature drawn uniformly within the
    min/max range of its column in *dataSet*."""
    numFeatures = shape(dataSet)[1]
    centroids = mat(zeros((k, numFeatures)))
    for col in range(numFeatures):
        column = dataSet[:, col]
        low = min(column)
        span = float(max(column) - low)
        centroids[:, col] = low + span * random.rand(k, 1)
    return centroids
def randWeiboCent(dataSet, k):
    """Sample k distinct row indices of *dataSet* to act as initial medoids.

    Bug fix: numpy's ``random.randint(low, high)`` excludes the upper
    bound, so the original ``randint(0, n-1)`` could never select the
    last row.  Now every row index 0..n-1 is reachable.
    NOTE: rejection sampling -- the caller must pass k <= number of rows,
    otherwise this loops forever (unchanged from the original contract).
    """
    numRows = shape(dataSet)[0]
    chosen = []
    while len(chosen) < k:
        candidate = random.randint(0, numRows)
        if candidate not in chosen:
            chosen.append(candidate)
    return chosen
def takemin(ptsInClust, dataSet, distMeas=simcos):
    """Return the medoid of a cluster: the member (record index) minimising
    the summed pseudo-distance (reciprocal similarity) to all members.

    Bug fix: ``mindist`` was re-initialised to ``inf`` inside the outer
    loop, so the ``distsum < mindist`` comparison always succeeded and
    the function simply returned the *last* member instead of the best
    one.  The accumulator is now initialised once, before the loop.
    """
    members = list(ptsInClust)
    n = len(members)
    minIndex = -1
    mindist = inf
    for i in range(n):
        distsum = 0
        for j in range(n):
            # similarity -> pseudo-distance via reciprocal
            distsum += 1.0 / distMeas(dataSet[ptsInClust[i]], dataSet[ptsInClust[j]])
        if distsum < mindist:
            mindist = distsum
            minIndex = i
    return members[minIndex]
def weibokMeans(dataSet, k, distMeas=simcos, createCent=randWeiboCent):
    """k-medoid-style clustering over VSM rows (Python 2 only: print stmts).

    Centroids are *record indices* (medoids), not mean vectors, because
    the rows are variable-length [term, weight] arrays that cannot be
    averaged.  Distance is the reciprocal of the similarity function.
    Returns (list of medoid indices, m x 2 matrix of [assigned medoid,
    distance] per record).
    """
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m, 2)))
    centroids = createCent(dataSet, k) #list contains record ids
    clusterChanged = True
    sumcount = 0
    while clusterChanged:
        #sumcount += 1
        #if sumcount > 5 :
        #    break
        clusterChanged = False
        for i in range(m):
            minDist = inf
            minIndex = -1
            for j in range(k):
                # similarity -> distance via reciprocal
                distJI = 1/distMeas(dataSet[centroids[j]], dataSet[i])
                if distJI < minDist:
                    minDist = distJI
                    minIndex = centroids[j]
            if clusterAssment[i,0] != minIndex:
                clusterChanged = True
            clusterAssment[i,:] = minIndex, minDist
        print centroids
        print '--------------------'
        #delIndex = -1
        for cent in range(k):
            #ptsInClust = dataSet[nonzero(clusterAssment[:,0].A == cent)[0]]
            ptsInClust = nonzero(clusterAssment[:,0].A == centroids[cent])
            #print type(ptsInClust)
            #print ptsInClust[0]
            lenpts = len(list(ptsInClust[0]))
            #print lenpts
            if lenpts == 0:
                # empty cluster: make the medoid its own singleton assignment
                clusterAssment[centroids[cent],:] = centroids[cent], 1
                #print 'is 0'
                continue
            centroids[cent] = takemin(ptsInClust[0],dataSet)
        '''
        #if centroids[cent] == -1:
            #delIndex = cent
        if delIndex != -1:
            k = k - 1
            del centroids[delIndex]
        '''
    return centroids, clusterAssment
def weibokMeans2(dataSet, k, distMeas=simcos, createCent=randWeiboCent):
    """Classic k-means over fixed-length numeric rows (centroids = means).

    NOTE(review): the defaults look inconsistent with the body --
    ``randWeiboCent`` returns a list of row indices, yet the loop indexes
    ``centroids[j,:]`` as a matrix (``randWeiboCent2`` would fit), and
    ``simcos`` is a *similarity* that is minimised here as if it were a
    distance.  Confirm intended defaults before reuse.
    """
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m, 2)))
    centroids = createCent(dataSet, k)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        for i in range(m):
            minDist = inf
            minIndex = -1
            for j in range(k):
                distJI = distMeas(centroids[j,:], dataSet[i,:])
                if distJI < minDist:
                    minDist = distJI
                    minIndex = j
            if clusterAssment[i,0] != minIndex:
                clusterChanged = True
            clusterAssment[i,:] = minIndex, minDist**2
        #print centroids
        for cent in range(k):
            # recompute each centroid as the mean of its assigned rows
            ptsInClust = dataSet[nonzero(clusterAssment[:,0].A == cent)[0]]
            centroids[cent,:] = mean(ptsInClust, axis=0)
    return centroids, clusterAssment
if __name__ == '__main__':
    # Pipeline: load weibo records -> tokenise -> TF-IDF vector-space models
    # -> single-pass topic clustering.  The k-medoid path is left commented
    # out in the triple-quoted block below.  (Python 2 print statements.)
    #datMat = loadWeiboData('yibin2.csv')
    #datMat = loadWeiboData('yibin.csv') #1
    #datMat = loadWeiboData('yulebao.csv') #2
    datMat = loadWeiboData('yibin_yulebao.csv') #4
    #datMat = loadWeiboData('suiji.csv') #3
    #datMat = loadWeiboData('suiji.csv')
    #print type(datMat)
    stopWords = getStopWords()
    n = datMat.shape[0]
    print 'total records:', n
    # tokenise every record: rows become [record id, token list]
    cutWeiboData = []
    for i in range(n):
        #print datMat[i][1]
        data = []
        data.append(datMat[i][0])
        data.append(cutContent(datMat[i][1], stopWords))
        cutWeiboData.append(data)
    cutWeiboData = array(cutWeiboData, dtype=object)
    print 'cutWeiboData is done'
    #print cutWeiboData[0]
    #get VSM
    recordVSMs = []
    for i in range(n):
        recordVSM = []
        recordVSM = VSMdocument(i, cutWeiboData)
        recordVSMs.append(recordVSM)
    #print recordVSMs[0]
    recordVSMs = array(recordVSMs)
    print 'VSM is done'
    print 'kMeans is starting..'
    #print simcos(recordVSMs[0], recordVSMs[1])
    #print jaccardCoeff(recordVSMs[0], recordVSMs[1])
    #randK = randWeiboCent(recordVSMs, 3)
    '''
    centroids, clusterAssment = weibokMeans(recordVSMs, 3, jaccardCoeff)
    #centroids, clusterAssment = weibokMeans(recordVSMs, 3)
    #print centroids
    #print '---------------'
    #print clusterAssment
    for i in centroids:
        print datMat[i][1]
    '''
    # single-pass clustering with Jaccard threshold 0.03
    for vec in recordVSMs:
        single_pass(vec, 0.03)
    print numTopic
    #print dictTopic
    # show the first term of each document in the first five topics
    for i in range(5):
        for key in dictTopic[i][0]:
            print key[0]
        print '---------------------'
    #print dictTopic
| [
"abc1871450@163.com"
] | abc1871450@163.com |
e1cf471b0d6b5a2b75230406c676ccd15fbfeace | bdf3364eb293abcb02aca9b1594e7181ecbc651f | /net-diffusion-adaptive.py | 65f31bc4a1a5e75f3b1f64b68175d16f2c8cf170 | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | hsayama/PyCX | f5ee2ec1d02b1ec7529725fc7c175b431a45ef06 | f61a56fcb5b79734216daf08203f7c08c9662dfb | refs/heads/master | 2023-03-16T20:59:55.706196 | 2023-03-08T15:19:38 | 2023-03-08T15:19:38 | 228,787,396 | 210 | 77 | NOASSERTION | 2023-01-17T14:32:30 | 2019-12-18T07:54:03 | Python | UTF-8 | Python | false | false | 1,472 | py | import pycxsimulator
from pylab import *
import networkx as nx
def initialize():
global g, nextg
g = nx.karate_club_graph()
for i, j in g.edges:
g.edges[i, j]['weight'] = 0.5
g.pos = nx.spring_layout(g)
for i in g.nodes:
g.nodes[i]['state'] = 1 if g.nodes[i]['club'] == 'Mr. Hi' else 0
nextg = g.copy()
nextg.pos = g.pos
def observe():
global g, nextg
cla()
nx.draw(g, cmap = cm.Spectral, vmin = 0, vmax = 1,
node_color = [g.nodes[i]['state'] for i in g.nodes],
edge_cmap = cm.binary, edge_vmin = 0, edge_vmax = 1,
edge_color = [g.edges[i, j]['weight'] for i, j in g.edges],
pos = g.pos)
alpha = 1 # diffusion constant
beta = 3 # rate of adaptive edge weight change
gamma = 3 # pickiness of nodes
Dt = 0.01 # Delta t
def update():
global g, nextg
for i in g.nodes:
ci = g.nodes[i]['state']
nextg.nodes[i]['state'] = ci + alpha * ( \
sum([(g.nodes[j]['state'] - ci) * g.edges[i, j]['weight']
for j in g.neighbors(i)])) * Dt
for i, j in g.edges:
wij = g.edges[i, j]['weight']
nextg.edges[i, j]['weight'] = wij + beta * wij * (1 - wij) * ( \
1 - gamma * abs(g.nodes[i]['state'] - g.nodes[j]['state'])
) * Dt
nextg.pos = nx.spring_layout(nextg, pos = g.pos, iterations = 5)
g, nextg = nextg, g
pycxsimulator.GUI().start(func=[initialize, observe, update])
| [
"noreply@github.com"
] | hsayama.noreply@github.com |
204487240a2d41647a0c34c703682a3d83e4c3d9 | 0f9356229ece68edec039b0ec626338355ba6f55 | /lib/core/enums.py | 48fa08cc9db5f98d5b77cb6df99b3d8f5ce597a4 | [] | no_license | HeartSleep/Shadow-Border | b7ba06eab213e36fb2b91b0391a2b364f94e3efd | 0548a83ba1e94f26efd6fa8c596ff42cb5849a7e | refs/heads/master | 2022-04-08T13:03:39.570875 | 2020-02-09T05:31:41 | 2020-02-09T05:31:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# project = https://github.com/fanxs-t/Shadow-Border
class CUSTOM_LOGGING:
SYSINFO = 9
SUCCESS = 8
ERROR = 7
WARNING = 6
| [
"tan.weixiu@outlook.com"
] | tan.weixiu@outlook.com |
5241322fc5a9b59e05984a280cf07e11a2302750 | 819e52d85d0eb5ce7a7545b27371b7ea931e7884 | /fix_scene_shaders_gui.py | d195a142cbd458fceba4935b23306704e60cdac9 | [
"MIT"
] | permissive | AndreySibiryakov/tools | 772a92d3837b49079726cbf595f5d2bdd5d9f16c | 2a78f3ebfac78841eb69b2aa771a2faa10b8d827 | refs/heads/master | 2021-01-15T08:14:07.191192 | 2019-12-06T09:04:12 | 2019-12-06T09:04:12 | 99,560,696 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,225 | py | import re
import os
import maya.cmds as cmds
import maya.mel as mel
def get_meshes():
'''Gets all the transform meshes node names from a scene
Returns:
list: all meshes in scene
'''
objects = cmds.ls('*', type='mesh')
meshes = cmds.listRelatives(objects, parent=True)
meshes = list(set(meshes))
return meshes
def find_texture(texture):
'''Searches for the texture file by its name in a given drive.
Uses to substitute old broken paths by up to date ones.
Args:
texture (str): full path of the texture
Returns:
str: Up to date full texture path
'''
path = 't:/'
name = os.path.basename(texture)
for path, dirs, file_names in os.walk(path):
for file_name in file_names:
if file_name == name:
new_texture = os.path.join(path, file_name)
return new_texture
return texture
def fix_texture_files_path():
'''Substitutes the right image path for all texture file nodes in scene.
Returns:
None:
'''
texutre_files = cmds.ls(type='file')
for t_file in texutre_files:
texture = cmds.getAttr("%s.fileTextureName" % t_file)
found_texture_path = find_texture(texture)
if found_texture_path:
cmds.setAttr("%s.fileTextureName" % t_file, found_texture_path, type="string")
def get_texture_name(texture):
'''Gets image file name of a texture node.
Args:
texture (str): file node
Returns:
str: image file name
'''
try:
texture_path = cmds.getAttr("%s.fileTextureName" % texture)
return os.path.basename(texture_path).split('.')[0]
except Exception as e:
print '# %s\nError while executing function get_texture_name().' % e
return
def get_texture(f):
'''Gets 2D texture node from a given texture node.
Returns:
str: 2D texture node name
'''
return cmds.ls(cmds.listHistory(f), type='place2dTexture')
def rename_2d_texture(texture, name):
'''Renames 2D texture node from a given texture node.
Args:
texture (str): texture name
name (str): new name for 2D texture
Returns:
None:
'''
texture_2d = cmds.ls(cmds.listHistory(str(texture)), type='place2dTexture')
if texture_2d:
cmds.rename(texture_2d[0], name)
def rename_sg_nodes(template, sg='', texture='', count=''):
'''Renames nodes connected to a shading group or a texture according to template naming.
Texture nodes types (normal, diffuse, specular) are guessed according to prefixes.
They are standart: '_nm', '_refl' or '_bump'. Diffuse has no prefix.
Args:
template (str): name template to be given to nodes
sg (str, optional): shading group node name. sg or texture should be filled.
texture (str, optional): texture node name. sg or texture should be filled.
count (str, optional): prefix. Should be written with underscore '_2'
Returns:
None:
'''
if not sg and not texture:
return
if not sg and texture:
shaders = list(set(cmds.ls(cmds.listConnections(texture), materials=True)))
sgs = list(set(cmds.ls(cmds.listConnections(shaders), type='shadingEngine')))
else:
sgs = [sg]
shaders = list(set(cmds.ls(cmds.listConnections(sg), materials=True)))
# Protects from bad named normals
if shaders and sgs:
nodes = cmds.listHistory(sgs)
files = list(set(cmds.ls(nodes, type='file')))
tangent = cmds.ls(nodes, type='bump2d')
else:
return
# Pattern to specify type of materials in image file name.
refl_pr = ur'_refl\b'
nm_pr = ur'_nm\b'
bump_pr = ur'_bump\b'
# Prefixes, that are defined while renaming sg nodes.
sg_pr = '_SG'
sh_pr = '_M'
diffuse_pr = '_D'
specular_pr = '_S'
normal_pr = '_N'
tangent_pr = '_N_Tangent'
texture_pr = '_2D'
# Another protection from bad image files naming.
# Assuming, that diffuse, specular and normal can be used only once in material.
diffuse_used = False
specular_used = False
normal_used = False
for f in list(set(files)):
texture_name = get_texture_name(f)
if not texture_name:
continue
if re.search(refl_pr, texture_name) or re.search(bump_pr, texture_name) and not specular_used:
cmds.rename(f, template + specular_pr + count)
rename_2d_texture(template + specular_pr + count, template + specular_pr + texture_pr + count)
specular_used = True
elif re.search(nm_pr, texture_name) and not normal_used:
cmds.rename(f, template + normal_pr + count)
rename_2d_texture(template + normal_pr + count, template + normal_pr + texture_pr + count)
normal_used = True
elif not diffuse_used:
cmds.rename(f, template + diffuse_pr + count)
rename_2d_texture(template + diffuse_pr + count, template + diffuse_pr + texture_pr + count)
diffuse_used = True
else:
print '# Skipping file %s with texture name %s' % (f, texture_name)
if tangent:
try:
cmds.rename(tangent, template + tangent_pr + count)
except:
print '# Did not rename tangent node %s' % tangent
for shader in shaders:
try:
cmds.rename(shader, template + sh_pr + count)
except:
print '# Did not rename shader node %s' % shader
for sg in sgs:
try:
cmds.rename(sg, template + sg_pr + count)
except:
print '# Did not rename shading group node %s' % sg
def find_identical_meshes(regex, vtxs_check=True, vtxs=[]):
'''Searches for all polygon meshes in scene that matches regex expression
and optional - vertex count.
Preset vertex count fits two main types of head meshes, male, female,
and male neck cut.
Args:
regex (str): regular expression
vtxs_check (boolean, optional): condition, if to check the vertex count on
top of the regular expression
vtxs (list, optional): vertices count (int)
Returns:
list: polygon meshes that match search parameters
Examples:
>>> find_identical_meshes('(_head|head_)')
'''
# 2770 - is for cut Krest cut neck head
if not vtxs:
vtxs = [2782, 3335, 2770]
meshes = get_meshes()
found = []
[found.append(m) for m in meshes if re.search(regex, m)]
if not found:
return
# Meshes I'm searching for can by messy named,
# so the only way to find them is to compare by vertices quantity.
if not vtxs:
return found
meshes_filtered = [m for m in found for vtx in vtxs if cmds.polyEvaluate(m, v=True) == vtx]
if meshes_filtered:
return meshes_filtered
def get_mesh_diffuse_name(mesh):
'''Returns diffuse image name, that is connected to mesh material.
Diffuse is guessed with a help of prefixes of non-diffuse textures.
[ur'_refl\b', ur'_nm\b', ur'_bump\b'].
Args:
mesh (str):
Returns:
list: diffuse image names
'''
shape = cmds.listRelatives(mesh, c=True)[0]
sgs = list(set(cmds.ls(cmds.listConnections(shape), type='shadingEngine')))
nodes = cmds.listHistory(sgs)
textures = list(set(cmds.ls(nodes, type='file')))
diffuse_names = []
not_diffuse = [ur'_refl\b', ur'_nm\b', ur'_bump\b']
for texture in textures:
texture_file = cmds.getAttr("%s.fileTextureName" % texture)
texture_file_name = os.path.basename(texture_file).split('.')[0]
if [True for d in not_diffuse if re.search(d, texture_file_name)]:
continue
else:
diffuse_names.append(texture_file_name)
return list(set(diffuse_names))
def get_most_used_texture_and_heads():
'''Searches for all polygon meshes in scene that matches regex expression
and optional - vertex count.
Preset vertex count fits two main types of head meshes, male, female,
and male neck cut.
Args:
regex (str): regular expression
vtxs_check (boolean, optional): condition, if to check the vertex count on
top of the regular expression
vtxs (list, optional): vertices count (int)
Returns:
list: polygon meshes that match search parameters
Examples:
>>> find_identical_meshes('(_head|head_)')
'''
# texture:number of used times
head_match = {}
# texture:head using it
head_t_m_data = {}
# texture: sll the heads using it
head_used_data = {}
for head in find_identical_meshes('(_head|head_)'):
if not get_mesh_diffuse_name(head):
continue
head_t = get_mesh_diffuse_name(head)[0]
if head_t not in head_t_m_data.keys():
head_used_data[head_t] = [head]
else:
heads = head_used_data[head_t]
heads.append(head)
head_used_data[head_t] = heads
if head_t not in head_match.keys():
head_match[head_t] = 1
else:
head_match[head_t] += 1
head_t_m_data[head_t] = head
max_t = max(head_match.values())
most_texture = [h for h in head_match.keys() if head_match[h] == max_t][0]
head_of_most_texture = head_t_m_data[most_texture]
another_heads = head_used_data[most_texture]
return head_of_most_texture, another_heads
def get_shading_group(mesh):
'''Gets shading group connected to a given mesh
Args:
mesh (str):
Returns:
list:
'''
shape = cmds.listRelatives(mesh, c=True)[0]
return list(set(cmds.ls(cmds.listConnections(shape), type='shadingEngine')))
def split_mesh_on_parts(mesh):
'''Divides polygonal mesh on the shader assigned zones.
Args:
mesh (str):
Returns:
dict: shading group name as key, list of mesh faces of mesh shape as value
'''
sgs = get_shading_group(mesh)
if not sgs:
return
# Not sure about this. better leave for n
sgs = list(set(sgs))
mesh_data = {}
for sg in sgs:
cmds.hyperShade(objects=sg)
selection = cmds.ls(sl=True)
# filter selection to a given mesh only
selection = [s for s in selection if mesh in s]
mesh_data[sg] = selection
return mesh_data
def add_head_sg(sg, template, count=''):
'''Creates duplicate of a given shading group
with a template name
Args:
sg (str): shading group name
template (str): new name of a duplicated shading group
count (str, optional): vertices count (int)
Returns:
list: updated shader name
'''
# Check base on the only one shader name.
if not count or count <= 1:
shader = template + '_M'
else:
shader = template + '_M' + count
if not cmds.objExists(shader):
tmp_head_name = 'tmp_head_M'
cmds.duplicate(sg, rr=True, un=True, name=tmp_head_name)
rename_sg_nodes(template, sg=tmp_head_name, count=count)
return shader
def fix_sg_names():
'''Iterates through all texture file nodes in scene.
Renames all nodes connected to texture: shaders, shading
groups, 2d textures, files
'''
mel.eval('hyperShadePanelMenuCommand("hyperShadePanel1", "deleteUnusedNodes");')
# Disables because T drive is not set up everywhere
# fix_texture_files_path()
not_diffuse_prs = [ur'_refl\b', ur'_bump\b', ur'_nm\b']
texture_data = {}
counted_used = {}
# Gets {texture name:file texture name} to prevent from
# using the same texture twise, because function rename_sg_nodes()
# goes up to sg and then lists all textures connected to sg.
for texture in sorted(cmds.ls(type='file')):
texture_file = cmds.getAttr("%s.fileTextureName" % texture)
texture_file_name = os.path.basename(texture_file).split('.')[0]
texture_file_name = re.sub(r"[^a-zA-Z0-9_.:]", '_', texture_file_name)
texture_data[texture] = texture_file_name
for texture, texture_file_name in texture_data.iteritems():
if not cmds.objExists(texture):
continue
if [True for regex in not_diffuse_prs if re.search(regex, texture_file_name)]:
continue
# Decided to add counter like this: material, material_2, material_3.
if texture_file_name in counted_used.keys():
counter = counted_used[texture_file_name] + 1
counted_used[texture_file_name] = counter
counter_pr = '_' + str(counter)
else:
counted_used[texture_file_name] = 1
counter_pr = ''
rename_sg_nodes(texture_file_name, texture=texture, count=counter_pr)
def fix_face_names():
'''Renames shading group and connected nodes of head meshes that uses
the same image texture (diffuse). Template 'face' is used as name mask.
If there are other head meshes with different textures most common
texture on head is used. Updated materials applied to head meshes.
Old named shading group is deleted
'''
heads_identical = []
head, heads_identical = get_most_used_texture_and_heads()
# head_sg = get_shading_group(head)[0]
head_template_name = 'face'
if not heads_identical:
return
for head_identical in heads_identical:
head_parts = split_mesh_on_parts(head_identical)
counter = 1
for sg, part in head_parts.iteritems():
if counter == 1:
cmds.select(part)
cmds.hyperShade(assign=add_head_sg(sg, head_template_name, count=''))
else:
cmds.select(part)
cmds.hyperShade(assign=add_head_sg(sg, head_template_name, count='_' + str(counter)))
counter += 1
mel.eval('hyperShadePanelMenuCommand("hyperShadePanel1", "deleteUnusedNodes");')
def fix_materials(*args):
'''General function that organizes scene shading groups
in two passes. This one is the second one,
that renames shading groups - fix_sg_names().
'''
sg_fixed = False
try:
fix_sg_names()
print '# Fixed shading nodes names.'
sg_fixed = True
except Exception as e:
print '# %s. Error while fixing scene shading group names.' % e
'''This one is the second one, that renames shading groups
connected to head meshes - fix_face_names().
'''
if sg_fixed:
try:
fix_face_names()
print '# Fixed face material names.'
except Exception as e:
print '# %s. Error while fixing face names.' % e
else:
print '# Skipping face names fix.'
def enable_checkboxes(*args):
'''General function that organizes scene shading groups.
Enables all checkboxes in the menu - Show
'''
for model_p in ['modelPanel1', 'modelPanel2', 'modelPanel3', 'modelPanel4']:
try:
cmds.modelEditor(model_p,
allObjects=True,
grid=True,
hud=True,
sel=True,
manipulators=True,
edit=True)
except:
pass
# gui
cmds.window('Fix Materials', width=250)
cmds.columnLayout(adjustableColumn=True)
cmds.button(label='Fix',
command=fix_materials,
ann='Fix all materials in scene including heads')
cmds.button(label='Enable Show Checkboxes',
command=enable_checkboxes,
ann='Turns on all checkboxes in show menu')
cmds.showWindow()
| [
"noreply@github.com"
] | AndreySibiryakov.noreply@github.com |
bfde8700e2d3270e1f8eb1bb859935edf5db3bff | 9e3ffeffb98d881f4dee9fb81f2f058f48755fb3 | /unittest/testtask2.py | 8a1f1398578da32f11f98de8040d96511439a500 | [] | no_license | freesense/IFIAC | 32b642636d601fe0b136557be5eb8aa99af67a7e | d8ef65e4bd7b15aedaf9b6af36ecfed2d178a317 | refs/heads/master | 2020-04-26T16:34:00.528970 | 2019-03-04T06:05:45 | 2019-03-04T06:05:45 | 173,683,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | import time
from APP.invokerSHARE import TaskBase
class CTest1(TaskBase):
pass
idx = 2
b, c = None, 0
def testtask2(*a, **kw):
time.sleep(idx*60+59*2)
global b, c
c += 1
n = time.localtime()
if c == 1:
b = time.localtime()
else:
print idx+1, n, b, '% 3d % 3d' % (c, (time.mktime(n)-time.mktime(b))/(c-1))
| [
"freesense@126.com"
] | freesense@126.com |
c930ca2104aa8288425358d69ecbd3581df54105 | 78ba9cf8127630a731489c77e32a45f94bc65871 | /library/ix-dev/community/chia/migrations/migrate | 8f739ff4c45710b06a6069e44a16fe4d3c71d18c | [
"BSD-3-Clause"
] | permissive | truenas/charts | ba97b64975985619cfefe27f1b53abbb6efa7d7b | 2735b346592f817fdc568687a6f339b109cdb482 | refs/heads/master | 2023-08-31T22:14:21.054670 | 2023-08-31T07:26:04 | 2023-08-31T07:26:04 | 295,212,900 | 156 | 252 | BSD-3-Clause | 2023-09-14T18:47:59 | 2020-09-13T18:31:04 | Smarty | UTF-8 | Python | false | false | 708 | #!/usr/bin/python3
import json
import os
import sys
def migrate(values):
storageKey = 'chiaStorage'
storages = ['data', 'plots']
for storage in storages:
check_val = values.get(storageKey, {}).get(storage, {})
if not isinstance(check_val, dict) or not check_val or check_val.get('type', 'hostPath') == 'hostPath':
continue
values[storageKey][storage] = {key: value for key, value in check_val.items() if key != 'hostPath'}
return values
if __name__ == '__main__':
if len(sys.argv) != 2:
exit(1)
if os.path.exists(sys.argv[1]):
with open(sys.argv[1], 'r') as f:
print(json.dumps(migrate(json.loads(f.read()))))
| [
"noreply@github.com"
] | truenas.noreply@github.com | |
a7cd21c1b64e44125460cfabf7a1036f6e792eff | 5761f160cdbd97b8c69601cd85bf41788b257ace | /Clases/TAREA_1/QUIZDanielAvila_hw1/integral.py | 15a32ba1e3ea388d16492f6169b0f02f2a5de178 | [] | no_license | daavera/METODOS | 3c63d7636ad880cad96cc6ef4f488c4e7663af58 | 775b47de08bf729e1388be46b61c5456f4524098 | refs/heads/master | 2020-12-02T05:24:41.789369 | 2017-07-30T18:00:34 | 2017-07-30T18:00:34 | 96,900,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | import numpy as np
import matplotlib.pyplot as plt
def f(x):
return np.sin(x)
x = np.linspace(0,np.pi,1000)
y = f(x)
n_random = 10000
def integral(x,y,n_random):
y_rand = (np.random.rand(n_random) * (np.max(y)-np.min(y))) + np.min(y)
x_rand = (np.random.rand(n_random) * (np.max(x)-np.min(x))) + np.min(x)
delta = f(x_rand) - y_rand
int_interval = (np.max(y)-np.min(y)) * (np.max(x)-np.min(x))
integral = int_interval * np.size(np.where(delta>0))/np.size(y_rand)*1.0
return integral
prom_int=[]
for i in range(20):
prom_int.append(integral(x,y,n_random))
prom_int = np.array(prom_int)
prom_int = np.sum(prom_int)/np.size(prom_int)
print('El valor de la integral es %f' %(prom_int))
| [
"da.avila@uniandes.edu.co"
] | da.avila@uniandes.edu.co |
3110be0789bcdd17dae6368e478eff526594e13b | 36cca47c3fa3a00f73fd4dd1ac375297f5c6d0a0 | /docs/conf.py | b7b7ac852b745ce03a6ba9fea10307b5dca628d3 | [
"MIT"
] | permissive | lugq1990/scikit-plot | 49f2adff7235ff5f7ab5d32053e9f3dc8aaa343a | c581bc590ad35e6bfe268a5073f60ec1e598c943 | refs/heads/master | 2022-02-14T03:22:57.896768 | 2019-07-08T09:55:24 | 2019-07-08T09:55:24 | 114,605,293 | 0 | 0 | MIT | 2018-12-04T07:01:26 | 2017-12-18T06:44:05 | Python | UTF-8 | Python | false | false | 4,881 | py | # -*- coding: utf-8 -*-
#
# Scikit-plot documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:56:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Scikit-plot'
copyright = u'2017, Reiichiro S. Nakano'
author = u'Reiichiro S. Nakano'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Scikit-plotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Scikit-plot.tex', u'Scikit-plot Documentation',
u'Reiichiro S. Nakano', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scikit-plot', u'Scikit-plot Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Scikit-plot', u'Scikit-plot Documentation',
author, 'Scikit-plot', 'One line description of project.',
'Miscellaneous'),
]
| [
"reii_nakano@yahoo.com"
] | reii_nakano@yahoo.com |
c68ce18e4ce9accdd1f8f3483b8c02924877afc6 | 5f375bc4daecac1ea67036d7fb9f1491a172f885 | /bitbucket/issue_comment.py | 2005dcaeb209515f72618a67ca267a0e1accf754 | [
"ISC"
] | permissive | goodtune/BitBucket-api | 33aa58c265eb65a351b0c400cef960c218311eaa | 7bcad56a135a8dffac138a1fe12373ea6bd0e675 | refs/heads/master | 2023-04-06T14:41:02.943659 | 2013-07-16T09:22:44 | 2013-07-16T09:22:44 | 11,445,517 | 0 | 0 | ISC | 2023-04-04T00:18:44 | 2013-07-16T09:20:43 | Python | UTF-8 | Python | false | false | 3,609 | py | # -*- coding: utf-8 -*-
URLS = {
# Issue comments
'GET_COMMENTS': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/',
'GET_COMMENT': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/%(comment_id)s/',
'CREATE_COMMENT': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/',
'UPDATE_COMMENT': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/%(comment_id)s/',
'DELETE_COMMENT': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/%(comment_id)s/',
}
class IssueComment(object):
""" This class provide issue's comments related methods to Bitbucket objects."""
def __init__(self, issue):
self.issue = issue
self.bitbucket = self.issue.bitbucket
self.bitbucket.URLS.update(URLS)
self.issue_id = issue.issue_id
def all(self, issue_id=None, repo_slug=None):
""" Get issue comments from one of your repositories.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_COMMENTS', username=self.bitbucket.username,
repo_slug=repo_slug, issue_id=issue_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def get(self, comment_id, issue_id=None, repo_slug=None):
""" Get an issue from one of your repositories.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_COMMENT', username=self.bitbucket.username,
repo_slug=repo_slug, issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, issue_id=None, repo_slug=None, **kwargs):
""" Add an issue comment to one of your repositories.
Each issue comment require only the content data field
the system autopopulate the rest.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('CREATE_COMMENT', username=self.bitbucket.username,
repo_slug=repo_slug, issue_id=issue_id)
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, **kwargs)
def update(self, comment_id, issue_id=None, repo_slug=None, **kwargs):
""" Update an issue comment in one of your repositories.
Each issue comment require only the content data field
the system autopopulate the rest.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('UPDATE_COMMENT', username=self.bitbucket.username,
repo_slug=repo_slug, issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('PUT', url, auth=self.bitbucket.auth, **kwargs)
def delete(self, comment_id, issue_id=None, repo_slug=None):
""" Delete an issue from one of your repositories.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_COMMENT', username=self.bitbucket.username,
repo_slug=repo_slug, issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
| [
"baptiste@smoothie-creative.com"
] | baptiste@smoothie-creative.com |
c0ca5a6f82956f92a7ef0182fded0c2af3d3ef8f | 392d918164b93f5fcee74b570928a28575f4d7e0 | /Fifa/Fifa/settings.py | fb0886bef1f295c9d4ce4e29cab08b79c6efcbe1 | [] | no_license | lenusca/FIFA | 1248bef5741ae35326dabed0a350bf4517d5ecde | f1d152663754ecd8c331d71a194215673bc75a5d | refs/heads/master | 2022-03-21T16:01:39.963732 | 2019-11-03T21:23:45 | 2019-11-03T21:23:45 | 213,355,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,156 | py | """
Django settings for Fifa project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^*0uh23q^-o01jvf(4shji3se7y8c#tb0^y%&2wd(5zk3!94d3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app.apps.AppConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Fifa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'app/templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Fifa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"helenamncardoso@gmail.com"
] | helenamncardoso@gmail.com |
b7684794f27be04b3fce498506f4aa79417899d1 | 81c4f1f3a363e6b1cb77a1faf7f7eaf96c30bf67 | /vae/src/modules/distributions.py | 0bd3e37371dbeb3db7cd2af7ac281d4b5988d829 | [
"MIT"
] | permissive | TangleSpace/DeepGenerativeModels | 320f8475613f7057fa30fa0a3968b2e12685e7c2 | c6924e91de475be36253f9f20b687d1e1c8b0dde | refs/heads/master | 2022-03-28T07:06:11.777236 | 2020-01-14T22:46:20 | 2020-01-14T22:46:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,710 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from src.utils import args
#####################
### HELPERS ###
#####################
def reparameterize(z_mean, z_log_var):
    """Draw z ~ N(z_mean, exp(z_log_var)) via the reparameterization trick.

    The noise is sampled first (torch.randn_like), matching the usual
    epsilon * sigma + mu formulation so gradients flow through mu/log-var.
    """
    noise = torch.randn_like(z_mean)
    std = torch.exp(0.5 * z_log_var)
    return z_mean + std * noise
def logsumexp(x, dim=None):
    """
    Numerically stable log(sum(exp(x))).

    Args:
        x: A pytorch tensor (any dimension will do)
        dim: int or None, over which to perform the summation. `None`, the
            default, performs over all axes.

    Returns: The result of the log(sum(exp(...))) operation.
    """
    if dim is None:
        xmax = x.max()
        # subtract the max before exponentiating to avoid overflow
        return xmax + torch.log(torch.exp(x - xmax).sum())
    # Single max pass (the original computed the max twice, once with
    # keepdim for broadcasting and once without for the additive term);
    # squeezing the kept dim reproduces the non-keepdim value exactly.
    xmax, _ = x.max(dim, keepdim=True)
    return xmax.squeeze(dim) + torch.log(torch.exp(x - xmax).sum(dim))
def mse_loss(x, x_mean, reduction='none'):
    """Negative squared error summed over all non-batch dimensions.

    NOTE(review): ``reduction`` is accepted for API symmetry but not
    forwarded -- the elementwise ('none') loss is always used, then summed
    per sample, exactly as in the original implementation.
    """
    per_elem = F.mse_loss(x, x_mean, reduction='none')
    flat = per_elem.view(per_elem.shape[0], -1)
    return -flat.sum(dim=1)
###################################
### NORMAL DISTRIBUTIONS ###
###################################
def log_normal_diag(x, mean, log_var, reduction='sum', dim=None):
    """Log-density of a diagonal Gaussian, up to the constant -0.5*log(2*pi).

    `reduction` names a torch reduction ('sum', 'mean', ...); `dim` picks
    the axis to reduce over (None reduces over everything).
    """
    log_prob = -0.5 * (log_var + (x - mean) ** 2 / log_var.exp())
    reduce_fn = getattr(torch, reduction)
    return reduce_fn(log_prob) if dim is None else reduce_fn(log_prob, dim)
def log_normal_std(x, dim=None):
    """Standard-Normal log-density (up to the constant), summed over `dim`
    or over all elements when `dim` is None."""
    log_prob = -0.5 * x ** 2
    return log_prob.sum() if dim is None else log_prob.sum(dim)
###################################
### Discretized logistic ###
###################################
def discretized_logistic_loss(x, x_logit, reduction='none'):
    """256-way categorical ("discretized") reconstruction loss.

    Treats each pixel of `x` (scaled to [0, 1]) as an integer class in
    [0, 255] and scores it against per-pixel logits with cross entropy,
    then returns the negated per-sample sum.
    """
    num_classes = 256
    # integer class labels per pixel
    target = (x * (num_classes - 1)).long()
    batch = x_logit.shape[0]
    logits = x_logit.view(batch, num_classes, -1,
                          x_logit.shape[-2], x_logit.shape[-1])
    ce = F.cross_entropy(logits, target, reduction=reduction)
    return -ce.view(batch, -1).sum(1)
def sample_from_discretized_logistic_loss(logits, nc, random_sample=False):
    """Greedy (argmax) decoding of the 256-way categorical output to [0, 1].

    NOTE: `random_sample=True` currently falls back to argmax as well --
    the original TODO (draw from the categorical) is still open. `nc` is
    accepted but unused.
    """
    num_classes = 256
    shaped = logits.view(logits.shape[0], num_classes, -1,
                         logits.shape[-2], logits.shape[-1])
    # both modes take the most likely class per pixel (see NOTE above)
    idx = shaped.max(dim=1)[1]
    return idx.float() / (num_classes - 1.)
########################################
### Mix of Discretized logistic ####
########################################
def discretized_mix_logistic_loss(x, output, nc=3, nmix=10):
    """
    Discretized mix of logistic distributions loss for color images.
    HACK to work for grey images also, no changes for color images.
    Note that it is assumed that input is scaled to [-1, 1]

    Returns the per-sample log-likelihood (shape [batch]); note it is a
    *log-probability*, not a negated loss.
    """
    # batch_size_size = x.shape[0]
    nsampels = args.z_dim  # NOTE(review): never used below; presumably leftover
    bin_size = 1. / 255.   # half-width of one 8-bit intensity bin in [-1, 1]
    lower = 1. / 255. - 1.0
    upper = 1.0 - 1. / 255.
    eps = 1e-12
    ################################################################
    # Channel layout of `output`:
    #   [:nmix]                       mixture logits
    #   next nc*nmix                  component means
    #   next nc*nmix                  component log-scales
    #   next 3*nmix                   channel-coupling coefficients
    logit_probs = output[:, :nmix]
    batch_size, nmix, H, W = logit_probs.size()  # rebinds nmix from the tensor
    # [BATCH, nmix, nc, H, W]
    means = output[:, nmix:(nc + 1) * nmix].view(batch_size, nmix, nc, H, W)
    logscales = output[:, (nc + 1) * nmix:(nc * 2 + 1) * nmix].view(batch_size, nmix, nc, H, W)
    coeffs = output[:, (nc * 2 + 1) * nmix:(nc * 2 + 4) * nmix].view(batch_size, nmix, nc, H, W)
    logscales = logscales.clamp(min=-7.)             # numerical floor on scales
    logit_probs = F.log_softmax(logit_probs, dim=1)  # normalized mixture weights
    coeffs = coeffs.tanh()                           # coefficients in (-1, 1)
    ################################################################
    x = x.unsqueeze(1)  # add mixture dim so x broadcasts against components
    means = means.view(batch_size, *means.size()[1:])
    logscales = logscales.view(batch_size, *logscales.size()[1:])
    coeffs = coeffs.view(batch_size, *coeffs.size()[1:])
    logit_probs = logit_probs.view(batch_size, *logit_probs.size()[1:])
    ################################################################
    if nc==3:
        # PixelCNN++-style autoregression over channels: G conditioned on R,
        # B conditioned on R and G.
        mean0 = means[:, :, 0]
        mean1 = means[:, :, 1] + coeffs[:, :, 0] * x[:, :, 0]
        mean2 = means[:, :, 2] + coeffs[:, :, 1] * x[:, :, 0] + coeffs[:, :, 2] * x[:, :, 1]
        means = torch.stack([mean0, mean1, mean2], dim=2)
    elif nc==1:
        # grey-image HACK: scale the single-channel mean by the first coeff
        means = means[:, :, 0].unsqueeze(2) * coeffs[:, :, 0].unsqueeze(2)
    centered_x = x - means
    inv_stdv = torch.exp(-logscales)
    # [batch_size, nmix, nc, H, W]
    min_in = inv_stdv * (centered_x - bin_size)
    plus_in = inv_stdv * (centered_x + bin_size)
    x_in = inv_stdv * centered_x
    # [batch_size, nsamples, nmix, nc, H, W]
    cdf_min = torch.sigmoid(min_in)
    cdf_plus = torch.sigmoid(plus_in)
    # lower < x < upper: probability mass of one intensity bin
    cdf_delta = cdf_plus - cdf_min
    log_cdf_mid = torch.log(cdf_delta.clamp(min=eps))
    # PDF-based approximation used when the bin mass underflows
    log_cdf_approx = x_in - logscales - 2. * F.softplus(x_in) + np.log(2 * bin_size)
    # x < lower: open lower tail
    log_cdf_low = plus_in - F.softplus(plus_in)
    # x > upper: open upper tail
    log_cdf_up = -F.softplus(min_in)
    # blend exact bin mass with the approximation, then patch in the edges
    mask_delta = cdf_delta.gt(1e-5).float()
    log_cdf = log_cdf_mid * mask_delta + log_cdf_approx * (1.0 - mask_delta)
    mask_lower = x.ge(lower).float()
    mask_upper = x.le(upper).float()
    log_cdf = log_cdf_low * (1.0 - mask_lower) + log_cdf * mask_lower
    log_cdf = log_cdf_up * (1.0 - mask_upper) + log_cdf * mask_upper
    # sum over channels, mix over components in log space, sum over pixels
    loss = logsumexp(log_cdf.sum(dim=2) + logit_probs, dim=1)
    return loss.view(loss.shape[0], -1).sum(1)
def sample_from_discretized_mix_logistic(x_mean, nc=3, nmix=10, random_sample=True):
    """
    Sample an image from a mixture-of-logistics output.

    Args:
        x_mean: raw network output; same channel layout as
            discretized_mix_logistic_loss (logits, means, logscales, coeffs).
        nc: number of image channels (3 = color, 1 = grey).
        nmix: number of mixture components.
        random_sample: boolean; if False, return the selected means directly.
    Returns:
        samples [batch_size, nc, H, W], clamped to [-1, 1]
    """
    # Unpack mixture parameters (mirrors the loss function's layout).
    logit_probs = x_mean[:, :nmix]
    batch_size, nmix, H, W = logit_probs.size()
    # [BATCH, nmix, nc, H, W]
    means = x_mean[:, nmix:(nc + 1) * nmix].view(batch_size, nmix, nc, H, W)
    logscales = x_mean[:, (nc + 1) * nmix:(nc * 2 + 1) * nmix].view(batch_size, nmix, nc, H, W)
    coeffs = x_mean[:, (nc * 2 + 1) * nmix:(nc * 2 + 4) * nmix].view(batch_size, nmix, nc, H, W)
    logscales = logscales.clamp(min=-7.)
    logit_probs = F.log_softmax(logit_probs, dim=1)
    coeffs = coeffs.tanh()
    # Select the most probable component per pixel (argmax, not a
    # categorical draw) and expand the index across channels.
    # [batch_size, 1, H, W] -> [batch_size, nc, H, W]
    index = logit_probs.argmax(dim=1, keepdim=True) + logit_probs.new_zeros(means.size(0), *means.size()[2:]).long()
    # [batch_size, nc, H, W] -> [batch_size, 1, nc, H, W]
    index = index.unsqueeze(1)
    one_hot = means.new_zeros(means.size()).scatter_(1, index, 1)
    # Reduce the mixture dim with the one-hot selector -> [batch_size, nc, H, W]
    means = (means * one_hot).sum(dim=1)
    logscales = (logscales * one_hot).sum(dim=1)
    coeffs = (coeffs * one_hot).sum(dim=1)
    x = means
    if random_sample:
        # Inverse-CDF sampling of a logistic: mu + s * logit(u), u~U(0,1)
        # (u clipped away from 0/1 for numerical safety).
        u = means.new_zeros(means.size()).uniform_(1e-5, 1 - 1e-5)
        x = x + logscales.exp() * (torch.log(u) - torch.log(1.0 - u))
    # [batch_size, H, W]
    if nc==3:
        # Undo the autoregressive channel coupling, clamping each channel.
        x0 = x[:, 0].clamp(min=-1., max=1.)
        x1 = (x[:, 1] + coeffs[:, 0] * x0).clamp(min=-1., max=1.)
        x2 = (x[:, 2] + coeffs[:, 1] * x0 + coeffs[:, 2] * x1).clamp(min=-1., max=1.)
        x = torch.stack([x0, x1, x2], dim=1)
    elif nc==1:
        # grey-image HACK: mirrors the coeff scaling applied in the loss
        x = (x * coeffs).clamp(min=-1., max=1.)
    return x
if __name__ == "__main__":
pass
| [
"johngatop@gmail.com"
] | johngatop@gmail.com |
1b70f2c79a348180971c5ae664a3ee3a8482424a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03378/s251701721.py | 49de078c7467f51e9581f9eab691c6a075c1561c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | n,m,s = [int(x) for x in input().split()]
# NOTE: n, m and s are read from the first input line (just above this
# chunk): n,m,s = [int(x) for x in input().split()]
a = [int(x) for x in input().split()]  # m positions/values to compare with s
low = 0   # entries strictly below the threshold s
high = 0  # entries at or above s
for i in range(m):
    if a[i] < s:
        low += 1
    else:
        high += 1
# answer: the cheaper side to pass through
print(min(low,high))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
333c75b551e4d62e7e80906e1b5ab7e2af0653cc | bd28f8a8dbcf7f2b4be3bcc0c0e656009191d379 | /predict_nn/ranlp/rsr_dev/mi/ian.py | 58b47a880118a587446b42c4ca6f575d9f0355ea | [
"MIT"
] | permissive | nicolay-r/attitudes-extraction-ds | e2e5f9218408514ca1f3eff5edf88771e2f368ee | 49a82843e6adbca35321aaaa08d05532e953a0fc | refs/heads/master | 2022-08-30T04:51:14.133899 | 2020-05-28T11:06:01 | 2020-05-28T11:06:01 | 197,908,649 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | #!/usr/bin/python
import sys
sys.path.append('../../../../')
from predict_nn.ranlp.rsr_dev.config import TEST_ON_EPOCHS, MI_CONTEXTS_PER_OPINION
from networks.ranlp.io_rsr_dev import RaNLPConfTaskRuSentRelWithDevIO
from networks.mimlre.base import MIMLRE
from networks.context.architectures.ian import IAN
from networks.context.configurations.ian import IANConfig
from predict_nn.ranlp.mi_names import ModelNames
from networks.ranlp.model_mimlre import RaNLPConfTaskMIMLREModel
from networks.mimlre.configuration.base import MIMLRESettings
import predict_nn.ranlp.utils as utils
def modify_settings(settings):
    """Set how many text contexts are aggregated per opinion on a MIMLRE config."""
    assert(isinstance(settings, MIMLRESettings))
    settings.modify_contexts_per_opinion(MI_CONTEXTS_PER_OPINION)
if __name__ == "__main__":
    # Cross-validated training/testing of the multi-instance MIMLRE model
    # with an IAN context encoder on the RuSentRel-with-dev split.
    utils.run_cv_testing(model_name=ModelNames.MI_IAN,
                         create_network=lambda: MIMLRE(context_network=IAN()),
                         create_config=lambda: MIMLRESettings(context_settings=IANConfig()),
                         create_io=RaNLPConfTaskRuSentRelWithDevIO,
                         create_model=RaNLPConfTaskMIMLREModel,
                         modify_settings_callback=modify_settings,
                         test_on_epochs=TEST_ON_EPOCHS)
| [
"kolyarus@yandex.ru"
] | kolyarus@yandex.ru |
07eb17285033c1e32f63c62d1bc2852e4964c4f9 | 1ec6cc75b884c31205f2edb4495d667742c01095 | /parse_workua.py | e9c89b9200e9ed8d3aea13a795a2a71b2c7ae4fe | [
"MIT"
] | permissive | miha-pavel/hillel_scr | bc4ececf5fdeefaa93adb693a33d75c2720fa9c3 | c176535d75c1ed2477394321d6552a790a6bb9cc | refs/heads/master | 2022-12-09T04:32:36.864579 | 2020-04-05T12:41:53 | 2020-04-05T12:41:53 | 251,305,308 | 0 | 0 | MIT | 2022-12-08T03:56:21 | 2020-03-30T12:58:47 | Python | UTF-8 | Python | false | false | 5,201 | py | import time
import json
import requests
import sqlite3
from user_agent import generate_user_agent
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from utils import random_sleep, save_info
# SQLite store for the scraped vacancies; the table may already exist
# from a previous run.
conn = sqlite3.connect('workua_data.sqlite')
cursor = conn.cursor()
try:
    cursor.execute('''CREATE TABLE workua_data (
                    title text,
                    salary text,
                    company text,
                    location text,
                    condition text,
                    phone text,
                    description longtext)''')
except sqlite3.OperationalError:
    # BUG FIX: was a bare `except:` -- narrow it to the error sqlite3
    # actually raises for "table already exists" so real failures
    # (KeyboardInterrupt, typos, disk errors) are no longer swallowed.
    pass
# global variables
HOST = 'https://www.work.ua'
ROOT_PATH = '/ru/jobs/'
def main():
    """Crawl work.ua job listings page by page, scrape each vacancy's
    details, bulk-insert them into SQLite and dump them to workua_data.txt.

    NOTE(review): ``page`` starts at 1 but is incremented *before* the
    first request, so listing page 1 is never fetched -- confirm intended.
    NOTE(review): the committed file's indentation was lost; the DB
    insert/teardown below is placed after the crawl loop, the only
    placement consistent with closing the connection exactly once.
    The bare ``except:`` clauses are the original best-effort scraping
    style (missing page sections simply become None).
    """
    page = 1
    result = []        # scraped vacancies as dicts (for the JSON dump)
    result_list = []   # same rows as tuples (for cursor.executemany)
    while True:
        page += 1
        payload = {
            'ss': 1,
            'page': page,
        }
        user_agent = generate_user_agent()  # fresh UA per listing page
        headers = {
            'User-Agent': user_agent,
        }
        response = requests.get(HOST + ROOT_PATH, params=payload, headers=headers)
        response.raise_for_status()
        random_sleep()  # polite crawl delay
        if response.status_code != 200:  # unreachable after raise_for_status()
            print('something wrong!')
            break
        html = response.text
        soup = BeautifulSoup(html, 'html.parser')
        class_ = 'card card-hover card-visited wordwrap job-link'
        cards = soup.find_all('div', class_=class_)
        if not cards:
            # "hot" vacancies carry an extra CSS class
            cards = soup.find_all('div', class_=class_ + ' js-hot-block')
        if not cards:
            break  # no more listing pages
        for card in cards:
            tag_a = card.find('h2').find('a')
            title = tag_a.text
            href = tag_a['href']
            # get vacancy full info
            vacancy_url = HOST + href
            response = requests.get(vacancy_url, headers=headers)
            print('vacancy_url: ', vacancy_url)
            response.raise_for_status()
            html = response.text
            soup = BeautifulSoup(html, 'html.parser')
            # Salary -- anchored on the hryvnia glyph icon
            salary_data = ''
            try:
                salary_block = soup.findAll("span", {"class": "glyphicon-hryvnia"})[0].parent
                salary_data = salary_block.findAll("b", {"class": "text-black"})[0].string
            except:
                salary_data = None
            #Company
            company_data = ''
            try:
                company_block = soup.findAll("span", {"class": "glyphicon-company"})[0].parent
                company_data = company_block.findAll("b")[0].string
            except:
                company_data = None
            #Location
            location_data = ''
            try:
                location_block = soup.findAll("span", {"class": "glyphicon-map-marker"})[0].parent
                location_data = location_block.contents[2].strip()
            except:
                location_data = None
            #Condition (employment terms; whitespace-normalized)
            condition_data = ''
            try:
                condition_block = soup.findAll("span", {"class": "glyphicon-tick"})[0].parent
                condition_data = " ".join(condition_block.contents[2].split())
            except:
                condition_data = None
            #Phone -- hidden behind a JS click, so drive a real browser
            phone_data = ''
            contact_phone = soup.find(id="contact-phone")
            if contact_phone:
                webdriver_options = Options()
                driver = webdriver.Chrome(options=webdriver_options)
                driver.get(vacancy_url)
                WebDriverWait(driver, 30).until(ec.visibility_of_element_located((By.ID, 'contact-phone')))
                driver.find_element_by_class_name('js-get-phone').click()
                opened_phone = driver.find_element_by_id('contact-phone')
                try:
                    phone_data = opened_phone.find_element_by_tag_name('a').text
                except:
                    phone_data = opened_phone.text
                driver.close()
            #Description (full text, whitespace-normalized)
            description_data = ''
            try:
                description_block = soup.find(id="job-description")
                description_data = " ".join(description_block.get_text().split())
            except:
                description_data = None
            # Load data
            result.append({
                'title': title,
                'salary': salary_data,
                'company': company_data,
                'location': location_data,
                'condition': condition_data,
                'phone': phone_data,
                'description': description_data,
            })
            result_list.append((title, salary_data, company_data, location_data, condition_data, phone_data, description_data))
    # Persist everything once the crawl finishes.
    # NOTE(review): the f-string prefix below is pointless (no placeholders).
    cursor.executemany(f'INSERT INTO workua_data (title, salary, company, location, condition, phone, description) VALUES (?, ?, ?, ?, ?, ?, ?)', result_list)
    conn.commit()
    cursor.close()
    conn.close()
    with open('workua_data.txt', 'w') as outfile:
        json.dump(result, outfile, ensure_ascii=False)
if __name__ == "__main__":
main() | [
"pavlom@oneplanetops.com"
] | pavlom@oneplanetops.com |
e5eaf4d0bc93e33a655e87b1d3d8dc906d2e7973 | 23414270f524b36972140bd9044300ada3a28136 | /消息认证算法实现/消息认证---CCM/CCM_ui.py | 31c27165f6d826985e072771e3fe2b4295f54124 | [] | no_license | Jing0607101510/CryptoAlgorithms | 421f463f5dc3e4701e8d1a5c7fbea6f772e92367 | a0a78b37b1fd07db75ea7e5ef88c2c9cfee95ced | refs/heads/master | 2021-10-09T06:57:25.105848 | 2018-12-23T06:46:20 | 2018-12-23T06:46:20 | 162,868,862 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,129 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'CCM.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated (pyuic5, from CCM.ui) widget layout for the CCM
    message-authentication demo window: an encrypt/MAC pane on the left
    and a decrypt/verify pane on the right.

    NOTE: generated code -- per the file header, manual edits are lost
    when the .ui file is recompiled.
    """

    def setupUi(self, Form):
        """Instantiate and position every widget on *Form* (absolute geometry)."""
        Form.setObjectName("Form")
        Form.resize(1121, 800)
        # --- left pane: key / CTR0 / nonce / associated-data inputs ---
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(20, 20, 72, 15))
        self.label.setObjectName("label")
        self.key_line = QtWidgets.QLineEdit(Form)
        self.key_line.setGeometry(QtCore.QRect(70, 10, 461, 31))
        self.key_line.setObjectName("key_line")
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(20, 200, 72, 15))
        self.label_2.setObjectName("label_2")
        self.textEdit_1 = QtWidgets.QTextEdit(Form)
        self.textEdit_1.setGeometry(QtCore.QRect(70, 200, 531, 201))
        self.textEdit_1.setObjectName("textEdit_1")
        self.label_3 = QtWidgets.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(10, 420, 131, 16))
        self.label_3.setObjectName("label_3")
        self.textBrowser_1 = QtWidgets.QTextBrowser(Form)
        self.textBrowser_1.setGeometry(QtCore.QRect(60, 450, 531, 91))
        self.textBrowser_1.setObjectName("textBrowser_1")
        self.encry = QtWidgets.QPushButton(Form)
        self.encry.setGeometry(QtCore.QRect(70, 760, 93, 28))
        self.encry.setObjectName("encry")
        self.clear1 = QtWidgets.QPushButton(Form)
        self.clear1.setGeometry(QtCore.QRect(180, 760, 93, 28))
        self.clear1.setObjectName("clear1")
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(550, 50, 93, 28))
        self.pushButton.setObjectName("pushButton")
        self.label_4 = QtWidgets.QLabel(Form)
        self.label_4.setGeometry(QtCore.QRect(20, 60, 72, 15))
        self.label_4.setObjectName("label_4")
        self.crt_line = QtWidgets.QLineEdit(Form)
        self.crt_line.setGeometry(QtCore.QRect(70, 50, 461, 31))
        self.crt_line.setObjectName("crt_line")
        self.label_5 = QtWidgets.QLabel(Form)
        self.label_5.setGeometry(QtCore.QRect(10, 110, 72, 15))
        self.label_5.setObjectName("label_5")
        self.temp_line = QtWidgets.QLineEdit(Form)
        self.temp_line.setGeometry(QtCore.QRect(70, 100, 461, 31))
        self.temp_line.setObjectName("temp_line")
        self.label_6 = QtWidgets.QLabel(Form)
        self.label_6.setGeometry(QtCore.QRect(10, 160, 72, 15))
        self.label_6.setObjectName("label_6")
        self.relate_line = QtWidgets.QLineEdit(Form)
        self.relate_line.setGeometry(QtCore.QRect(80, 150, 451, 31))
        self.relate_line.setObjectName("relate_line")
        self.textBrowser_2 = QtWidgets.QTextBrowser(Form)
        self.textBrowser_2.setGeometry(QtCore.QRect(60, 580, 531, 161))
        self.textBrowser_2.setObjectName("textBrowser_2")
        self.label_7 = QtWidgets.QLabel(Form)
        self.label_7.setGeometry(QtCore.QRect(10, 550, 171, 16))
        self.label_7.setObjectName("label_7")
        # separator lines between the input area and the two panes
        self.line = QtWidgets.QFrame(Form)
        self.line.setGeometry(QtCore.QRect(0, 130, 661, 16))
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.line_2 = QtWidgets.QFrame(Form)
        self.line_2.setGeometry(QtCore.QRect(650, 0, 16, 801))
        self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        # --- right pane: decryption / verification widgets ---
        self.label_8 = QtWidgets.QLabel(Form)
        self.label_8.setGeometry(QtCore.QRect(670, 10, 191, 16))
        self.label_8.setObjectName("label_8")
        self.textEdit_2 = QtWidgets.QTextEdit(Form)
        self.textEdit_2.setGeometry(QtCore.QRect(690, 40, 421, 211))
        self.textEdit_2.setObjectName("textEdit_2")
        self.label_9 = QtWidgets.QLabel(Form)
        self.label_9.setGeometry(QtCore.QRect(670, 270, 151, 16))
        self.label_9.setObjectName("label_9")
        self.textBrowser_3 = QtWidgets.QTextBrowser(Form)
        self.textBrowser_3.setGeometry(QtCore.QRect(680, 300, 431, 61))
        self.textBrowser_3.setObjectName("textBrowser_3")
        self.label_10 = QtWidgets.QLabel(Form)
        self.label_10.setGeometry(QtCore.QRect(670, 380, 301, 16))
        self.label_10.setObjectName("label_10")
        self.textBrowser_4 = QtWidgets.QTextBrowser(Form)
        self.textBrowser_4.setGeometry(QtCore.QRect(680, 400, 431, 61))
        self.textBrowser_4.setObjectName("textBrowser_4")
        self.label_11 = QtWidgets.QLabel(Form)
        self.label_11.setGeometry(QtCore.QRect(670, 660, 151, 16))
        self.label_11.setObjectName("label_11")
        self.textBrowser_6 = QtWidgets.QTextBrowser(Form)
        self.textBrowser_6.setGeometry(QtCore.QRect(680, 680, 431, 31))
        self.textBrowser_6.setObjectName("textBrowser_6")
        self.decry = QtWidgets.QPushButton(Form)
        self.decry.setGeometry(QtCore.QRect(700, 740, 93, 28))
        self.decry.setObjectName("decry")
        self.clear2 = QtWidgets.QPushButton(Form)
        self.clear2.setGeometry(QtCore.QRect(830, 740, 93, 28))
        self.clear2.setObjectName("clear2")
        self.label_12 = QtWidgets.QLabel(Form)
        self.label_12.setGeometry(QtCore.QRect(670, 480, 91, 16))
        self.label_12.setObjectName("label_12")
        self.textBrowser_5 = QtWidgets.QTextBrowser(Form)
        self.textBrowser_5.setGeometry(QtCore.QRect(680, 500, 431, 151))
        self.textBrowser_5.setObjectName("textBrowser_5")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Assign all user-visible (Chinese) strings; called once from setupUi."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "密钥:"))
        self.label_2.setText(_translate("Form", "消息:"))
        self.label_3.setText(_translate("Form", "CCM消息认证码:"))
        self.encry.setText(_translate("Form", "加密"))
        self.clear1.setText(_translate("Form", "清除"))
        self.pushButton.setText(_translate("Form", "确定"))
        self.label_4.setText(_translate("Form", "CTR0:"))
        self.label_5.setText(_translate("Form", "临时量:"))
        self.label_6.setText(_translate("Form", "相关数据:"))
        self.label_7.setText(_translate("Form", "包括消息认证码的密文:"))
        self.label_8.setText(_translate("Form", "需要认证的消息(密文):"))
        self.label_9.setText(_translate("Form", "提取的消息认证码:"))
        self.label_10.setText(_translate("Form", "基于解密后的明文重新计算的消息认证码:"))
        self.label_11.setText(_translate("Form", "消息认证结果:"))
        self.decry.setText(_translate("Form", "解密"))
        self.clear2.setText(_translate("Form", "清除"))
        self.label_12.setText(_translate("Form", "消息明文:"))
| [
"1293521172@qq.com"
] | 1293521172@qq.com |
1746a184bbc6c7150a67150cd25059745e6018e1 | 89d7897bbae240871971538682b75b0a53efe4c7 | /data_process/view_data.py | 90d6145ad14399c847c05f186d3c0469600747b5 | [] | no_license | ahashisyuu/CAIL_SCM2019 | 5fc098ec4701276fa0272d649d474bbcb066bd64 | 092788e7c05d7738a6c8f8acb9d89594da634a01 | refs/heads/master | 2020-05-31T07:18:38.794183 | 2019-06-19T12:52:35 | 2019-06-19T12:52:35 | 190,162,484 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import json
def main():
data_file = "../data/input.txt"
with open(data_file, encoding='utf-8') as fr:
count = 0
for line in fr:
data_line = json.loads(line)
print(data_line["A"].split('\n')[2])
# print('\n\n')
print(data_line["B"].split('\n')[2])
print(data_line["C"].split('\n')[2])
count += 1
print("\n")
if count > 5:
break
if __name__ == "__main__":
main()
| [
"1347324360@qq.com"
] | 1347324360@qq.com |
17c6671c419405c07d4edb4081bf917e3399a36e | 8aac5a3085d4a7fb1c61bb5ef80984e5bdd2bbef | /course_2_assessment_8/ac18_7_3.py | c362a5a149c0dc4a997de778795d715c6b65747d | [] | no_license | realyuyangyang/Python3Michigan | a932d3d237526f84fdbcdcc7b92412086f9c807f | 088244e7cc0e5d0270ab98fb7b5b10197d9f2d98 | refs/heads/master | 2023-08-29T22:47:10.947155 | 2021-11-12T13:22:08 | 2021-11-12T13:22:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # The dictionary, medals, shows the medal count for six countries during the Rio Olympics. Sort the country names so they
# appear alphabetically. Save this list to the variable alphabetical.
medals = {'Japan': 41, 'Russia': 56, 'South Korea': 21,
'United States': 121, 'Germany': 42, 'China': 70}
alphabetical = sorted(medals.keys())
print(alphabetical)
| [
"i@danchamorro.com"
] | i@danchamorro.com |
37fcce29634843a7c5c79899d2c6871a27f98257 | 3fb718b33d486d638402e5f5bb4eb028332bd54e | /Objects and Classes/Zoo.py | c657af3653914ff55c24c427eacb63f1fabf3133 | [] | no_license | lion963/SoftUni-Python-Fundamentals- | 1c0aced0d770d0f5d0a4977543e945576425aff1 | 25fca7f88513d9e9b9ceb2741d9cb3b3c067b97b | refs/heads/master | 2023-01-24T16:21:46.517847 | 2020-12-14T13:50:06 | 2020-12-14T13:50:06 | 297,916,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | class Zoo:
__animals = 0
    def __init__(self, name):
        """Create an empty zoo with the given display name."""
        self.name = name
        # one name list per supported species
        self.mammals = []
        self.fishes = []
        self.birds = []
def add_animal(self, species, name):
if species == 'mammal':
self.mammals.append(name)
elif species == 'fish':
self.fishes.append(name)
elif species == 'bird':
self.birds.append(name)
self.__animals += 1
def get_info(self, species):
if species == 'mammal':
species_names = self.mammals
elif species == 'fish':
species_names = self.fishes
elif species == 'bird':
species_names = self.birds
names = ', '.join(species_names)
if species == 'mammal':
return f'Mammals in {zoo.name}: {names}'
elif species == 'fish':
return f'Fishes in {zoo.name}: {names}'
elif species == 'bird':
return f'Birds in {zoo.name}: {names}'
def get_total(self):
return f'Total animals: {self.__animals}'
# Interactive driver: read the zoo name, then n "<species> <name>" lines,
# then the species to report on.
zoo_name = input()
zoo = Zoo(zoo_name)
n = int(input())  # number of animals to register
for _ in range(n):
    species, name = input().split(' ')
    zoo.add_animal(species, name)
species = input()  # species whose roster should be printed
print(zoo.get_info(species))
print(zoo.get_total())
| [
"lion963@mail.bg"
] | lion963@mail.bg |
2bf9c0a0d2170b1cbd9e4cc23c65b29aced7fdfe | b3c41a006ee3863737c45a6928c630a886d59d01 | /mongoProj.py | 336649d1f2afb9b531364d8d2f93b1f32943d8af | [] | no_license | lawlietl4/mongoFileIO | 1852d0ac0861c9222ad9f5b8ff1cc9abbcc526b0 | f3dae3bb179d1a91dfc77fe1851eb00019cd9f8f | refs/heads/main | 2022-12-31T00:13:37.180367 | 2020-10-20T02:21:31 | 2020-10-20T02:21:31 | 305,211,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | import pymongo
import os
import json
client = pymongo.MongoClient("mongodb://localhost:27017/")
dblist = client.list_database_names()
mydb = client['test']
colle = mydb['clients']
new_dict = {}
collectionList = mydb.list_collection_names()
my_dict = {"name": "test"}
change = {"$set":{"name": "delete_me"}}
changed = {"name": "delete_me"}
class Employee():
    """One employee record parsed from the assignment data files.

    Names are title-cased on construction; id and year are stored as given.
    """

    def __init__(self, employee_id, firstname, lastname, year):
        self.employee_id = employee_id
        self.firstname = firstname.title()
        self.lastname = lastname.title()
        self.year = year

    def __str__(self):
        return (f"{self.firstname}, {self.lastname} "
                f"hired in: {self.year} with id number: {self.employee_id}")
def mongoimport():
    """Parse the 'simple' assignment data files and insert each employee
    record into the ``clients`` collection, capped at 10000 records.

    NOTE(review): ``colle.insert_one(e)`` passes an Employee *object*;
    pymongo can only encode dict-like documents, so this presumably should
    be ``colle.insert_one(e.__dict__)`` -- confirm against pymongo's
    InvalidDocument behavior. The bare ``json.JSONEncoder()`` call creates
    an encoder and discards it (no effect).
    """
    e = Employee(0,"","",1200)  # placeholder; replaced per parsed line
    index = 1                   # running record count (stop after 10000)
    for file in os.listdir(os.curdir+"\\Assignment 1 - data\\simple"):
        if index != 10001:
            with open(os.path.curdir+"\\Assignment 1 - data\\simple\\"+file, 'r') as f:
                n = f.readlines()
                for obj in n:
                    # each line: "<id>, <first>, <last>, <year>\n"
                    elements = obj.split(', ')
                    # print(elements)
                    employee_id = elements[0]
                    firstname = elements[1]
                    lastname = elements[2]
                    year = elements[3]
                    year = year.strip('\n')
                    e = Employee(employee_id,firstname,lastname,year)
                    print(e)
                    json.JSONEncoder()
                    colle.insert_one(e)
                    index += 1
        elif index == 10001:
            break
# --- scratch CRUD examples kept from development (disabled) ---
# print(client.list_database_names())
# if "test" in dblist:
#     print("database exists")
# if "restaurants" in collectionList:
#     print("collection exists")
# colle.insert_one(my_dict)
# for x in colle.find(my_dict):
#     print(x)
# colle.update_one(my_dict, change)
# for x in colle.find(changed):
#     print(x)
# x = colle.delete_many({},{})
# print(x.deleted_count, " documents deleted.")

# Entry point: run the import as soon as the module is executed.
mongoimport()
"noreply@github.com"
] | lawlietl4.noreply@github.com |
8fa7eeaa84cd832efa51784ee7486400ba367077 | d6f6524d2af71da9a68e1cac189c85347441c87e | /recipes/urls.py | fe15116301f3d809410b7fd769bb63fd8c73803c | [] | no_license | powellc/django-recipes | c490e535a1cbe53580fcf2c5c648a4d6a40528db | 6247a41495059207f0db5a826ee54e0113e8bd48 | refs/heads/master | 2021-01-02T09:14:22.681804 | 2010-11-03T02:00:08 | 2010-11-03T02:00:08 | 732,399 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | from django.conf import settings
from django.conf.urls.defaults import *
from recipes import views
from recipes.models import Recipe
from tagging.views import tagged_object_list
def approved_recipes(request, tag):
    """List approved recipes tagged with *tag*, 10 per page (empty page OK)."""
    queryset = Recipe.approved_objects.all()
    return tagged_object_list(request, queryset, tag, paginate_by=10,
                              allow_empty=True, template_object_name='recipes')
# URL routes for the recipes app (Django 1.x `patterns()` style); view
# names follow the "recipes-*" convention. Disabled vendor routes kept
# below for reference.
urlpatterns = patterns('recipes.views',
    url(r'^$', view=views.recipe_index, name="recipes-index"),
    url(r'^add/$', view=views.recipe_create, name="recipes-create"),
    url(r'^submitted/$', view=views.recipe_submitted, name="recipes-submitted"),
    url(r'^approve/$', view=views.recipe_approve, name="recipes-approve"),
    url(r'^(?P<slug>[-\w]+)/$', view=views.recipe_detail, name="recipes-detail"),
    url(r'^(?P<slug>[-\w]+)/delete/$', view=views.recipe_confirm_delete, name="recipes-confirm-delete"),
    # NOTE(review): this pattern can never match -- the (?P<slug>...) route
    # above already captures any single [-\w]+ path segment.
    url(r'^(?P<filter>[-\w]+)/$', view=views.recipe_index, name="recipes-index"),
    #url(r'^products/$', view=views.vendor_tags, name="vendor_tag_list"),
    #url(r'^product/(?P<tag>[-_A-Za-z0-9]+)/$', view=views.vendors_with_tag, name="vendors_with_tag"),
    #url(r'^product/(?P<tag>[-_A-Za-z0-9]+)/page/(?P<page>d+)/$', view=views.vendors_with_tag, name="vendors_with_tags_pages" ),
    url(r'^tags/$', view=views.recipe_tag_index, name="recipes-tag-index"),
    url(r'^tags/(?P<tag>[^/]+)/$', approved_recipes, name='recipes-tag-detail'),
)
| [
"colin.powell@me.com"
] | colin.powell@me.com |
bcd72bbb8a871128ee180ec96e58f3129974ed75 | 47d2dfe5f95cda06fe580480309e9620d15c7e7a | /pythonBasic/e_file_class/Ex01_readFile.py | 3c39f3b75846ca0187d517b3f94e1d8492ae6270 | [] | no_license | xorms8/pythonPractice | 1318311b691f90fad6fd88700d51e75515b96211 | 2dd1b096630d8341cb4b3184332377e553c55ca3 | refs/heads/main | 2023-07-17T00:11:37.071953 | 2021-08-29T06:43:38 | 2021-08-29T06:43:38 | 374,634,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | """
@ Reading and writing files
 - A file must be opened before it can be read or written
 - fileObj = open(filename, mode)

    first letter of mode - the operation
        r (read)   : read the file
        w (write)  : write the file (create it if missing, overwrite if it exists)
        x (write)  : write the file (create and write only when it does not already exist)
        a (append) : append to the file (if it exists, keep writing from its end)
    second letter of mode - the file type
        t : text type (the default)
        b : binary type
        when there is no second letter, the type is text.

    encoding='utf-8' : for Korean text

 - A file must always be closed after use
"""
'''
try:
f = open('./data/data.txt' ,'r',encoding='utf-8')
except FileExistsError as e:
print("파일을 찾을 수 없습니다.",e)
else:
while True:
line =f.readline()
if not line: break #더이상 line이 없으면 -> if not line
print(line, end='') #원래 개행인데 print가 한번더 개생해서 end로 개행을 없앰
f.close()
finally:
print('종료')
'''
# try:
# with open('./data/data.txt' ,'r',encoding='utf-8') as f : #with을 쓰는이유 -> close를 안해도 됨
# while True:
# line = f.readline()
# if not line: break # 더이상 line이 없으면 -> if not line
# print(line, end='') # 원래 개행인데 print가 한번더 개생해서 end로 개행을 없앰
# except FileExistsError as e:
# print("파일을 찾을 수 없습니다.", e)
# print('종료')
try:
    # `with` closes the file automatically, so no explicit close() is needed.
    with open('./data/data.txt', 'r', encoding='utf-8') as f:
        contents = f.read()
        word = contents.split()  # whitespace split -> list of words
        num = len(word)
        print(contents, word, num)
except FileNotFoundError as e:
    # BUG FIX: opening a missing file in 'r' mode raises FileNotFoundError;
    # the original caught FileExistsError, which is only raised by 'x'-mode
    # creation, so a missing file crashed the script instead of printing
    # this message.
    print("파일을 찾을 수 없습니다.", e)
else:
    # runs only when the read succeeded: report file name and word count
    print('파일명: {}, 총 단어수 :{}'.format(f.name, num))
print('종료')
"xorms8@gmail.com"
] | xorms8@gmail.com |
cdbd2abee686d0a4d7b5e1c105e37112b17f24e8 | ac0d9cd563af31f7ab60c85b45bb6341a4e6f840 | /debug.py | ba76114adb551198c1afd7c6293f4cb74ee719c8 | [] | no_license | huoang/home | 0b940ee2773dd8ec58cf2d6bb1bd0d83d7fa680f | c3d4d1134ba96b06e67caddc87acea39a7396afd | refs/heads/master | 2021-01-16T23:10:36.730487 | 2016-11-27T14:37:50 | 2016-11-27T14:37:50 | 72,357,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | #!/usr/bin/env python
#coding:utf-8
import pandas as pd
import feather as fd
import os
def dfvars(ncol):
    """Return the comma-separated column names 'x1,x2,...,x<ncol>'.

    Uses str.join instead of the original repeated `+=` concatenation,
    which was quadratic and also needed a trailing-comma trim (and
    shadowed the function's own name with its accumulator).
    """
    return ','.join('x%d' % i for i in range(1, ncol + 1))
vars=dfvars(261)
ncol=261
loop = True
looptimes=0
reader = pd.read_csv( '/mnt/e/data/2015/1502.CSV',
iterator = True)
while loop:
try:
looptimes += 1
df = reader.get_chunk(50000)
df.columns = dfvars(261).split(',')
df_rep = df[['x5','x1']]
rec=df[df['x1']=='子']
df.ix[[24861]].x229
df.ix[[24861]].x258
df.ix[[24860]].x229
df.ix[[24860]].x258
df.ix[[24862]].x229
df.ix[[24862]].x258
df.ix[[24863]].x229
df.ix[[24863]].x258
| [
"huoang@126.com"
] | huoang@126.com |
a1e6752c97c13384efca970a958b0761d12d34cd | d2189145e7be2c836017bea0d09a473bf1bc5a63 | /Reposiciones/reposicionesIsraelFP/reposicion31Ago18IsraelFP/fibonacciISraelFP.py | 692bd0eafb663ca194cd985e7f9b1080a1142875 | [] | no_license | emilianoNM/Tecnicas3 | 12d10ce8d78803c8d2cd6a721786a68f7ee2809d | 6ad7f0427ab9e23643a28ac16889bca8791421d0 | refs/heads/master | 2020-03-25T18:06:34.126165 | 2018-11-24T04:42:14 | 2018-11-24T04:42:14 | 144,013,045 | 3 | 5 | null | 2018-09-14T10:47:26 | 2018-08-08T12:49:57 | Python | UTF-8 | Python | false | false | 306 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 15 16:04:05 2018
@author: israel
"""
def fib(f):
    """Return the f-th Fibonacci number (1-indexed: fib(1) = fib(2) = 1).

    Iterative instead of the original naive double recursion, so it runs
    in O(f) time and O(1) space rather than exponential time (and cannot
    hit the recursion limit).
    """
    a, b = 1, 1
    for _ in range(f - 1):
        a, b = b, a + b
    return a
# Python 2 driver: `print` statements and eval-style input() (the prompt
# expects the user to type an integer).
print "\t..:Fibonacci:.."
f=input("Cantidad de no. a hacer en Fibonacci: ")
print "> No. Fibonacci: ",fib(f)
| [
"noreply@github.com"
] | emilianoNM.noreply@github.com |
40547c88ef4733a7b77c0d92fa0344e3439c408f | 98efe1aee73bd9fbec640132e6fb2e54ff444904 | /loldib/getratings/models/NA/na_aatrox/__init__.py | edb18cb382f17b02c1036fa9cc09ee67a24a63ab | [
"Apache-2.0"
] | permissive | koliupy/loldib | be4a1702c26546d6ae1b4a14943a416f73171718 | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | refs/heads/master | 2021-07-04T03:34:43.615423 | 2017-09-21T15:44:10 | 2017-09-21T15:44:10 | 104,359,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from .na_aatrox_top import *
from .na_aatrox_jng import *
from .na_aatrox_mid import *
from .na_aatrox_bot import *
from .na_aatrox_sup import *
| [
"noreply@github.com"
] | koliupy.noreply@github.com |
390b65607f271bdd88f9fab4359365ad28e4f992 | d92235bce35d7bf1b028ae417c6ceb8891b6c8b4 | /dk_mnist_mlp_weightnorm.py | 10c935941f332df7936c404f15dd57a9d282b466 | [] | no_license | capybaralet/BayesianHypernet | 63faadc83aa95ec80e5d7805ec300c151734f93a | 4d7bdc749b2fb9cf74e45c5b21ccc590b6f781e7 | refs/heads/master | 2020-12-30T15:30:54.687925 | 2017-05-15T21:38:15 | 2017-05-15T21:38:15 | 91,155,018 | 3 | 0 | null | 2017-05-13T06:41:49 | 2017-05-13T06:41:49 | null | UTF-8 | Python | false | false | 5,345 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 12 17:46:38 2017
@author: Chin-Wei
"""
from modules import LinearFlowLayer, IndexLayer, PermuteLayer
from modules import CoupledDenseLayer, stochasticDenseLayer2
from utils import log_normal, log_stdnormal
from ops import load_mnist
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
srng = RandomStreams(seed=427)
floatX = theano.config.floatX
import lasagne
from lasagne import init
from lasagne import nonlinearities
from lasagne.layers import get_output
from lasagne.objectives import categorical_crossentropy as cc
import numpy as np
if 1:#def main():
    """
    MNIST example
    weight norm reparameterized MLP with prior on rescaling parameters
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--perdatapoint',action='store_true')
    parser.add_argument('--coupling',action='store_true')
    parser.add_argument('--lrdecay',action='store_true')
    parser.add_argument('--lr0',default=0.1,type=float)
    parser.add_argument('--lbda',default=0.5,type=float)
    parser.add_argument('--bs',default=32,type=int)
    args = parser.parse_args()
    print args
    perdatapoint = args.perdatapoint
    # NOTE(review): coupling is hard-wired to 1 here; the --coupling flag is ignored.
    coupling = 1#args.coupling
    lr0 = args.lr0
    lrdecay = args.lrdecay
    lbda = np.cast[floatX](args.lbda)
    bs = args.bs
    # NOTE(review): --size is never added to the parser above, so args.size
    # will raise AttributeError at runtime -- confirm a --size argument is
    # meant to be registered.
    size = max(10,min(50000,args.size))
    clip_grad = 100
    max_norm = 100
    # load dataset
    filename = '/data/lisa/data/mnist.pkl.gz'
    train_x, train_y, valid_x, valid_y, test_x, test_y = load_mnist(filename)
    # Symbolic Theano inputs for the training function.
    input_var = T.matrix('input_var')
    target_var = T.matrix('target_var')
    dataset_size = T.scalar('dataset_size')
    lr = T.scalar('lr')
    # 784 -> 20 -> 10
    weight_shapes = [(784, 200),
                     (200, 10)]
    # num_params counts only the per-layer output dimensions: the hypernet
    # produces one rescaling parameter per output unit (weight-norm style).
    num_params = sum(ws[1] for ws in weight_shapes)
    if perdatapoint:
        wd1 = input_var.shape[0]
    else:
        wd1 = 1
    # stochastic hypernet
    ep = srng.normal(std=0.01,size=(wd1,num_params),dtype=floatX)
    logdets_layers = []
    h_layer = lasagne.layers.InputLayer([None,num_params])
    layer_temp = LinearFlowLayer(h_layer)
    h_layer = IndexLayer(layer_temp,0)
    logdets_layers.append(IndexLayer(layer_temp,1))
    if coupling:
        # Two coupling layers with a permutation in between, accumulating
        # log-determinants for the normalizing-flow density.
        layer_temp = CoupledDenseLayer(h_layer,200)
        h_layer = IndexLayer(layer_temp,0)
        logdets_layers.append(IndexLayer(layer_temp,1))
        h_layer = PermuteLayer(h_layer,num_params)
        layer_temp = CoupledDenseLayer(h_layer,200)
        h_layer = IndexLayer(layer_temp,0)
        logdets_layers.append(IndexLayer(layer_temp,1))
    weights = lasagne.layers.get_output(h_layer,ep)
    # primary net
    t = np.cast['int32'](0)
    layer = lasagne.layers.InputLayer([None,784])
    inputs = {layer:input_var}
    for ws in weight_shapes:
        # Slice this layer's rescaling parameters out of the flat hypernet output.
        num_param = ws[1]
        w_layer = lasagne.layers.InputLayer((None,ws[1]))
        weight = weights[:,t:t+num_param].reshape((wd1,ws[1]))
        inputs[w_layer] = weight
        layer = stochasticDenseLayer2([layer,w_layer],ws[1])
        print layer.output_shape
        t += num_param
    layer.nonlinearity = nonlinearities.softmax
    y = T.clip(get_output(layer,inputs), 0.001, 0.999) # stability
    # loss terms
    logdets = sum([get_output(logdet,ep) for logdet in logdets_layers])
    # log q(w): Gaussian base density of ep plus the flow's log-determinants.
    logqw = - (0.5*(ep**2).sum(1) + 0.5*T.log(2*np.pi)*num_params + logdets)
    #logpw = log_normal(weights,0.,-T.log(lbda)).sum(1)
    logpw = log_stdnormal(weights).sum(1)
    kl = (logqw - logpw).mean()
    logpyx = - cc(y,target_var).mean()
    # Negative ELBO: data term minus KL, with KL scaled by 1/dataset_size.
    loss = - (logpyx - kl/T.cast(dataset_size,floatX))
    params = lasagne.layers.get_all_params([h_layer,layer])
    grads = T.grad(loss, params)
    # Gradient hygiene: global norm constraint, then elementwise clipping.
    mgrads = lasagne.updates.total_norm_constraint(grads,
                                                   max_norm=max_norm)
    cgrads = [T.clip(g, -clip_grad, clip_grad) for g in mgrads]
    updates = lasagne.updates.adam(cgrads, params,
                                   learning_rate=lr)
    train = theano.function([input_var,target_var,dataset_size,lr],
                            loss,updates=updates)
    predict = theano.function([input_var],y.argmax(1))
    ##################
    # TRAIN
    X, Y = train_x[:size],train_y[:size]
    Xt, Yt = valid_x,valid_y
    print 'trainset X.shape:{}, Y.shape:{}'.format(X.shape,Y.shape)
    N = X.shape[0]
    epochs = 50
    records=list()
    t = 0
    for e in range(epochs):
        if lrdecay:
            # Log-linear decay from lr0 down to lr0/10 over the run.
            lr = lr0 * 10**(-e/float(epochs-1))
        else:
            lr = lr0
        for i in range(N/bs):
            x = X[i*bs:(i+1)*bs]
            y = Y[i*bs:(i+1)*bs]
            loss = train(x,y,N,lr)
            if t%100==0:
                print 'epoch: {} {}, loss:{}'.format(e,t,loss)
                tr_acc = (predict(X)==Y.argmax(1)).mean()
                te_acc = (predict(Xt)==Yt.argmax(1)).mean()
                print '\ttrain acc: {}'.format(tr_acc)
                print '\ttest acc: {}'.format(te_acc)
            t+=1
        records.append(loss)
| [
"davidscottkrueger@gmail.com"
] | davidscottkrueger@gmail.com |
d038aed3f5f3cf210f6470d7f30d1e6342dfc62b | 54c38a780280288609d963498484139eb65588a2 | /src/tools/STL_boundingBox.py | a37309bf444d0b72478792ae88494956366a6509 | [] | no_license | floli/flof | c8f56ad2e693d1c968eca7a8b79bf0ae14121427 | 765cbbe0bc526b4e80668aada507917f6e8bc2f0 | refs/heads/master | 2021-01-18T14:38:01.619254 | 2013-07-21T10:31:25 | 2013-07-21T10:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | #!env python2
import fileinput, sys
if len(sys.argv) == 1:
print "Calculates max/min values of vertexes from all files."
print "Usage: %s FILES" % sys.argv[0]
sys.exit()
max_x = max_y = max_z = -sys.maxint -1
min_x = min_y = min_z = sys.maxint
for line in fileinput.input():
if line.strip().startswith("vertex"):
coords = [ float(i) for i in line.split()[1:] ]
max_x = coords[0] if coords[0] > max_x else max_x
min_x = coords[0] if coords[0] < min_x else min_x
max_y = coords[1] if coords[1] > max_y else max_y
min_y = coords[1] if coords[1] < min_y else min_y
max_z = coords[2] if coords[2] > max_z else max_z
min_z = coords[2] if coords[2] < min_z else min_z
print "Max X: %s Min X: %s" % (max_x, min_x)
print "Max Y: %s Min Y: %s" % (max_y, min_y)
print "Max Z: %s Min Z: %s" % (max_z, min_z)
| [
"florian.lindner@xgm.de"
] | florian.lindner@xgm.de |
b733a47e357b49c8b6a62d4657675992a32790d6 | e0cc12d17e0f6a8dac3797c7eb242cf0b0c90273 | /util/usbmux.py | f83ebf7eb07253c45fbff1e672c83e68bf1dac44 | [] | no_license | MockMoocLi/py-ios-device | f99a380d477e26ce62dce875c9b83dfcf74f38e0 | 2f501cb1b3dc4089ab7d9f4465c1f1175d8163e8 | refs/heads/main | 2023-02-02T08:46:24.108146 | 2020-12-21T10:30:20 | 2020-12-21T10:30:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,245 | py | """
USBMux client that handles iDevice descovery via USB.
:author: Doug Skrypa (original: Hector Martin "marcan" <hector@marcansoft.com>)
"""
import select
import socket
import struct
import sys
import plistlib
from typing import Dict, Union, Optional, Tuple, Any, Mapping, List
from .exceptions import MuxError, MuxVersionError, NoMuxDeviceFound
__all__ = ['USBMux', 'MuxConnection', 'MuxDevice', 'UsbmuxdClient']
class MuxDevice:
    """One iDevice announced by usbmuxd, identified by its mux device id."""

    def __init__(self, devid, usbprod, serial, location, proto_cls, socket_path):
        self.devid = devid
        self.usbprod = usbprod
        self.serial = serial
        self.location = location
        self._proto_cls = proto_cls
        self._socket_path = socket_path

    def __repr__(self):
        return '<MuxDevice: ID %d ProdID 0x%04x Serial %r Location 0x%x>' % (
            self.devid,
            self.usbprod,
            self.serial,
            self.location,
        )

    def connect(self, port):
        """Open a TCP-over-USB connection to *port* on this device."""
        return MuxConnection(self._socket_path, self._proto_cls).connect(self, port)
class MuxConnection:
    """A single client connection to the usbmuxd daemon.

    Carries control packets (listen/connect) until `connect` succeeds, at
    which point the underlying socket becomes a raw tunnel to the device.
    """

    def __init__(self, socketpath, protoclass):
        self.socketpath = socketpath
        # usbmuxd is reached over a UNIX socket; on Windows/cygwin the daemon
        # instead exposes TCP port 27015 on localhost.
        if sys.platform in ('win32', 'cygwin'):
            family = socket.AF_INET
            address = ('127.0.0.1', 27015)
        else:
            family = socket.AF_UNIX
            address = self.socketpath
        self.socket = SafeStreamSocket(address, family)
        self.proto = protoclass(self.socket)
        # Monotonically increasing tag used to match replies to requests.
        self.pkttag = 1
        self.devices = [] # type: List[MuxDevice]

    def _getreply(self):
        """Read packets until a RESULT arrives; return (tag, data)."""
        while True:
            resp, tag, data = self.proto.getpacket()
            if resp == self.proto.TYPE_RESULT:
                return tag, data
            else:
                raise MuxError('Invalid packet type received: %d' % resp)

    def _processpacket(self):
        """Consume one listener event, updating self.devices on add/remove."""
        resp, tag, data = self.proto.getpacket()
        if resp == self.proto.TYPE_DEVICE_ADD:
            self.devices.append(
                MuxDevice(
                    data['DeviceID'],
                    data['Properties']['ProductID'],
                    data['Properties']['SerialNumber'],
                    data['Properties']['LocationID'],
                    self.proto.__class__,
                    self.socketpath
                )
            )
        elif resp == self.proto.TYPE_DEVICE_REMOVE:
            for dev in self.devices:
                if dev.devid == data['DeviceID']:
                    self.devices.remove(dev)
        elif resp == self.proto.TYPE_RESULT:
            raise MuxError('Unexpected result: %d' % resp)
        else:
            raise MuxError('Invalid packet type received: %d' % resp)

    def _exchange(self, req, payload=None):
        """Send a control request and return the numeric result code."""
        mytag = self.pkttag
        self.pkttag += 1
        self.proto.sendpacket(req, mytag, payload or {})
        recvtag, data = self._getreply()
        if recvtag != mytag:
            raise MuxError('Reply tag mismatch: expected %d, got %d' % (mytag, recvtag))
        return data['Number']

    def listen(self):
        """Subscribe this connection to device attach/detach events."""
        ret = self._exchange(self.proto.TYPE_LISTEN)
        if ret != 0:
            raise MuxError('Listen failed: error %d' % ret)

    def process(self, timeout: Optional[float] = None):
        """Wait up to *timeout* seconds for one listener event and handle it."""
        if self.proto.connected:
            raise MuxError('Socket is connected, cannot process listener events')
        rlo, wlo, xlo = select.select([self.socket.sock], [], [self.socket.sock], timeout)
        if xlo:
            self.socket.sock.close()
            raise MuxError('Exception in listener socket')
        if rlo:
            self._processpacket()

    def connect(self, device, port) -> socket.socket:
        """Tunnel to *port* on *device*; returns the now-raw socket.

        The port is byte-swapped to network order before being sent.
        """
        ret = self._exchange(
            self.proto.TYPE_CONNECT, {'DeviceID': device.devid, 'PortNumber': ((port << 8) & 0xFF00) | (port >> 8)}
        )
        if ret != 0:
            raise MuxError('Connect failed: error %d' % ret)
        self.proto.connected = True
        return self.socket.sock

    def close(self):
        self.socket.sock.close()
class USBMux:
    """High-level usbmuxd client that discovers attached iDevices."""

    def __init__(self, socket_path=None):
        socket_path = socket_path or '/var/run/usbmuxd'
        self.socketpath = socket_path
        # Try the legacy binary protocol first; fall back to the plist
        # protocol if the daemon rejects the version.
        self.listener = MuxConnection(socket_path, BinaryProtocol)
        try:
            self.listener.listen()
            self.version = 0
            self.protoclass = BinaryProtocol
        except MuxVersionError:
            self.listener = MuxConnection(socket_path, PlistProtocol)
            self.listener.listen()
            self.protoclass = PlistProtocol
            self.version = 1
        # Shared with the listener: updated in place as events are processed.
        self.devices = self.listener.devices  # type: List[MuxDevice]

    def process(self, timeout: float = 0.1):
        """Process at most one pending attach/detach event."""
        self.listener.process(timeout)

    def find_device(self, serial=None, timeout=0.1, max_attempts=5) -> MuxDevice:
        """Return a discovered device, optionally matching *serial*.

        Polls for events up to *max_attempts* times while no devices are
        known. Raises NoMuxDeviceFound if nothing (matching) shows up.
        """
        attempts = 0
        while not self.devices and attempts < max_attempts:
            self.process(timeout)
            attempts += 1
        if self.devices:
            if serial:
                for device in self.devices:
                    if device.serial == serial:
                        return device
                raise NoMuxDeviceFound(f'Found {len(self.devices)} MuxDevice instances, but none with {serial}')
            else:
                return self.devices[0]
        raise NoMuxDeviceFound('No MuxDevice instances were found')
class UsbmuxdClient(MuxConnection):
    """Plist-protocol connection used for usbmuxd utility requests."""

    def __init__(self):
        super().__init__('/var/run/usbmuxd', PlistProtocol)

    def get_pair_record(self, udid):
        """Fetch and parse the stored pairing record for device *udid*."""
        tag = self.pkttag
        self.pkttag += 1
        payload = {'PairRecordID': udid}
        self.proto.sendpacket('ReadPairRecord', tag, payload)
        _, recvtag, data = self.proto.getpacket()
        if recvtag != tag:
            raise MuxError('Reply tag mismatch: expected %d, got %d' % (tag, recvtag))
        # The record itself is a nested plist blob inside the reply.
        pair_record = data['PairRecordData']
        pair_record = plistlib.loads(pair_record)
        return pair_record
class BinaryProtocol:
    """Version-0 usbmuxd wire protocol.

    Every packet is a 16-byte header packed as struct 'IIII'
    (length, version, request, tag) followed by a request-specific payload.
    """

    TYPE_RESULT = 1
    TYPE_CONNECT = 2
    TYPE_LISTEN = 3
    TYPE_DEVICE_ADD = 4
    TYPE_DEVICE_REMOVE = 5
    VERSION = 0

    def __init__(self, sock):
        self.socket = sock
        # Once True, the socket is a raw device tunnel and control packets
        # are refused.
        self.connected = False

    def _pack(self, req: int, payload: Optional[Mapping[str, Any]]):
        """Serialize the payload for an outgoing request of type *req*."""
        if req == self.TYPE_CONNECT:
            connect_data = b'\x00\x00'
            return struct.pack('IH', payload['DeviceID'], payload['PortNumber']) + connect_data
        elif req == self.TYPE_LISTEN:
            return b''
        else:
            raise ValueError('Invalid outgoing request type %d' % req)

    def _unpack(self, resp: int, payload: bytes) -> Dict[str, Any]:
        """Parse an incoming payload into the dict shape used by callers."""
        if resp == self.TYPE_RESULT:
            return {'Number': struct.unpack('I', payload)[0]}
        elif resp == self.TYPE_DEVICE_ADD:
            # Serial is a fixed 256-byte NUL-padded field; keep up to the
            # first NUL.
            devid, usbpid, serial, pad, location = struct.unpack('IH256sHI', payload)
            serial = serial.split(b'\0')[0]
            return {
                'DeviceID': devid,
                'Properties': {
                    'LocationID': location,
                    'SerialNumber': serial,
                    'ProductID': usbpid
                }
            }
        elif resp == self.TYPE_DEVICE_REMOVE:
            devid = struct.unpack('I', payload)[0]
            return {'DeviceID': devid}
        else:
            raise MuxError('Invalid incoming response type %d' % resp)

    def sendpacket(self, req: int, tag: int, payload: Union[Mapping[str, Any], bytes, None] = None):
        """Frame and send one control packet."""
        payload = self._pack(req, payload or {})
        if self.connected:
            raise MuxError('Mux is connected, cannot issue control packets')
        # Header length field counts the 16 header bytes plus the payload.
        length = 16 + len(payload)
        data = struct.pack('IIII', length, self.VERSION, req, tag) + payload
        self.socket.send(data)

    def getpacket(self) -> Tuple[int, int, Union[Dict[str, Any], bytes]]:
        """Read one packet; return (response type, tag, parsed payload)."""
        if self.connected:
            raise MuxError('Mux is connected, cannot issue control packets')
        dlen = self.socket.recv(4)
        dlen = struct.unpack('I', dlen)[0]
        body = self.socket.recv(dlen - 4)
        version, resp, tag = struct.unpack('III', body[:0xc])
        if version != self.VERSION:
            raise MuxVersionError('Version mismatch: expected %d, got %d' % (self.VERSION, version))
        payload = self._unpack(resp, body[0xc:])
        return resp, tag, payload
class PlistProtocol(BinaryProtocol):
    """Version-1 usbmuxd protocol: payloads are plists inside TYPE_PLIST frames."""

    TYPE_RESULT = 'Result'
    TYPE_CONNECT = 'Connect'
    TYPE_LISTEN = 'Listen'
    TYPE_DEVICE_ADD = 'Attached'
    TYPE_DEVICE_REMOVE = 'Detached'  # NOTE(review): message name unverified here
    TYPE_PLIST = 8
    VERSION = 1

    def _pack(self, req: int, payload: bytes) -> bytes:
        # Payload is already a serialized plist; pass through unchanged.
        return payload

    def _unpack(self, resp: int, payload: bytes) -> bytes:
        # Parsing happens in getpacket() after the plist frame is verified.
        return payload

    def sendpacket(self, req, tag, payload: Optional[Mapping[str, Any]] = None):
        """Wrap the request in a plist and send it as a TYPE_PLIST frame."""
        payload = payload or {}
        payload['ClientVersionString'] = 'qt4i-usbmuxd'
        # Translate the binary-protocol numeric codes (2=connect, 3=listen)
        # into their plist message names.
        if isinstance(req, int):
            req = [self.TYPE_CONNECT, self.TYPE_LISTEN][req - 2]
        payload['MessageType'] = req
        payload['ProgName'] = 'tcprelay'
        wrapped_payload = plistlib.dumps(payload)
        super().sendpacket(self.TYPE_PLIST, tag, wrapped_payload)

    def getpacket(self):
        """Read one plist frame; return (message type, tag, parsed dict)."""
        resp, tag, payload = super().getpacket()
        if resp != self.TYPE_PLIST:
            raise MuxError('Received non-plist type %d' % resp)
        payload = plistlib.loads(payload)
        return payload.get('MessageType', ''), tag, payload
class SafeStreamSocket:
    """Stream socket wrapper whose send/recv always transfer the full amount."""

    def __init__(self, address, family):
        self.sock = socket.socket(family, socket.SOCK_STREAM)
        self.sock.connect(address)

    def send(self, msg):
        """Send *msg* completely, looping over partial writes."""
        done = 0
        total = len(msg)
        while done < total:
            n = self.sock.send(msg[done:])
            if n == 0:
                raise MuxError('socket connection broken')
            done += n

    def recv(self, size):
        """Receive exactly *size* bytes, looping over short reads."""
        chunks = []
        received = 0
        while received < size:
            chunk = self.sock.recv(size - received)
            if not chunk:
                raise MuxError('socket connection broken')
            chunks.append(chunk)
            received += len(chunk)
        return b''.join(chunks)
| [
"chenpeijie@rongcloud.cn"
] | chenpeijie@rongcloud.cn |
990dc1f53f29fce4c728726c22bfa670e723a133 | 4af624e5655d6543dc1b6c5d3ce8da67a1171082 | /prices.py | 8a362e0ec5ca128b10ec5ca7e62fa233a6b91a13 | [] | no_license | ehtesham1999/Ecommerce-price-tracker | 38fb7920da6cfd83466db1b0aa889e16dc1856d0 | c70502e184ac9dd0acef81bf02c25c5478d82d7d | refs/heads/master | 2022-11-19T11:32:09.793321 | 2020-07-14T09:43:19 | 2020-07-14T09:43:19 | 279,551,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | import requests as r
from bs4 import BeautifulSoup as bs
import time
import webbrowser as w
import smtplib
def send_email():
    """Notify the receiver by e-mail that the tracked product is in stock.

    Connects to Gmail's SMTP server over STARTTLS and sends a plain-text
    message. Propagates smtplib.SMTPException / socket errors on failure.
    """
    sender = "ehteshamhussain1999@gmail.com"
    receiver = "ehussain414@gmail.com"
    message = "realme 6 is available"
    # Bug fix: smtplib.SMTP() takes the SMTP *host*; the original passed the
    # sender address as the host, so the connection could never succeed.
    s = smtplib.SMTP("smtp.gmail.com", 587)
    try:
        s.starttls()
        # SECURITY: the account password is hard-coded in source; move it to
        # an environment variable or config file and rotate the credential.
        s.login(sender, "yourballsareinmyfist")
        s.sendmail(sender, receiver, message)
    finally:
        # Always close the SMTP session, even if login/send fails.
        s.quit()
# Product page to watch for availability.
URL = "https://www.flipkart.com/realme-6-comet-blue-128-gb/p/itm64975b00cb8e6?pid=MOBFPCX7UQU3CHKG&lid=LSTMOBFPCX7UQU3CHKG2J1SAU&marketplace=FLIPKART&srno=s_1_1&otracker=search&otracker1=search&fm=SEARCH&iid=1aef97f8-7ea0-4228-ad1f-0ea5129c2d04.MOBFPCX7UQU3CHKG.SEARCH&ppt=sp&ppn=sp&ssid=yfp1st24g00000001592410182089&qH=ba2b1763f76b622e"
# NOTE(review): this sends the "available" e-mail unconditionally on startup,
# before any availability check -- confirm that is intended.
send_email()
while True:
    page = r.get(URL)
    soup = bs(page.content, "html.parser")
    available = "default"
    # The CSS class below comes from Inspect Element and changes between
    # Flipkart page revisions -- update it when the site layout changes.
    # NOTE(review): soup.find(...) returns None when the class is missing,
    # which makes .text raise AttributeError; guard before relying on it.
    available = soup.find("div", {"class": "_9-sL7L"}).text
    # Convert the price (a string) to an integer if price comparison is added.
    print(available)
    # Put custom comparison logic below.
    # Example:
    if available != "Sold Out":
        w.open(URL)
        send_email()
        break
    else:
        print("Unavailable")
    # Polling interval between availability checks (seconds).
    time.sleep(5)
| [
"ehteshamhussain1999@gmail.com"
] | ehteshamhussain1999@gmail.com |
fdd837ad5b77db49c7eb7d966b25fb6702ad84fe | dc27d116928edf9ebbbb62d13bbbfb5223bbc184 | /tests/test_s3_dict.py | 6510046d1ccb3ae4f6d9194f4763f7ff92149721 | [
"MIT"
] | permissive | MartinHowarth/s3os | 014a85efae3e3ad6680e4399d190c335e5a74743 | dc94c06a20c067e6dcc13eb8f7e1557fdd615452 | refs/heads/master | 2020-12-08T21:35:20.894917 | 2020-01-11T14:25:10 | 2020-01-11T14:25:10 | 233,101,822 | 1 | 0 | MIT | 2020-01-11T13:51:29 | 2020-01-10T17:54:30 | Python | UTF-8 | Python | false | false | 7,701 | py | """Tests for the S3Dict object."""
import pytest
from mock import MagicMock, call
from s3os.s3_dict import S3Dict, S3DictConfig
from s3os.s3_wrapper import ObjectLocation
@pytest.fixture
def mock_s3_api(mocker):
    """Create mocked versions of the s3 API.

    Returns a (store, retrieve, delete) tuple of MagicMocks patched into
    the s3os.s3_dict module for the duration of the test.
    """
    mocked_store = mocker.patch("s3os.s3_dict.store")
    mocked_retrieve = mocker.patch("s3os.s3_dict.retrieve")
    mocked_delete = mocker.patch("s3os.s3_dict.delete")
    return mocked_store, mocked_retrieve, mocked_delete
def assert_no_calls(*mocks: MagicMock) -> None:
    """Fail if any of the supplied mocks has been invoked at least once."""
    for candidate in mocks:
        candidate.assert_not_called()
def reset_all_mocks(*mocks: MagicMock) -> None:
    """Restore every supplied mock to its pristine, never-called state."""
    for candidate in mocks:
        candidate.reset_mock()
def test_s3_dict_config(subtests):
    """Test the S3DictConfig."""
    with subtests.test("ID is defaulted to unique id."):
        # Two default-constructed configs must not collide on id.
        c1 = S3DictConfig()
        c2 = S3DictConfig()
        assert c1.id != c2.id
    with subtests.test("ID not defaulted when given."):
        c = S3DictConfig(id="test")
        assert c.id == "test"
    with subtests.test("`s3_prefix` can be generated correctly."):
        c = S3DictConfig(id="test")
        assert c.s3_prefix == "test/"
def test_s3_dict_init_no_items(subtests, mock_s3_api):
    """Tests for creating an S3Dict without initial items."""
    # Unpacked only for naming parity with the other tests; the whole tuple
    # is what gets asserted on below.
    m_store, m_retrieve, m_delete = mock_s3_api
    with subtests.test("Can be created with no arguments."):
        S3Dict()
        assert_no_calls(*mock_s3_api)
    with subtests.test("Can be created with a config object."):
        dic = S3Dict(_config=S3DictConfig(id="s3os_test"))
        assert dic._config.id == "s3os_test"
        assert_no_calls(*mock_s3_api)
@pytest.mark.parametrize("use_cache", [True, False])
# Same body covers dict-style and iterable-of-pairs initialisation.
@pytest.mark.parametrize(
    "init_items", [{"a": 2, "b": [1, 2]}, (("a", 2), ("b", [1, 2]))],
)
def test_s3_dict_init_with_items(subtests, mock_s3_api, init_items, use_cache):
    """
    Tests for creating an S3Dict with initial items.

    This implicitly tests the `update` method.
    """
    m_store, m_retrieve, m_delete = mock_s3_api
    config = S3DictConfig(id="s3os_test", use_cache=use_cache)
    if isinstance(init_items, dict):
        # https://github.com/python/mypy/issues/2582
        dic = S3Dict(**init_items, _config=config)  # type: ignore
    else:
        dic = S3Dict(init_items, _config=config)
    with subtests.test("Items are cached locally."):
        # Check against the inner data dict so we definitely don't
        # re-discover the keys from s3.
        if use_cache:
            assert "a" in dic.data and dic.data["a"] == 2
            assert "b" in dic.data and dic.data["b"] == [1, 2]
        else:
            assert "a" not in dic.data
            assert "b" not in dic.data
    with subtests.test("Items are uploaded to s3."):
        m_store.assert_has_calls(
            [
                call(ObjectLocation("s3os_test/a"), 2),
                call(ObjectLocation("s3os_test/b"), [1, 2]),
            ],
            any_order=True,
        )
    assert_no_calls(m_retrieve, m_delete)
def test_convert_key(subtests):
    """Test that key conversions are symmetric."""
    dic = S3Dict(_config=S3DictConfig(id="s3os_test"))
    with subtests.test("Test convert_to_s3_key."):
        assert dic.convert_to_s3_key("mykey") == "s3os_test/mykey"
    with subtests.test("Test convert_from_s3_key."):
        assert dic.convert_from_s3_key("s3os_test/mykey") == "mykey"
    with subtests.test(
        "Test that convert_from_s3_key only replaces at start of string."
    ):
        # An unrelated prefix, or a second occurrence of the prefix, must
        # survive the conversion untouched.
        assert dic.convert_from_s3_key("asdf/mykey") == "asdf/mykey"
        assert dic.convert_from_s3_key("s3os_test/s3os_test/mykey") == "s3os_test/mykey"
@pytest.mark.parametrize("use_cache", [True, False])
def test_setitem(subtests, mock_s3_api, use_cache):
    """Test the __setitem__ method of S3Dict."""
    m_store, m_retrieve, m_delete = mock_s3_api
    dic = S3Dict(_config=S3DictConfig(id="s3os_test", use_cache=use_cache))
    dic["set"] = 5
    # Assignment must always upload to s3, regardless of caching.
    m_store.assert_has_calls([call(ObjectLocation("s3os_test/set"), 5)])
    # Check against the inner "data" dict
    if use_cache:
        assert dic.data["set"] == 5
    else:
        assert "set" not in dic.data
    assert_no_calls(m_retrieve, m_delete)
@pytest.mark.parametrize("use_cache", [True, False])
def test_getitem(subtests, mock_s3_api, use_cache):
    """Test the __getitem__ method of S3Dict."""
    m_store, m_retrieve, m_delete = mock_s3_api
    dic = S3Dict(_config=S3DictConfig(id="s3os_test", use_cache=use_cache))
    # Initialise some data in the dict, and then reset the store mock so we can
    # check it more easily later.
    dic["get"] = 12
    m_store.reset_mock()
    # Pretend that we actually did upload the object to s3.
    m_retrieve.return_value = 12
    # Actually perform the tests.
    value = dic["get"]
    assert value == 12
    # With caching the lookup is served locally; without it, s3 is hit.
    if use_cache:
        assert_no_calls(*mock_s3_api)
    else:
        m_retrieve.assert_has_calls([call(ObjectLocation("s3os_test/get"))])
        assert_no_calls(m_store, m_delete)
@pytest.mark.parametrize("use_cache", [True, False])
def test_delitem(subtests, mock_s3_api, use_cache):
    """Test the __delitem__ method of S3Dict."""
    m_store, m_retrieve, m_delete = mock_s3_api
    dic = S3Dict(_config=S3DictConfig(id="s3os_test", use_cache=use_cache))
    dic["del"] = 7
    # Reset all the mocks after initialisation
    reset_all_mocks(*mock_s3_api)
    with subtests.test("Key is deleted from both s3 and cache."):
        del dic["del"]
        m_delete.assert_has_calls([call(ObjectLocation("s3os_test/del"))])
        assert_no_calls(m_retrieve, m_store)
        assert "del" not in dic.data
    with subtests.test("No error when key does not exist."):
        # Deletion of an unknown key is expected to be a silent no-op.
        del dic["del2"]
@pytest.mark.parametrize("use_cache", [True, False])
def test_get_all_from_s3(subtests, mock_s3_api, mocker, use_cache):
    """Test the `get_all_from_s3` method."""
    m_store, m_retrieve, m_delete = mock_s3_api
    location_gen = (ObjectLocation(str(i)) for i in range(3))
    mock_generate_items_in_bucket = mocker.patch(
        "s3os.s3_dict.generate_items_in_bucket", return_value=location_gen,
    )
    # Fake stored values: key "i" maps to str(i*i), i.e. "0", "1", "4".
    m_retrieve.side_effect = [str(i * i) for i in range(3)]
    s3dict = S3Dict(_config=S3DictConfig(id="s3os_test", use_cache=use_cache))
    normal_dict = s3dict.get_all_from_s3()
    assert normal_dict == {"0": "0", "1": "1", "2": "4"}
    mock_generate_items_in_bucket.assert_called_once()
    assert_no_calls(m_store, m_delete)
def test_del(mock_s3_api):
    """Test that __del__ does not delete items from s3."""
    # Initialise with some data so that it could be deleted.
    s3dict = S3Dict({"a": 1, "b": 2}, _config=S3DictConfig(id="s3os_test"))
    # Reset all the mocks after initialisation
    reset_all_mocks(*mock_s3_api)
    del s3dict
    # No calls should have been made.
    assert_no_calls(*mock_s3_api)
def test_clear(mocker, mock_s3_api):
    """Test the `clear` method."""
    m_store, m_retrieve, m_delete = mock_s3_api
    mock_generate_items_in_bucket = mocker.patch(
        "s3os.s3_dict.generate_items_in_bucket",
        return_value=(ObjectLocation(f"s3os_test/{str(i)}") for i in range(3)),
    )
    s3dict = S3Dict(_config=S3DictConfig(id="s3os_test"))
    s3dict.clear()
    # Every listed object in the dict's prefix must be deleted from s3.
    m_delete.assert_has_calls(
        [
            call(ObjectLocation("s3os_test/0")),
            call(ObjectLocation("s3os_test/1")),
            call(ObjectLocation("s3os_test/2")),
        ]
    )
    mock_generate_items_in_bucket.assert_called_once()
    assert_no_calls(m_store, m_retrieve)
| [
"noreply@github.com"
] | MartinHowarth.noreply@github.com |
d11ebcc5f146579561cd8f08dc0684481f02d04a | dcfcb05bd9e5dab2b5feaf5d3e57b6ccd286da40 | /content/forms.py | 73fe754833f74fc449dc3aae6af3831704f6a65b | [] | no_license | jsg1504/gori | 13a9bb0aedd350a5fb0b9ba3b645476baeab31d1 | 7db5f8c4477419f6bb6adfe3c57323341c8afef9 | refs/heads/master | 2021-01-10T22:14:28.607396 | 2016-07-24T08:32:24 | 2016-07-24T08:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | from __future__ import unicode_literals
from django import forms
from content.models import Content
class ContentEditForm(forms.ModelForm):
    """ModelForm exposing only a Content instance's uploaded image file."""

    class Meta:
        model = Content
        fields = ('image_file',)
        # NOTE(review): `fields` already whitelists image_file only, so this
        # `exclude` appears redundant -- confirm whether it can be dropped.
        exclude = ('description',)
| [
"jhlee9870@gmail.com"
] | jhlee9870@gmail.com |
1af1f78994d29c06376f48f05f8ca8dbd6ad1045 | f3a17b77d82bd2660f5306d7aa764f774aebc923 | /accredit/lib/base.py | 74b8abb8b6efd020c93cf74364a58a31135c30a0 | [] | no_license | drmalex07/accredit | b34888e3e61fb20b514a3c8466a95e479a3de7ad | 0899ee342d967ba23320f730e736fc8ddefe3bec | refs/heads/master | 2021-01-15T08:13:13.721513 | 2015-01-02T19:56:33 | 2015-01-02T19:56:33 | 28,724,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | """The base Controller API
Provides the BaseController class for subclassing.
"""
from pylons.controllers import WSGIController
from pylons.templating import render_genshi as render
from pylons import app_globals as g
from pylons.i18n import (get_lang, set_lang, _)
from accredit.model.meta import Session
class BaseController(WSGIController):
    """Base Pylons controller: sets the site language and cleans up the
    SQLAlchemy scoped session after each request."""

    def __call__(self, environ, start_response):
        """Invoke the Controller"""
        # Prepare the environment for all controllers
        set_lang(g.site_lang)
        # WSGIController.__call__ dispatches to the Controller method
        # the request is routed to. This routing information is
        # available in environ['pylons.routes_dict']
        try:
            return WSGIController.__call__(self, environ, start_response)
        finally:
            # Always drop the scoped session so no state leaks between requests.
            Session.remove()
| [
"alexakis@imis.athena-innovation.gr"
] | alexakis@imis.athena-innovation.gr |
d0de215878371885b76dad2b28ae5936db0e3b20 | 6a43af03fed9de794acbba957065a29bf5b8113b | /MCS/distrib/hw3_task2.2/reducer.py | 074e0d8b3b0c4f5213980fe8f8237ca4ccc7c6ab | [
"Unlicense"
] | permissive | Wiki-fan/MIPT-all | c80a1313a172efa726e4f7749085a0d18df6e989 | acdb35fa11bc4f0127da4d5de0c33c7d0e91b699 | refs/heads/master | 2021-05-09T04:14:57.412717 | 2018-01-29T08:40:56 | 2018-01-29T08:40:56 | 119,266,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!/usr/bin/env python
import sys
# Hadoop-streaming reducer: input lines are "key\tvalue" with value 0 or 1,
# assumed grouped/sorted by key (the streaming contract). For each key it
# emits "key\tcount_of_0s\tcount_of_1s".
sum0 = 0
sum1 = 0
current_key = None
for line in sys.stdin:
    arr = line.split('\t')
    key, val = arr
    if current_key != key:
        # Key changed: flush the counts accumulated for the previous key.
        if current_key:
            print("%s\t%d\t%d" % (current_key, sum0, sum1))
        sum0 = 0
        sum1 = 0
        current_key = key
    # int() tolerates the trailing newline left on val by the split.
    if int(val) == 0:
        sum0 += 1
    else:
        sum1 += 1
# Flush the final key's counts after the stream ends.
if current_key:
    print("%s\t%d\t%d" % (current_key, sum0, sum1))
| [
"uberslowpoke@gmail.com"
] | uberslowpoke@gmail.com |
9146101854204eb2ddf6005824a08783642fe417 | 5d3c2005010eb64ed4443f98460520f1fd40a22b | /assignment2/asgn2/classifiers/cnn.py | 0fc9d863a191652042c04161a4506180a275dfe7 | [] | no_license | danielsamfdo/CS682_NeuralNet | 16ffc1c3e1313931ea2b56aea4b20cbc0eb8602a | 6a906ae4864758dddf87249598bfa11d760bcc24 | refs/heads/master | 2021-03-16T08:41:57.987218 | 2018-03-20T15:01:46 | 2018-03-20T15:01:46 | 103,343,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,161 | py | import numpy as np
from asgn2.layers import *
from asgn2.fast_layers import *
from asgn2.layer_utils import *
class ThreeLayerConvNet(object):
  """
  A three-layer convolutional network with the following architecture:

  conv - relu - 2x2 max pool - affine - relu - affine - softmax

  The network operates on minibatches of data that have shape (N, C, H, W)
  consisting of N images, each with height H and width W and with C input
  channels.
  """

  def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
               hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
               dtype=np.float32):
    """
    Initialize a new network.

    Inputs:
    - input_dim: Tuple (C, H, W) giving size of input data
    - num_filters: Number of filters to use in the convolutional layer
    - filter_size: Size of filters to use in the convolutional layer
    - hidden_dim: Number of units to use in the fully-connected hidden layer
    - num_classes: Number of scores to produce from the final affine layer.
    - weight_scale: Scalar giving standard deviation for random initialization
      of weights.
    - reg: Scalar giving L2 regularization strength
    - dtype: numpy datatype to use for computation.
    """
    self.params = {}
    self.reg = reg
    self.dtype = dtype
    C, H, W = input_dim
    std = weight_scale
    # Conv weights: one (C, filter_size, filter_size) filter per output channel.
    self.params['W1'] = std * np.random.randn(num_filters, C, filter_size, filter_size)
    self.params['b1'] = np.zeros(num_filters)
    # The conv layer's padding preserves H x W, and the 2x2 pool halves each
    # spatial dimension, so the flattened feature size is num_filters*H*W/4.
    # Use // (floor division) so the shape stays an int under Python 3 as
    # well (plain / would produce a float and break np.random.randn); this is
    # identical to the old Python 2 integer-division behaviour.
    self.params['W2'] = std * np.random.randn(num_filters * H * W // 4, hidden_dim)
    self.params['b2'] = np.zeros(hidden_dim)
    self.params['W3'] = std * np.random.randn(hidden_dim, num_classes)
    self.params['b3'] = np.zeros(num_classes)

    # items() (rather than Python-2-only iteritems()) works on both versions.
    for k, v in self.params.items():
      self.params[k] = v.astype(dtype)

  def loss(self, X, y=None):
    """
    Evaluate loss and gradient for the three-layer convolutional network.

    Input / output: Same API as TwoLayerNet in fc_net.py:
    - If y is None, run a test-time forward pass and return class scores of
      shape (N, num_classes).
    - Otherwise, return (loss, grads) where loss is a scalar and grads maps
      the same keys as self.params to gradient arrays.
    """
    W1, b1 = self.params['W1'], self.params['b1']
    W2, b2 = self.params['W2'], self.params['b2']
    W3, b3 = self.params['W3'], self.params['b3']
    grads = {}

    # pass conv_param to the forward pass for the convolutional layer; this
    # stride/pad choice preserves the input's spatial size.
    filter_size = W1.shape[2]
    conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}

    # pass pool_param to the forward pass for the max-pooling layer
    # (2x2 pooling, which halves H and W).
    pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}

    # Forward pass: conv-relu-pool -> affine-relu -> affine.
    first_layer_output, first_layer_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
    second_layer_output, second_layer_cache = affine_relu_forward(first_layer_output, W2, b2)
    third_layer_output, third_layer_cache = affine_forward(second_layer_output, W3, b3)
    scores = np.copy(third_layer_output)

    if y is None:
      return scores

    # Softmax data loss plus L2 regularization over all weight matrices.
    loss, dout = softmax_loss(scores, y)
    reg = self.reg
    loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))

    # Backward pass mirrors the forward layers in reverse order.
    dthird_layer, grads['W3'], grads['b3'] = affine_backward(dout, third_layer_cache)
    dsecond_layer, grads['W2'], grads['b2'] = affine_relu_backward(dthird_layer, second_layer_cache)
    dfirst_layer, grads['W1'], grads['b1'] = conv_relu_pool_backward(dsecond_layer, first_layer_cache)
    # Add the regularization gradient contribution for each weight matrix.
    grads['W3'] += reg * W3
    grads['W2'] += reg * W2
    grads['W1'] += reg * W1
    return loss, grads
"danielsamfdo@gmail.com"
] | danielsamfdo@gmail.com |
691a689078a23f15d29871c71916d6a51e10f596 | b0a856174eb144baaab59b9b4de2b8654660f9aa | /FlipCoin.py | 3264f2621470268aa3bfaf2020e25e7acfe5618c | [] | no_license | ariprasathsakthivel/BasicCoreProgram | 5c1f302b0b256a7d18dffc0fb48890a70035b4c4 | 26e3c6560d2306e560993dd53413f34c775a971d | refs/heads/master | 2023-07-23T17:21:38.110376 | 2021-09-08T15:10:47 | 2021-09-08T15:10:47 | 404,186,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | '''
@Author: Ariprasath
@Date: 2021-09-08 08:15:00
@Last Modified by: Ariprasath
@Last Modified time: 2021-09-08 08:30:00
@Title : Flip coin and print percentage of head and tail
'''
import random
if __name__=="__main__":
count=abs(int(input("How many times do you want to flip the coin\n")))
percent_count=count
head_count=0
tail_count=0
while count>0:
num=random.randint(0,1)
count-=1
if num==0:
head_count+=1
else:
tail_count+=1
print("Head percentage : {}\nTail percentage : {}".format(int(head_count/percent_count*100),int(tail_count/percent_count*100)))
# print(f"Head percentage : {int(head_count/percent_count*100)}\n Tail percentage : {tail_count/percent_count*100}") | [
"67471289+ariprasathsakthivel@users.noreply.github.com"
] | 67471289+ariprasathsakthivel@users.noreply.github.com |
775a119a67245fdb0d9299d512d4b793d1281268 | 0f931d9e5b74f52a57499364d858819873bdf469 | /15.py | ea1afc8f020b5301aa75fbcffe5bfc0a28df61c1 | [] | no_license | estuprofe/AdventOfCode2019 | 43f4d6f96d580a1732d7932ea863613af270fe56 | 54450df616feef810fbd410ccc9d1b0670195e49 | refs/heads/master | 2022-04-03T11:35:30.553698 | 2019-12-22T03:21:33 | 2019-12-22T03:21:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | import fileinput
import heapq
import intcode
left, right, opposite = [2, 3, 1, 0], [3, 2, 0, 1], [1, 0, 3, 2]
dxs, dys = [0, 0, -1, 1], [-1, 1, 0, 0]
def traverse(program):
buf = []
gen = intcode.run(program, buf)
send = lambda d: buf.append(d + 1) or next(gen)
test = lambda d: send(d) and send(opposite[d])
d, p, cells, oxygen = 0, (0, 0), set(), None
while True:
if test(left[d]):
d = left[d] # turn left if possible
elif not test(d):
d = right[d] # else turn right if can't go straight
s = send(d)
if s == 0:
continue
p = (p[0] + dxs[d], p[1] + dys[d])
cells.add(p)
if s == 2:
oxygen = p
if p == (0, 0):
return cells, oxygen
def shortest_path(cells, source, target):
seen, queue = set(), [(0, source)]
while queue:
d, p = heapq.heappop(queue)
if p == target:
return d
seen.add(p)
for dx, dy in zip(dxs, dys):
q = (p[0] + dx, p[1] + dy)
if q in cells and q not in seen:
heapq.heappush(queue, (d + 1, q))
cells, oxygen = traverse(list(fileinput.input())[0])
print(shortest_path(cells, (0, 0), oxygen))
print(max(shortest_path(cells, cell, oxygen) for cell in cells))
| [
"fogleman@gmail.com"
] | fogleman@gmail.com |
31000a2ec88649069ec3b2d7c7689491fecc6263 | 809d9bef65e328d8782a43cc8f184755163211dd | /page_parsing.py | 0ec6254d0ffdf12b539d5ef0bbcf8ad213bea2d8 | [] | no_license | WangjiayuBerserker/zhonghua_project | 0baffcb9dfde0e83d850de8cff36135c0697fa97 | eb9f2ad4a7450a5c59ee593ebe0fcbc2de6aabd7 | refs/heads/master | 2020-03-10T22:16:07.716384 | 2018-04-15T13:57:22 | 2018-04-15T13:57:22 | 129,615,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | import requests
from bs4 import BeautifulSoup
import pymongo
client = pymongo.MongoClient('localhost',27017)
project_zh = client['project_zh']
url_list = project_zh['url_list']
item_info = project_zh['item_info']
# http://www.chinahr.com/haerbin/jobs/23272/2/
def get_links_from(url,page):
start_url = '{}{}/'.format(url,str(page))
web_data = requests.get(start_url)
soup = BeautifulSoup(web_data.text,'lxml')
if soup.find_all('li','l1'):
links = soup.select('li.l1 > span.e1 > a')
for link in links:
item_list = link.get('href')
data = {
'url': item_list
}
url_list.insert_one(data)
else:
pass
def get_item_from(url):
web_data = requests.get(url)
soup = BeautifulSoup(web_data.text,'lxml')
title = soup.select('span.job_name')[0].text if soup.find_all('span','job_name') else None
price = soup.select('span.job_price')[0].text.split('-') if soup.find_all('span','job_price') else None
area = soup.select('div.job_require > span.job_loc')[0].text.split() if soup.find_all('span','job_loc') else None
line = soup.select('div.job_require > span:nth-of-type(4)') if soup.find_all('div','job_require') else None
intro_info = soup.select('div.job_intro_info')[0].text.split()
print(intro_info)
# get_links_from('http://www.chinahr.com/haerbin/jobs/23272/',1)
get_item_from('http://www.chinahr.com/job/5555957736505857.html') | [
"wangjiayuhlj@163.com"
] | wangjiayuhlj@163.com |
f1883475c18fada917ce742224d4c5223a023126 | 659a7a65c877f2eb0adbb6001a1f85f063d01acd | /mscreen/autodocktools_prepare_py3k/AutoDockTools/autoanalyze4Commands.py | 1c6e7002d9350becb7fe81829ce939e0463cab13 | [
"MIT"
] | permissive | e-mayo/mscreen | da59771be250ebe341feb102e0cbf41aab70de43 | a50f0b2f7104007c730baa51b4ec65c891008c47 | refs/heads/main | 2023-06-21T17:47:06.519307 | 2021-08-09T16:06:29 | 2021-08-09T16:06:29 | 345,008,321 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,348 | py | #############################################################################
#
# Author: Ruth HUEY, Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2000
#
#############################################################################
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/autoanalyze4Commands.py,v 1.7 2009/02/26 22:14:47 rhuey Exp $
#
# $Id: autoanalyze4Commands.py,v 1.7 2009/02/26 22:14:47 rhuey Exp $
#
#
#
#
#
#
#
"""
This Module facilitates analyzing results of autodock jobs.
* The first step is 'Read Docking Log' The selected file is parsed
which sets self.docked to a new Docking instance. The Docking class
has attributes:
o dlgParser
x 'dlg': full pathname of dlg
o dpo
o ch:a conformation handler.
x 'clusterNum':
x 'clusterList':
x 'modelList': a list of docked conformations
o macroFile: the Macromolecule file used
o 'macro': filename of macromolecule (eg '1hvrCorr.pdbqt')
o 'macroStem': name of macromolecule up to last '.' (eg '1hvrCorr')
o ligand: the original ligand
o output: lines containing summary of docking
The new Docking is also entered in the dictionary 'dockings' as a separate item
whose key is the file and whose value is the Docking.
After the selected docking log file is parsed, the user can:
* select a displayed docked conformation using the 'Choose A Docked Conformation' menubutton. This opens a DockingChooser widget which is a ListChooser allowing selection either in the widget or in the viewer of any of the displayed docking. Information about each docked conformation is displayed in the information window of the DockingChooser as different entries are high-lighted.
* display the macromolecule via the "Show Macromolecule" menubutton. This menubutton is linked to a file browsers in case the macromolecule whose name is parsed from the docking log file is not in the current directory. (FIX THIS: what if the macromolecule is in a different directory but there is a molecule with the same name here???). The user can change the visibility, sampling, isovalue, renderMode and visibility of bounding box for each of the displayed grids
* display the autogrids used in the docking via the "Show Grids Used For Calc" menubutton. This menubutton is linked to a ListChooser which lets the user select whether to load all or some of the grids. The user can interactively change the visibility of each grid's isosurface, its sampling value, its isovalue, its rendermode (LINE or FILL) and the visibility of its bounding box.
* The user is able to visualize extra grid maps using the "Show Grid" button.
* If the current docking has clusters, the user is able to visualize a results histogram for it with 'Show Histogram'. The histogram can be printed.
* Result Summaries for docking(s) can be viewed, edited and saved with 'Get Output'
* Dockings can be deleted via 'Delete Docking Log'
"""
from ViewerFramework.VFCommand import CommandGUI
from AutoDockTools.autoanalyzeCommands import menuText,\
checkHasInitializedDockings, hideShowHide, toggleShowHide,\
checkNameStr, ADChooseMacro, ADReadMacro, ADEPDBMol,\
ADSeeSpots, ADShowBindingSite, ADMakeAllGrids, ADGetOutput,\
ADGetAGrid, ADSelectDLG, ADDeleteDLG, ADGetDirDLGs, ADGetDLG,\
ClusterDockingChooser, ModelDockingChooser, ADDrawHistogram,\
ADMacroLigandChart, ADDockingChooser, ReadAutoDockStates,\
StatesPlayerWidget, ShowAutoDockStatesBaseCmd, ShowAutoDockStates,\
ShowAutoDockStatesByEnergy, ShowAutoDockPopulation,\
ShowAutoDockStatesHISTOGRAM, ShowAutoDockClusteringStates,\
ReadAutoDockClusteringStates, WriteAutoDockStates,\
WriteAutoDockClustering, MakeAutoDockCLUSTERING,\
MakeAutoDockSubsetCLUSTERING
ADChooseMacroGUI=CommandGUI()
ADChooseMacroGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['chooseMacro'], cascadeName = menuText['MoleculesMB'])
ADReadMacroGUI=CommandGUI()
ADReadMacroGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['readMacro'], cascadeName = menuText['MoleculesMB'])
ADEPDBMolGUI=CommandGUI()
ADEPDBMolGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['epdbMol'], cascadeName = menuText['GridsMB'])
ADSeeSpotsGUI=CommandGUI()
ADSeeSpotsGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['seeSpots'], cascadeName = menuText['DockingLogMB'])
ADShowBindingSiteGUI=CommandGUI()
ADShowBindingSiteGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['showBindingSite'], cascadeName = menuText['DockingLogMB'])
ADMakeAllGridsGUI=CommandGUI()
ADMakeAllGridsGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['showGridsMB'], cascadeName=menuText['GridsMB'])
ADGetOutputGUI=CommandGUI()
ADGetOutputGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['getOutputMB'] , cascadeName=menuText['StatesMB'])
ADGetAGridGUI=CommandGUI()
ADGetAGridGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['addGridMB'], cascadeName=menuText['GridsMB'])
ADSelectDLGGUI=CommandGUI()
ADSelectDLGGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['selectDLG'], cascadeName = menuText['DockingLogMB'])
ADDeleteDLGGUI=CommandGUI()
ADDeleteDLGGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['deleteDLG'], cascadeName = menuText['DockingLogMB'])
ADGetDirDLGsGUI=CommandGUI()
ADGetDirDLGsGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['readDirDLG'], cascadeName = menuText['DockingLogMB'])
ADGetDLGGUI=CommandGUI()
ADGetDLGGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['readDLG'], cascadeName = menuText['DockingLogMB'])
###ADGetDLGGUI.menuBarCfg.update({'background':'tan','relief':'sunken'})
ADDrawHistogramGUI=CommandGUI()
ADDrawHistogramGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['showHistogramMB'], cascadeName=menuText['StatesMB'])
ADMacroLigandChartGUI=CommandGUI()
ADMacroLigandChartGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['showChartMB'], cascadeName=menuText['StatesMB'])
ADDockingChooserGUI=CommandGUI()
ADDockingChooserGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['chooseConfMB'], cascadeName = menuText['StatesMB'])
ReadAutoDockStatesGUI = CommandGUI()
ReadAutoDockStatesGUI.addMenuCommand('AutoTools4Bar', menuText['AnalyzeMB'],
menuText['readStatesMB'],cascadeName=menuText['StatesMB'])
ShowAutoDockStatesGUI = CommandGUI()
ShowAutoDockStatesGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['showStatesMB'],
cascadeName=menuText['StatesMB'])
ShowAutoDockStatesByEnergyGUI = CommandGUI()
ShowAutoDockStatesByEnergyGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['showStatesByEnergyMB'],
cascadeName=menuText['StatesMB'])
ShowAutoDockPopulationGUI = CommandGUI()
ShowAutoDockPopulationGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['showPopulationMB'],
cascadeName=menuText['StatesMB'])
ShowAutoDockStatesHISTOGRAMGUI = CommandGUI()
ShowAutoDockStatesHISTOGRAMGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['showStatesHISTOGRAMMB'],
cascadeName=menuText['StatesMB'])
ShowAutoDockStatesCLUSTERINGGUI = CommandGUI()
ShowAutoDockStatesCLUSTERINGGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['showStatesCLUSTERINGMB'],
cascadeName=menuText['ClusteringMB'])
ReadAutoDockStatesCLUSTERINGGUI = CommandGUI()
ReadAutoDockStatesCLUSTERINGGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['readStatesCLUSTERINGMB'],
cascadeName=menuText['ClusteringMB'])
WriteAutoDockStatesGUI = CommandGUI()
WriteAutoDockStatesGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['writeResultMB'],
cascadeName=menuText['StatesMB'])
WriteAutoDockClusteringGUI = CommandGUI()
WriteAutoDockClusteringGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['writeClusteringMB'],
cascadeName=menuText['ClusteringMB'])
MakeAutoDockCLUSTERINGGUI = CommandGUI()
MakeAutoDockCLUSTERINGGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['makeCLUSTERINGMB'],
cascadeName=menuText['ClusteringMB'])
MakeAutoDockSubsetCLUSTERINGGUI = CommandGUI()
MakeAutoDockSubsetCLUSTERINGGUI.addMenuCommand('AutoTools4Bar',
menuText['AnalyzeMB'], menuText['makeSubsetCLUSTERINGMB'],
cascadeName=menuText['ClusteringMB'])
commandList = [
{'name':'AD4analyze_readDLG','cmd':ADGetDLG(),'gui':ADGetDLGGUI},
{'name':'AD4analyze_readAllDLGInDirectory','cmd':ADGetDirDLGs(),'gui':ADGetDirDLGsGUI},
{'name':'AD4analyze_selectDLG','cmd':ADSelectDLG(),'gui':ADSelectDLGGUI},
{'name':'AD4analyze_deleteDLG','cmd':ADDeleteDLG(),'gui':ADDeleteDLGGUI},
{'name':'AD4analyze_readMacromolecule','cmd':ADReadMacro(),'gui':ADReadMacroGUI},
{'name':'AD4analyze_chooseMacromolecule','cmd':ADChooseMacro(),'gui':ADChooseMacroGUI},
{'name':'AD4analyze_showDockingsAsSpheres','cmd':ADSeeSpots(),'gui':ADSeeSpotsGUI},
{'name':'AD4analyze_showBindingSite','cmd':ADShowBindingSite(),'gui':ADShowBindingSiteGUI},
#{'name':'AD4analyze_readStates','cmd':ReadAutoDockStates(),'gui':ReadAutoDockStatesGUI},
{'name':'AD4analyze_showStates','cmd':ShowAutoDockStates(),'gui':ShowAutoDockStatesGUI},
{'name':'AD4analyze_showStatesByEnergy','cmd':ShowAutoDockStatesByEnergy(),'gui':ShowAutoDockStatesByEnergyGUI},
{'name':'AD4analyze_showPopulation','cmd':ShowAutoDockPopulation(),'gui':ShowAutoDockPopulationGUI},
{'name':'AD4analyze_chooseDockedConformations','cmd':ADDockingChooser(),'gui':ADDockingChooserGUI},
#{'name':'AD4analyze_showStatesHISTOGRAM','cmd':ShowAutoDockStatesHISTOGRAM(),'gui':ShowAutoDockStatesHISTOGRAMGUI},
#{'name':'AD4analyze_showResultsOutput','cmd':ADGetOutput(),'gui':ADGetOutputGUI},
#{'name':'AD4analyze_showHistogram','cmd':ADDrawHistogram(),'gui':ADDrawHistogramGUI},
#{'name':'AD4analyze_getChart','cmd':ADMacroLigandChart(),'gui':ADMacroLigandChartGUI},
#{'name':'AD4analyze_writeStates','cmd':WriteAutoDockStates(),'gui':WriteAutoDockStatesGUI},
{'name':'AD4analyze_showClusteringStates','cmd':ShowAutoDockClusteringStates(),'gui':ShowAutoDockStatesCLUSTERINGGUI},
#{'name':'AD4analyze_readClusteringStates','cmd':ReadAutoDockClusteringStates(),'gui':ReadAutoDockStatesCLUSTERINGGUI},
{'name':'AD4analyze_makeClustering','cmd':MakeAutoDockCLUSTERING(),'gui':MakeAutoDockCLUSTERINGGUI},
{'name':'AD4analyze_makeSubsetClustering','cmd':MakeAutoDockSubsetCLUSTERING(),'gui':MakeAutoDockSubsetCLUSTERINGGUI},
#{'name':'AD4analyze_writeClustering','cmd':WriteAutoDockClustering(),'gui':WriteAutoDockClusteringGUI},
]
try:
from Pmv.Grid import AutoGrid, AutoGridSurfaceGui
for i in [ #{'name':'AD4analyze_epdbMolecule', 'cmd':ADEPDBMol(), 'gui':ADEPDBMolGUI},
{'name':'AD4analyze_addExtraGridIsocontour','cmd':ADGetAGrid(),'gui':ADGetAGridGUI}, {'name':'AD4analyze_showGridIsocontours','cmd':ADMakeAllGrids(),'gui':ADMakeAllGridsGUI}]:
commandList.insert(7,i)
except:
print('skipping the isocontour-dependent commands')
def initModule(vf):
for dict in commandList:
vf.addCommand(dict['cmd'],dict['name'],dict['gui'])
#if not hasattr(vf, 'ADanalyze_showHistogram') and hasattr(vf, 'AD4analyze_showHistogram'):
# vf.ADanalyze_showHistogram = vf.AD4analyze_showHistogram
if not hasattr(vf, 'ADanalyze_showDockingsAsSpheres') and hasattr(vf, 'AD4analyze_showDockingsAsSpheres'):
vf.ADanalyze_showDockingsAsSpheres = vf.AD4analyze_showDockingsAsSpheres
if not hasattr(vf, 'ADanalyze_showGridIsocontours') and hasattr(vf, 'AD4analyze_showGridIsocontours'):
vf.ADanalyze_showGridIsocontours = vf.AD4analyze_showGridIsocontours
if not hasattr(vf, 'ADanalyze_showBindingSite') and hasattr(vf, 'AD4analyze_showBindingSite'):
vf.ADanalyze_showBindingSite = vf.AD4analyze_showBindingSite
if not hasattr(vf, 'ADanalyze_chooseDockedConformations') and hasattr(vf, 'AD4analyze_chooseDockedConformations'):
vf.ADanalyze_chooseDockedConformations = vf.AD4analyze_chooseDockedConformations
if not hasattr(vf, 'ADanalyze_readDLG') and hasattr(vf, 'AD4analyze_readDLG'):
vf.ADanalyze_readDLG = vf.AD4analyze_readDLG
if not hasattr(vf, 'ADanalyze_selectDLG') and hasattr(vf, 'AD4analyze_selectDLG'):
vf.ADanalyze_selectDLG = vf.AD4analyze_selectDLG
if not hasattr(vf, 'ADanalyze_makeSubsetClustering') and hasattr(vf, 'AD4analyze_makeSubsetClustering'):
vf.ADanalyze_makeSubsetClustering = vf.AD4analyze_makeSubsetClustering
if hasattr(vf, 'GUI'):
for item in list(vf.GUI.menuBars['AutoTools4Bar'].menubuttons.values()):
item.configure(background = 'tan')
if not hasattr(vf.GUI, 'adtBar'):
vf.GUI.adtBar = vf.GUI.menuBars['AutoTools4Bar']
vf.GUI.adtFrame = list(vf.GUI.adtBar.menubuttons.values())[0].master
| [
"eduardomayoyanes@gmail.com"
] | eduardomayoyanes@gmail.com |
957ae7bfb14341b990ca697a6415bd9a2e092fb4 | 0920f2b166190ae7d8f282b4476ffcfd7d22a148 | /0x03-python-data_structures/1-element_at.py | 3c9a8abb264b4fa5a9cb9706fb51b990b1885f60 | [] | no_license | nkcornelius/alx-higher_level_programming | c0a92fdfb9dc39c02a31c05fc65f62b711bd22ed | eb0977dfa7fc50daee5214848fd6e8838b5c7557 | refs/heads/main | 2023-08-19T08:02:08.171970 | 2021-10-03T11:56:37 | 2021-10-03T11:56:37 | 404,076,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | #!/usr/bin/python3
def element_at(my_list, idx):
if idx < 0 or idx > len(my_list) - 1:
return 'None'
else:
return my_list[idx]
| [
"ngetichcornelius0@gmail.com"
] | ngetichcornelius0@gmail.com |
e1df19bd7b54072b7916091bc544fd40129d5ef7 | d6cd4c2ffedf8cd4f94c53bc91169c9d8a513521 | /Pypeline/Pypes/Node.py | 5e36ab4605ff50e6c96af2b06b0376c7304d8d7c | [
"MIT"
] | permissive | akiljames83/Pypeline | f66635a1454b79b98138d717b43660144d69c9ae | c06a7e954ff59e1a6cfb7e4e81640720e61d8bfd | refs/heads/master | 2021-12-27T01:37:26.002529 | 2021-05-09T20:23:26 | 2021-05-09T20:23:26 | 149,651,390 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | """
Implementation of the node object
"""
from typing import Any, Optional
class Node(object):
def __init__(self, val: Any=None):
'''
Base for node object containing node value and visited state.
val -> Any data type
visited -> bool
'''
self.val: Any = val
self.visited: bool = False
def setVisited(self) -> None:
self.visited = True
def swapVisited(self) -> None:
self.visited = not self.visited
def isVisited(self) -> bool:
return self.visited
def setVal(self, val) -> None:
self.val = val
def getVal(self) -> Any:
return self.val | [
"akil.james83@gmail.com"
] | akil.james83@gmail.com |
a5888f45449d9d8bb0e3f5a7ffc919981ad9291f | a1b8d59a2a297e3404c12efbe51dcd80920d767a | /tests/requests/test_pokemon_controller.py | eea329f8dd2c12c3eec62a74c35965d4164fd9aa | [] | no_license | rafaelsmedina/pokemon-team-predictor | 7edf8dd3ea5113e696c0da381656ca859a927300 | 2edd8ec4a00bf6781affb7b5b850d01824593ae3 | refs/heads/master | 2020-06-15T04:56:34.699350 | 2017-01-09T19:41:33 | 2017-01-09T19:41:33 | 75,327,291 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | from test_base import BaseTestCase
from app.models.pokemon import Pokemon
from factories.pokemon_factory import PokemonFactory
class TestPokemonController(BaseTestCase):
render_templates = False
def setUp(self):
PokemonFactory.create_batch(1)
def tearDown(self):
Pokemon.query.delete()
def test_pokemon_template_should_be_used(self):
response = self.client.get("/pokemon/1/")
self.assert_template_used('pokemon/pokemon.html')
def test_list_path_should_be_ok(self):
response = self.client.get("/pokemon/all/")
self.assert_200(response) | [
"r.salesmf@gmail.com"
] | r.salesmf@gmail.com |
c86215f987c2ba0c2ffd91294eb7e3c2e8b8c5da | 26a864ff6a8f97f5dc06251f49aaa68379e7b6cd | /app/views.py | 3796664041ceaa771f0718c3b79fba78a82a9e6c | [] | no_license | isradesu/Agenda | 49973f435feebf6ca7cd179d70be0dc531a064ff | ce4367933231d2a6033f6045c06caecb6d663d6a | refs/heads/main | 2023-06-24T18:27:35.218185 | 2021-07-14T18:55:03 | 2021-07-14T18:55:03 | 384,508,640 | 0 | 0 | null | 2021-07-09T17:30:38 | 2021-07-09T17:30:38 | null | UTF-8 | Python | false | false | 5,027 | py | import json
from flask import Blueprint, render_template, request, redirect, url_for, flash, jsonify
from flask_login import current_user, login_required
from app.jsons.funcs_jsons import carrega_addressbooks, guarda_addressbooks
from app.models.models import ClassFactory
views = Blueprint('views', __name__)
def procura(campo, dicionario):
"""
Função de pesquisa da página Home.
:param campo:
:param dicionario:
:return contato or None:
"""
for nome, contatos in dicionario.items():
for contato in contatos:
for valor in contato.values():
if campo == valor:
return contato
return None
@views.route('/home', methods=['GET', 'POST'])
@login_required
def home():
if request.method == "POST":
campo = request.form.get('pesquisa')
addressbook = carrega_addressbooks()
contato = procura(campo, addressbook)
if contato is None:
flash('Contato não encontrado. Tente novamente.', category='error')
return redirect(url_for('views.home'))
return render_template('pesquisa.html', user=current_user, contato=contato)
return render_template('home.html', user=current_user, addressbook=carrega_addressbooks())
def lista_jinja(dicionario, campo):
"""
Lista os contatos de acordo com o tipo de campo introduzido e retorna-os numa lista.
:param dicionario:
:param campo:
:return list_listagem:
"""
nome_user = str(current_user.name.lower())
list_listagem = []
for contato in dicionario[nome_user]:
list_listagem.append(contato[campo])
return list_listagem
@views.route('/listar', methods=['GET', 'POST'])
@login_required
def listar():
if request.method == 'POST':
addressbook = carrega_addressbooks()
campo = request.form.get('campo')
list_listagem = lista_jinja(addressbook, campo)
return render_template('listagem.html', user=current_user, listagem=list_listagem)
return render_template('listagem_form.html', user=current_user)
@views.route('/inserir', methods=['GET', 'POST'])
@login_required
def inserir():
if request.method == "POST":
name = request.form.get('name')
email = request.form.get('email')
number = request.form.get('number')
address = request.form.get('address')
new_contact = ClassFactory.build_obj(2, name, email, number, address)
addressbook = carrega_addressbooks()
nome_user = str(current_user.name.lower())
for nome, contatos in addressbook.items():
if nome == nome_user:
contatos.append(new_contact.to_json())
guarda_addressbooks(addressbook)
flash('Contato adicionado!', category='sucess')
return redirect(url_for('views.home'))
return render_template('inserir.html', user=current_user, addressbook=carrega_addressbooks())
@views.route('/update', methods=['GET', 'POST'])
@views.route('/atualizar', methods=['GET', 'POST'])
@login_required
def atualizar():
if request.method == "POST":
name = request.form.get('name')
email = request.form.get('email')
number = request.form.get('number')
address = request.form.get('address')
contact = ClassFactory.build_obj(2, name, email, number, address)
return redirect(url_for('views.atualizar2', contato=contact.to_json()))
return render_template('atualizar.html', user=current_user, addressbook=carrega_addressbooks())
@views.route('/update2/<contato>', methods=['GET', 'POST'])
@views.route('/atualizar2/<contato>', methods=['GET', 'POST'])
@login_required
def atualizar2(contato):
if request.method == 'POST':
name = request.form.get('name')
email = request.form.get('email')
number = request.form.get('number')
address = request.form.get('address')
new_contact = ClassFactory.build_obj(2, name, email, number, address)
addressbook = carrega_addressbooks()
nome_user = str(current_user.name.lower())
for nome, contatos in addressbook.items():
if nome == nome_user:
for contact in contatos:
if str(contact) == str(contato):
print('==LOOP PARA ATUALIZAR!==')
contatos.remove(contact)
contatos.append(new_contact.to_json())
guarda_addressbooks(addressbook)
return redirect(url_for('views.home'))
return render_template('atualizar2.html', user=current_user)
@views.route('/delete-contact', methods=['POST'])
def deletecontact():
dado = json.loads(request.data)
contact = dado['contato']
addressbook = carrega_addressbooks()
nome_user = str(current_user.name.lower())
for nome, contatos in addressbook.items():
if nome == nome_user:
contatos.remove(contact)
guarda_addressbooks(addressbook)
return jsonify({})
| [
"ruannyury1@outlook.com"
] | ruannyury1@outlook.com |
ef907923a1970b33a70abe7364cdcf42e701a3d2 | 3cea6c6664d9489b4cfb33ea8580f8189b5839ff | /torchex/nn/modules/padding.py | ca8bc82e42fac577d1304747aa66ed99bb511ce6 | [
"MIT"
] | permissive | tarokiritani/torchex | 81c24457337bdbf6ad103dd9ded5488b69b468bd | 5e9d8f7f08a3931c2271e108d73226b1ee6b3efa | refs/heads/master | 2020-04-12T17:55:02.960878 | 2018-12-14T09:37:46 | 2018-12-14T09:37:46 | 162,661,997 | 0 | 0 | null | 2018-12-21T03:40:19 | 2018-12-21T03:40:19 | null | UTF-8 | Python | false | false | 2,265 | py | import torch
import torch.nn as nn
class PeriodicPad2d(nn.Module):
"""
:params torch.Tensor input: Input(B, C, W, H)
# https://github.com/ZichaoLong/aTEAM/blob/master/nn/functional/utils.py
"""
def __init__(self,
pad_left: int=0, pad_right: int=0,
pad_top: int=0, pad_bottom: int=0):
super(PeriodicPad2d, self).__init__()
self.__doc__ = 'hello'
self.pad_left = pad_left
self.pad_right = pad_right
self.pad_top = pad_top
self.pad_bottom = pad_bottom
def forward(self, input):
assert input.dim() == 4, 'only support Input(B, C, W, H) or Input(B, C, H, W)'
B, C, H, W = input.size()
left_pad = input[:, :, :, W-(self.pad_left):]
right_pad = input[:, :, :, :self.pad_right]
input = torch.cat([left_pad, input, right_pad], dim=3)
top_pad = input[:, :, H-(self.pad_top):, :]
bottom_pad = input[:, :, :self.pad_bottom, :]
input = torch.cat([top_pad, input, bottom_pad], dim=2)
return input
class PeriodicPad3d(nn.Module):
'''
Only support isotropic padding
'''
def __init__(self, pad: int=0):
super(PeriodicPad3d, self).__init__()
self.pad = pad
def forward(self, input):
'''
:params torch.Tensor input: Input(B, C, D, W, H)
'''
assert input.dim() == 5, 'only support Input(B, C, D, W, H)'
B, C, D, H, W = input.size()
pad_0 = input[:, :, D-(self.pad):, :, :]
pad_1 = input[:, :, :self.pad, :, :]
input = torch.cat([pad_0, input, pad_1], dim=2)
pad_0 = input[:, :, :, H-(self.pad):, :]
pad_1 = input[:, :, :, :self.pad, :]
input = torch.cat([pad_0, input, pad_1], dim=3)
pad_0 = input[:, :, :, :, W-(self.pad):]
pad_1 = input[:, :, :, :, :self.pad]
input = torch.cat([pad_0, input, pad_1], dim=4)
return input
if __name__ == '__main__':
x = torch.range(1, 25).view(1, 1, 5, 5)
print(x)
pad = PeriodicPad2d(2, 2, 2, 1)
print(pad(x))
print(pad(x).shape)
x = torch.range(1, 27).view(1, 1, 3, 3, 3)
pad = PeriodicPad3d(1)
print(pad(x))
| [
"kbu94982@gmail.com"
] | kbu94982@gmail.com |
f303c4c5c52b859986065ba36976c2cd24f5fa30 | 4e8e9ed2a8fb69ed8b46066a8d967e4c107013a4 | /main/control/comment.py | 74b22b2e72d524f3e59cb31990a4cf5d1b395682 | [
"MIT"
] | permissive | welovecoding/vote4code | a57b3d155096d362dca47587ad2985b4201ef036 | be265d553af35dc6c5322ecb3f7d5b3cf7691b75 | refs/heads/master | 2021-08-11T22:46:40.884030 | 2019-11-15T16:15:05 | 2019-11-15T16:15:05 | 90,191,931 | 14 | 0 | MIT | 2021-08-10T22:50:49 | 2017-05-03T20:46:02 | Python | UTF-8 | Python | false | false | 5,801 | py | # coding: utf-8
from google.appengine.ext import ndb
import flask
import flask_wtf
import wtforms
import auth
import config
import model
import util
from main import app
###############################################################################
# Update
###############################################################################
class CommentUpdateForm(flask_wtf.FlaskForm):
content = wtforms.TextAreaField(
model.Comment.content._verbose_name,
[wtforms.validators.required()],
filters=[util.strip_filter],
)
post_key = wtforms.SelectField(
model.Comment.post_key._verbose_name,
[wtforms.validators.required()],
choices=[],
)
@app.route('/comment/create/', methods=['GET', 'POST'])
@app.route('/comment/<int:comment_id>/update/', methods=['GET', 'POST'])
@auth.login_required
def comment_update(comment_id=0):
  """Create a new comment or update one owned by the current user.

  With a non-zero ``comment_id`` the existing entity is loaded; otherwise
  a fresh Comment bound to the current user is created. Aborts with 404
  when the comment is missing or belongs to a different user.
  """
  if comment_id:
    comment_db = model.Comment.get_by_id(comment_id)
  else:
    comment_db = model.Comment(user_key=auth.current_user_key())
  if not comment_db or comment_db.user_key != auth.current_user_key():
    flask.abort(404)

  form = CommentUpdateForm(obj=comment_db)
  # Populate the post selector from all posts. (A query for every User
  # used to run here as well, but its result was never used, so that
  # needless datastore round-trip has been removed.)
  post_dbs, _post_cursor = model.Post.get_dbs(limit=-1)
  form.post_key.choices = [(c.key.urlsafe(), c.title) for c in post_dbs]
  if flask.request.method == 'GET' and not form.errors:
    # Pre-select the comment's current post when first rendering the form.
    form.post_key.data = comment_db.post_key.urlsafe() if comment_db.post_key else None
  if form.validate_on_submit():
    # The select widget holds a urlsafe key string; convert it back to an
    # ndb.Key before copying the form data onto the entity.
    form.post_key.data = ndb.Key(urlsafe=form.post_key.data) if form.post_key.data else None
    form.populate_obj(comment_db)
    comment_db.put()
    return flask.redirect(flask.url_for('comment_view', comment_id=comment_db.key.id()))
  return flask.render_template(
    'comment/comment_update.html',
    title=comment_db.content if comment_id else 'New Comment',
    html_class='comment-update',
    form=form,
    comment_db=comment_db,
  )
###############################################################################
# List
###############################################################################
@app.route('/comment/')
def comment_list():
  """Render the public, cursor-paginated list of all comments."""
  dbs, cursor = model.Comment.get_dbs()
  context = {
    'html_class': 'comment-list',
    'title': 'Comment List',
    'comment_dbs': dbs,
    'next_url': util.generate_next_url(cursor),
    'api_url': flask.url_for('api.comment.list'),
  }
  return flask.render_template('comment/comment_list.html', **context)
###############################################################################
# View
###############################################################################
@app.route('/comment/<int:comment_id>/')
def comment_view(comment_id):
  """Render a single comment page, or abort with 404 for an unknown id."""
  comment_db = model.Comment.get_by_id(comment_id)
  if not comment_db:
    flask.abort(404)
  # The API endpoint needs the entity's urlsafe key (empty if keyless).
  if comment_db.key:
    comment_key = comment_db.key.urlsafe()
  else:
    comment_key = ''
  return flask.render_template(
    'comment/comment_view.html',
    html_class='comment-view',
    title=comment_db.content,
    comment_db=comment_db,
    api_url=flask.url_for('api.comment', comment_key=comment_key),
  )
###############################################################################
# Admin List
###############################################################################
@app.route('/admin/comment/')
@auth.admin_required
def admin_comment_list():
  """Render the admin comment list, sortable via the ``order`` parameter.

  Defaults to most-recently-modified first when no order is given.
  """
  sort_order = util.param('order') or '-modified'
  dbs, cursor = model.Comment.get_dbs(order=sort_order)
  context = {
    'html_class': 'admin-comment-list',
    'title': 'Comment List',
    'comment_dbs': dbs,
    'next_url': util.generate_next_url(cursor),
    'api_url': flask.url_for('api.admin.comment.list'),
  }
  return flask.render_template('comment/admin_comment_list.html', **context)
###############################################################################
# Admin Update
###############################################################################
class CommentUpdateAdminForm(CommentUpdateForm):
  """Admin variant of CommentUpdateForm.

  Currently identical to the base form; kept as a separate class so
  admin-only fields can be added later without touching the user form.
  """
  pass
@app.route('/admin/comment/create/', methods=['GET', 'POST'])
@app.route('/admin/comment/<int:comment_id>/update/', methods=['GET', 'POST'])
@auth.admin_required
def admin_comment_update(comment_id=0):
  """Admin view: create a comment or update any existing comment.

  Unlike the user-facing view there is no ownership check — an admin may
  edit any comment. Aborts with 404 for an unknown ``comment_id``.
  """
  if comment_id:
    comment_db = model.Comment.get_by_id(comment_id)
  else:
    comment_db = model.Comment(user_key=auth.current_user_key())
  if not comment_db:
    flask.abort(404)

  form = CommentUpdateAdminForm(obj=comment_db)
  # Populate the post selector from all posts. (A query for every User
  # used to run here as well, but its result was never used, so that
  # needless datastore round-trip has been removed.)
  post_dbs, _post_cursor = model.Post.get_dbs(limit=-1)
  form.post_key.choices = [(c.key.urlsafe(), c.title) for c in post_dbs]
  if flask.request.method == 'GET' and not form.errors:
    # Pre-select the comment's current post when first rendering the form.
    form.post_key.data = comment_db.post_key.urlsafe() if comment_db.post_key else None
  if form.validate_on_submit():
    # Convert the urlsafe key string from the select widget back to an
    # ndb.Key before copying the form data onto the entity.
    form.post_key.data = ndb.Key(urlsafe=form.post_key.data) if form.post_key.data else None
    form.populate_obj(comment_db)
    comment_db.put()
    return flask.redirect(flask.url_for('admin_comment_list', order='-modified'))
  return flask.render_template(
    'comment/admin_comment_update.html',
    title=comment_db.content,
    html_class='admin-comment-update',
    form=form,
    comment_db=comment_db,
    back_url_for='admin_comment_list',
    api_url=flask.url_for('api.admin.comment', comment_key=comment_db.key.urlsafe() if comment_db.key else ''),
  )
###############################################################################
# Admin Delete
###############################################################################
@app.route('/admin/comment/<int:comment_id>/delete/', methods=['POST'])
@auth.admin_required
def admin_comment_delete(comment_id):
  """Delete a comment by id and redirect to the admin comment list.

  Aborts with 404 when the id does not exist, matching the guard used by
  comment_view/admin_comment_update; previously a missing id raised an
  AttributeError (HTTP 500) on ``comment_db.key``.
  """
  comment_db = model.Comment.get_by_id(comment_id)
  if not comment_db:
    flask.abort(404)
  comment_db.key.delete()
  flask.flash('Comment deleted.', category='success')
  return flask.redirect(flask.url_for('admin_comment_list'))
| [
"lipiridis@gmail.com"
] | lipiridis@gmail.com |
224115799dcddd421f082f520cd9f670ef3cd9cc | e81fabdd6988c787524755fac73aa9d3631fc64c | /tests/test_ops/test_early_stopping.py | 286560c5fd38fb4cc2edbac48b85b01eeecdd9e7 | [
"MIT"
] | permissive | granularai/polyaxon-schemas | 0aa06f15b7353ceb6d31f1e5cf63c269ab0e2ce4 | 017ae74701f21f12f0b25e75379681ea5d8baa9e | refs/heads/master | 2022-08-30T00:05:40.888476 | 2020-05-19T17:22:46 | 2020-05-19T17:22:46 | 265,312,701 | 0 | 0 | MIT | 2020-05-19T17:16:38 | 2020-05-19T17:16:37 | null | UTF-8 | Python | false | false | 1,874 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from tests.utils import assert_equal_dict
from polyaxon_schemas.ops.group.early_stopping_policies import EarlyStoppingConfig
from polyaxon_schemas.ops.group.metrics import Optimization
class TestEarlyStoppingConfigs(TestCase):
    """Round-trip tests: EarlyStoppingConfig.from_dict(...).to_dict() must
    reproduce the input dict (optionally with defaulted fields)."""
    def test_early_stopping(self):
        """Minimal config: optimization defaults to MAXIMIZE when omitted."""
        config_dict = {
            'metric': 'loss',
            'value': 0.1,
        }
        config = EarlyStoppingConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        # The default optimization is injected by the schema; pop it so the
        # remaining dict can be compared against the original input.
        assert config_to_dict.pop('optimization') == Optimization.MAXIMIZE
        assert_equal_dict(config_to_dict, config_dict)
    def test_early_stopping_with_median_policy(self):
        """Config with an explicit median early-stopping policy."""
        config_dict = {
            'metric': 'loss',
            'value': 0.1,
            'optimization': Optimization.MINIMIZE,
            'policy': {'kind': 'median', 'evaluation_interval': 1}
        }
        config = EarlyStoppingConfig.from_dict(config_dict)
        assert_equal_dict(config.to_dict(), config_dict)
    def test_early_stopping_with_average_policy(self):
        """Config with an explicit average early-stopping policy."""
        config_dict = {
            'metric': 'loss',
            'value': 0.1,
            'optimization': Optimization.MINIMIZE,
            'policy': {'kind': 'average', 'evaluation_interval': 1}
        }
        config = EarlyStoppingConfig.from_dict(config_dict)
        assert_equal_dict(config.to_dict(), config_dict)
    def test_early_stopping_with_truncation_policy(self):
        """Config with a truncation policy (keeps the top ``percent``)."""
        config_dict = {
            'metric': 'loss',
            'value': 0.1,
            'optimization': Optimization.MAXIMIZE,
            'policy': {'kind': 'truncation', 'percent': 50, 'evaluation_interval': 1}
        }
        config = EarlyStoppingConfig.from_dict(config_dict)
        assert_equal_dict(config.to_dict(), config_dict)
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
9ff9b1b4f5e88031f1b4c71bf900b366103e5a6f | b67efb7ac1832f2a70aa570f8025c69498a8cd71 | /pgoapi/protos/POGOProtos/Data/Logs/FortSearchLogEntry_pb2.py | 7469fad7bf20a643ec48fffd8c8889493a9bf5e5 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | PogoHop/pgoapi-hsvr | f1513d7548075a7defd21f1018bd59afcb79d78f | b5761159e0240bbb81ef6c257fe2eb1bc1ce2d47 | refs/heads/master | 2021-01-12T11:17:55.334203 | 2016-11-05T12:48:38 | 2016-11-05T12:48:38 | 72,892,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,709 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Data/Logs/FortSearchLogEntry.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Inventory.Item import ItemData_pb2 as POGOProtos_dot_Inventory_dot_Item_dot_ItemData__pb2
# NOTE: protoc-generated descriptor wiring below. Regenerate from
# FortSearchLogEntry.proto rather than editing this module by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Data/Logs/FortSearchLogEntry.proto',
  package='POGOProtos.Data.Logs',
  syntax='proto3',
  serialized_pb=_b('\n-POGOProtos/Data/Logs/FortSearchLogEntry.proto\x12\x14POGOProtos.Data.Logs\x1a(POGOProtos/Inventory/Item/ItemData.proto\"\xca\x01\n\x12\x46ortSearchLogEntry\x12?\n\x06result\x18\x01 \x01(\x0e\x32/.POGOProtos.Data.Logs.FortSearchLogEntry.Result\x12\x0f\n\x07\x66ort_id\x18\x02 \x01(\t\x12\x32\n\x05items\x18\x03 \x03(\x0b\x32#.POGOProtos.Inventory.Item.ItemData\x12\x0c\n\x04\x65ggs\x18\x04 \x01(\x05\" \n\x06Result\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x62\x06proto3')
,
  dependencies=[POGOProtos_dot_Inventory_dot_Item_dot_ItemData__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Enum descriptor for FortSearchLogEntry.Result (UNSET=0, SUCCESS=1).
_FORTSEARCHLOGENTRY_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='POGOProtos.Data.Logs.FortSearchLogEntry.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNSET', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SUCCESS', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=284,
  serialized_end=316,
)
_sym_db.RegisterEnumDescriptor(_FORTSEARCHLOGENTRY_RESULT)
# Message descriptor: result (enum), fort_id (string), items (repeated
# ItemData), eggs (int32).
_FORTSEARCHLOGENTRY = _descriptor.Descriptor(
  name='FortSearchLogEntry',
  full_name='POGOProtos.Data.Logs.FortSearchLogEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='POGOProtos.Data.Logs.FortSearchLogEntry.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='fort_id', full_name='POGOProtos.Data.Logs.FortSearchLogEntry.fort_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='items', full_name='POGOProtos.Data.Logs.FortSearchLogEntry.items', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='eggs', full_name='POGOProtos.Data.Logs.FortSearchLogEntry.eggs', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _FORTSEARCHLOGENTRY_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=114,
  serialized_end=316,
)
_FORTSEARCHLOGENTRY.fields_by_name['result'].enum_type = _FORTSEARCHLOGENTRY_RESULT
_FORTSEARCHLOGENTRY.fields_by_name['items'].message_type = POGOProtos_dot_Inventory_dot_Item_dot_ItemData__pb2._ITEMDATA
_FORTSEARCHLOGENTRY_RESULT.containing_type = _FORTSEARCHLOGENTRY
DESCRIPTOR.message_types_by_name['FortSearchLogEntry'] = _FORTSEARCHLOGENTRY
FortSearchLogEntry = _reflection.GeneratedProtocolMessageType('FortSearchLogEntry', (_message.Message,), dict(
  DESCRIPTOR = _FORTSEARCHLOGENTRY,
  __module__ = 'POGOProtos.Data.Logs.FortSearchLogEntry_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Data.Logs.FortSearchLogEntry)
  ))
_sym_db.RegisterMessage(FortSearchLogEntry)
# @@protoc_insertion_point(module_scope)
| [
"hoptional@gmail.com"
] | hoptional@gmail.com |
8f6fd8ea81510568275d7b78dfb4006186d7155e | 872c4a47c53cd81ddb1e8b6e3645c67119153d5d | /Reverse Integer.py | ad0436d3d8245245baccedd6429630598d773e01 | [] | no_license | Zahidsqldba07/codesignal_and_leetcode | c58d4714dcdcda76ed43452620a4adb7b9d2be5d | 3d35d711d6edcfb71f49378222a1f57229001dac | refs/heads/master | 2023-08-28T01:13:49.410945 | 2021-10-22T06:22:40 | 2021-10-22T06:22:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | # Given a signed 32-bit integer x, return x with its digits reversed.
# If reversing x causes the value to go outside the signed 32-bit integer range [-231, 231 - 1], then return 0.
# Assume the environment does not allow you to store 64-bit integers (signed or unsigned).
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of ``x``.

        Returns 0 when the reversed magnitude does not fit in a signed
        32-bit integer, as LeetCode 7 requires.
        """
        negative = x < 0
        # Reverse the digit string of the absolute value; int() drops any
        # leading zeros produced by trailing zeros in x (e.g. 120 -> 21).
        magnitude = int(str(abs(x))[::-1])
        # Overflow guard: the reversed magnitude must fit in 32 bits.
        if magnitude > 2 ** 31 - 1:
            return 0
        return -magnitude if negative else magnitude
"danieldavtyan99@gmail.com"
] | danieldavtyan99@gmail.com |
67dcd3ec7cdb0cc71d9f3b762d542f02506fbeb3 | 49ba5356bdc5df7dd9803b56fe507c5164a90716 | /surface-area-of-3d-shapes/solution.py | a1de598aa85c92a605d01dfaf2403263d9ecf1e5 | [] | no_license | uxlsl/leetcode_practice | d80ad481c9d8ee71cce0f3c66e98446ced149635 | d8ed762d1005975f0de4f07760c9671195621c88 | refs/heads/master | 2021-04-25T18:12:28.136504 | 2020-03-11T07:54:15 | 2020-03-11T07:54:15 | 121,472,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | # leetcode
# https://leetcode-cn.com/problems/surface-area-of-3d-shapes/
# Approach:
# Sum the exposed surface area seen from each of the six directions.
class Solution(object):
    # NOTE(review): this first attempt is immediately shadowed by the second
    # `class Solution` definition below, so it is effectively dead code.
    # Its correction terms for interior valleys look suspect — verify before
    # reviving it; the second definition is the one actually in use.
    def surfaceArea(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        N = len(grid)
        area = 0
        # xy plane: front and back faces (translated from the original
        # Chinese comment "xy 正反面")
        for i in range(N):
            h = 0
            v = 0
            for j in range(N):
                if grid[i][j] > 0:
                    # top + bottom faces of a non-empty tower
                    area += 2
                # track row/column maxima (silhouette heights)
                if grid[i][j] > h:
                    h = grid[i][j]
                if grid[j][i] > v:
                    v = grid[j][i]
                # presumably adds hidden side faces of interior valleys —
                # TODO confirm the 2*(neighbor - cell) term is correct
                if j > 0 and j+1 < N and grid[i][j-1] > grid[i][j] < grid[i][j+1]:
                    area +=2*(grid[i][j-1] - grid[i][j])
                if i > 0 and i+1 < N and grid[i-1][j] > grid[i][j] < grid[i+1][j]:
                    area +=2*(grid[i-1][j] - grid[i][j])
            area += 2*v
            area += 2*h
        return area
class Solution(object):
    def surfaceArea(self, grid):
        """Total surface area of the 3D shape formed by tower heights.

        Each non-empty cell contributes its top and bottom faces (2) plus,
        toward each of its four neighbors, the exposed side strip
        ``max(height - neighbor_height, 0)`` (out-of-grid neighbors count
        as height 0).

        :type grid: List[List[int]]
        :rtype: int

        Fix: replaced Python-2-only ``xrange`` with ``range`` (the file is
        otherwise Python 3 compatible and the first definition above
        already uses ``range``).
        """
        N = len(grid)
        ans = 0
        for r in range(N):
            for c in range(N):
                if grid[r][c]:
                    ans += 2  # top and bottom faces
                    for nr, nc in ((r-1, c), (r+1, c), (r, c-1), (r, c+1)):
                        if 0 <= nr < N and 0 <= nc < N:
                            nval = grid[nr][nc]
                        else:
                            nval = 0  # grid boundary: neighbor height 0
                        ans += max(grid[r][c] - nval, 0)
        return ans
| [
"songlin.lin@yunfangdata.com"
] | songlin.lin@yunfangdata.com |
d18e1388e6202f3e3374a6deb68cdb51baa1c64d | f25185fcf2361463a8a12605843fe05a053f6488 | /NeuralCRF/utils/data.py | 1b3d5a6dbaa5f3de16b8170d6960a15c3dac486c | [
"Apache-2.0"
] | permissive | evijit/ledam | 13373c92e8b5354d902721fb4d948c3380e17d45 | a0003cd95ee8a8954581db0bd42a09dc6b1da92f | refs/heads/master | 2021-06-22T10:52:44.358450 | 2020-12-18T18:56:18 | 2020-12-18T18:56:18 | 141,115,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,107 | py | # -*- coding: utf-8 -*-
# @Author: Jie
# @Date: 2017-06-14 17:34:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2018-06-22 00:01:47
from __future__ import print_function
from __future__ import absolute_import
import sys
from .alphabet import Alphabet
from .functions import *
# Prefer the C pickle implementation on Python 2; plain pickle on Python 3.
try:
    import cPickle as pickle
except ImportError:
    import pickle as pickle
# Special vocabulary tokens used by the alphabets.
START = "</s>"
UNKNOWN = "</unk>"
PADDING = "</pad>"
class Data:
    """Holds all corpus, vocabulary, embedding and hyper-parameter state for
    the NCRF++-style sequence labeling pipeline.

    Typical lifecycle: read_config() -> initial_feature_alphabets() ->
    build_alphabet() per split -> fix_alphabet() -> build_pretrain_emb() ->
    generate_instance() per split; save()/load() pickle the whole object.

    Fix applied: write_decoded_results() now tolerates Python 3 (its
    unconditional ``.encode('utf-8') + " "`` raised TypeError there), using
    the same try/except pattern already present in
    write_nbest_decoded_results().
    """
    def __init__(self):
        # Instance-level defaults; most are overwritten by read_config().
        self.MAX_SENTENCE_LENGTH = 250
        self.MAX_WORD_LENGTH = -1
        self.number_normalized = True
        self.norm_word_emb = False
        self.norm_char_emb = False
        self.word_alphabet = Alphabet('word')
        self.char_alphabet = Alphabet('character')
        self.feature_name = []
        self.feature_alphabets = []
        self.feature_num = len(self.feature_alphabets)
        self.feat_config = None
        self.label_alphabet = Alphabet('label',True)
        self.tagScheme = "NoSeg" ## BMES/BIO
        self.seg = True
        ### I/O
        self.train_dir = None
        self.dev_dir = None
        self.test_dir = None
        self.raw_dir = None
        self.decode_dir = None
        self.dset_dir = None ## data vocabulary related file
        self.model_dir = None ## model save file
        self.load_model_dir = None ## model load file
        self.word_emb_dir = None
        self.char_emb_dir = None
        self.feature_emb_dirs = []
        self.train_texts = []
        self.dev_texts = []
        self.test_texts = []
        self.raw_texts = []
        self.train_Ids = []
        self.dev_Ids = []
        self.test_Ids = []
        self.raw_Ids = []
        self.pretrain_word_embedding = None
        self.pretrain_char_embedding = None
        self.pretrain_feature_embeddings = []
        self.label_size = 0
        self.word_alphabet_size = 0
        self.char_alphabet_size = 0
        self.label_alphabet_size = 0
        self.feature_alphabet_sizes = []
        self.feature_emb_dims = []
        self.norm_feature_embs = []
        self.word_emb_dim = 50
        self.char_emb_dim = 30
        ###Networks
        self.word_feature_extractor = "LSTM" ## "LSTM"/"CNN"/"GRU"/
        self.use_char = True
        self.char_feature_extractor = "CNN" ## "LSTM"/"CNN"/"GRU"/None
        self.use_crf = True
        self.nbest = None
        ## Training
        self.average_batch_loss = False
        self.optimizer = "SGD" ## "SGD"/"AdaGrad"/"AdaDelta"/"RMSProp"/"Adam"
        self.status = "train"
        ### Hyperparameters
        self.HP_cnn_layer = 4
        self.HP_iteration = 100
        self.HP_batch_size = 10
        self.HP_char_hidden_dim = 50
        self.HP_hidden_dim = 200
        self.HP_dropout = 0.5
        self.HP_lstm_layer = 1
        self.HP_bilstm = True
        self.HP_gpu = False
        self.HP_lr = 0.015
        self.HP_lr_decay = 0.05
        self.HP_clip = None
        self.HP_momentum = 0
        self.HP_l2 = 1e-8
    def show_data_summary(self):
        """Print a human-readable dump of all configuration and data stats."""
        print("++"*50)
        print("DATA SUMMARY START:")
        print(" I/O:")
        print(" Tag scheme: %s"%(self.tagScheme))
        print(" MAX SENTENCE LENGTH: %s"%(self.MAX_SENTENCE_LENGTH))
        print(" MAX WORD LENGTH: %s"%(self.MAX_WORD_LENGTH))
        print(" Number normalized: %s"%(self.number_normalized))
        print(" Word alphabet size: %s"%(self.word_alphabet_size))
        print(" Char alphabet size: %s"%(self.char_alphabet_size))
        print(" Label alphabet size: %s"%(self.label_alphabet_size))
        print(" Word embedding dir: %s"%(self.word_emb_dir))
        print(" Char embedding dir: %s"%(self.char_emb_dir))
        print(" Word embedding size: %s"%(self.word_emb_dim))
        print(" Char embedding size: %s"%(self.char_emb_dim))
        print(" Norm word emb: %s"%(self.norm_word_emb))
        print(" Norm char emb: %s"%(self.norm_char_emb))
        print(" Train file directory: %s"%(self.train_dir))
        print(" Dev file directory: %s"%(self.dev_dir))
        print(" Test file directory: %s"%(self.test_dir))
        print(" Raw file directory: %s"%(self.raw_dir))
        print(" Dset file directory: %s"%(self.dset_dir))
        print(" Model file directory: %s"%(self.model_dir))
        print(" Loadmodel directory: %s"%(self.load_model_dir))
        print(" Decode file directory: %s"%(self.decode_dir))
        print(" Train instance number: %s"%(len(self.train_texts)))
        print(" Dev instance number: %s"%(len(self.dev_texts)))
        print(" Test instance number: %s"%(len(self.test_texts)))
        print(" Raw instance number: %s"%(len(self.raw_texts)))
        print(" FEATURE num: %s"%(self.feature_num))
        for idx in range(self.feature_num):
            print(" Fe: %s alphabet size: %s"%(self.feature_alphabets[idx].name, self.feature_alphabet_sizes[idx]))
            print(" Fe: %s embedding dir: %s"%(self.feature_alphabets[idx].name, self.feature_emb_dirs[idx]))
            print(" Fe: %s embedding size: %s"%(self.feature_alphabets[idx].name, self.feature_emb_dims[idx]))
            print(" Fe: %s norm emb: %s"%(self.feature_alphabets[idx].name, self.norm_feature_embs[idx]))
        print(" "+"++"*20)
        print(" Model Network:")
        print(" Model use_crf: %s"%(self.use_crf))
        print(" Model word extractor: %s"%(self.word_feature_extractor))
        print(" Model use_char: %s"%(self.use_char))
        if self.use_char:
            print(" Model char extractor: %s"%(self.char_feature_extractor))
            print(" Model char_hidden_dim: %s"%(self.HP_char_hidden_dim))
        print(" "+"++"*20)
        print(" Training:")
        print(" Optimizer: %s"%(self.optimizer))
        print(" Iteration: %s"%(self.HP_iteration))
        print(" BatchSize: %s"%(self.HP_batch_size))
        print(" Average batch loss: %s"%(self.average_batch_loss))
        print(" "+"++"*20)
        print(" Hyperparameters:")
        print(" Hyper lr: %s"%(self.HP_lr))
        print(" Hyper lr_decay: %s"%(self.HP_lr_decay))
        print(" Hyper HP_clip: %s"%(self.HP_clip))
        print(" Hyper momentum: %s"%(self.HP_momentum))
        print(" Hyper l2: %s"%(self.HP_l2))
        print(" Hyper hidden_dim: %s"%(self.HP_hidden_dim))
        print(" Hyper dropout: %s"%(self.HP_dropout))
        print(" Hyper lstm_layer: %s"%(self.HP_lstm_layer))
        print(" Hyper bilstm: %s"%(self.HP_bilstm))
        print(" Hyper GPU: %s"%(self.HP_gpu))
        print("DATA SUMMARY END.")
        print("++"*50)
        sys.stdout.flush()
    def initial_feature_alphabets(self):
        """Discover feature columns (e.g. ``[POS]tag``) from the first line
        of the training file and create one Alphabet per feature column."""
        items = open(self.train_dir,'r',encoding="ISO-8859-1").readline().strip('\n').split()
        total_column = len(items)
        if total_column > 2:
            # Columns between the word (first) and the label (last) are features.
            for idx in range(1, total_column-1):
                feature_prefix = items[idx].split(']',1)[0]+"]"
                self.feature_alphabets.append(Alphabet(feature_prefix))
                self.feature_name.append(feature_prefix)
                print("Find feature: ", feature_prefix)
        self.feature_num = len(self.feature_alphabets)
        self.pretrain_feature_embeddings = [None]*self.feature_num
        self.feature_emb_dims = [20]*self.feature_num
        self.feature_emb_dirs = [None]*self.feature_num
        self.norm_feature_embs = [False]*self.feature_num
        self.feature_alphabet_sizes = [0]*self.feature_num
        if self.feat_config:
            # Override per-feature embedding settings from the config file.
            for idx in range(self.feature_num):
                if self.feature_name[idx] in self.feat_config:
                    self.feature_emb_dims[idx] = self.feat_config[self.feature_name[idx]]['emb_size']
                    self.feature_emb_dirs[idx] = self.feat_config[self.feature_name[idx]]['emb_dir']
                    self.norm_feature_embs[idx] = self.feat_config[self.feature_name[idx]]['emb_norm']
        # exit(0)
    def build_alphabet(self, input_file):
        """Populate word/char/feature/label alphabets from a CoNLL-style
        file (one token per line, blank line between sentences) and infer
        the tag scheme (BIO vs BMES) from the label prefixes."""
        in_lines = open(input_file,'r',encoding="ISO-8859-1").readlines()
        for line in in_lines:
            if len(line) > 2:
                pairs = line.strip().split()
                word = pairs[0]
                if sys.version_info[0] < 3:
                    word = word.decode('utf-8')
                if self.number_normalized:
                    word = normalize_word(word)
                label = pairs[-1]
                self.label_alphabet.add(label)
                self.word_alphabet.add(word)
                ## build feature alphabet
                for idx in range(self.feature_num):
                    feat_idx = pairs[idx+1].split(']',1)[-1]
                    self.feature_alphabets[idx].add(feat_idx)
                for char in word:
                    self.char_alphabet.add(char)
        self.word_alphabet_size = self.word_alphabet.size()
        self.char_alphabet_size = self.char_alphabet.size()
        self.label_alphabet_size = self.label_alphabet.size()
        for idx in range(self.feature_num):
            self.feature_alphabet_sizes[idx] = self.feature_alphabets[idx].size()
        # Detect the tag scheme: presence of S- implies BMES, otherwise BIO.
        startS = False
        startB = False
        for label,_ in self.label_alphabet.iteritems():
            if "S-" in label.upper():
                startS = True
            elif "B-" in label.upper():
                startB = True
        if startB:
            if startS:
                self.tagScheme = "BMES"
            else:
                self.tagScheme = "BIO"
    def fix_alphabet(self):
        """Freeze all alphabets so later lookups cannot grow the vocab."""
        self.word_alphabet.close()
        self.char_alphabet.close()
        self.label_alphabet.close()
        for idx in range(self.feature_num):
            self.feature_alphabets[idx].close()
    def build_pretrain_emb(self):
        """Load pretrained word/char/feature embeddings when dirs are set."""
        if self.word_emb_dir:
            print("Load pretrained word embedding, norm: %s, dir: %s"%(self.norm_word_emb, self.word_emb_dir))
            self.pretrain_word_embedding, self.word_emb_dim = build_pretrain_embedding(self.word_emb_dir, self.word_alphabet, self.word_emb_dim, self.norm_word_emb)
        if self.char_emb_dir:
            print("Load pretrained char embedding, norm: %s, dir: %s"%(self.norm_char_emb, self.char_emb_dir))
            self.pretrain_char_embedding, self.char_emb_dim = build_pretrain_embedding(self.char_emb_dir, self.char_alphabet, self.char_emb_dim, self.norm_char_emb)
        for idx in range(self.feature_num):
            if self.feature_emb_dirs[idx]:
                print("Load pretrained feature %s embedding:, norm: %s, dir: %s"%(self.feature_name[idx], self.norm_feature_embs[idx], self.feature_emb_dirs[idx]))
                self.pretrain_feature_embeddings[idx], self.feature_emb_dims[idx] = build_pretrain_embedding(self.feature_emb_dirs[idx], self.feature_alphabets[idx], self.feature_emb_dims[idx], self.norm_feature_embs[idx])
    def generate_instance(self, name):
        """Read one split ('train'/'dev'/'test'/'raw') into parallel
        (texts, Ids) lists using the frozen alphabets."""
        self.fix_alphabet()
        if name == "train":
            self.train_texts, self.train_Ids = read_instance(self.train_dir, self.word_alphabet, self.char_alphabet, self.feature_alphabets, self.label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH)
        elif name == "dev":
            self.dev_texts, self.dev_Ids = read_instance(self.dev_dir, self.word_alphabet, self.char_alphabet, self.feature_alphabets, self.label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH)
        elif name == "test":
            self.test_texts, self.test_Ids = read_instance(self.test_dir, self.word_alphabet, self.char_alphabet, self.feature_alphabets, self.label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH)
        elif name == "raw":
            self.raw_texts, self.raw_Ids = read_instance(self.raw_dir, self.word_alphabet, self.char_alphabet, self.feature_alphabets, self.label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH)
        else:
            print("Error: you can only generate train/dev/test instance! Illegal input:%s"%(name))
    def write_decoded_results(self, predict_results, name):
        """Write 1-best predictions to self.decode_dir as 'word label' lines,
        one sentence per blank-line-separated block."""
        fout = open(self.decode_dir,'w')
        sent_num = len(predict_results)
        content_list = []
        if name == 'raw':
            content_list = self.raw_texts
        elif name == 'test':
            content_list = self.test_texts
        elif name == 'dev':
            content_list = self.dev_texts
        elif name == 'train':
            content_list = self.train_texts
        else:
            print("Error: illegal name during writing predict result, name should be within train/dev/test/raw !")
        assert(sent_num == len(content_list))
        for idx in range(sent_num):
            sent_length = len(predict_results[idx])
            for idy in range(sent_length):
                ## content_list[idx] is a list with [word, char, label]
                try:  # Python 2: emit utf-8 bytes (same pattern as write_nbest_decoded_results)
                    word = content_list[idx][0][idy].encode('utf-8') + " "
                except TypeError:  # Python 3: encode() yields bytes, bytes + str raises
                    word = content_list[idx][0][idy] + " "
                fout.write(word + predict_results[idx][idy] + '\n')
            fout.write('\n')
        fout.close()
        print("Predict %s result has been written into file. %s"%(name, self.decode_dir))
    def load(self,data_file):
        """Restore all attributes from a pickle written by save()."""
        f = open(data_file, 'rb')
        tmp_dict = pickle.load(f)
        f.close()
        self.__dict__.update(tmp_dict)
    def save(self,save_file):
        """Pickle all attributes (protocol 2 for py2/py3 compatibility)."""
        f = open(save_file, 'wb')
        pickle.dump(self.__dict__, f, 2)
        f.close()
    def write_nbest_decoded_results(self, predict_results, pred_scores, name):
        """Write n-best predictions: a '# score ...' header per sentence,
        then one line per token with the word and its n label candidates."""
        ## predict_results : [whole_sent_num, nbest, each_sent_length]
        ## pred_scores: [whole_sent_num, nbest]
        fout = open(self.decode_dir,'w')
        sent_num = len(predict_results)
        content_list = []
        if name == 'raw':
            content_list = self.raw_texts
        elif name == 'test':
            content_list = self.test_texts
        elif name == 'dev':
            content_list = self.dev_texts
        elif name == 'train':
            content_list = self.train_texts
        else:
            print("Error: illegal name during writing predict result, name should be within train/dev/test/raw !")
        assert(sent_num == len(content_list))
        assert(sent_num == len(pred_scores))
        for idx in range(sent_num):
            sent_length = len(predict_results[idx][0])
            nbest = len(predict_results[idx])
            score_string = "# "
            for idz in range(nbest):
                score_string += format(pred_scores[idx][idz], '.4f')+" "
            fout.write(score_string.strip() + "\n")
            for idy in range(sent_length):
                try: # Will fail with python3
                    label_string = content_list[idx][0][idy].encode('utf-8') + " "
                except:
                    label_string = content_list[idx][0][idy] + " "
                for idz in range(nbest):
                    label_string += predict_results[idx][idz][idy]+" "
                label_string = label_string.strip() + "\n"
                fout.write(label_string)
            fout.write('\n')
        fout.close()
        print("Predict %s %s-best result has been written into file. %s"%(name,nbest, self.decode_dir))
    def read_config(self,config_file):
        """Populate attributes from a key=value config file; keys absent
        from the file leave the constructor defaults untouched."""
        config = config_file_to_dict(config_file)
        ## read data:
        the_item = 'train_dir'
        if the_item in config:
            self.train_dir = config[the_item]
        the_item = 'dev_dir'
        if the_item in config:
            self.dev_dir = config[the_item]
        the_item = 'test_dir'
        if the_item in config:
            self.test_dir = config[the_item]
        the_item = 'raw_dir'
        if the_item in config:
            self.raw_dir = config[the_item]
        the_item = 'decode_dir'
        if the_item in config:
            self.decode_dir = config[the_item]
        the_item = 'dset_dir'
        if the_item in config:
            self.dset_dir = config[the_item]
        the_item = 'model_dir'
        if the_item in config:
            self.model_dir = config[the_item]
        the_item = 'load_model_dir'
        if the_item in config:
            self.load_model_dir = config[the_item]
        the_item = 'word_emb_dir'
        if the_item in config:
            self.word_emb_dir = config[the_item]
        the_item = 'char_emb_dir'
        if the_item in config:
            self.char_emb_dir = config[the_item]
        the_item = 'MAX_SENTENCE_LENGTH'
        if the_item in config:
            self.MAX_SENTENCE_LENGTH = int(config[the_item])
        the_item = 'MAX_WORD_LENGTH'
        if the_item in config:
            self.MAX_WORD_LENGTH = int(config[the_item])
        the_item = 'norm_word_emb'
        if the_item in config:
            self.norm_word_emb = str2bool(config[the_item])
        the_item = 'norm_char_emb'
        if the_item in config:
            self.norm_char_emb = str2bool(config[the_item])
        the_item = 'number_normalized'
        if the_item in config:
            self.number_normalized = str2bool(config[the_item])
        the_item = 'seg'
        if the_item in config:
            self.seg = str2bool(config[the_item])
        the_item = 'word_emb_dim'
        if the_item in config:
            self.word_emb_dim = int(config[the_item])
        the_item = 'char_emb_dim'
        if the_item in config:
            self.char_emb_dim = int(config[the_item])
        ## read network:
        the_item = 'use_crf'
        if the_item in config:
            self.use_crf = str2bool(config[the_item])
        the_item = 'use_char'
        if the_item in config:
            self.use_char = str2bool(config[the_item])
        the_item = 'word_seq_feature'
        if the_item in config:
            self.word_feature_extractor = config[the_item]
        the_item = 'char_seq_feature'
        if the_item in config:
            self.char_feature_extractor = config[the_item]
        the_item = 'nbest'
        if the_item in config:
            self.nbest = int(config[the_item])
        the_item = 'feature'
        if the_item in config:
            self.feat_config = config[the_item] ## feat_config is a dict
        ## read training setting:
        the_item = 'optimizer'
        if the_item in config:
            self.optimizer = config[the_item]
        the_item = 'ave_batch_loss'
        if the_item in config:
            self.average_batch_loss = str2bool(config[the_item])
        the_item = 'status'
        if the_item in config:
            self.status = config[the_item]
        ## read Hyperparameters:
        the_item = 'cnn_layer'
        if the_item in config:
            self.HP_cnn_layer = int(config[the_item])
        the_item = 'iteration'
        if the_item in config:
            self.HP_iteration = int(config[the_item])
        the_item = 'batch_size'
        if the_item in config:
            self.HP_batch_size = int(config[the_item])
        the_item = 'char_hidden_dim'
        if the_item in config:
            self.HP_char_hidden_dim = int(config[the_item])
        the_item = 'hidden_dim'
        if the_item in config:
            self.HP_hidden_dim = int(config[the_item])
        the_item = 'dropout'
        if the_item in config:
            self.HP_dropout = float(config[the_item])
        the_item = 'lstm_layer'
        if the_item in config:
            self.HP_lstm_layer = int(config[the_item])
        the_item = 'bilstm'
        if the_item in config:
            self.HP_bilstm = str2bool(config[the_item])
        the_item = 'gpu'
        if the_item in config:
            self.HP_gpu = str2bool(config[the_item])
        the_item = 'learning_rate'
        if the_item in config:
            self.HP_lr = float(config[the_item])
        the_item = 'lr_decay'
        if the_item in config:
            self.HP_lr_decay = float(config[the_item])
        the_item = 'clip'
        if the_item in config:
            self.HP_clip = float(config[the_item])
        the_item = 'momentum'
        if the_item in config:
            self.HP_momentum = float(config[the_item])
        the_item = 'l2'
        if the_item in config:
            self.HP_l2 = float(config[the_item])
def config_file_to_dict(input_file):
    """Parse an NCRF++-style ``key=value`` config file into a dict.

    Lines starting with '#' are skipped and trailing '# ...' comments are
    stripped.  ``feature=[NAME] emb_size=.. emb_dir=.. emb_norm=..`` lines
    accumulate into a nested dict under the 'feature' key; every other key
    maps to the raw right-hand-side string (callers convert types).

    Fix: the file handle is now closed deterministically via ``with``
    (previously ``open(...).readlines()`` leaked the handle).
    """
    config = {}
    with open(input_file, 'r') as fins:
        for line in fins:
            if len(line) > 0 and line[0] == "#":
                continue
            if "=" in line:
                pair = line.strip().split('#', 1)[0].split('=', 1)
                item = pair[0]
                if item == "feature":
                    # Lazily create the nested feature dict on first use.
                    if item not in config:
                        feat_dict = {}
                        config[item] = feat_dict
                    feat_dict = config[item]
                    new_pair = pair[-1].split()
                    feat_name = new_pair[0]
                    # Per-feature defaults, overridden by trailing key=value pairs.
                    one_dict = {}
                    one_dict["emb_dir"] = None
                    one_dict["emb_size"] = 10
                    one_dict["emb_norm"] = False
                    if len(new_pair) > 1:
                        for idx in range(1, len(new_pair)):
                            conf_pair = new_pair[idx].split('=')
                            if conf_pair[0] == "emb_dir":
                                one_dict["emb_dir"] = conf_pair[-1]
                            elif conf_pair[0] == "emb_size":
                                one_dict["emb_size"] = int(conf_pair[-1])
                            elif conf_pair[0] == "emb_norm":
                                one_dict["emb_norm"] = str2bool(conf_pair[-1])
                    feat_dict[feat_name] = one_dict
                else:
                    if item in config:
                        print("Warning: duplicated config item found: %s, updated."%(pair[0]))
                    config[item] = pair[-1]
    return config
def str2bool(string):
    """Convert a config-file string to bool.

    Any casing of 'true' maps to True (backward-compatible generalization
    of the original exact matches 'True'/'true'/'TRUE'); everything else,
    including 'false', '1' or arbitrary text, maps to False.
    """
    return string.lower() == "true"
| [
"avijitg22@gmail.com"
] | avijitg22@gmail.com |
60e90179d65c27f5cfc3f69124e458ba34c5ba8b | cdb60f4688bedae2b77cdeb5296f772e6d5af96b | /1144-Decrease_Elements_to_Make_Array_Zigzag.py | edb48730f2e2328a57c90a5968aa23453fdec240 | [] | no_license | FomalhautA/Algs | 2c5034dbae6a9c834fabd7a54ffeb32b56fc0138 | c7964d1b921d9a4e821abec02212a69b12891c83 | refs/heads/master | 2023-03-14T07:18:00.972102 | 2021-03-08T09:20:16 | 2021-03-08T09:20:16 | 275,773,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | import copy
class Solution(object):
    def movesToMakeZigzag(self, nums):
        """Minimum number of unit decreases to make ``nums`` zigzag
        (LeetCode 1144): either every even index is a strict peak or every
        odd index is.

        For each parity, each candidate "valley" element independently only
        needs to drop below min(left, right), so the answer for that parity
        is the sum of ``nums[i] - (min(neighbors) - 1)`` over its valley
        positions; take the cheaper parity.  This replaces the previous
        greedy simulation over a deep copy of the list (O(n) extra work via
        ``copy.deepcopy`` and special-cased short inputs).

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        cost = [0, 0]
        # Parity p selects which indices must become valleys.
        for p in (0, 1):
            for i in range(p, n, 2):
                # Out-of-range neighbors impose no constraint.
                left = nums[i - 1] if i > 0 else float('inf')
                right = nums[i + 1] if i + 1 < n else float('inf')
                target = min(left, right) - 1
                if nums[i] > target:
                    cost[p] += nums[i] - target
        return min(cost)
# Ad-hoc demo: print the answer for one sample array when run as a script.
if __name__ == '__main__':
    s = Solution()
    nums = [10, 4, 4, 10, 10, 6, 2, 3]
    steps = s.movesToMakeZigzag(nums)
    print(steps)
| [
"qinhf@taiyi-tech.com"
] | qinhf@taiyi-tech.com |
22952dde76746bc41fd526ad79a60b70f792c86d | 77eedce13cd5343dc5e3fb5806819519524c677b | /Lib/site-packages/ebcli/controllers/abort.py | 5422dfbb7f77c48ed11e37df45340a7b4e67e6f6 | [] | no_license | brianpiperato/Flask-SocialNetwork | ebd13007a88d19b07cc3b46530d64c7b0ed2fcb9 | 6006d7a6f812d3b32154e0d1bb793c7535464283 | refs/heads/master | 2022-11-07T15:48:19.723827 | 2019-11-05T15:35:54 | 2019-11-05T15:35:54 | 150,278,662 | 0 | 1 | null | 2022-10-25T04:13:05 | 2018-09-25T14:26:43 | Python | UTF-8 | Python | false | false | 1,913 | py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings, prompts
from ..objects.exceptions import NotFoundError
from ..core import io
from ..lib import utils
from ..operations import abortops
class AbortController(AbstractBaseController):
    """``eb abort``: cancel an in-progress environment update/deploy."""
    class Meta:
        # cement controller metadata: command name, help text, usage string.
        label = 'abort'
        description = strings['abort.info']
        usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
    def do_command(self):
        """Resolve which environment to abort, then abort it.

        If no environment name was passed on the CLI, pick the only
        abortable environment automatically or prompt the user to choose;
        raises NotFoundError when nothing is abortable.
        """
        app_name = self.get_app_name()
        env_name = self.get_env_name(noerror=True)
        provided_env_name = bool(self.app.pargs.environment_name)
        if not provided_env_name:
            # Ask interactively for an env to abort
            envs = abortops.get_abortable_envs(app_name)
            if len(envs) < 1:
                raise NotFoundError(strings['abort.noabortableenvs'])
            if len(envs) == 1:
                # Don't ask for env, just abort only abortable environment
                env_name = envs[0].name
            else:
                # Ask for env to abort
                io.echo()
                io.echo(prompts['abort.envprompt'])
                env_name = utils.prompt_for_item_in_list(envs).name
        else:
            # Just do the abort if env_name is provided
            pass
        abortops.abort_operation(env_name)
| [
"piperatob1@gmail.com"
] | piperatob1@gmail.com |
9ecf37ebe0a84489aef5072cc5f52747306442c4 | 1281562143ae64bc3d4d874938ca5363c81a4de8 | /python/tools/simple-player-ctrl-remote | cd87a66926e243046ede11a46f7275a9a00a8b81 | [] | no_license | javier-lopez/learn | a5e46afe701ce6012f89fde93d5d78b1ee683dbf | c3072383aec50fd89ecc5bd5f68f6a3746959b0d | refs/heads/master | 2023-04-07T23:55:13.645601 | 2023-03-17T03:02:20 | 2023-03-17T03:02:20 | 673,834 | 99 | 28 | null | 2022-08-26T20:17:50 | 2010-05-18T20:07:45 | HTML | UTF-8 | Python | false | false | 15,573 | #!/usr/bin/env python
#usage: simple-player-ctrl-remote [options]
#based on mplayer-remote v0.0.4, http://www.gwenn.dk/mplayer-remote.html
import BaseHTTPServer, commands, sys, os, pwd, grp, socket, subprocess
from optparse import OptionParser
# Command-line interface: the only option is the TCP port to listen on.
parser = OptionParser(description='%prog - a multimedia player web control', usage='%prog [options]')
parser.add_option("-p", "--port", dest="port", action="store", help='local port to bind (default 80).')
(options, args) = parser.parse_args()

# Default to the standard (privileged) HTTP port when none is given.
if options.port:
    port = int(options.port)
else:
    port = 80

# Binding below 1024 requires root; fail early with a helpful message.
if port < 1024:
    if not os.geteuid() == 0:
        sys.exit(os.path.basename(__file__) + ": root permitions are necessary to bind to port " + str(port) + ", or use -p to specify a non privileged port.")
pagehead = """
<html><head>
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<TITLE>Remote player ctrl</TITLE>
<style type="text/css" media="screen">
body { font-family: Arial, Helvetica, Sans-Serif;
font-size: 16px; background-color:black; color: gray;
min-height:540px; }
body[orient="portrait"] { min-height:540px; }
body[orient="landscape"] { min-height:400px; }width:100%;
a { text-decoration: none; display: block; }
table { width: 90%; border: 1px solid white; }
td { border: none; color: white; text-align: center;}
td.volbar { background-color: lightgray; color: black;}
table.nav { width: 100%; border: none; }
input.txt { width: 10%; height: 5%; font-size: 20px;
background-color:black; color:#fff; border: 1px solid white;
margin: 10px; }
input.icon {display:none}
.svg-icon { width: 2em; height: 2em; padding-right: 1em;}
.svg-icon path, .svg-icon polygon, .svg-icon rect { fill: #fff; }
</style>
<script>
function hideAddressBar() {
if(!window.location.hash) {
if(document.height < window.outerHeight) {
document.body.style.height = (window.outerHeight + 50) + 'px';
}
setTimeout( function(){ window.scrollTo(0, 1); }, 50 );
}
}
window.addEventListener("load", function(){ hideAddressBar(); });
window.addEventListener("orientationchange", hideAddressBar );
</script>
</head>
<body>
<center>
<h1>Player Control</h1>
"""
pagectrl = """
<br/><br/><form action="/control" method="get">
<input type="submit" value="|<" name="player" class="txt">
<input type="submit" value=">||" name="player" class="txt">
<input type="submit" value=">|" name="player" class="txt">
<br/>
<input type="submit" value="<<" name="player" class="txt">
<input type="submit" value="<" name="player" class="txt">
<input type="submit" value=">" name="player" class="txt">
<input type="submit" value=">>" name="player" class="txt">
<br/><h2>Volume</h2>
<label>
<input type="submit" value="-" name="volume" class="icon">
<svg class="svg-icon" viewBox="0 0 20 20">
<path d="M15.938,10c0,1.165 -0.305,2.319 -0.88,3.339c-0.074,0.129 -0.21,0.201 -0.347,0.201c-0.068,0 -0.134,-0.016 -0.197,-0.052c-0.191,-0.107 -0.259,-0.351 -0.149,-0.542c0.508,-0.9 0.776,-1.918 0.776,-2.946c0,-1.028 -0.269,-2.046 -0.776,-2.946c-0.109,-0.191 -0.042,-0.434 0.149,-0.542c0.193,-0.109 0.436,-0.042 0.544,0.149c0.576,1.02 0.88,2.173 0.88,3.339m-2.028,0c0,0.629 -0.119,1.237 -0.354,1.811c-0.063,0.153 -0.211,0.247 -0.368,0.247c-0.05,0 -0.102,-0.01 -0.151,-0.029c-0.203,-0.084 -0.301,-0.317 -0.217,-0.521c0.194,-0.476 0.294,-0.984 0.294,-1.508s-0.1,-1.032 -0.294,-1.508c-0.084,-0.203 0.014,-0.437 0.217,-0.52c0.203,-0.084 0.437,0.014 0.52,0.217c0.234,0.574 0.353,1.184 0.353,1.811m-2.316,-6.773l0,13.546c0,0.161 -0.098,0.307 -0.245,0.368c-0.05,0.021 -0.102,0.03 -0.153,0.03c-0.104,0 -0.205,-0.04 -0.281,-0.117l-3.669,-3.668l-4.816,0c-0.219,0 -0.398,-0.18 -0.398,-0.398l0,-5.976c0,-0.219 0.179,-0.398 0.398,-0.398l4.815,0l3.669,-3.668c0.114,-0.115 0.285,-0.149 0.435,-0.087c0.147,0.061 0.245,0.206 0.245,0.368m-4.582,4.183l-4.184,0l0,5.18l4.184,0l0,-5.18zm3.785,-3.221l-2.988,2.988l0,5.646l2.988,2.988l0,-11.622z"></path>
</svg>
</label>
<label>
<input type="submit" value="mute" name="volume" class="icon">
<svg class="svg-icon" viewBox="0 0 20 20">
<path d="M18.084,11.639c0.168,0.169,0.168,0.442,0,0.611c-0.084,0.084-0.195,0.127-0.306,0.127c-0.111,0-0.221-0.043-0.306-0.127l-1.639-1.639l-1.639,1.639c-0.084,0.084-0.195,0.127-0.306,0.127c-0.111,0-0.222-0.043-0.307-0.127c-0.168-0.169-0.168-0.442,0-0.611L15.223,10l-1.64-1.639c-0.168-0.168-0.168-0.442,0-0.61c0.17-0.169,0.442-0.169,0.612,0l1.639,1.639l1.639-1.639c0.169-0.169,0.442-0.169,0.611,0c0.168,0.168,0.168,0.442,0,0.61L16.445,10L18.084,11.639z M12.161,2.654v14.691c0,0.175-0.105,0.333-0.267,0.4c-0.054,0.021-0.109,0.032-0.166,0.032c-0.111,0-0.223-0.043-0.305-0.127l-3.979-3.979H2.222c-0.237,0-0.432-0.194-0.432-0.432V6.759c0-0.237,0.195-0.432,0.432-0.432h5.222l3.979-3.978c0.123-0.125,0.309-0.163,0.471-0.095C12.056,2.322,12.161,2.479,12.161,2.654 M7.192,7.192H2.654v5.617h4.538V7.192z M11.296,3.698l-3.24,3.241v6.123l3.24,3.24V3.698z"></path>
</svg>
</label>
<label>
<input type="submit" value="+" name="volume" class="icon">
<svg class="svg-icon" viewBox="0 0 20 20">
<path d="M17.969,10c0,1.707-0.5,3.366-1.446,4.802c-0.076,0.115-0.203,0.179-0.333,0.179c-0.075,0-0.151-0.022-0.219-0.065c-0.184-0.122-0.233-0.369-0.113-0.553c0.86-1.302,1.314-2.812,1.314-4.362s-0.454-3.058-1.314-4.363c-0.12-0.183-0.07-0.43,0.113-0.552c0.186-0.12,0.432-0.07,0.552,0.114C17.469,6.633,17.969,8.293,17.969,10 M15.938,10c0,1.165-0.305,2.319-0.88,3.339c-0.074,0.129-0.21,0.201-0.347,0.201c-0.068,0-0.134-0.016-0.197-0.052c-0.191-0.107-0.259-0.351-0.149-0.542c0.508-0.9,0.776-1.918,0.776-2.946c0-1.028-0.269-2.046-0.776-2.946c-0.109-0.191-0.042-0.434,0.149-0.542c0.193-0.109,0.436-0.042,0.544,0.149C15.634,7.681,15.938,8.834,15.938,10 M13.91,10c0,0.629-0.119,1.237-0.354,1.811c-0.063,0.153-0.211,0.247-0.368,0.247c-0.05,0-0.102-0.01-0.151-0.029c-0.203-0.084-0.301-0.317-0.217-0.521c0.194-0.476,0.294-0.984,0.294-1.508s-0.1-1.032-0.294-1.508c-0.084-0.203,0.014-0.437,0.217-0.52c0.203-0.084,0.437,0.014,0.52,0.217C13.791,8.763,13.91,9.373,13.91,10 M11.594,3.227v13.546c0,0.161-0.098,0.307-0.245,0.368c-0.05,0.021-0.102,0.03-0.153,0.03c-0.104,0-0.205-0.04-0.281-0.117l-3.669-3.668H2.43c-0.219,0-0.398-0.18-0.398-0.398V7.012c0-0.219,0.179-0.398,0.398-0.398h4.815l3.669-3.668c0.114-0.115,0.285-0.149,0.435-0.087C11.496,2.92,11.594,3.065,11.594,3.227 M7.012,7.41H2.828v5.18h4.184V7.41z M10.797,4.189L7.809,7.177v5.646l2.988,2.988V4.189z"></path>
</svg>
</label>
<br/><h2>Session</h2>
<label>
<input type="submit" value="lock" name="session" class="icon">
<svg class="svg-icon" viewBox="0 0 20 20">
<path d="M17.308,7.564h-1.993c0-2.929-2.385-5.314-5.314-5.314S4.686,4.635,4.686,7.564H2.693c-0.244,0-0.443,0.2-0.443,0.443v9.3c0,0.243,0.199,0.442,0.443,0.442h14.615c0.243,0,0.442-0.199,0.442-0.442v-9.3C17.75,7.764,17.551,7.564,17.308,7.564 M10,3.136c2.442,0,4.43,1.986,4.43,4.428H5.571C5.571,5.122,7.558,3.136,10,3.136 M16.865,16.864H3.136V8.45h13.729V16.864z M10,10.664c-0.854,0-1.55,0.696-1.55,1.551c0,0.699,0.467,1.292,1.107,1.485v0.95c0,0.243,0.2,0.442,0.443,0.442s0.443-0.199,0.443-0.442V13.7c0.64-0.193,1.106-0.786,1.106-1.485C11.55,11.36,10.854,10.664,10,10.664 M10,12.878c-0.366,0-0.664-0.298-0.664-0.663c0-0.366,0.298-0.665,0.664-0.665c0.365,0,0.664,0.299,0.664,0.665C10.664,12.58,10.365,12.878,10,12.878"></path>
</svg>
</label>
<label>
<input type="submit" value="suspend" name="session" class="icon">
<svg class="svg-icon" viewBox="0 0 20 20">
<path fill="none" d="M5.163,5.768c0.167,0.167,0.438,0.167,0.605,0c0.167-0.167,0.167-0.438,0-0.604L3.953,3.349c-0.167-0.167-0.438-0.167-0.604,0c-0.167,0.167-0.167,0.437,0,0.604L5.163,5.768z M14.837,5.768l1.814-1.814c0.167-0.167,0.167-0.438,0-0.604c-0.168-0.167-0.438-0.167-0.605,0l-1.813,1.814c-0.167,0.167-0.167,0.437,0,0.604C14.399,5.935,14.67,5.935,14.837,5.768 M10,4.014c0.236,0,0.428-0.191,0.428-0.428V1.021c0-0.236-0.192-0.428-0.428-0.428S9.572,0.785,9.572,1.021v2.565C9.572,3.823,9.764,4.014,10,4.014 M18.979,10h-3.848c0-2.833-2.297-5.131-5.131-5.131c-2.833,0-5.131,2.297-5.131,5.131H1.021c-0.236,0-0.428,0.191-0.428,0.428s0.192,0.428,0.428,0.428h17.957c0.236,0,0.428-0.191,0.428-0.428S19.215,10,18.979,10 M5.725,10c0-2.361,1.914-4.275,4.275-4.275S14.276,7.639,14.276,10H5.725zM12.565,15.985H11.71v-2.138c0-0.235-0.191-0.427-0.428-0.427H8.717c-0.236,0-0.428,0.191-0.428,0.427v2.138H7.435c-0.235,0-0.427,0.191-0.427,0.428c0,0.118,0.047,0.226,0.125,0.304l2.565,2.564c0.077,0.078,0.185,0.125,0.302,0.125s0.225-0.047,0.302-0.125l2.565-2.564c0.078-0.078,0.126-0.186,0.126-0.304C12.993,16.177,12.802,15.985,12.565,15.985 M10,18.374l-1.533-1.533h0.25c0.236,0,0.428-0.191,0.428-0.428v-2.138h1.709v2.138c0,0.236,0.192,0.428,0.428,0.428h0.251L10,18.374z"></path>
</svg>
</label>
<label>
<input type="submit" value="hibernate" name="session" class="icon">
<svg class="svg-icon" viewBox="0 0 20 20">
<path fill="none" d="M10.544,8.717l1.166-0.855l1.166,0.855l-0.467-1.399l1.012-0.778h-1.244L11.71,5.297l-0.466,1.244H10l1.011,0.778L10.544,8.717z M15.986,9.572l-0.467,1.244h-1.244l1.011,0.777l-0.467,1.4l1.167-0.855l1.165,0.855l-0.466-1.4l1.011-0.777h-1.244L15.986,9.572z M7.007,6.552c0-2.259,0.795-4.33,2.117-5.955C4.34,1.042,0.594,5.07,0.594,9.98c0,5.207,4.211,9.426,9.406,9.426c2.94,0,5.972-1.354,7.696-3.472c-0.289,0.026-0.987,0.044-1.283,0.044C11.219,15.979,7.007,11.759,7.007,6.552 M10,18.55c-4.715,0-8.551-3.845-8.551-8.57c0-3.783,2.407-6.999,5.842-8.131C6.549,3.295,6.152,4.911,6.152,6.552c0,5.368,4.125,9.788,9.365,10.245C13.972,17.893,11.973,18.55,10,18.55 M19.406,2.304h-1.71l-0.642-1.71l-0.642,1.71h-1.71l1.39,1.069l-0.642,1.924l1.604-1.176l1.604,1.176l-0.642-1.924L19.406,2.304z"></path>
</svg>
</label>
</form>"""
pageend = """</center></body></html>"""
# Maps the URL-encoded button labels posted by the web form to the
# subcommands understood by the external "player-ctrl" helper
# (the %XX keys are the encoded |<, <<, <, >||, >, >>, >| buttons).
ctrl_cmd = {"%7C%3C" :"prev", # |<
            "%3C%3C" :"seek -20%", # <<
            "%3C" :"seek -5%", # <
            "%3E%7C%7C" :"toggle", # >||
            "%3E" :"seek +5%", # >
            "%3E%3E" :"seek +20%", # >>
            "%3E%7C" :"next", # >|
            "fullscreen":"fullscreen" # fullscreen
            }
# ALSA mixer commands for the volume buttons ("%2B" is an URL-encoded '+').
vol_cmd = {"%2B" :"amixer --quiet set Master 1%+", #+
           "-" :"amixer --quiet set Master 1%-", #-
           "mute":"amixer --quiet set Master toggle"} #mute
# Session actions forwarded to the external "dmenu-session" helper.
session_cmd = { "lock" :"lock",
                "suspend" :"suspend",
                "hibernate" :"hibernate"}
def drop_privileges():
    """Drop root privileges back to the user who invoked sudo.

    Called after the server socket is bound, so a privileged port can be
    opened first and the process then continues as the original user.
    """
    if os.getuid() != 0:
        # Not root: nothing to drop.
        return

    # Look up the uid/gid of the invoking user.
    # NOTE(review): assumes the script was started via sudo -- if SUDO_USER
    # is unset, getpwnam(None) raises; confirm direct-root runs are not a
    # supported case.
    user_name = os.getenv("SUDO_USER")
    pwnam = pwd.getpwnam(user_name)

    # Remove supplementary group privileges.
    os.setgroups([])

    # Switch gid before uid (the reverse order would lose the right to setgid).
    os.setgid(pwnam.pw_gid)
    os.setuid(pwnam.pw_uid)

    # Ensure a reasonable umask (the previous value is not restored anywhere).
    old_umask = os.umask(0o22)
    print "Dropping user privileges: root -> " + user_name
def execute(cmd):
    """Run *cmd* through the shell and return its combined output.

    Failures are reported to stderr but the (error) output is still
    returned, so callers always receive a string.  Uses the Python 2
    'commands' module, which runs the command via a shell.
    """
    (stat, out) = commands.getstatusoutput(cmd)
    if stat != 0:
        print >>sys.stderr, "ERROR: '"+cmd+"' returned "+ str(out)
    return out
class myHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handles all http requests."""
    def __init__(self, *args):
        """Initialise the handler (called each time we handle a request)"""
        BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args)

    def write(self, x):
        """Send some html code to the browser"""
        return self.wfile.write(x)

    def handleCmd(self,param):
        """Dispatch the submitted form values.

        *param* maps form field names ('player', 'volume', 'session') to
        the still-URL-encoded button labels; the module-level *_cmd dicts
        translate those into shell commands.  Unknown values are silently
        ignored.
        """
        if "player" in param:
            if param["player"] in ctrl_cmd:
                # Forward the playback command to the external helper and
                # echo its output back to the browser.
                self.write(execute("player-ctrl " + ctrl_cmd[param["player"]]))
                print "exec: player-ctrl " + ctrl_cmd[param["player"]]
        if "volume" in param:
            if param["volume"] in vol_cmd:
                self.write(execute(vol_cmd[param["volume"]]))
                print "exec: " + vol_cmd[param["volume"]]
            # Report the resulting volume (or MUTED) back to the browser.
            vol = execute("amixer get 'Master',0|egrep -m1 -o '[0-9]{1,3}%'")
            audio_enabled = execute("amixer get 'Master',0|egrep -o '\[o.+\]'")
            if vol != "0%" and audio_enabled == "[on]":
                self.write('Volume: ' +vol)
            else:
                self.write('MUTED')
        if "session" in param:
            if param["session"] in session_cmd:
                # Fire and forget -- lock/suspend/hibernate may not return
                # control promptly, so do not wait for the child process.
                subprocess.Popen(["dmenu-session", session_cmd[param["session"]]])
                print "exec: dmenu-session " + session_cmd[param["session"]] + " &"
                if param["session"] == "lock":
                    self.write("Interactive session LOCKED")
                elif param["session"] == "suspend":
                    self.write("System SUSPENDED");
                elif param["session"] == "hibernate":
                    self.write("System HIBERNATED");

    def volumebar(self):
        """Get the current volume setting and plot it as a "bar diagram", or
        if muted, indicate this"""
        out = execute("amixer get Master").split()  # NOTE(review): unused
        vol = execute("amixer get 'Master',0|egrep -m1 -o '[0-9]{1,3}%'")
        audio_enabled = execute("amixer get 'Master',0|egrep -o '\[o.+\]'")
        if vol != "0%" and audio_enabled == "[on]":
            self.write('<table width="99%" border=1><tr><td width="'
                       +vol+'" class="volbar">'+vol+'</td><td></td></tr></table>')
        else:
            self.write('<table width="99%" border=1><tr><td>Muted</td></tr></table>')

    def do_GET(self):
        """Handle the GET request. This identifies the page that is
        being served, and handles the submitted form variables

        NOTE(review): always answers 200 and renders the same control page
        regardless of the path; query parsing assumes key=value pairs.
        """
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        self.write(pagehead)
        file = self.path.split('/')[-1]
        paramstr=file.split('?')
        print "args:", paramstr
        param = {}
        if len(paramstr) > 1:
            # Parse the query string into a dict (values stay URL-encoded;
            # the *_cmd dicts key on the encoded form).
            for p in paramstr[1].split('&'):
                s = p.split('=')
                param[s[0]] = s[1]
            self.handleCmd(param)
        # self.volumebar()
        self.write(pagectrl)
        self.write(pageend)
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
    """
    This is a simple change to the normal basehttp server which is
    not blocked waiting for HTTP connections

    accept() runs with a 1-second timeout so the accept loop re-checks
    self.run regularly and can be told to stop.
    """
    def server_bind(self):
        BaseHTTPServer.HTTPServer.server_bind(self)
        # A short timeout turns the blocking accept() into a polling loop.
        self.socket.settimeout(1)
        self.run = True

    def get_request(self):
        # Poll for a connection until one arrives or self.run is cleared.
        while self.run:
            try:
                sock, addr = self.socket.accept()
                # Hand the connection back with blocking behaviour restored.
                sock.settimeout(None)
                return (sock, addr)
            except socket.timeout:
                pass
def run():
    """
    This is the "main" function, that gets called from the
    end of the file, after everything has been parsed.

    Binds the HTTP server first (root may be needed for ports < 1024),
    then drops privileges before serving requests.
    """
    try:
        server = StoppableHTTPServer(('', port), myHandler)
        print 'Started httpserver on port ' , port
        drop_privileges()
        server.serve_forever()
    except KeyboardInterrupt:
        print '^C received, shutting down the web server'
    finally:
        # 'server' is missing from locals() when binding itself failed.
        if 'server' in locals():
            server.socket.close()

# Finally, just execute it all
run()
| [
"m@javier.io"
] | m@javier.io | |
518efa12cdac4e7d9987566f0fde5f3ba06a7022 | f64cce55bc3fb1f2de624a69d5ae10e10dc56999 | /trigram_creation.py | c92ef138fb12dba1d823d4a29b148b81bc7c171e | [] | no_license | shricharanks108/MLCourse_GroupE | c9b58ed65af6ba076d5da7f758bdbf25dea9a66b | 20977fd6546bbdd9053051730af97a6af2125299 | refs/heads/master | 2022-12-04T01:19:33.135117 | 2020-08-23T13:34:19 | 2020-08-23T13:34:19 | 288,582,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | training_set = []
with open('imdb_train.csv') as file:
training_set = file.readlines()
# Using CountVectorizer
Cvectorizer = CountVectorizer(ngram_range=(3, 3))
count_vec_model = Cvectorizer.fit_transform(training_set)
f = open("Cvector_Score.txt", "a")
f.write(str(count_vec_model.toarray()))
f.close()
# Applying TFIDF
Tvectorizer = TfidfVectorizer(ngram_range=(3, 3))
TFIDF_vec_model = Tvectorizer.fit_transform(training_set)
TFIDF_scores = (TFIDF_vec_model.toarray())
f = open("TFIDF_scores.txt", "a")
f.write(str(TFIDF_scores))
f.close()
| [
"noreply@github.com"
] | shricharanks108.noreply@github.com |
7856572f45aa7aac250552352e2eaa3974420458 | 7b358ca457bb2318b2cc4c74a3d43f97cb979a2a | /ags.py | 58e1725922d06e236f8d0f4e106f02851fb50c84 | [
"MIT"
] | permissive | rkelson/agrc.python | df3e266b6da6ecc108812d40527c1a4575909662 | d2372f3be89997c69b6d990cb0b948f9c31880ce | refs/heads/master | 2020-05-29T12:21:23.521210 | 2016-09-29T19:57:36 | 2016-09-29T19:57:36 | 15,258,499 | 0 | 0 | null | 2016-09-29T19:57:36 | 2013-12-17T15:43:41 | Python | UTF-8 | Python | false | false | 4,372 | py | import requests
import json
from time import time
# Admin REST API endpoint templates.  baseUrl keeps a literal '{}'
# placeholder for the server address: .format(baseUrl) below only fills the
# *template's* braces, so the derived URLs still contain '{}' and are later
# completed with e.g. tokenUrl.format(self.server).
baseUrl = r'http://{}:6080/arcgis/admin/'
tokenUrl = r'{}generateToken'.format(baseUrl)
servicesUrl = r'{}services'.format(baseUrl)
class AGSAdmin:
    """
    Provides methods for administering arcgis server

    All calls go through the ArcGIS Server admin REST API on port 6080,
    authenticating with a short-lived token that request() refreshes
    automatically once it has expired.
    """
    # Class-level defaults; __init__ overwrites username/password/server.
    username = ''
    password = ''
    server = ''
    token = ''
    tokenExpireDate = 0  # token expiry time, epoch milliseconds (see request())
    payload = None  # NOTE(review): appears unused anywhere in this class
    # NOTE(review): mutable class attribute -- every instance shares this
    # list until getServices() rebinds it; fine for single-instance use.
    services = []
    # Template for the "nothing to do" message returned by editService().
    # NOTE(review): the doubled quote after the first {} looks like a typo,
    # but it is a runtime string so it is left untouched here.
    noChangeMsg = "'{}'' is already set to '{}'. No changes made."

    def __init__(self, username, password, server):
        """
        username: String
        password: String
        server: String
            ip address of the server that you are admining
        """
        self.server = server
        self.username = username
        self.password = password

        # Fetch an admin token immediately so later calls can authenticate.
        self.getToken()

    def getToken(self):
        """Request a fresh admin token and remember it with its expiry time."""
        data = {'username': self.username,
                'password': self.password,
                'client': 'requestip',
                'f': 'json'}
        r = requests.post(tokenUrl.format(self.server), data=data)
        r.raise_for_status()
        r = r.json()
        self.checkError(r)
        self.token = r['token']
        self.tokenExpireDate = r['expires']  # epoch milliseconds

    def getServices(self):
        """Collect every service (root listing plus one level of folders)
        into self.services and return that list."""
        def getServicesForFolder(folder):
            # folder=None means the root services listing.
            if folder is not None:
                url = servicesUrl.format(self.server) + r'/{}'.format(folder)
            else:
                url = servicesUrl.format(self.server)
            responseJson = self.request(url)
            self.services = self.services + responseJson['services']
            # NOTE(review): bare except plus implicit None return -- if the
            # root response had no 'folders' key, the for-loop below would
            # iterate None and raise TypeError; presumably the root listing
            # always includes 'folders'.
            try:
                return responseJson['folders']
            except:
                pass
        for folder in getServicesForFolder(None):
            getServicesForFolder(folder)
        return self.services

    def editService(self, service, type, property, value):
        """Set one property of a service's JSON definition via the admin
        API's edit endpoint; returns the server reply, or the no-change
        message when the value is already set.

        NOTE(review): parameters 'type' and 'property' shadow builtins, but
        they are part of the public signature so they are not renamed here.
        """
        url = '{}/{}.{}'.format(servicesUrl.format(self.server),
                                service,
                                type)
        serviceJson = self.request(url)
        if property not in serviceJson.keys():
            raise Exception('Property: {} not found!'.format(property))
        if serviceJson[property] == value:
            # Avoid a needless service edit/restart when nothing would change.
            return self.noChangeMsg.format(property, value)
        serviceJson[property] = value
        return self.request('{}/edit'.format(url), {'service': json.dumps(serviceJson)})

    def getServiceProperty(self, service, type, property):
        """Return one property of a service's JSON definition."""
        url = '{}/{}.{}'.format(servicesUrl.format(self.server),
                                service,
                                type)
        serviceJson = self.request(url)
        if property not in serviceJson.keys():
            raise Exception('Property: {} not found!'.format(property))
        return serviceJson[property]

    def stopService(self, service, type):
        """Stop a service."""
        return self._commandService(service, type, 'stop')

    def startService(self, service, type):
        """Start a service."""
        return self._commandService(service, type, 'start')

    def getStatus(self, service, type):
        """Return the status JSON of a service."""
        return self._commandService(service, type, 'status')

    def _commandService(self, service, type, command):
        # Shared helper for the start/stop/status endpoints.
        url = '{}/{}.{}/{}'.format(servicesUrl.format(self.server),
                                   service,
                                   type,
                                   command)
        return self.request(url)

    def request(self, url, additionalData={}):
        """POST to an admin URL with the auth token added and return the
        parsed JSON response, raising on HTTP or API-level errors.

        NOTE(review): the mutable default is only read, never mutated, so
        it is harmless here.  dict(a.items() + b.items()) is Python-2-only.
        """
        # check to make sure that token isn't expired
        # (tokenExpireDate is in milliseconds, hence time()*1000)
        if self.tokenExpireDate <= time()*1000:
            self.getToken()
        data = dict(additionalData.items() + {'f': 'json', 'token': self.token}.items())
        r = requests.post(url, data=data)
        r.raise_for_status()
        self.checkError(r.json())
        return r.json()

    def checkError(self, jsonResponse):
        """Raise when the API reports an error inside its JSON envelope
        (the HTTP status can be 200 even for failed admin calls)."""
        if 'status' in jsonResponse.keys() and jsonResponse['status'] == 'error':
            raise Exception('; '.join(jsonResponse['messages']))

    def startAllServices(self):
        """Start every service known to the server (Python 2 print syntax)."""
        if len(self.services) == 0:
            self.getServices()
        for s in self.services:
            serv = s['folderName'] + '//' + s['serviceName']
            print 'starting {}'.format(serv)
            self.startService(serv, s['type'])
"scott.sheri@gmail.com"
] | scott.sheri@gmail.com |
0b8ac0fd48b23f6221dadd4080dfadd5e98b0079 | 83f04821ce65c367a8e3456ebaeac20c15de2b10 | /concurrent/cache.py | e5b2716607aaf30b78592e84aae7d0d7e4d2094f | [
"MIT"
] | permissive | Mochis/curry-grib-runner | 19ca60762e4085d67ed3e6485a153ac327b91370 | 628dbdf5ad9c06a246fd65f44772f31ebe127079 | refs/heads/master | 2022-06-12T01:35:56.216799 | 2022-05-20T16:35:03 | 2022-05-20T16:35:03 | 198,517,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | import threading
# Implements a cache using a dict() object
class Cache:
    """Thread-safe, insert-once, in-memory cache backed by a plain dict.

    put_value() never overwrites an existing entry; get_value() returns
    None for missing keys.
    """

    def __init__(self):
        self.cache = dict()
        self.lock = threading.Lock()

    def put_value(self, key, value):
        """Store *value* under *key* unless the key is already present.

        Using the lock as a context manager guarantees it is released even
        if the insertion raises (e.g. an unhashable key) -- the previous
        manual acquire/release would leave the lock held forever in that
        case, deadlocking every later call.
        """
        with self.lock:
            if key not in self.cache:
                self.cache[key] = value

    def get_value(self, key):
        """Return the cached value for *key*, or None when absent."""
        with self.lock:
            return self.cache.get(key, None)
| [
"mochi@MacBook-Pro-de-JuanJo.local"
] | mochi@MacBook-Pro-de-JuanJo.local |
ce978302f88b0065282a8fb57be6ec347d9e2012 | 2fabea234735beefc980b77b213fcb0dfb394980 | /tensorflow_probability/python/distributions/deprecated_linalg_test.py | e30bf6de1138043acd8d2544bd85b4b5b72eabca | [
"Apache-2.0"
] | permissive | tarrou/probability | 0eee452b525a6e6b3c7c98d467468e47f07e861b | d4d80a1c04ad0b3e98758ebc3f7f82887274384d | refs/heads/master | 2020-08-08T11:16:42.441268 | 2019-12-06T17:35:17 | 2019-12-06T17:35:17 | 213,819,828 | 0 | 0 | Apache-2.0 | 2019-10-09T04:20:19 | 2019-10-09T04:20:19 | null | UTF-8 | Python | false | false | 2,518 | py | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for deprecated_linalg functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions.deprecated_linalg import tridiag
from tensorflow_probability.python.internal import test_case
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class TridiagTest(test_case.TestCase):
  """Unit tests for the deprecated `tridiag` matrix constructor."""

  def testWorksCorrectlyNoBatches(self):
    # Single 4x4 tridiagonal matrix built from sub-, main- and
    # super-diagonals (that is the argument order of tridiag()).
    self.assertAllEqual(
        [[4., 8., 0., 0.],
         [1., 5., 9., 0.],
         [0., 2., 6., 10.],
         [0., 0., 3, 7.]],
        self.evaluate(tridiag(
            [1., 2., 3.],
            [4., 5., 6., 7.],
            [8., 9., 10.])))

  def testWorksCorrectlyBatches(self):
    # Batch of two 4x4 matrices; assertAllClose because the second batch
    # member uses non-integral floats.
    self.assertAllClose(
        [[[4., 8., 0., 0.],
          [1., 5., 9., 0.],
          [0., 2., 6., 10.],
          [0., 0., 3, 7.]],
         [[0.7, 0.1, 0.0, 0.0],
          [0.8, 0.6, 0.2, 0.0],
          [0.0, 0.9, 0.5, 0.3],
          [0.0, 0.0, 1.0, 0.4]]],
        self.evaluate(tridiag(
            [[1., 2., 3.],
             [0.8, 0.9, 1.]],
            [[4., 5., 6., 7.],
             [0.7, 0.6, 0.5, 0.4]],
            [[8., 9., 10.],
             [0.1, 0.2, 0.3]])),
        rtol=1e-5, atol=0.)

  def testHandlesNone(self):
    # Omitted sub/super diagonals default to zeros, so giving only `diag`
    # yields purely diagonal matrices.
    self.assertAllClose(
        [[[4., 0., 0., 0.],
          [0., 5., 0., 0.],
          [0., 0., 6., 0.],
          [0., 0., 0, 7.]],
         [[0.7, 0.0, 0.0, 0.0],
          [0.0, 0.6, 0.0, 0.0],
          [0.0, 0.0, 0.5, 0.0],
          [0.0, 0.0, 0.0, 0.4]]],
        self.evaluate(tridiag(
            diag=[[4., 5., 6., 7.],
                  [0.7, 0.6, 0.5, 0.4]])),
        rtol=1e-5, atol=0.)
if __name__ == '__main__':
  # Discover and run the test cases above under the TF test runner.
  tf.test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
65b2949c009f8ad20cc04f691b5311b5128a254e | d661dc74491613029701f8c36351c5ffb4e27b0b | /mmo/server/database.py | 83a25d42adf71d694b8f29804a73dd3834c22e26 | [] | no_license | rafael146/workufal | d0d1b1507eab709dc67a056a5d6bee03231a9b67 | f17de5dcfe057df28e956213737a95321693e848 | refs/heads/master | 2021-01-15T19:28:12.854119 | 2015-04-23T13:21:08 | 2015-04-23T13:21:08 | 34,455,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | #Autor: Alisson Oliveira
from MySQLdb import Connect, escape_string
from MySQLdb.cursors import DictCursor
#Move this values to config
HOST = '127.0.0.1'
USER = 'root'
PWD = 'root'
DB = 'mmo'
class DatabaseManager(object):
    """Thin wrapper around a single MySQLdb connection.

    Obtain the shared instance through DatabaseManager.getInstance()
    instead of constructing new connections.
    """

    def __init__(self):
        # DictCursor makes fetchall() return rows as dicts (column -> value),
        # which is the shape ResultSet expects.
        self.db = Connect(host=HOST, user=USER, passwd=PWD, db=DB, cursorclass=DictCursor)
        self.db.autocommit(True)

    @staticmethod
    def getInstance():
        """Return the process-wide shared DatabaseManager."""
        return SingletonHolder.INSTANCE

    def query(self, sql, *params):
        """Run *sql* with bound *params* and return the rows as a ResultSet.

        Parameters are escaped by cursor.execute() itself.  The previous
        code additionally ran escape_string() over the whole SQL template,
        which corrupted legitimate quotes in the statement and added no
        protection, so that call was removed.
        """
        cur = self.db.cursor()
        try:
            cur.execute(sql, *params)
            return ResultSet(cur.fetchall())
        finally:
            # Always release the cursor, even when execute() raises.
            cur.close()
class ResultSet(object):
    """Forward-only cursor over a sequence of dict-shaped rows.

    Call next() to advance; the typed getters read columns from the
    current row (self.row).
    """

    def __init__(self, result):
        # Copy into a mutable list; rows are consumed front-to-back.
        self.result = list(result)
        self.row = None

    def next(self):
        """Advance to the next row; return True if one was available."""
        if not self.result:
            self.row = None
            return False
        self.row = self.result.pop(0)
        return True

    def empty(self):
        """Return True when no unread rows remain."""
        return not self.result

    def getInt(self, column):
        """Current row's *column* coerced to int."""
        return int(self.row[column])

    def getLong(self, column):
        """Current row's *column* coerced to long (Python 2 only)."""
        return long(self.row[column])

    def getfloat(self, column):
        """Current row's *column* coerced to float."""
        return float(self.row[column])

    def getString(self, column):
        """Current row's *column* as stored (no coercion)."""
        return self.row[column]
class SingletonHolder(object):
    # Holds the shared DatabaseManager used by getInstance().
    # NOTE(review): instantiating here means a database connection is
    # opened as a side effect of importing this module -- confirm that is
    # intended.
    INSTANCE = DatabaseManager()
| [
"Alisson Oliveira@cd46cd7e-7c3c-4829-9a36-9a7bc2eed6a4"
] | Alisson Oliveira@cd46cd7e-7c3c-4829-9a36-9a7bc2eed6a4 |
a6d5eda42a50060fe71b8e3abac6a3debb0e2b97 | 8953e89983ce8a7fec3e93fea90a1c306d8886b3 | /App/forms/UserForms.py | 079f2b95abf662d5a40a26ac4920f9d441cd3965 | [] | no_license | mhasan-t/Rabo | ea6011999b5d5cb8dfc3d0c00ccda5a46c09c1b5 | b8166760fc68692e5a7240e7a0000d035e27c7c6 | refs/heads/master | 2023-07-09T09:30:06.708699 | 2021-06-18T10:31:13 | 2021-06-18T10:31:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | from django import forms
class CreateUserForm(forms.Form):
    """Sign-up form: name, e-mail and a password typed twice."""

    first_name = forms.CharField(max_length=20, widget=forms.TextInput(
        attrs={'placeholder': 'enter your first name'}))
    last_name = forms.CharField(max_length=20, widget=forms.TextInput(
        attrs={'placeholder': 'enter your last name'}))
    email = forms.EmailField(widget=forms.EmailInput(
        attrs={'placeholder': 'enter your e-mail'}))
    password = forms.CharField(widget=forms.PasswordInput(
        attrs={'placeholder': 'enter your password'}))
    confirm_password = forms.CharField(widget=forms.PasswordInput(
        attrs={'placeholder': 'enter your password again'}))

    def clean(self):
        """Cross-field validation: both password fields must match.

        Returns cleaned_data as the Django forms contract expects -- the
        original implicitly returned None, which older Django versions
        interpret as "no cleaned data".
        """
        cleaned_data = super(CreateUserForm, self).clean()
        password = cleaned_data.get("password")
        confirm_password = cleaned_data.get("confirm_password")

        if password != confirm_password:
            raise forms.ValidationError(
                "password and confirm_password does not match"
            )
        return cleaned_data
class LoginUserForm(forms.Form):
    """Login form: e-mail plus password."""
    email = forms.EmailField(widget=forms.EmailInput(
        attrs={'placeholder': 'enter your e-mail'}))
    password = forms.CharField(widget=forms.PasswordInput(
        attrs={'placeholder': 'enter your password'}))
| [
"tahnoonn19@gmail.com"
] | tahnoonn19@gmail.com |
0132b1722e94891601c8cc8372386775e645a23c | d989b8504599c17201215c343c2111525cfa2706 | /FeatureEng.cluster3.py | 7e750e6dc75a2385b8f51482787d29a09f560fa6 | [] | no_license | salilkhandelwal/Predict-Future-Sales | 3c53b16d1683598ab8a057a0807e031a1caaa8e1 | 6dbf7f7f57c5e05033cd710a59a335db1014f992 | refs/heads/master | 2022-04-10T19:36:09.565058 | 2020-03-26T18:48:05 | 2020-03-26T18:48:05 | 250,343,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | import numpy as np
import pandas as pd
import random as rd
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.stattools import adfuller, acf, pacf,arma_order_select_ic
import statsmodels.formula.api as smf
import statsmodels.tsa.api as smt
import statsmodels.api as sm
import scipy.stats as scs
# Load the "Predict Future Sales" Kaggle data sets.
# NOTE(review): non-raw Windows paths -- sequences like \s or \i are kept
# literally by Python today, but raw strings (r"...") would be safer.
sales=pd.read_csv("D:\SFU\DataMining\Project\sales_train_v2.csv")
item_cat=pd.read_csv("D:\SFU\DataMining\Project\item_categories.csv")
item=pd.read_csv("D:\SFU\DataMining\Project\items.csv")
sub=pd.read_csv("D:\SFU\DataMining\Project\sample_submission.csv")
shops=pd.read_csv("D:\SFU\DataMining\Project\shops.csv")
#test=pd.read_csv("D:\SFU\DataMining\Project\test.csv")

# Parse the day.month.year date strings into datetimes.
sales.date=sales.date.apply(lambda x:datetime.datetime.strptime(x, '%d.%m.%Y'))
# Aggregate daily sales to monthly per (month, shop, item):
# mean price, summed quantity.
monthly_sales=sales.groupby(["date_block_num","shop_id","item_id"])[
"date_block_num","item_price","item_cnt_day"].agg({"date_block_num":"mean", "item_price":"mean","item_cnt_day":"sum"})

df=pd.DataFrame(monthly_sales)
df=np.array(df)  # rows of [month, mean price, total count]

#clustering by Clique
from pyclustering.cluster.clique import clique, clique_visualizer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES

# CLIQUE grid: 10 intervals per dimension, density threshold 0
# (every non-empty cell counts as dense).
intervals = 10
threshold = 0
clique_instance = clique(df, intervals, threshold)
clique_instance.process()
clusters = clique_instance.get_clusters()
noise = clique_instance.get_noise()
cells = clique_instance.get_cells()
print("Amount of clusters:", len(clusters))

#defining a variable (cluster) including cluster number for each shop-item id
cl=pd.DataFrame(clusters)
cluster=[]
#for k in range(4):
#    for j in range(1609120):
#        if cl[j][k] in range(1609124):
#            cluster.append(k)
#        else: cluster.append('nan')
# NOTE(review): 4 and 1609120 are hard-coded cluster/row counts -- confirm
# they match len(clusters) and the data size.  Where a cluster is shorter
# than the longest, cl[j][k] holds NaN; NaN >= 0 is False, so those slots
# fall into the 'nan' branch.
for k in range(4):
    for j in range(1609120):
        if cl[j][k] >= 0:
            cluster.append(k)
        else: cluster.append('nan')
| [
"noreply@github.com"
] | salilkhandelwal.noreply@github.com |
8c0d012d8d04a4973b14979e0731ec72a32bbdde | 0728a2e165808cfe5651693a6e7f47804bfb085f | /ry/trunk-ry/rynok/controllers/category.py | 2c8663f5cbf391dbaad2d949ff7d5a5f07a4cd0e | [] | no_license | testTemtProj/OLD_PROJECT | 5b026e072017f5135159b0940370fda860241d39 | 9e5b165f4e8acf9003536e05dcefd33a5ae46890 | refs/heads/master | 2020-05-18T15:30:24.543319 | 2013-07-23T15:17:32 | 2013-07-23T15:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,907 | py | #coding: utf-8
""" Category Controller
"""
import logging
import rynok.lib.helpers as h
import json
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from webhelpers.html.builder import HTML
from rynok.lib.base import BaseController, render
from rynok.model.categoriesModel import CategoriesModel
from rynok.lib import helpers as h
from rynok.model.referenceModel import ReferenceModel
from rynok.model.settingsModel import SettingsModel
LOG = logging.getLogger(__name__)
class CategoryController(BaseController):
    def __init__(self):
        """Set up the controller with the categories model class."""
        BaseController.__init__(self)
        # The model is used as a class with static-style methods,
        # not instantiated.
        self.categories_model = CategoriesModel
def index(self, url):
category = self.categories_model.getByURL(url=url)
if not category:
return render('/error/error.mako.html')
if 'isLeaf' in category and category['isLeaf']:
return self.view(category=category)
cats = self.categories_model.getChildrens(category["ID"], non_empty=True)
c.cats = []
for cat in cats:
c.cats.append(cat)
c.category = category
return render('/category.mako.html')
def all(self):
cats = self.categories_model.getChildrens(categoryId=0, non_empty=True)
c.cats = []
for cat in cats:
c.cats.append(cat)
return render('/all.categories.mako.html')
    def popular(self):
        """Render the "popular products" listing.

        Which categories are shown and how many items per category come
        from the site-wide settings block.
        """
        reference_model = ReferenceModel
        settings = SettingsModel.get_popular_block_settings()
        # User-facing Russian title ("Popular products").
        c.title = 'Популярные товары'
        c.products = reference_model.get_popular_products(settings['categories'], settings['per_category'])
        return render('/products.html')
    def new(self):
        """Render the "new products" listing."""
        reference_model = ReferenceModel
        # User-facing Russian title ("New products").
        c.title = 'Новые товары'
        # NOTE(review): 28 appears to be the item count for this page --
        # confirm, and consider moving it to settings like the popular block.
        c.products = reference_model.get_new_products(28)
        return render('/products.html')
def view(self, category, page=1):
reference_model = ReferenceModel
if not isinstance(category, dict):
category = self.categories_model.getByURL(category)
c.category = category
c.error_message = None
sort_by = request.params.get('sort_by', 'price')
if sort_by == 'rating':
by = 'Rate'
elif sort_by == 'price':
by = 'price'
elif sort_by == 'popular':
by = 'popular'
try:
c.markets = json.loads(request.params.get('m_id', '[]'))
except ValueError:
c.markets = []
try:
c.vendors = json.loads(request.params.get('v_id', '[]'))
except ValueError:
c.vendors = []
sort_order = request.params.get('sort_order', 'desc')
try:
c.price_min = int(request.params.get('price_min', 0))
except:
c.price_min = 0
try:
c.perPage = int(request.params.get('per_page', 10))
except:
c.perPage = 10
c.currency = request.params.get('currency', 'UAH')
query = {'categoryId':int(category['ID']), c.currency: {'$gt': c.price_min-1}}
c.affordable_price = reference_model.get_max_price(query, c.currency) + 1
c.price_max = int(request.params.get('price_max', c.affordable_price))
query[c.currency]['$lt'] = c.price_max + 1
if len(c.markets) > 0 and len(c.vendors) > 0:
query['shopId'] = {'$in':c.markets}
query['vendor'] = {'$in':c.vendors}
if len(c.markets) > 0 and len(c.vendors) == 0:
query['shopId'] = {'$in':c.markets}
if len(c.markets) == 0 and len(c.vendors) > 0:
query['vendor'] = {'$in':c.vendors}
count_products = reference_model.get_count(query=query)
"""
if count_products == 0:
referer = request.headers.get('Referer', '')
http_host = request.environ.get('HTTP_HOST')
c.back_url = referer
if referer.find(http_host) == -1:
c.back_url = '/'
cats = self.categories_model.getChildrens(categoryId=0, non_empty=True)
c.cats = []
for cat in cats:
c.cats.append(cat)
c.noresult = u"По даной цене товары не найдены"
return render('/empty.category.mako.html')
"""
if count_products > 0:
c.products = reference_model.get_reference(where=query, perPage = c.perPage, page = int(page)-1, by=by, direction=sort_order)
else:
#get_less_products_query = query.copy()
#get_less_products_query[c.currency] = {'$lt' : c.price_min}
get_more_products_query = query.copy()
del(get_more_products_query[c.currency])# = {'$lte' : c.price_max}
#less_products = reference_model.get_reference(where=get_less_products_query, limit=2, by=c.currency, direction=-1)
#more_products = reference_model.get_reference(where=get_more_products_query, limit=2, by=c.currency, direction=1)
#c.products = more_products
print get_more_products_query
c.products = reference_model.get_reference(where=get_more_products_query, perPage = c.perPage, page = int(page)-1, by=by, direction=sort_order)
c.error_message = u"По даной цене товары не найдены, показаны без учета цены"
count_products = reference_model.get_count(query=get_more_products_query)
c.page = page
c.total_pages = count_products/c.perPage
if count_products%c.perPage:
c.total_pages += 1
c.sort_settings = {sort_by: sort_order}
c.current_url = category['URL']+'/'+str(page)
return render('/view.category.mako.html')
| [
"Kyzmenko_Pavel@mail.ru"
] | Kyzmenko_Pavel@mail.ru |
65808865c4ac615c1029fc059831cfae0dd7f3b1 | 0295d379d396fa02202d0db01a0ccf9f9ad2f3c0 | /aoc16/D15/d15.py | cd510a67e0d6a37e59f28f94019aee6a3ddbfd26 | [] | no_license | majstenmark/adventofcode | 047c8839aa89acc0cf60c4ef326f1a5f9cc400b0 | f2fd8cb6f645de6c67a58f466358fe4813153a1b | refs/heads/main | 2023-06-07T04:23:47.678427 | 2021-04-17T19:28:39 | 2021-04-17T19:28:39 | 379,188,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | import sys, time
from datetime import date
sys.path.extend(['..', '.'])
from collections import *
from fetch import *
from util import *
#import drawgraph
#lo, hi, lt, pw = lazy_ints(multisplit(line, '-: ')) #chars only!
#or lo, hi, lt, pw = lazy_ints(multisplit(line, ['-',': ','))
import re
#use regex re.split(' |,|: ', line)
def db(*a):
if DB: print(*a)
def parse(line):
line = removeall(line, 'Disc #', 'has', 'positions; at time=0, it is at position ', '.')
return lazy_ints(line.split())
def gcd(a, b):return gcd(b, a % b) if b else a
# x * a + y * b = gcd(a, b). Return gcd(a, b), x, y
def xgcd(a, b):
x0, x1, y0, y1 = 1, 0, 0, 1
while b != 0:
q, a, b = (a // b, b, a % b)
x0, x1 = (x1, x0 - q * x1)
y0, y1 = (y1, y0 - q * y1)
return (a, x0, y0)
#If a list of t = a1 mod n1, t = a2 mod n2 ... Given a list of a and n, returns t
def crt(la, ln):
assert len(la) == len(ln)
for i in range(len(la)):
assert 0 <= la[i] < ln[i]
prod = 1
for n in ln:
assert gcd(prod, n) == 1
prod *= n
lN = []
for n in ln:
lN.append(prod//n)
x = 0
for i, a in enumerate(la):
print(lN[i], ln[i])
_, Mi, mi = xgcd(lN[i], ln[i])
x += a*Mi*lN[i]
return x % prod
def p1(v):
lines = v.strip().split('\n')
vals = [parse(line) for line in lines]
la = []
ln = []
for ai, ni, si in vals:
la.append(-(ai + si)%ni)
ln.append(ni)
return crt(la, ln)
def p2(v):
return p1(v)
def manual():
v = open("real.txt", 'r').read().strip('\n')
print('part_1: {}\npart2: {}'.format(p1(v), p2(v)))
cmds, stats, io, so, DB = get_args(sys.argv)
if not io: run_samples(p1, p2, cmds)
if not so: run(2016,15, p1, p2, cmds)
if stats: print_stats()
#manual()
| [
"majstenmark@gmail.com"
] | majstenmark@gmail.com |
2bd112ac93dcd356a16b4eefafb8a2aa5b1fe728 | 4d30d39cbcb0d2428d710a47c0ca7ef8bcec447d | /core/dbs/__init__.py | c95c6e94c224de6b64cb1e2e67ddf572f055abd2 | [
"BSD-3-Clause"
] | permissive | baifengbai/CornerNet-Lite-Pytorch | 7a8c5bbe49343e67ae100001c18df5542b375b4e | 7c52f93720d6276a6e073c06fb6cec6b8580da56 | refs/heads/master | 2020-09-22T15:25:13.050615 | 2019-12-01T16:44:28 | 2019-12-01T16:44:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from .coco import COCO
from .dagm import DAGM
# 数据库名字
datasets = {
"COCO": COCO,
"DAGM": DAGM,
}
| [
"274762204@qq.com"
] | 274762204@qq.com |
0afc1093eb45acf937105c84f16f51d70e60e023 | 300c162a7345dc2f68cf472abb5f60a2cb7e516f | /1209.py | 5426f7b1ce6954a3016ba2e4ad8c4ba7451407a0 | [] | no_license | kinabalu/adventofcode2020 | 871cab85509c85a4d7b02863cc2239b70687b7bd | d714f776c002b0f965f5b8dd55d74f6fd9e48e64 | refs/heads/master | 2023-01-28T12:44:53.528927 | 2020-12-11T17:28:43 | 2020-12-11T17:28:43 | 317,611,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | def read_input():
with open('12_09_input.txt') as reader:
return [int(line.strip()) for line in reader]
def main():
data = read_input()
prev_25 = []
idx = 0
found = False
xmas_number = None
while idx < len(data) and not found:
entry = data[idx]
if idx > 25:
test_found = False
for x in range(0, 25):
for y in range(1, 25):
if entry == prev_25[x] + prev_25[y]:
test_found = True
break
if not test_found:
xmas_number = entry
found = True
if len(prev_25) == 25:
prev_25.pop(0)
prev_25.append(entry)
idx+=1
add_set = []
contiguous_found = False
contiguous_add = None
x = 0
while not contiguous_found and x in range(0, len(data)):
total = data[x]
add_set.append(data[x])
x += 1
y = 1
while not contiguous_found and y in range(1, len(data)):
total += data[y]
add_set.append(data[y])
y += 1
if total == xmas_number and len(add_set) > 1:
add_set.sort()
contiguous_add = add_set[0] + add_set[len(add_set) - 1]
contiguous_found = True
break
elif total > xmas_number:
add_set = []
total = 0
print("part 1: %d" % xmas_number)
print("part 2: %d" % contiguous_add)
if __name__ == '__main__':
main()
| [
"andrew@mysticcoders.com"
] | andrew@mysticcoders.com |
e8b820bfaa080a86067ff6ddf2cda44d01ba25eb | fa14db2af8403af9cba892489e382f4b33547b58 | /levelupapi/views/game.py | 22fe771b120aa2eef06a7581f23783972a44ad7c | [] | no_license | hrlnsnchz/nss-level-up-server | 4e1e737121229d4a52f2651dc71ca114adef5933 | b06436addfe7ed97ed8b58e31f5561cca7ec2f79 | refs/heads/main | 2023-05-05T19:17:01.751397 | 2021-05-28T01:38:05 | 2021-05-28T01:38:05 | 364,671,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,590 | py | """View module for handling requests about games"""
from django.core.exceptions import ValidationError
from rest_framework import status
from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status
from levelupapi.models import Game, GameType, Gamer
class Games(ViewSet):
"""Level up games"""
def create(self, request):
"""Handle POST operations
Returns:
Response -- JSON serialized game instance
"""
# Uses the token passed in the `Authorization` header
gamer = Gamer.objects.get(user=request.auth.user)
# Create a new Python instance of the Game class
# and set its properties from what was sent in the
# body of the request from the client.
game = Game()
game.name = request.data["name"]
game.maker = request.data["maker"]
game.number_of_players = request.data["number_of_players"]
game.difficulty = request.data["difficulty"]
game.gamer = gamer
# Use the Django ORM to get the record from the database
# whose `id` is what the client passed as the
# `gameTypeId` in the body of the request.
game_type = GameType.objects.get(pk=request.data["game_type_id"])
game.game_type = game_type
# Try to save the new game to the database, then
# serialize the game instance as JSON, and send the
# JSON as a response to the client request
try:
game.save()
serializer = GameSerializer(game, context={'request': request})
return Response(serializer.data, status=status.HTTP_201_CREATED)
# If anything went wrong, catch the exception and
# send a response with a 400 status code to tell the
# client that something was wrong with its request data
except ValidationError as ex:
return Response({"reason": ex.message}, status=status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk):
"""Handle GET requests for single game
Returns:
Response -- JSON serialized game instance
"""
try:
# `pk` is a parameter to this function, and
# Django parses it from the URL route parameter
# http://localhost:8000/games/2
#
# The `2` at the end of the route becomes `pk`
game = Game.objects.get(pk=pk)
serializer = GameSerializer(game, context={'request': request})
return Response(serializer.data)
except Game.DoesNotExist as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
except Exception as ex:
return HttpResponseServerError(ex)
def update(self, request, pk):
"""Handle PUT requests for a game
Returns:
Response -- Empty body with 204 status code
"""
gamer = Gamer.objects.get(user=request.auth.user)
# Do mostly the same thing as POST, but instead of
# creating a new instance of Game, get the game record
# from the database whose primary key is `pk`
game = Game.objects.get(pk=pk)
game.name = request.data["name"]
game.maker = request.data["maker"]
game.number_of_players = request.data["number_of_players"]
game.difficulty = request.data["difficulty"]
game.gamer = gamer
game_type = GameType.objects.get(pk=request.data["game_type_id"])
game.game_type = game_type
try:
game.save()
except ValidationError as ex:
return Response({ 'reason': ex.message}, status=status.HTTP_400_BAD_REQUEST)
# 204 status code means everything worked but the
# server is not sending back any data in the response
return Response({}, status=status.HTTP_204_NO_CONTENT)
def destroy(self, request, pk):
"""Handle DELETE requests for a single game
Returns:
Response -- 200, 404, or 500 status code
"""
try:
game = Game.objects.get(pk=pk)
game.delete()
return Response({}, status=status.HTTP_204_NO_CONTENT)
except Game.DoesNotExist as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
except Exception as ex:
return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def list(self, request):
"""Handle GET requests to games resource
Returns:
Response -- JSON serialized list of games
"""
# Get all game records from the database
games = Game.objects.all()
# Support filtering games by type
# http://localhost:8000/games?type=1
#
# That URL will retrieve all tabletop games
game_type = self.request.query_params.get('game_type', None)
if game_type is not None:
games = games.filter(game_type__id=game_type)
serializer = GameSerializer(
games, many=True, context={'request': request})
return Response(serializer.data)
class GameSerializer(serializers.ModelSerializer):
"""JSON serializer for games
Arguments:
serializer type
"""
class Meta:
model = Game
fields = ('id', 'name', 'maker', 'number_of_players', 'difficulty', 'game_type', 'gamer')
depth = 1 | [
"hrlnsnchz@gmail.com"
] | hrlnsnchz@gmail.com |
4905389b265f26eae8f3ad56e407e10420eb28aa | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/eclect.py | 7be45d81379ad886d250abb271881bb833c02954 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 80 | py | ii = [('NewmJLP.py', 2), ('BachARE.py', 1), ('SoutRD.py', 1), ('WestJIT.py', 3)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
36686ecf3ef8dddacb386186f976e7db325b7da8 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/16/usersdata/122/6123/submittedfiles/triangulo.py | 3ae57cd9af2aa0c78741ee0de80b08dafd3b0c19 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
#ENTRADA
a=input('digite o valor do lado a:')
b=input('digite o valor do lado b:')
c=input('digite o valor do lado c:')
if a<(b+c):
print('S')
if (a**2)==(b**2)+(c**2):
print('Re')
elif (a**2)>(b**2)+(c**2):
print('Ob')
elif (a**2)<(b**2)+(c**2):
print('Ac')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f2c593a16f13760954e84df1235f3e79e7d61106 | 49b769476ecc6adb306f158a61412e45e677a00b | /frontend/migrations/0029_auto_20170529_0118.py | 81a65bab91a2c9bd0bcddda6d72da12f87b8136b | [] | no_license | 3dlink/ivoweb | ac4743eb4d85ad17976c11d31d846bf01d017b40 | 088c78af64a6cb803e284fa309575cfc4354df03 | refs/heads/master | 2021-01-19T12:50:47.380535 | 2017-07-22T07:55:55 | 2017-07-22T07:55:55 | 82,341,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-05-29 01:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('frontend', '0028_auto_20170529_0113'),
]
operations = [
migrations.AlterField(
model_name='user',
name='uuid',
field=models.CharField(default='72c1b946-5511-411e-b287-c99ead712820', max_length=100, unique=True),
),
]
| [
"wa_p@hotmail.com"
] | wa_p@hotmail.com |
440e63a0acf0112f5cb39eb44852550860e1d240 | b6d3d7647705851c5eb16a6a09a267c6f01c8e05 | /flaskblog/__init__.py | d734ebb7f83bb05ae99b57ee04ace80dd9240abc | [] | no_license | TarasFurman/flaskblog | 309efacec913d5e4ef8c1eed950a2ce1bae37726 | 3fa70702b689875c44ad34ecedb49e18ca89e8b1 | refs/heads/master | 2020-05-25T00:23:40.433221 | 2019-05-19T22:06:52 | 2019-05-19T22:06:52 | 187,532,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
from flaskblog.config import Config
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.login_view = 'users.login'
login_manager.login_message_category = 'info'
mail = Mail()
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(Config)
db.init_app(app)
bcrypt.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
from flaskblog.users.routes import users
from flaskblog.posts.routes import posts
from flaskblog.main.routes import main
from flaskblog.errors.handlers import errors
app.register_blueprint(users)
app.register_blueprint(posts)
app.register_blueprint(main)
app.register_blueprint(errors)
return app | [
"tarfurman@gmail.com"
] | tarfurman@gmail.com |
c2bb2d65b3870a887e0ddb17c2f03d3f97dbddcc | 8f50c262f89d3dc4f15f2f67eb76e686b8f808f5 | /Trigger/TriggerCommon/TriggerMenu/scripts/moveDisabledChains.py | 3cf550466499b6bb7c7896c7e66d13a32585c169 | [
"Apache-2.0"
] | permissive | strigazi/athena | 2d099e6aab4a94ab8b636ae681736da4e13ac5c9 | 354f92551294f7be678aebcd7b9d67d2c4448176 | refs/heads/master | 2022-12-09T02:05:30.632208 | 2020-09-03T14:03:18 | 2020-09-03T14:03:18 | 292,587,480 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,373 | py | #!/bin/env python
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
"""
This script reads the rulebook and swaps items between Physics and MC if they are disabled or not.
TrigMenuRulebook needs to be checked out and installed.
Currently it needs to be run from the script folder.
One argument can be supplied specifying which "tag" to use, default is pp_v7
"""
import importlib
import sys, re, os
tag = "pp_v7"
if len(sys.argv) > 1:
tag = sys.argv[1]
import checkTigherThanPrimary
tighter_than_primaries = set([x for x, y in checkTigherThanPrimary.main()])
def swapItems():
physics_rulemod = importlib.import_module("TrigMenuRulebook.Physics_%s_rules" % tag)
monitoring_rulemod = importlib.import_module("TrigMenuRulebook.Monitoring_%s_rules" % tag)
standby_rulemod = importlib.import_module("TrigMenuRulebook.Standby_%s_rules" % tag)
cosmic_rulemod = importlib.import_module("TrigMenuRulebook.Cosmic_%s_rules" % tag)
toroidoff_rulemod = importlib.import_module("TrigMenuRulebook.Physics_%s_ToroidOff_rules" % tag)
commissioning_rulemod = importlib.import_module("TrigMenuRulebook.Commissioning2016_rules")
startup_rulemod = importlib.import_module("TrigMenuRulebook.Physics_%s_startup_rules" % tag)
monitoring_rulemod.rules = monitoring_rulemod.physics_rules
modules = (physics_rulemod,monitoring_rulemod,standby_rulemod,cosmic_rulemod,toroidoff_rulemod,commissioning_rulemod,startup_rulemod)
modules = (physics_rulemod,monitoring_rulemod,standby_rulemod,cosmic_rulemod,toroidoff_rulemod,commissioning_rulemod)
l1topo_pattern = "\w-\w"
def getPS(item):
hlt = "HLT_"+item
ps = -1
for mod in modules:
if hlt in mod.rules.keys(): key = hlt
elif item in mod.rules.keys(): key = item
else: continue
for rule in mod.rules[key].itervalues():
if "PS" not in rule.keys():
if "rate" in rule.keys(): return 2
else: ps = 1
if rule["PS"] > ps:
ps = rule["PS"]
if ps > 1: return ps
return ps
lines_Physics = {}
lines_MC = {}
lines_Physics_fromMC = {}
lines_MC_fromPhysics = {}
count_toMC = 0
count_toPhysics = 0
items_MC_fromPhysics = set()
items_Physics_fromMC_withPS = set()
items_Physics_withPS = set()
current_Physics = "../python/menu/Physics_%s.py" % tag
current_MC = "../python/menu/MC_%s.py" % tag
slice_start_pattern = r"\s*TriggerFlags.(.*).signatures *=.*\[[^\]]"
slice_end_pattern = "^\s*\]\s*$"
item_pattern = """(?x)\['(.*)'\s*, #chainName
\s*'.*'\s*, #L1itemforchain
\s*\[.*\]\s*, #[L1 items for chainParts]
\s*\[.*\]\s*, #[stream]
\s*\[.*\]\s*, #[groups]
\s*.*\]* #EBstep and ]"""
ftk_pattern = "if TriggerFlags.doFTK()"
last_slice_pattern = "TriggerFlags.(HeavyIon|MinBias|Beamspot)Slice.signatures"
output_Physics = ""
output_MC = ""
with open (current_Physics) as file_Physics:
currentSlice = None
linebuffer = ""
lastslice = False
for line in file_Physics:
commented = line.strip().startswith("#")
# Last slice pattern
m = re.search(last_slice_pattern,line)
if m and not commented:
lastslice = True
if lastslice:
output_Physics += line
continue
# Item pattern
m = re.search(item_pattern,line)
if m and not commented:
assert(currentSlice != None)
name = m.group(1)
ps = getPS(name)
if name in tighter_than_primaries:
print "Found item that is tighter than primary, instead of moving to MC please enable the rule in RB:", name
if ps==-1 and not name in tighter_than_primaries and not re.search(l1topo_pattern,line):
lines_MC_fromPhysics[currentSlice] += linebuffer+line
items_MC_fromPhysics.add(name)
count_toMC += 1
else:
lines_Physics[currentSlice] += linebuffer+line
linebuffer = ""
if ps>1:
items_Physics_withPS.add(name)
continue
# Slice start pattern
m = re.search(slice_start_pattern,line)
if m and not commented:
assert(currentSlice == None)
currentSlice = m.group(1)
output_Physics += "REPLACE_%s\n"%currentSlice
if not currentSlice in lines_Physics.keys(): lines_Physics[currentSlice] = line
if not currentSlice in lines_MC_fromPhysics.keys(): lines_MC_fromPhysics[currentSlice] = ""
continue
# Slice end pattern
m = re.search(slice_end_pattern,line)
if m and not commented:
if currentSlice!=None:
lines_Physics[currentSlice] += linebuffer
linebuffer = ""
currentSlice = None
continue
# Store line in the proper place
if currentSlice == None:
output_Physics += line
else:
linebuffer += line
with open (current_MC) as file_MC:
currentSlice = None
if_ftk = False
linebuffer = ""
lastslice = False
for line in file_MC:
commented = line.strip().startswith("#")
# Last slice pattern
m = re.search(last_slice_pattern,line)
if m and not commented:
lastslice = True
if lastslice:
output_MC += line
continue
# Item pattern
m = re.search(item_pattern,line)
if m and not commented and not if_ftk:
assert(currentSlice != None)
ps = getPS(m.group(1))
if ps==-1 or re.search(l1topo_pattern,line):
lines_MC[currentSlice] += linebuffer+line
else:
lines_Physics_fromMC[currentSlice] += linebuffer+line
if ps>1:
items_Physics_fromMC_withPS.add(m.group(1))
count_toPhysics +=1
linebuffer = ""
continue
# Slice start pattern
m = re.search(slice_start_pattern,line)
if m and not commented and not if_ftk:
assert(currentSlice == None)
currentSlice = m.group(1)
output_MC += "REPLACE_%s\n"%currentSlice
if not currentSlice in lines_Physics_fromMC.keys(): lines_Physics_fromMC[currentSlice] = ""
if not currentSlice in lines_MC.keys(): lines_MC[currentSlice] = line
continue
# Slice end pattern
m = re.search(slice_end_pattern,line)
if m and not commented:
if_ftk = False
if currentSlice!=None:
lines_MC[currentSlice] += linebuffer
linebuffer = ""
currentSlice = None
continue
# FTK pattern
m = re.search(ftk_pattern,line)
if m and not commented:
if_ftk = True
# Store line in the proper place
if currentSlice == None:
output_MC += line
else:
linebuffer += line
for key,val in lines_Physics.iteritems():
toreplace = val
if key in lines_Physics_fromMC.keys():
toreplace += lines_Physics_fromMC[key]
toreplace += " ]\n"
output_Physics = output_Physics.replace("REPLACE_%s\n"%key,toreplace)
for key,val in lines_MC.iteritems():
toreplace = val
if key in lines_MC_fromPhysics.keys():
toreplace += lines_MC_fromPhysics[key]
toreplace += " ]\n"
output_MC = output_MC.replace("REPLACE_%s\n"%key,toreplace)
with open (current_Physics+".edit","w") as outfile_Physics:
outfile_Physics.write(output_Physics)
with open (current_MC+".edit","w") as outfile_MC:
outfile_MC.write(output_MC)
print "Items moved Physics -> MC:",count_toMC
print "Items moved MC -> Physics:",count_toPhysics
if items_Physics_fromMC_withPS:
print "Some new items in Physics are prescaled, you probably want to add them to CPS.py:"
print sorted(list(items_Physics_fromMC_withPS))
return items_MC_fromPhysics
def cleanCPS(movedToMC):
from TriggerMenu.menu.CPS import defineCPSGroups
HLT_CPS_Groups = defineCPSGroups()
cps_to_remove = set()
for cps, items in HLT_CPS_Groups.iteritems():
if sum(item in movedToMC for item in items) >= len(items)-1:
cps_to_remove.add(cps)
cpsfile = "../python/menu/CPS.py"
cps_start_pattern = "'RATE:CPS:(.*)'\s*:\s*\["
cps_end_pattern = "\]\s*,"
cps_item_pattern = "^\s*'(.*)'\s*,\s*$"
with open (cpsfile+".edit","w") as outcps, open (cpsfile) as incps:
removing = False
for line in incps:
commented = line.strip().startswith("#")
# CPS start pattern
m = re.search(cps_start_pattern,line)
if m and not commented:
name = 'RATE:CPS:%s'%m.group(1)
if name in cps_to_remove:
removing = True
#CPS item pattern
if not m:
m = re.search(cps_item_pattern,line)
if m and not commented:
if m.group(1) in movedToMC: continue
if not removing:
outcps.write(line)
# CPS end pattern
m = re.search(cps_end_pattern,line)
if m and not commented:
removing = False
if __name__ == "__main__":
movedToMC = swapItems()
cleanCPS(movedToMC)
| [
"graemes.cern@gmail.com"
] | graemes.cern@gmail.com |
d694de90a7928e69f7b07e2791407d265fc58b26 | 701c93f0cba68c28162e6f9443022420b8f4e57d | /wotmad/stats/migrations/0003_auto__del_field_stat_moves__del_field_stat_spellpoints__del_field_stat.py | 2348b5b74c1d5693738786801c8238b140202a42 | [] | no_license | avidal/wotmad | e9c069ff07fc528738e97851e2f1db87759651cb | 78b7164afc0d63fc58451d4078da8ad71c57bb7e | refs/heads/master | 2021-01-22T19:48:44.790586 | 2015-02-24T19:41:38 | 2015-02-24T19:41:38 | 1,926,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,569 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Stat.moves'
db.delete_column('stats_stat', 'moves')
# Deleting field 'Stat.spellpoints'
db.delete_column('stats_stat', 'spellpoints')
# Deleting field 'Stat.hitpoints'
db.delete_column('stats_stat', 'hitpoints')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Stat.moves'
raise RuntimeError("Cannot reverse this migration. 'Stat.moves' and its values cannot be restored.")
# Adding field 'Stat.spellpoints'
db.add_column('stats_stat', 'spellpoints',
self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, blank=True),
keep_default=False)
# User chose to not deal with backwards NULL issues for 'Stat.hitpoints'
raise RuntimeError("Cannot reverse this migration. 'Stat.hitpoints' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'stats.stat': {
'Meta': {'object_name': 'Stat'},
'con': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'date_submitted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dex': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'faction': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'homeland': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intel': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'klass': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'to': "orm['auth.User']"}),
'wil': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
}
}
complete_apps = ['stats'] | [
"alex.vidal@gmail.com"
] | alex.vidal@gmail.com |
937e8d50119c5045eaa3d246d2349f4e1b9acdb8 | 12dd250ca14f9e42320acbc7c95cf67b654da702 | /ratings/wagtail_hooks.py | 90ceae160dcaace8e0e868bb8bf11c3502bc5cf3 | [] | no_license | fourfridays/umairabbasi.com | 86b3fe09108cd0f09c8d7b122dc18b16a81b985b | b6bd6ecb85b281b03564eef4d7a6f74f59bbd31b | refs/heads/master | 2023-08-20T00:59:53.741112 | 2023-08-18T14:51:48 | 2023-08-18T14:51:48 | 54,347,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | from wagtail.contrib.modeladmin.options import (
ModelAdmin, modeladmin_register)
from ratings.models import Cast, MovieGenre, People
class MovieGenreModelAdmin(ModelAdmin):
model = MovieGenre
add_to_settings_menu = False # or True to add your model to the Settings sub-menu
exclude_from_explorer = False # or True to exclude pages of this type from Wagtail's explorer view
list_per_page = 50
list_display = ('id', 'name')
ordering = ['name']
#list_filter = ('offering_id', 'offering_code')
search_fields = ('name')
class PeopleModelAdmin(ModelAdmin):
model = People
add_to_settings_menu = False # or True to add your model to the Settings sub-menu
exclude_from_explorer = False # or True to exclude pages of this type from Wagtail's explorer view
list_per_page = 50
list_display = ('id', 'name')
ordering = ['id', 'name']
#list_filter = ('offering_id', 'offering_code')
search_fields = ('id', 'name')
class CastModelAdmin(ModelAdmin):
    """Wagtail admin listing for cast entries (movie / actor / character)."""
    model = Cast
    add_to_settings_menu = False  # or True to add your model to the Settings sub-menu
    exclude_from_explorer = False  # or True to exclude pages of this type from Wagtail's explorer view
    list_per_page = 50
    list_display = ('movie', 'cast_member', 'character')
    ordering = ['movie']
    #list_filter = ('offering_id', 'offering_code')
    # Bug fix: ('character') is just the string 'character', not a tuple;
    # a one-element tuple needs a trailing comma.
    search_fields = ('character',)
# Register each ModelAdmin with Wagtail so it appears in the admin menu.
# (When using a ModelAdminGroup class to group several ModelAdmin classes
# together, you only need to register the ModelAdminGroup class with Wagtail.)
modeladmin_register(MovieGenreModelAdmin)
modeladmin_register(PeopleModelAdmin)
modeladmin_register(CastModelAdmin)
| [
"umair.abbasi@fourfridays.com"
] | umair.abbasi@fourfridays.com |
98299e8fc9719396b0a0c8296d8e7d217f227d6b | d385c85c8f462e6908f41fb51e5fb6386a9398a8 | /Desktop/Program/Packing and Unpacking/5.unpacking.py | bcd7f2483cd09021c58e26d8d5a2342866eef8d4 | [] | no_license | spiderr7/cls-python | 697b7802c5efa8eeba2d77fa5fd62a3a1e7c7045 | f2a325f858a2f073cb40e5d8e04ce91dba32d28d | refs/heads/master | 2020-09-13T10:39:42.841354 | 2019-11-19T17:00:14 | 2019-11-19T17:00:14 | 222,745,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | def unpacking(a,b,c):
print("a:",a,end="|")
print("b:",b,end="|")
print("c:",c)
print("-"*50)
# Sample containers: each holds exactly three elements so it can be
# star-unpacked into unpacking(a, b, c).
a="Hai"
b=[1,2,3]
c=(4,5,6)
d={7,8,9}
e={'a':1,'b':2,'c':3}
unpacking(*a)   # a string unpacks into its characters: 'H', 'a', 'i'
unpacking(*b)   # list  -> 1, 2, 3
unpacking(*c)   # tuple -> 4, 5, 6
unpacking(*d)   # set   -> 7, 8, 9 (set iteration order is not guaranteed)
unpacking(*e)   # * on a dict unpacks its KEYS: 'a', 'b', 'c'
unpacking(**e)  # ** maps keys to parameter names: a=1, b=2, c=3
"utkarshrana1@gmail.com"
] | utkarshrana1@gmail.com |
8948ad1eaafc3c4f1f7c4e33dfe4de52e0e26e2e | 5d91c8dc65df96816994b5e8cce10d2261294349 | /natural-selection-sim 30-10-2020/vectorMath.py | 351c07b104768d43c93deaf50a21799c1e8d1c85 | [] | no_license | phletic/pythonEcosystemSimulation | 9d8f90b53a6c37d0f7236404bd9997cb45daa8ce | 9e20afacb1c5a44bca920a9f549962f46d437d5c | refs/heads/main | 2023-01-06T15:27:14.272785 | 2020-10-31T04:14:44 | 2020-10-31T04:14:44 | 302,033,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,349 | py | '''
Credits
__author__ = "Sven Hecht"
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Sven Hecht"
__email__ = "info@shdev.de"
__status__ = "Production"
to get it : https://gist.github.com/mostley/3819375
Code was used to process the Vector 2 Math required in the simulation, which I was extremely lazy to calculate. Works like a charm.
Edited it to suit my needs, e.g. changing the distance measurement from Euclidean to taxicab distance.
'''
from math import *
class Vector:
    """Mutable 2D vector with element-wise arithmetic and comparisons.

    Construction accepts two numbers, a 2-element tuple/list, or another
    Vector. Arithmetic operators work element-wise against Vectors,
    tuples/lists and plain numbers; <, <=, > and >= compare Euclidean
    lengths, while == and != compare components.

    Bug fix: the original only defined the Python 2 names __div__ /
    __rdiv__ / __idiv__, so the / operator (and getNormalized, which uses
    it) raised TypeError on Python 3. Aliases to the __truediv__ family
    are added below.
    """

    def __init__(self, x=0, y=0):
        self.x = 0
        self.y = 0
        # Allow Vector((x, y)), Vector([x, y]) and Vector(other_vector).
        if isinstance(x, (tuple, list)):
            y = x[1]
            x = x[0]
        elif isinstance(x, Vector):
            y = x.y
            x = x.x
        self.set(x, y)

    @staticmethod
    def Distance(self, other):
        """Euclidean distance between two vectors: Vector.Distance(a, b)."""
        return sqrt(pow(self.x - other.x, 2) + pow(self.y - other.y, 2))

    @staticmethod
    def angle(v1, v2):
        """Angle between v1 and v2 in radians."""
        return acos(v1.dotproduct(v2) / (v1.getLength() * v2.getLength()))

    @staticmethod
    def angleDeg(v1, v2):
        """Angle between v1 and v2 in degrees."""
        return Vector.angle(v1, v2) * 180.0 / pi

    def set(self, x, y):
        """Set both components."""
        self.x = x
        self.y = y

    def toArr(self):
        """Return the components as an [x, y] list."""
        return [self.x, self.y]

    def toInt(self):
        """Return a copy with both components truncated to int."""
        return Vector(int(self.x), int(self.y))

    def toIntArr(self):
        """Return the truncated components as an [x, y] list."""
        return self.toInt().toArr()

    def getNormalized(self):
        """Return a unit-length copy; the zero vector normalizes to (0, 0)."""
        if self.getLength() != 0:
            return self / self.getLength()
        else:
            return Vector(0, 0)

    def dotproduct(self, other):
        """Dot product against a Vector or a 2-element tuple/list."""
        if isinstance(other, Vector):
            return self.x * other.x + self.y * other.y
        elif isinstance(other, (tuple, list)):
            return self.x * other[0] + self.y * other[1]
        else:
            return NotImplemented

    def __add__(self, other):
        if isinstance(other, Vector):
            return Vector(self.x + other.x, self.y + other.y)
        elif isinstance(other, (tuple, list)):
            return Vector(self.x + other[0], self.y + other[1])
        elif isinstance(other, (int, float)):
            return Vector(self.x + other, self.y + other)
        else:
            return NotImplemented

    def __sub__(self, other):
        if isinstance(other, Vector):
            return Vector(self.x - other.x, self.y - other.y)
        if isinstance(other, (tuple, list)):
            return Vector(self.x - other[0], self.y - other[1])
        elif isinstance(other, (int, float)):
            return Vector(self.x - other, self.y - other)
        else:
            return NotImplemented

    def __rsub__(self, other):
        if isinstance(other, Vector):
            return Vector(other.x - self.x, other.y - self.y)
        elif isinstance(other, (tuple, list)):
            return Vector(other[0] - self.x, other[1] - self.y)
        elif isinstance(other, (int, float)):
            return Vector(other - self.x, other - self.y)
        else:
            return NotImplemented

    def __mul__(self, other):
        if isinstance(other, Vector):
            return Vector(self.x * other.x, self.y * other.y)
        elif isinstance(other, (tuple, list)):
            return Vector(self.x * other[0], self.y * other[1])
        elif isinstance(other, (int, float)):
            return Vector(self.x * other, self.y * other)
        else:
            return NotImplemented

    def __div__(self, other):
        if isinstance(other, Vector):
            return Vector(self.x / other.x, self.y / other.y)
        elif isinstance(other, (tuple, list)):
            return Vector(self.x / other[0], self.y / other[1])
        elif isinstance(other, (int, float)):
            return Vector(self.x / other, self.y / other)
        else:
            return NotImplemented

    # Python 3 dispatches / to __truediv__, never __div__.
    __truediv__ = __div__

    def __rdiv__(self, other):
        if isinstance(other, Vector):
            return Vector(other.x / self.x, other.y / self.y)
        elif isinstance(other, (tuple, list)):
            return Vector(other[0] / self.x, other[1] / self.y)
        elif isinstance(other, (int, float)):
            return Vector(other / self.x, other / self.y)
        else:
            return NotImplemented

    __rtruediv__ = __rdiv__

    def __pow__(self, other):
        if isinstance(other, (int, float)):
            return Vector(self.x ** other, self.y ** other)
        else:
            return NotImplemented

    def __iadd__(self, other):
        if isinstance(other, Vector):
            self.x += other.x
            self.y += other.y
            return self
        elif isinstance(other, (tuple, list)):
            self.x += other[0]
            self.y += other[1]
            return self
        elif isinstance(other, (int, float)):
            self.x += other
            self.y += other
            return self
        else:
            return NotImplemented

    def __isub__(self, other):
        if isinstance(other, Vector):
            self.x -= other.x
            self.y -= other.y
            return self
        elif isinstance(other, (tuple, list)):
            self.x -= other[0]
            self.y -= other[1]
            return self
        elif isinstance(other, (int, float)):
            self.x -= other
            self.y -= other
            return self
        else:
            return NotImplemented

    def __imul__(self, other):
        if isinstance(other, Vector):
            self.x *= other.x
            self.y *= other.y
            return self
        elif isinstance(other, (tuple, list)):
            self.x *= other[0]
            self.y *= other[1]
            return self
        elif isinstance(other, (int, float)):
            self.x *= other
            self.y *= other
            return self
        else:
            return NotImplemented

    def __idiv__(self, other):
        if isinstance(other, Vector):
            self.x /= other.x
            self.y /= other.y
            return self
        elif isinstance(other, (tuple, list)):
            self.x /= other[0]
            self.y /= other[1]
            return self
        elif isinstance(other, (int, float)):
            self.x /= other
            self.y /= other
            return self
        else:
            return NotImplemented

    __itruediv__ = __idiv__

    def __ipow__(self, other):
        if isinstance(other, (int, float)):
            self.x **= other
            self.y **= other
            return self
        else:
            return NotImplemented

    def __ne__(self, other):
        if isinstance(other, Vector):
            return self.x != other.x or self.y != other.y
        else:
            return NotImplemented

    def __gt__(self, other):
        if isinstance(other, Vector):
            return self.getLength() > other.getLength()
        else:
            return NotImplemented

    def __ge__(self, other):
        if isinstance(other, Vector):
            return self.getLength() >= other.getLength()
        else:
            return NotImplemented

    def __lt__(self, other):
        if isinstance(other, Vector):
            return self.getLength() < other.getLength()
        else:
            return NotImplemented

    def __le__(self, other):
        if isinstance(other, Vector):
            return self.getLength() <= other.getLength()
        else:
            return NotImplemented

    def __eq__(self, other):
        if isinstance(other, Vector):
            return self.x == other.x and self.y == other.y
        else:
            return NotImplemented

    def __len__(self):
        # Truncated integer length, so len() gets an int as required.
        return int(sqrt(self.x ** 2 + self.y ** 2))

    def getLength(self):
        """Exact (float) Euclidean length."""
        return sqrt(self.x ** 2 + self.y ** 2)

    def __getitem__(self, key):
        # Accept both index-style (0/1) and name-style ('x'/'y') lookups.
        if key == "x" or key == "X" or key == 0 or key == "0":
            return self.x
        elif key == "y" or key == "Y" or key == 1 or key == "1":
            return self.y

    def __repr__(self):
        return "[x:{0},y:{1}]".format(self.x, self.y)

    def __neg__(self):
        return Vector(-self.x, -self.y)
| [
"chavezchendy@gmail.com"
] | chavezchendy@gmail.com |
5081f40832a7f517c7158063156294613b41d58e | b84bf023fc27c8b4c800c9397f0dfb0a36025e4b | /share/static/i18n.py | c3ccb7b3de5d8cedc1631eb9cfb3da8efa1ff69e | [
"MIT-0"
] | permissive | gracious-tech/track | e95ccc619f42384d0007674ca6eb9e779ccadfc1 | 859804b000222af77ee1a52b5eb25fd93a8cc90a | refs/heads/master | 2023-06-25T01:04:12.197270 | 2023-06-09T00:37:39 | 2023-06-09T00:37:39 | 206,684,739 | 7 | 0 | MIT | 2022-12-08T06:47:41 | 2019-09-06T01:05:50 | TypeScript | UTF-8 | Python | false | false | 585 | py |
I18N = {
# WARN Check any added strings don't overflow sidebar width
'en': {
# Used in image
'percent_heading': "Current readthrough",
'recently_heading': "Recently finished",
'currently_heading': "Currently reading",
# Used in html
'page_title': "Bible reading progress @ track.bible",
'page_description': "Keep track of your Bible reading, and complete it at your own pace",
'page_button': "OPEN APP",
}
}
def get_strings(locale):
if locale not in I18N:
locale = 'en'
return I18N[locale]
| [
"noreply@gracious.tech"
] | noreply@gracious.tech |
a8ccd4938b7833a0c65b7d17b12e9b858fdf862c | b1562c178375809b7460b15c5cef266eec47b8d9 | /dailyacitivities/dailyacitivities/urls.py | 0f8794ff1349809a4bea409a060a12760209d65f | [] | no_license | viswanath27/dailyactivities | ddeccc900e189a0aed1df82ffb7d8177d708abe6 | 1483158e35f5e1a8ca40281d02fa760ad6b5954a | refs/heads/master | 2022-04-11T19:19:43.006451 | 2020-01-21T16:07:21 | 2020-01-21T16:07:21 | 233,562,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | """dailyacitivities URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# URL routing table: only the Django admin site is exposed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"noreply@github.com"
] | viswanath27.noreply@github.com |
9cc28d9f4c07c4648dc57207f4e8201627ae8aed | 1b9075ffea7d4b846d42981b41be44238c371202 | /2008/devel/programming/libs/libdbf/actions.py | 242da707fa8723753f2298926612cdf827675c4e | [] | no_license | pars-linux/contrib | bf630d4be77f4e484b8c6c8b0698a5b34b3371f4 | 908210110796ef9461a1f9b080b6171fa022e56a | refs/heads/master | 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
WorkDir = "libdbf"
def setup():
    """Prepare the unpacked source tree and run ./configure."""
    # The upstream tarball ships these scripts without the executable bit.
    shelltools.chmod("configure")
    shelltools.chmod("install-sh")
    # Swap the docbook-to-man invocation for the docbook2man tool.
    pisitools.dosed("configure","docbook-to-man","docbook2man")
    autotools.rawConfigure("--prefix=/usr \
                            --disable-static")
def build():
    """Compile with the default make target."""
    autotools.make()
def install():
    """Install the build products, Turkish translation and documentation."""
    autotools.install()
    pisitools.domo("po/tr.po", "tr", "libdbf.mo")
    pisitools.insinto("/usr/share/doc/%s" % get.srcTAG(),"man/html")
    pisitools.dodoc("ChangeLog", "COPYING", "README")
| [
"MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2"
] | MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2 |
9cd3be7836070f97da48bb5a27fe23846acf509d | 8cc547e9b1285b885f4d2c4320f3b5416c37094d | /proyecto1/testimonios/migrations/0003_auto_20190426_1231.py | 7742ac23426d2eb4110db090baa3ff9ecfeb36a6 | [] | no_license | programaciondb/Django-BootsrapProject | 0f7122142c7c7aadf74fff4a3101be34b1fa16c7 | 8e95c3f238ffec940cbc73c262a7abf6a28444dc | refs/heads/master | 2020-06-20T21:13:41.557227 | 2019-07-16T19:07:10 | 2019-07-16T19:07:10 | 197,251,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # Generated by Django 2.2 on 2019-04-26 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: attach Spanish verbose names
    ('Creado' / 'Actualizado') to Testimonio's timestamp fields."""

    dependencies = [
        ('testimonios', '0002_auto_20190426_1228'),
    ]
    operations = [
        migrations.AlterField(
            model_name='testimonio',
            name='created',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Creado'),
        ),
        migrations.AlterField(
            model_name='testimonio',
            name='updated',
            field=models.DateTimeField(auto_now=True, verbose_name='Actualizado'),
        ),
    ]
| [
"noreply@github.com"
] | programaciondb.noreply@github.com |
b4a09ce335b1af8c297f6a936a94a0159504d5ee | 3fb1b01aa17af71b8c456230474a2c0be2239af7 | /venv/bin/cwutil | c98640220fdb6d4c85f06555c2ad2ccb451a4dfb | [] | no_license | Playron/tdt4117_Assignment3 | 0a0adf7739ca8dd1b42f0cba7ea9ea093c510059 | d481f079d0fa2c0bfcfd304bd31da321bb064cce | refs/heads/master | 2020-08-07T10:23:34.711338 | 2019-10-16T16:38:52 | 2019-10-16T16:38:52 | 213,410,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,092 | #!/Users/perhaagensen/PycharmProjects/tdt4117Oving3/venv/bin/python
# Author: Chris Moyer <cmoyer@newstex.com>
# Description: CloudWatch Utility
# For listing stats, creating alarms, and managing
# other CloudWatch aspects
import boto
cw = boto.connect_cloudwatch()
from datetime import datetime, timedelta
def _parse_time(time_string):
"""Internal function to parse a time string"""
def _parse_dict(d_string):
result = {}
if d_string:
for d in d_string.split(","):
d = d.split(":")
result[d[0]] = d[1]
return result
def ls(namespace=None):
    """
    List metrics, optionally filtering by a specific namespace

    namespace: Optional Namespace to filter on
    """
    # NOTE: Python 2 print statements; this script is Python-2-only.
    print "%-10s %-50s %s" % ("Namespace", "Metric Name", "Dimensions")
    print "-"*80
    for m in cw.list_metrics():
        # Upper-case the filter so e.g. 'ec2' matches 'AWS/EC2'.
        if namespace is None or namespace.upper() in m.namespace:
            print "%-10s %-50s %s" % (m.namespace, m.name, m.dimensions)
def stats(namespace, metric_name, dimensions=None, statistics="Average", start_time=None, end_time=None, period=60, unit=None):
    """
    Lists the statistics for a specific metric

    namespace: The namespace to use, usually "AWS/EC2", "AWS/SQS", etc.
    metric_name: The name of the metric to track, pulled from `ls`
    dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
    statistics: The statistics to measure, defaults to "Average"
        'Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'
    start_time: Start time, default to now - 1 day
    end_time: End time, default to now
    period: Period/interval for counts, default to 60 minutes
    unit: Unit to track, default depends on what metric is being tracked
    """
    # Parse the dimensions
    dimensions = _parse_dict(dimensions)
    # Parse the times, defaulting to the last 24 hours.
    if end_time:
        end_time = _parse_time(end_time)
    else:
        end_time = datetime.utcnow()
    if start_time:
        start_time = _parse_time(start_time)
    else:
        start_time = datetime.utcnow() - timedelta(days=1)
    print "%-30s %s" % ('Timestamp', statistics)
    print "-"*50
    # Collect datapoints keyed by timestamp, then print chronologically.
    data = {}
    for m in cw.get_metric_statistics(int(period), start_time, end_time, metric_name, namespace, statistics, dimensions, unit):
        data[m['Timestamp']] = m[statistics]
    keys = data.keys()
    keys.sort()
    for k in keys:
        print "%-30s %s" % (k, data[k])
def put(namespace, metric_name, dimensions=None, value=None, unit=None, statistics=None, timestamp=None):
    """
    Publish custom metrics

    namespace: The namespace to use; values starting with "AWS/" are reserved
    metric_name: The name of the metric to update
    dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
    value: The value to store, mutually exclusive with `statistics`
    statistics: The statistics to store, mutually exclusive with `value`
        (must specify all of "Minimum", "Maximum", "Sum", "SampleCount")
    timestamp: The timestamp of this measurement, default is current server time
    unit: Unit to track, default depends on what metric is being tracked
    """
    # Each argument may carry several ';'-separated entries to publish a
    # batch of datapoints; simplify() unwraps single-entry lists back to
    # a scalar so the common one-datapoint case keeps its old behaviour.
    def simplify(lst):
        return lst[0] if len(lst) == 1 else lst
    print cw.put_metric_data(namespace, simplify(metric_name.split(';')),
        dimensions = simplify(map(_parse_dict, dimensions.split(';'))) if dimensions else None,
        value = simplify(value.split(';')) if value else None,
        statistics = simplify(map(_parse_dict, statistics.split(';'))) if statistics else None,
        timestamp = simplify(timestamp.split(';')) if timestamp else None,
        unit = simplify(unit.split(';')) if unit else None)
def help(fnc=None):
    """
    Print help message, optionally about a specific function
    """
    import inspect
    # Introspect this module: every public top-level function is a subcommand.
    self = sys.modules['__main__']
    if fnc:
        try:
            cmd = getattr(self, fnc)
        except:
            cmd = None
        if not inspect.isfunction(cmd):
            print "No function named: %s found" % fnc
            sys.exit(2)
        # Build a usage line from the function's own argument names.
        (args, varargs, varkw, defaults) = inspect.getargspec(cmd)
        print cmd.__doc__
        print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args]))
    else:
        print "Usage: cwutil [command]"
        for cname in dir(self):
            if not cname.startswith("_") and not cname == "cmd":
                cmd = getattr(self, cname)
                if inspect.isfunction(cmd):
                    doc = cmd.__doc__
                    print "\t%s - %s" % (cname, doc)
        sys.exit(1)
if __name__ == "__main__":
    import sys
    self = sys.modules['__main__']
    # The first CLI argument names a function in this module; remaining
    # arguments are passed through positionally. No argument -> help.
    if len(sys.argv) >= 2:
        try:
            cmd = getattr(self, sys.argv[1])
        except:
            cmd = None
        args = sys.argv[2:]
    else:
        cmd = help
        args = []
    if not cmd:
        cmd = help
    try:
        cmd(*args)
    except TypeError as e:
        # Wrong argument count for the chosen subcommand: show its usage.
        print e
        help(cmd.__name__)
| [
"pdhaagensen@gmail.com"
] | pdhaagensen@gmail.com | |
bc7516acc082e5094019d93d309b7b2de24a0d03 | 3af4cdd6d0ea352a82e7126444c321578bfa96e1 | /web_flask/0-hello_route.py | aabef345b768c68ba04374650634e608b4c3d781 | [] | no_license | pasignature/AirBnB_clone_v2 | 7388968c924ddaa024d8b5de9797568b22cbca84 | 7a5c816e76d6442bf6bd5b8f0a00e1c267599036 | refs/heads/master | 2022-12-19T09:33:03.606231 | 2020-08-30T21:42:01 | 2020-08-30T21:42:01 | 288,272,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #!/usr/bin/python3
"""runs an app with Flask"""
from flask import Flask
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def hello_hbnb():
    """Respond to GET / with the plain greeting string."""
    greeting = 'Hello HBNB!'
    return greeting
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
| [
"pasignature@gmail.com"
] | pasignature@gmail.com |
cdbe41c2ec761eb560f3450e4eafcb73d802900a | cecd61903943d9f25f37605a344b1683ee958b11 | /what_is_food/config/desktop.py | 06f7e215806a55310c988083ea653bb469f998b8 | [
"MIT"
] | permissive | ashish-greycube/what_is_food | 1f31ce461f97f8d4dccbbd078eb2190a4f785caa | 6c4a327a721accf86667cc87c0b2976dcd09abe6 | refs/heads/master | 2022-12-15T06:07:07.137326 | 2020-08-27T04:05:21 | 2020-08-27T04:05:21 | 273,717,466 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Return the desktop icon configuration for the What Is Food module."""
    module_icon = {
        "module_name": "What Is Food",
        "color": "yellow",
        "icon": "octicon octicon-device-mobile",
        "type": "module",
        "label": _("What Is Food"),
    }
    return [module_icon]
| [
"mr.ashish.shah@gmail.com"
] | mr.ashish.shah@gmail.com |
2f4826d4a50897a095ca333682e37d6d4c270c40 | 413450abb1b1b13fd432e41a8a048607aa1b2fc7 | /db/models/__init__.py | b5d02aaf058a5c70fc7d526d92838c29a717bede | [] | no_license | RubenBejanyan/real_estate_app | ea57157517633fc1903e972227db01c7b5f49b3b | afb0a6dba113ea9afbe0cd2359fb08c09b30e012 | refs/heads/main | 2023-05-08T11:10:44.862127 | 2021-06-01T10:22:56 | 2021-06-01T10:22:56 | 358,673,556 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from .db_flask_sql import db
from .apartment import Apartment
from .city import City
from .currency import Currency | [
"bejanyanruben@gmail.com"
] | bejanyanruben@gmail.com |
422cf42aadee1922388e66fd743cda78df05f826 | a901ec459b38cfc0ed2bb3e0ed711dcca3c850c3 | /appium/migrations/0008_auto_20170317_1324.py | c9db2a5b5c2b449e7d2ef2a05157766eab2510d5 | [] | no_license | balavnc/TestConsoleNew | c265999517c2852d1e408849da073de93452673c | 5a1391ed60e7e0a2a9a636990f86d49b68252d5a | refs/heads/master | 2021-01-23T07:16:04.565965 | 2017-07-06T06:26:59 | 2017-07-06T06:26:59 | 86,418,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: restrict the device_type and
    os_type CharFields to the 'Android' / 'IOS' choice set."""

    dependencies = [
        ('appium', '0007_auto_20170317_1322'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appiumdevices',
            name='device_type',
            field=models.CharField(max_length=10, choices=[(b'Android', b'Android'), (b'IOS', b'IOS')]),
        ),
        migrations.AlterField(
            model_name='appiumos',
            name='os_type',
            field=models.CharField(max_length=10, choices=[(b'Android', b'Android'), (b'IOS', b'IOS')]),
        ),
    ]
| [
"balasubramanian_n@hcl.com"
] | balasubramanian_n@hcl.com |
e0c0f5874c310c08d6aae5b8963709dc2a7f55f7 | 061c36c4b33dd0c47d9d62c2057559d4c5973681 | /validate_csv.py | 2e7d2f78cb711aa32b69265e49916552cf28ae42 | [
"MIT"
] | permissive | ashkankamyab/DevOps-Python-tools | 0847f9e1b74d7864d17b0a9833beeef1f149e5a5 | dc4b1ce2b2fbee3797b66501ba3918a900a79769 | refs/heads/master | 2022-10-09T15:23:31.108086 | 2022-09-01T14:32:56 | 2022-09-01T14:32:56 | 189,855,037 | 1 | 0 | NOASSERTION | 2019-06-02T14:15:18 | 2019-06-02T14:15:18 | null | UTF-8 | Python | false | false | 10,977 | py | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2015-12-22 23:25:25 +0000 (Tue, 22 Dec 2015)
#
# https://github.com/HariSekhon/DevOps-Python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback
# to help improve or steer this or other code I publish
#
# https://www.linkedin.com/in/HariSekhon
#
"""
CSV Validator Tool
Validates each file passed as an argument
Directories are recursed, checking all files ending in a .csv suffix.
Works like a standard unix filter program - if no files are passed as arguments or '-' is given then reads
from standard input
This is not as good as the other validate_*.py programs in this repo as the others have clearer syntactic structure
to check. CSV/TSV has higher variation with delimiters, quote characters etc. If delimiters and quotechars are not
specified it'll try to infer the structure but I've had to add a few heuristics to invalidate files which otherwise
pass python csv module's inference including json and yaml files which we don't accept.
Explicitly using the --delimiter option will disable the inference which is handy if it's
allowing through non-csv files, you don't want to accept other delimited files such as TSV files etc.
This may be fine for simple purposes but for a better validation tool with more options see:
https://pythonhosted.org/chkcsv/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# this causes csvreader TypeError: the "delimiter" must be an 1-character string
# from __future__ import unicode_literals
import csv
import os
import re
import sys
libdir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'pylib'))
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import die, ERRORS, log_option, uniq_list_ordered, log, isChars, validate_regex
from harisekhon import CLI
except ImportError as _:
print('module import failed: %s' % _, file=sys.stderr)
print("Did you remember to build the project by running 'make'?", file=sys.stderr)
print("Alternatively perhaps you tried to copy this program out without it's adjacent libraries?", file=sys.stderr)
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.10.0'
class CsvValidatorTool(CLI):
    """CLI tool that validates files (or stdin) as CSV.

    Walks directories for *.csv files, parses each with the csv module
    (either with an explicit delimiter/quotechar or via Sniffer
    inference) and applies extra heuristics to reject non-CSV content.
    """

    def __init__(self):
        # Python 2.x
        super(CsvValidatorTool, self).__init__()
        # Python 3.x
        # super().__init__()
        self.filename = None
        # self.delimiter = ','
        # self.quotechar = '"'
        # allow CSV module inference - this way user can choose to explicitly specify --delimiter=, --quotechar='"'
        # or allow to try to infer itself
        self.delimiter = None
        self.quotechar = None
        # Files matching this suffix are validated during directory walks.
        self.re_csv_suffix = re.compile(r'.*\.csv$', re.I)
        self.valid_csv_msg = '<unknown> => CSV OK'
        self.invalid_csv_msg = '<unknown> => CSV INVALID'
        # Set True on any failure so run() can exit CRITICAL at the end.
        self.failed = False
        self.exclude = None

    def add_options(self):
        """Register the CLI options (framework hook)."""
        # do not leave as None to infer per line, it'll split a single word line like 'blah' => ['b', 'ah']
        # and there is no way to detect it only had one field
        self.add_opt('-d', '--delimiter', default=',',
                     help='Delimiter to test (default: comma)')
        self.add_opt('-q', '--quotechar', default=self.quotechar,
                     help='Quotechar to test (default: None)')
        # self.add_opt('-p', '--print', action='store_true',
        #              help='Print the CSV lines(s) which are valid, else print nothing (useful for shell ' +
        #                   'pipelines). Exit codes are still 0 for success, or %s for failure'
        #                   % ERRORS['CRITICAL'])
        self.add_opt('-e', '--exclude', metavar='regex', default=os.getenv('EXCLUDE'),
                     help='Regex of file / directory paths to exclude from checking ($EXCLUDE)')

    def process_options(self):
        """Validate and compile the --exclude regex (framework hook)."""
        self.exclude = self.get_opt('exclude')
        if self.exclude:
            validate_regex(self.exclude, 'exclude')
            self.exclude = re.compile(self.exclude, re.I)

    def is_excluded(self, path):
        """Return True if path matches the --exclude regex."""
        if self.exclude and self.exclude.search(path):
            log.debug("excluding path: %s", path)
            return True
        return False

    def process_csv(self, filehandle):
        """Parse one open file handle as CSV; return True if it passes.

        Uses the explicit delimiter/quotechar when given, otherwise lets
        csv.Sniffer infer the dialect, then applies heuristics to reject
        single-field lines and JSON-looking content.
        """
        csvreader = None
        try:
            if self.delimiter is not None:
                try:
                    csvreader = csv.reader(filehandle, delimiter=self.delimiter, quotechar=self.quotechar)
                except TypeError as _:
                    self.usage(_)
            else:
                # dialect = csv.excel
                dialect = csv.Sniffer().sniff(filehandle.read(1024))
                # this will raise an Error if invalid
                dialect.strict = True
                filehandle.seek(0)
                csvreader = csv.reader(filehandle, dialect)
        except csv.Error as _:
            log.warning('file %s: %s', self.filename, _)
            return False
        count = 0
        try:
            # csvreader doesn't seem to generate any errors ever :-(
            # csv module allows entire lines of json/xml/yaml to go in as a single field
            # Adding some invalidations manually
            for field_list in csvreader:
                # list of fields with no separator information
                # log.debug("line: %s", _)
                # make it fail if there is only a single field on any line
                if len(field_list) < 2:
                    return False
                # it's letting JSON through :-/
                if field_list[0] == '{':
                    return False
                # extra protection along the same lines as anti-json:
                # the first char of field should be alphanumeric, not syntax
                # however instead of isAlnum allow quotes for quoted CSVs to pass validation
                if not isChars(field_list[0][0], 'A-Za-z0-9\'"'):
                    return False
                count += 1
        except csv.Error as _:
            log.warning('file %s, line %s: %s', self.filename, csvreader.line_num, _)
            return False
        if count == 0:
            log.debug('zero lines detected, blank input is not valid CSV')
            return False
        log.debug('%s CSV lines passed', count)
        return True

    def check_csv(self, filehandle):
        """Print OK for a valid handle; record failure and die otherwise."""
        if self.process_csv(filehandle):
            # if self.get_opt('print'):
            #     print(content, end='')
            # else:
            #     print(self.valid_csv_msg)
            print(self.valid_csv_msg)
        else:
            self.failed = True
            # if not self.get_opt('print'):
            #     if self.verbose > 2:
            #         try:
            #         except csv.Error as _:
            #             if not self.get_opt('print'):
            #                 print(_)
            #     die(self.invalid_csv_msg)
            die(self.invalid_csv_msg)

    def run(self):
        """Main entry point: resolve options and check every argument path."""
        self.delimiter = self.get_opt('delimiter')
        self.quotechar = self.get_opt('quotechar')
        log_option('delimiter', self.delimiter)
        log_option('quotechar', self.quotechar)
        # No arguments means read from stdin, like a standard unix filter.
        if not self.args:
            self.args.append('-')
        args = uniq_list_ordered(self.args)
        # First pass: verify all paths exist before checking any of them.
        for arg in args:
            if arg == '-':
                continue
            if not os.path.exists(arg):
                print("'{0}' not found".format(arg))
                sys.exit(ERRORS['CRITICAL'])
            if os.path.isfile(arg):
                log_option('file', arg)
            elif os.path.isdir(arg):
                log_option('directory', os.path.abspath(arg))
            else:
                die("path '{0}' could not be determined as either a file or directory".format(arg))
        for arg in args:
            self.check_path(arg)
        if self.failed:
            sys.exit(ERRORS['CRITICAL'])

    def check_path(self, path):
        """Dispatch a path to check_file() or a recursive directory walk."""
        if path == '-' or os.path.isfile(path):
            self.check_file(path)
        elif os.path.isdir(path):
            self.walk(path)
        else:
            die("failed to determine if path '%s' is file or directory" % path)

    # don't need to recurse when using walk generator
    def walk(self, path):
        """Walk a directory tree, checking every *.csv file found."""
        if self.is_excluded(path):
            return
        for root, dirs, files in os.walk(path, topdown=True):
            # modify dirs in place to prune descent for increased efficiency
            # requires topdown=True
            # calling is_excluded() on joined root/dir so that things like
            # '/tests/spark-\d+\.\d+.\d+-bin-hadoop\d+.\d+' will match
            dirs[:] = [d for d in dirs if not self.is_excluded(os.path.join(root, d))]
            for filename in files:
                file_path = os.path.join(root, filename)
                if self.re_csv_suffix.match(file_path):
                    self.check_file(file_path)

    def check_file(self, filename):
        """Check a single file ('-' for stdin), setting the result messages."""
        self.filename = filename
        if self.filename == '-':
            self.filename = '<STDIN>'
        self.valid_csv_msg = '%s => CSV OK' % self.filename
        self.invalid_csv_msg = '%s => CSV INVALID' % self.filename
        if self.filename == '<STDIN>':
            log.debug('checking stdin')
            self.check_csv(sys.stdin)
        else:
            if self.is_excluded(filename):
                return
            log.debug('checking %s', self.filename)
            try:
                with open(self.filename) as iostream:
                    self.check_csv(iostream)
            except IOError as _:
                die("ERROR: %s" % _)
if __name__ == '__main__':
CsvValidatorTool().main()
# =========================================================================== #
# borrowed and tweaked from Python standard library:
# https://docs.python.org/2/library/csv.html
# import codecs
# import cStringIO
# class UTF8Recoder(object):
# """
# Iterator that reads an encoded stream and reencodes the input to UTF-8
# """
# def __init__(self, _, encoding):
# self.reader = codecs.getreader(encoding)(_)
#
# def __iter__(self):
# return self
#
# def next(self):
# return self.reader.next().encode("utf-8")
#
#
# class UnicodeReader(object):
# """
# A CSV reader which will iterate over lines in the CSV filehandle,
# which is encoded in the given encoding.
# """
#
# def __init__(self, _, dialect=csv.excel, encoding="utf-8", **kwargs):
# _ = UTF8Recoder(_, encoding)
# self.reader = csv.reader(_, dialect=dialect, **kwargs)
#
# def next(self):
# row = self.reader.next()
# return [unicode(s, "utf-8") for s in row]
#
# def __iter__(self):
# return self
| [
"harisekhon@gmail.com"
] | harisekhon@gmail.com |
40bf69fc32a19fddc23cf0e29fdc8fc40c238709 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /nlp/dialogue_generation/cpm/pytorch/iluvatar/cpm/config/layers/self_multihead_attn.py | 55be679404c3ac3d70ad62e15e3d9ac7aa90f005 | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 3,926 | py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from layers.self_multihead_attn_func import self_attn_func
from layers.fast_self_multihead_attn_func import fast_self_attn_func
from apex.normalization.fused_layer_norm import FusedLayerNorm
class SelfMultiheadAttn(nn.Module):
    """Multi-headed self-attention.

    See "Attention Is All You Need" for more details.

    Holds separate Q/K/V projection weights plus an output projection and
    dispatches the actual attention computation to one of two functional
    backends: an apex-style fused kernel (``impl='fast'``) or a reference
    implementation (``impl='default'``).
    """

    def __init__(self, embed_dim, num_heads, dropout=0., bias=False, impl='fast'):
        """
        :param embed_dim: total model dimension; split evenly across heads
        :param num_heads: number of attention heads (must divide embed_dim)
        :param dropout: dropout probability forwarded to the attention backend
        :param bias: stored on the module, but NOTE(review): the bias
            parameters below are created and passed to the backends
            unconditionally -- confirm whether ``bias=False`` was ever
            meant to disable them.
        :param impl: 'fast' (fused kernel) or 'default' (reference impl)
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * \
            num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.bias = bias
        self.impl = impl
        # 1/sqrt(head_dim); only handed to the 'default' backend below --
        # the 'fast' path does not receive it.
        self.scaling = self.head_dim**-0.5
        # Separate (embed_dim, embed_dim) projections for Q, K, V and output.
        # Note: torch.Tensor(...) allocates uninitialised storage; values are
        # set by reset_parameters() below.
        self.q_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.k_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.v_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.q_bias = Parameter(torch.Tensor(embed_dim))
        self.k_bias = Parameter(torch.Tensor(embed_dim))
        self.v_bias = Parameter(torch.Tensor(embed_dim))
        self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
        self.reset_parameters()
        if impl == 'fast':
            self.attn_func = fast_self_attn_func
        elif impl == 'default':
            self.attn_func = self_attn_func
        else:
            assert False, "Unsupported impl: {} !".format(impl)

    def reset_parameters(self):
        """Xavier-uniform init for all projection weights; zero all biases."""
        nn.init.xavier_uniform_(self.q_weight)
        nn.init.xavier_uniform_(self.k_weight)
        nn.init.xavier_uniform_(self.v_weight)
        nn.init.xavier_uniform_(self.out_proj_weight)
        nn.init.constant_(self.q_bias, 0.)
        nn.init.constant_(self.k_bias, 0.)
        nn.init.constant_(self.v_bias, 0.)
        nn.init.constant_(self.out_proj_bias, 0.)

    def forward(self, query, attn_mask=None, is_training=True):
        """Input shape: Time x Batch x Channel

        Self-attention can be implemented by passing in the same arguments for
        query, key and value. Future timesteps can be masked with the
        `mask_future_timesteps` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.

        NOTE(review): the mask-related parameters named above do not appear in
        this signature; only ``attn_mask`` is accepted and forwarded to the
        backend -- confirm the intended masking semantics against the backend
        functions.

        :param query: input tensor used as query, key and value
        :param attn_mask: optional mask forwarded to the backend; ``None``
            disables masking
        :param is_training: enables dropout inside the backend
        :return: output of the selected attention function
        """
        mask = attn_mask
        # Interleave Q/K/V weights head by head: cat along dim=1 of three
        # (num_heads, 1, head_dim, embed_dim) views gives
        # (num_heads, 3, head_dim, embed_dim), flattened to a single
        # (3*embed_dim, embed_dim) matrix whose rows are ordered
        # [q_h0, k_h0, v_h0, q_h1, ...] -- the layout the backends consume.
        input_weights = torch.cat([self.q_weight.view(self.num_heads, 1, self.head_dim, self.embed_dim), self.k_weight.view(self.num_heads, 1, self.head_dim,
                                                                                                                            self.embed_dim), self.v_weight.view(self.num_heads, 1, self.head_dim, self.embed_dim)], dim=1).reshape(3*self.embed_dim, self.embed_dim).contiguous()
        # Same head-interleaved layout for the biases: shape (3*embed_dim,).
        input_bias = torch.cat([self.q_bias.view(self.num_heads, 1, self.head_dim), self.k_bias.view(
            self.num_heads, 1, self.head_dim), self.v_bias.view(self.num_heads, 1, self.head_dim)], dim=1).reshape(3*self.embed_dim).contiguous()
        if self.impl == 'fast':
            # Fast path: no explicit scaling argument is passed.
            outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, query,
                                     input_weights, self.out_proj_weight, input_bias, self.out_proj_bias, mask, False, self.dropout)
        else:
            # Reference path additionally receives self.scaling.
            outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, self.scaling, query,
                                     input_weights, self.out_proj_weight,
                                     input_bias, self.out_proj_bias,
                                     mask, False, self.dropout)
        return outputs
| [
"jia.guo@iluvatar.ai"
] | jia.guo@iluvatar.ai |
f52bf406ca44a3c1069a2d0b80f6794f749d1cf2 | a6562be99956a631d85dc0a13bae03f60dd3c5c3 | /Algorithm/chap10/Radix Sort Algorithm.py | be9111724b8742d40b945a2db668137c440f449f | [] | no_license | ghkdwl1203/python-ml | b6cffaeda0a732f0b0e9aa2065d5ef52bf986dc9 | dceff1a4ee140e97ba93ef40a01d9e0f295a3a74 | refs/heads/master | 2020-04-15T01:25:10.340164 | 2019-10-28T10:08:29 | 2019-10-28T10:08:29 | 164,275,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | from math import log10
from random import randint
def get_digit(number, base, pos):
    """Return digit number *pos* (0 = least significant) of *number* in *base*."""
    place_value = base ** pos
    return (number // place_value) % base
def prefix_sum(array):
    """Replace each element of *array* with the running (inclusive) sum.

    Mutates *array* in place and returns the same list for convenience.
    """
    total = 0
    for index, value in enumerate(array):
        total += value
        array[index] = total
    return array
def radixsort(l, base=10):
    """Sort non-negative integers with an LSD radix sort.

    Runs one stable counting-sort pass per digit, least-significant digit
    first.  Returns a new sorted list; the input list is left unmodified.

    Bug fixes vs. the original version:
      * the number of digit passes was computed as ``int(log10(max(l)) + 1)``
        regardless of ``base``, which under-counts digits for any base < 10
        and produced wrongly sorted output for e.g. ``base=2``;
      * an empty input crashed on ``max([])`` (and an all-zero input on
        ``log10(0)``).

    :param l: list of non-negative ints (negatives are not supported)
    :param base: radix to sort in; must be >= 2
    :return: a new list with the elements of ``l`` in ascending order
    """
    if not l:
        return []
    # Count how many base-`base` digits the largest value has (>= 1, even
    # for 0).  Pure integer arithmetic avoids float log rounding errors.
    biggest = max(l)
    passes = 1
    while biggest >= base:
        biggest //= base
        passes += 1
    output = [0] * len(l)
    for pos in range(passes):
        place = base ** pos  # weight of the digit handled in this pass
        # Histogram of the current digit.
        count = [0] * base
        for value in l:
            count[(value // place) % base] += 1
        # Inclusive prefix sums -> one-past-the-end slot for each digit.
        for d in range(1, base):
            count[d] += count[d - 1]
        # Scatter in reverse so equal digits keep their relative order;
        # stability is what makes the multi-pass radix sort correct.
        for value in reversed(l):
            digit = (value // place) % base
            count[digit] -= 1
            output[count[digit]] = value
        l = list(output)  # next pass reads this pass's result
    return output
if __name__ == '__main__':
    # Demo: sort 100 random integers in [1, 9999] and show before/after.
    # Fixes vs. the original: removed the dead `l = []` assignment and
    # renamed the result variable, which shadowed the builtin `sorted`.
    numbers = [randint(1, 9999) for _ in range(100)]
    print("<정렬 전>")
    print(numbers)
    result = radixsort(numbers)
    print("<정렬 후>")
    print(result)
"46411842+ghkdwl1203@users.noreply.github.com"
] | 46411842+ghkdwl1203@users.noreply.github.com |
6a255bb67ec9b498d13f2e3027922810f1ba61e4 | a7c33625436a0fc770b795121b65223aca0920c3 | /django_test/settings.py | f4f8a9cacff3cf705e7b3125a5aba60a9aeff8c9 | [] | no_license | parkslyn/mydjango | c876c304df6808b2b70e475fe6622896c8a540ff | aeb0071c0d9f44f2a87705efdca3115fcfd47938 | refs/heads/master | 2020-04-01T22:51:33.102440 | 2018-10-19T06:26:19 | 2018-10-19T06:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,253 | py | """
Django settings for django_test project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves to the repository root (two levels above this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mk4&fbt2*v)2j)zuy2hgn&=3woxbfd+%&jg9+xul=l%n7c9mng'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Trust the proxy-supplied X-Forwarded-Proto header when deciding whether a
# request is secure.  NOTE(review): this is only safe when the app is always
# deployed behind a proxy that sets/strips this header -- confirm deployment.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'music'  # project-local app
]

# Pre-Django-1.10 middleware setting (this project was generated with 1.9).
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'django_test.urls'

# Only app-level template directories are searched (DIRS is empty).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'django_test.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Development default: file-based SQLite in the repository root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'
| [
"parkslyn@gmail.com"
] | parkslyn@gmail.com |
5e7f0c164fc792ed2978a8a20dbed1f8855921d4 | bd22ab0eef0fa3be5b958772bea0aa6e813cbdd0 | /MyShop/settings.py | 0de109fbde7d2905909fc99e40acca31233d64bf | [] | no_license | DNahian/My-Shop | babcf12fc8b429cbff924d5ee93830b5d0f3f147 | d5f7b5e45e7247be1ea668f49b5cb364d61a62ff | refs/heads/master | 2022-12-03T08:32:04.057352 | 2020-08-20T07:21:15 | 2020-08-20T07:21:15 | 288,934,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | """
Django settings for MyShop project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves to the repository root (two levels above this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cqjglbdqrtx1f!9j%x--v_9(=7xhn@po#0xz!gwza0ywm*ki2!'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'Products.apps.ProductsConfig',  # project-local app
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'MyShop.urls'

# Searches the project-level templates/ directory plus each app's templates.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'MyShop.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Development default: file-based SQLite in the repository root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
| [
"nahiyan313@.gmailcom"
] | nahiyan313@.gmailcom |
33c8c7e6cc382a9dbcd9a3eb49171fbcf67e4e72 | bedf68a6e2bb337d2848a4a55a24c71fd62484c7 | /tests/test_NMT_architectures/bidir_deep_LSTM_ConditionalGRU.py | 0151b7f437481dcaae0d6cdc7546fecde3951030 | [
"MIT"
] | permissive | 19ai/nmt-keras | 941d5bbffe1889d72e4d58ae77fd92d8db3b0df7 | ec56acb619b0c4be0558f737d5d848971fa282db | refs/heads/master | 2020-03-11T16:37:06.633273 | 2018-04-18T11:46:03 | 2018-04-18T11:46:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,746 | py | import argparse
import pytest
from keras import backend as K
from config import load_parameters
from data_engine.prepare_data import build_dataset
from main import train_model, apply_NMT_model
from sample_ensemble import sample_ensemble
from score import score_corpus
def load_tests_params():
    """Return the project's default parameters shrunk for fast unit tests.

    Starts from :func:`load_parameters` and overrides sizes, regularisation
    and epoch counts with tiny values so the models train in seconds.
    """
    hidden_size = 4  # shared by decoder, encoder, attention and skip vectors
    overrides = {
        'BATCH_SIZE': 10,
        'WEIGHT_DECAY': 1e-4,
        'RECURRENT_WEIGHT_DECAY': 1e-4,
        'DROPOUT_P': 0.01,
        'RECURRENT_INPUT_DROPOUT_P': 0.01,
        'RECURRENT_DROPOUT_P': 0.01,
        'USE_NOISE': True,
        'NOISE_AMOUNT': 0.01,
        'USE_BATCH_NORMALIZATION': True,
        'BATCH_NORMALIZATION_MODE': 1,
        'SOURCE_TEXT_EMBEDDING_SIZE': 8,
        'TARGET_TEXT_EMBEDDING_SIZE': 8,
        'DECODER_HIDDEN_SIZE': hidden_size,
        'ENCODER_HIDDEN_SIZE': hidden_size,
        'ATTENTION_SIZE': hidden_size,
        'SKIP_VECTORS_HIDDEN_SIZE': hidden_size,
        'DOUBLE_STOCHASTIC_ATTENTION_REG': 0.7,
        'RELOAD': 0,
        'MAX_EPOCH': 2,
    }
    params = load_parameters()
    params.update(overrides)
    return params
def test_NMT_Bidir_deep_LSTM_ConditionalGRU():
    """End-to-end smoke test: bidirectional 2-layer LSTM encoder with a
    2-layer ConditionalGRU decoder.

    Exercises the full NMT-Keras pipeline: dataset build, training, model
    re-application, ensemble sampling (with and without n-best lists) and
    corpus scoring.
    """
    params = load_tests_params()

    # Current test params: Two-layered LSTM - ConditionalGRU
    params['BIDIRECTIONAL_ENCODER'] = True
    params['N_LAYERS_ENCODER'] = 2
    params['BIDIRECTIONAL_DEEP_ENCODER'] = False
    params['ENCODER_RNN_TYPE'] = 'LSTM'
    params['DECODER_RNN_TYPE'] = 'ConditionalGRU'
    params['N_LAYERS_DECODER'] = 2

    params['REBUILD_DATASET'] = True
    dataset = build_dataset(params)
    # Vocabulary sizes come from the freshly built dataset.
    params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['INPUTS_IDS_DATASET'][0]]
    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]

    # Encode the whole architecture into the model name so each configuration
    # under test stores its artifacts in its own directory.
    params['MODEL_NAME'] = \
        params['TASK_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '_' + params['MODEL_TYPE'] + \
        '_src_emb_' + str(params['SOURCE_TEXT_EMBEDDING_SIZE']) + \
        '_bidir_' + str(params['BIDIRECTIONAL_ENCODER']) + \
        '_enc_' + params['ENCODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_ENCODER']) + '_' + str(
            params['ENCODER_HIDDEN_SIZE']) + \
        '_dec_' + params['DECODER_RNN_TYPE'] + '_*' + str(params['N_LAYERS_DECODER']) + '_' + str(
            params['DECODER_HIDDEN_SIZE']) + \
        '_deepout_' + '_'.join([layer[0] for layer in params['DEEP_OUTPUT_LAYERS']]) + \
        '_trg_emb_' + str(params['TARGET_TEXT_EMBEDDING_SIZE']) + \
        '_' + params['OPTIMIZER'] + '_' + str(params['LR'])
    params['STORE_PATH'] = K.backend() + '_test_train_models/' + params['MODEL_NAME'] + '/'

    # Test several NMT-Keras utilities: train, sample, sample_ensemble, score_corpus...
    train_model(params)
    params['RELOAD'] = 2  # reload the checkpoint written after epoch 2
    apply_NMT_model(params)

    # NOTE(review): attributes are stuffed onto an ArgumentParser instance to
    # mimic a parsed-args namespace; a types.SimpleNamespace would be clearer.
    parser = argparse.ArgumentParser('Parser for unit testing')
    parser.dataset = params['DATASET_STORE_PATH'] + '/Dataset_' + params['DATASET_NAME'] + '_' + params['SRC_LAN'] + params['TRG_LAN'] + '.pkl'
    parser.text = params['DATA_ROOT_PATH'] + '/' + params['TEXT_FILES']['val'] + params['SRC_LAN']
    parser.splits = ['val']
    parser.config = params['STORE_PATH'] + '/config.pkl'
    parser.models = [params['STORE_PATH'] + '/epoch_' + str(2)]
    parser.verbose = 0
    parser.dest = None
    parser.source = params['DATA_ROOT_PATH'] + '/' + params['TEXT_FILES']['val'] + params['SRC_LAN']
    parser.target = params['DATA_ROOT_PATH'] + '/' + params['TEXT_FILES']['val'] + params['TRG_LAN']
    parser.weights = []

    # Exercise both the n-best and the single-best decoding paths.
    for n_best in [True, False]:
        parser.n_best = n_best
        sample_ensemble(parser, params)
        score_corpus(parser, params)
# Allow running this test module directly, outside of a pytest invocation.
if __name__ == '__main__':
    pytest.main([__file__])
| [
"lvapeab@gmail.com"
] | lvapeab@gmail.com |
96af95f675dfa3f4c9a6680800458e6cd0f96d66 | 408f5374f51a00506bed47b8ab242ad5a736f6a4 | /test2.py | 8f1de18d8716023676f4ddf8fae3ee209350331c | [] | no_license | anneaarts/4CK00-Assignment-github- | 96b0f9487d32b1e13f665c250472cacdb168a8c0 | 339a3a989a4904c78f4b3f79835d0aaf825d93b9 | refs/heads/master | 2020-03-19T06:40:38.326504 | 2018-06-04T15:42:20 | 2018-06-04T15:42:20 | 136,044,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 3 16:20:54 2018
@author: s161694
"""
print("Hellllllllo") | [
"anneaarts@live.nl"
] | anneaarts@live.nl |
81e2ac7b48eaae6b4cc377ebf848b672f0e76268 | 9e84117406990da84717bc2a4f70d64207bced7b | /backend/employeeprofile/apps.py | 49c35cff0ebf7d4d329f2d26d9ccd471afc87f58 | [] | no_license | joshhammer/propulsion-final-project | 5063ed5be652f69dd0b6012781614822d9c8d66f | 27c8a8875da591fa3883c1af88fc2a2a5f3bb803 | refs/heads/master | 2023-05-11T01:38:21.326322 | 2020-02-18T18:42:26 | 2020-02-18T18:42:26 | 241,432,633 | 1 | 0 | null | 2023-05-08T01:51:12 | 2020-02-18T18:09:11 | JavaScript | UTF-8 | Python | false | false | 105 | py | from django.apps import AppConfig
class EmployeeprofileConfig(AppConfig):
    """Django application configuration for the ``employeeprofile`` app."""

    # Dotted path Django uses to register this app (see INSTALLED_APPS).
    name = 'employeeprofile'
| [
"rebeccakurup@protonmail.com"
] | rebeccakurup@protonmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.