blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
841ec67c6b8f49086b9082e331023721812821e4 | f9d586d51825ebfe83736f188b0694b58d46f034 | /jobutils.py | 668ab47002237d5c5c7d84514336dc3e381295d6 | [] | no_license | ricardoromanj/datawarehouseetl | d4e50ec6f4e3bec1539d349399efeb2e5a3e2202 | 002255e4177c4430fe07ea8ada17da40f25fd3a2 | refs/heads/master | 2021-09-17T04:53:12.350236 | 2018-06-28T03:44:34 | 2018-06-28T03:44:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,468 | py | import datetime
def printJobStart(jobname):
    """Print the course/project banner and a framed, timestamped start message.

    jobname -- label displayed inside the frame (e.g. the ETL job's name).
    """
    stamp = datetime.datetime.now().strftime("%Y/%m/%d - %H:%M:%S")
    # Static banner text followed by the dynamic, timestamped message line.
    banner = (
        "",
        " ________ _____ ______ ___ ___ ",
        "|\ ____\|\ _ \ _ \|\ \|\ \ ",
        "\ \ \___|\ \ \ \__\ \ \ \ \ \ \ ",
        " \ \ \ \ \ \ |__| \ \ \ \ \ \ ",
        " \ \ \____\ \ \ \ \ \ \ \ \ \ ",
        " \ \_______\ \__\ \ \__\ \_______\ ",
        " \|_______|\|__| \|__|\|_______| ",
        "",
        "CARNEGIE MELLON UNIVERSITY - HEINZ COLLEGE",
        "95797-Z5: DATA WAREHOUSING",
        "Group 8 Project: ",
        "SPARCS (Statewide Planning and Research Cooperative System)",
        "",
        "***************************** ******************************",
        "********************* **********************",
        "************* **************",
        "********* **********",
        " " + jobname + " Started @ " + stamp,
        "********* **********",
        "************* **************",
        "********************* **********************",
        "***************************** ******************************",
        "",
    )
    for line in banner:
        print(line)
def printJobEnd(jobname):
    """Print a framed, timestamped end-of-job message.

    jobname -- label displayed inside the frame; should match the name
    passed to printJobStart for the same run.
    """
    stamp = datetime.datetime.now().strftime("%Y/%m/%d - %H:%M:%S")
    frame_top = (
        "***************************** ******************************",
        "********************* **********************",
        "************* **************",
        "********* **********",
    )
    print("")
    for line in frame_top:
        print(line)
    print(" " + jobname + " Ended @ " + stamp)
    # The bottom half of the frame mirrors the top.
    for line in reversed(frame_top):
        print(line)
    print("")
def printStepStart(stepNo):
    """Print a timestamped marker for the beginning of job step *stepNo*.

    stepNo -- step identifier; may be a string or an int. (Fix: the
    original concatenated stepNo directly into a string and raised
    TypeError when an int was passed; it is now coerced with str().)
    """
    now = datetime.datetime.now()
    print("")
    print("")
    print("# Beginning JS " + str(stepNo) + " @ " + now.strftime("%Y%m%d:%H%M%S"))
    print("###########################################################")
    print("")
| [
"r.rmn92@gmail.com"
] | r.rmn92@gmail.com |
54b7fdf7282cb80922196e6544b75b92be496fc1 | e527e4af59ea73997b919dc6dbc3547ee14e61de | /qr-code/qr-code.py | d7f265ef0222a36d0333dec18a1affcb9184276a | [] | no_license | whywidodo/belajar-python | 4565d0ea2390d25df3aa02769cd3b4447159ee9d | cc48cb7591ea3c9bad70a7044ae8e76c2a34eeef | refs/heads/master | 2023-04-06T13:31:00.221279 | 2021-04-02T10:37:09 | 2021-04-02T10:37:09 | 319,338,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | import qrcode
from PIL import Image  # NOTE(review): Image is never referenced directly; qrcode uses PIL internally -- confirm whether this import is needed

# Build the QR code: version 2 (25x25 modules), highest error-correction
# level (H, ~30% of the code recoverable), 20 px per module, 4-module border.
qr = qrcode.QRCode(
    version=2,
    error_correction=qrcode.constants.ERROR_CORRECT_H,
    box_size=20,
    border=4,
)

# Payload to encode.
data = 'https://karyane.com'
qr.add_data(data)
qr.make(fit=True)  # fit=True lets the library grow the version if the data does not fit

# Render black-on-white, force RGB mode, and write the PNG.
convert = qr.make_image(fill_color="black", back_color="white").convert('RGB')
convert.save("qr-code/hasil.png")  # assumes the qr-code/ directory already exists -- TODO confirm
| [
"mr.wahyuwidodo@gmail.com"
] | mr.wahyuwidodo@gmail.com |
f6df32ec5bcf9f925e01fe21accab9e9c3f90965 | 043720a6e65e0b2e8da8e1798d6c17035440c50e | /bin/uploader.py | 2a6e8b4bcde9c60109d8684f9e3c65429b5201ae | [] | no_license | wsoyinka/certeduploader | ebf830e262ab6c5c1503917545f6b66114cd6c18 | 79c41281201c33eeaa2a87748fb02d5df8199926 | refs/heads/master | 2021-01-13T17:20:54.757465 | 2017-02-13T02:24:24 | 2017-02-13T02:24:24 | 81,776,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | #!/usr/bin/env python
# Thin CLI entry point: delegate all work to the package's upload routine.
import certeduploader.uploader

certeduploader.uploader.upload_file()
| [
"wsoyinka@gmail.com"
] | wsoyinka@gmail.com |
0de72ae1b8a75408d34f1cd297aec4aae70084a1 | f96f2e7593d05ab022c6ad184da61065b224a57d | /16_password_generator.py | 7ba30b3a1d61252b1d776838a6a1ce5218d721f3 | [] | no_license | vlvs/pratusevich-exercises | 62e71529af796159fa1b1434b22c052f38c33c5c | f6930ef3496eb3785abaa30e629ff811b78423f2 | refs/heads/master | 2021-06-24T20:33:10.515755 | 2021-03-18T17:38:15 | 2021-03-18T17:38:15 | 215,191,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,917 | py | '''
Write a password generator. Strong passwords have a mix of lowercase
letters, uppercase letters, numbers, and symbols. The passwords should
be random, a new password should be generated every time the user asks
for a new password. Include your run-time code in a main method.
Extra: Ask the user how strong they want their password to be. For weak
passwords, pick a word or two from a list.
'''
import random
import string
fruits = ["apple", "banana", "lemon", "mango", "orange", "tangerine"]
password_strength_levels = ["1", "2", "3", "weak", "so-so", "strong"]
password_strength_message = '''How strong do you want your password to be?
(1) Weak\n(2) So-So\n(3) Strong\n'''
password_strength_invalid_message = '''Invalid input. Choose '1', '2' or '3':
(1) Weak\n(2) So-So\n(3) Strong\n'''
available_characters = [
string.ascii_lowercase,
string.ascii_uppercase,
string.punctuation,
string.digits
]
def get_password_strength():
    """Prompt until the user supplies a valid strength, then normalize it.

    Sets the module-level ``password_strength`` global to "1", "2" or "3";
    word answers ("weak"/"so-so"/"strong") are mapped to those digits.
    """
    global password_strength
    password_strength = input(password_strength_message)
    # Re-prompt until the answer is one of the accepted digits or words.
    while password_strength.lower() not in password_strength_levels:
        password_strength = input(password_strength_invalid_message)
    # Normalize word answers to their numeric equivalents.
    if password_strength.lower() == "weak": password_strength = "1"
    if password_strength.lower() == "so-so": password_strength = "2"
    if password_strength.lower() == "strong": password_strength = "3"
def generate_password(strength):
    """Return a random password whose composition depends on *strength*.

    strength -- "1" (weak): two random fruit words concatenated;
                "2" (so-so): 2 characters drawn from each character group;
                "3" (strong): 5 characters drawn from each character group.

    Fix: the original ignored its *strength* parameter and read the
    module-level ``password_strength`` global instead; the parameter is
    now authoritative. The existing call site already passes that global,
    so its behavior is unchanged.
    """
    if strength == "1":
        # Weak: just glue two random fruit names together.
        return ''.join(random.choices(fruits, k=2))
    chars_per_group = 5 if strength == "3" else 2
    # Draw from every group (lower, upper, punctuation, digits) so the
    # password is guaranteed a mix, then shuffle so groups are interleaved.
    chunks = [''.join(random.choices(group, k=chars_per_group))
              for group in available_characters]
    password = list(''.join(chunks))
    random.shuffle(password)
    return ''.join(password)
# Script entry: ask the user for a strength, then print one generated password.
get_password_strength()
print(f"Here's the generated password: {generate_password(password_strength)}")
| [
"vlvsjr@gmail.com"
] | vlvsjr@gmail.com |
6f6a3f5a7244d69c773653f720280a184a73071d | 6eb342c822ae8dc10b5a90e07ce879ff6cc7a779 | /Examples/Python2/HighDynamicRange.py | 23afa63f9e2507eddfb03f6ff82a45b942a94d1b | [] | no_license | btw1027/USB_CAM | f5f7bee7795ee355c4c7319537cbe26ce1cffa0f | 11f0b33c9a2789cac6e7912232a442127052023c | refs/heads/master | 2022-12-08T02:11:23.847174 | 2019-07-24T19:37:03 | 2019-07-24T19:37:03 | 198,698,667 | 1 | 0 | null | 2022-11-22T02:39:52 | 2019-07-24T19:35:16 | Jupyter Notebook | UTF-8 | Python | false | false | 9,259 | py | # =============================================================================
# Copyright (c) 2001-2018 FLIR Systems, Inc. All Rights Reserved.
#
# This software is the confidential and proprietary information of FLIR
# Integrated Imaging Solutions, Inc. ("Confidential Information"). You
# shall not disclose such Confidential Information and shall use it only in
# accordance with the terms of the license agreement you entered into
# with FLIR Integrated Imaging Solutions, Inc. (FLIR).
#
# FLIR MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE SUITABILITY OF THE
# SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT. FLIR SHALL NOT BE LIABLE FOR ANY DAMAGES
# SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING
# THIS SOFTWARE OR ITS DERIVATIVES.
# =============================================================================
#
# HighDynamicRange.py
# This example shows how to set High Dynamic Range (HDR) if it is available on the camera.
import PySpin
import os
NUM_IMAGES = 4 # number of images to grab
K_HDR_SHUTTER1 = 1000 # us
K_HDR_SHUTTER2 = 5000
K_HDR_SHUTTER3 = 15000
K_HDR_SHUTTER4 = 30000
K_HDR_GAIN1 = 0 # dB
K_HDR_GAIN2 = 5
K_HDR_GAIN3 = 10
K_HDR_GAIN4 = 15
def print_device_info(nodemap):
    """
    Helper for outputting camera information

    :param nodemap: Transport layer device nodemap.
    :type INodeMap
    :returns: True if successful, False otherwise.
    :rtype: bool
    """
    print '*** DEVICE INFORMATION ***'

    try:
        # 'DeviceControl' is the category node grouping device-level features.
        node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceControl'))

        if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):
            features = node_device_information.GetFeatures()
            for feature in features:
                node_feature = PySpin.CValuePtr(feature)
                # Some features exist but are not readable; print a placeholder.
                print '%s: %s' % (node_feature.GetName(),
                                  node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable')
        else:
            print 'Device control information not available.'

    except PySpin.SpinnakerException as ex:
        print 'Error: %s' % ex
        return False

    return True
def check_node_accessibility(node):
    """
    Helper for checking GenICam node accessibility

    :param node: GenICam node being checked
    :type node: CNodePtr
    :return: True if accessible, False otherwise
    :rtype: bool
    """
    # A node is usable only if it exists AND can be read or written.
    if not PySpin.IsAvailable(node):
        return False
    return PySpin.IsReadable(node) or PySpin.IsWritable(node)
def toggle_hdr_mode(nodemap, hdr_on):
"""
Helper for toggling HDR mode on camera
:param nodemap: Transport layer device nodemap.
:type: INodeMap
:param hdr_on: True if want to turn hdr mode on, False otherwise.
:type hdr_on: bool
:return: True if successful, False otherwise.
:rtype: bool
"""
node_hdr_enabled = PySpin.CBooleanPtr(nodemap.GetNode("PGR_HDRModeEnabled"))
if check_node_accessibility(node_hdr_enabled):
node_hdr_enabled.SetValue(hdr_on)
else:
return False
print'HDR mode turned to', hdr_on
return True
def initialize_hdr_images(nodemap):
"""
Helper for initializing HDR images
:param nodemap: Transport layer device nodemap.
:type: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
hdr_image_selector = PySpin.CEnumerationPtr(nodemap.GetNode("PGR_HDRImageSelector"))
hdr_exposure_abs = PySpin.CFloatPtr(nodemap.GetNode("PGR_HDR_ExposureTimeAbs"))
hdr_gain_abs = PySpin.CFloatPtr(nodemap.GetNode("PGR_HDR_GainAbs"))
if not check_node_accessibility(hdr_image_selector):
return False
if not check_node_accessibility(hdr_exposure_abs):
return False
if not check_node_accessibility(hdr_gain_abs):
return False
# Configure Image1
hdr_image_selector.SetIntValue(hdr_image_selector.GetEntryByName("Image1").GetValue())
hdr_exposure_abs.SetValue(K_HDR_SHUTTER1)
hdr_gain_abs.SetValue(K_HDR_GAIN1)
print'Initialized HDR Image1...'
# Configure Image2
hdr_image_selector.SetIntValue(hdr_image_selector.GetEntryByName("Image2").GetValue())
hdr_exposure_abs.SetValue(K_HDR_SHUTTER2)
hdr_gain_abs.SetValue(K_HDR_GAIN2)
print'Initialized HDR Image2...'
# Configure Image3
hdr_image_selector.SetIntValue(hdr_image_selector.GetEntryByName("Image3").GetValue())
hdr_exposure_abs.SetValue(K_HDR_SHUTTER3)
hdr_gain_abs.SetValue(K_HDR_GAIN3)
print'Initialized HDR Image3...'
# Configure Image4
hdr_image_selector.SetIntValue(hdr_image_selector.GetEntryByName("Image4").GetValue())
hdr_exposure_abs.SetValue(K_HDR_SHUTTER4)
hdr_gain_abs.SetValue(K_HDR_GAIN4)
print'Initialized HDR Image4...'
return True
def run_single_camera(cam):
    """
    Helper for running example on single camera

    Initializes the camera, enables and configures HDR, grabs NUM_IMAGES
    frames cycling through the four HDR slots, and saves each as a JPEG.

    :param cam: Camera to run on.
    :type cam: CameraPtr
    :return: True if successful, False otherwise.
    :rtype: bool
    """
    try:
        result = True

        # Initialize camera
        cam.Init()

        # Get GenICam NodeMap info from camera
        nodemap = cam.GetNodeMap()

        # Get camera information through NodeMap
        print_device_info(nodemap)

        # Verify whether HDR is supported on this device
        node_hdr_enabled = PySpin.CBooleanPtr(nodemap.GetNode("PGR_HDRModeEnabled"))
        if not PySpin.IsAvailable(node_hdr_enabled):
            # Not a failure: cameras without HDR simply skip the example.
            print 'HDR is not supported! Exiting...'
            return True

        # HDR needs to be enabled prior to configure individual HDR images
        toggle_hdr_mode(nodemap, True)

        if not initialize_hdr_images(nodemap):
            print 'Error configuring HDR image! Exiting...'
            return False

        # Retrieve Device ID
        # NOTE(review): this is the INode object, not its string value; the
        # '%s' in the filename below formats the node object itself --
        # presumably GetValue() was intended. Confirm the resulting filenames.
        device_id = cam.GetTLDeviceNodeMap().GetNode("DeviceID")

        # Begin capturing images
        print 'Starting grabbing images...'
        cam.BeginAcquisition()

        for i in range(NUM_IMAGES):
            try:
                # Retrieve the next received image
                raw_image = cam.GetNextImage()
                width = raw_image.GetWidth()
                height = raw_image.GetHeight()
                print 'Grabbed image %d, width = %d, height = %d' % (i, width, height)

                # Convert image to Mono8
                converted_image = raw_image.Convert(PySpin.PixelFormat_Mono8)

                # Create a unique filename
                filename = 'HighDynamicRange-%s-%d.jpg' % (device_id, i)

                # Save image
                converted_image.Save(filename)

                # Image need to be released after use
                raw_image.Release()

            except PySpin.SpinnakerException as ex:
                # A failed grab marks the run unsuccessful but keeps acquiring.
                print 'Error Retrieving Image: %s' % ex
                result = False
                continue

        # End capturing of images
        cam.EndAcquisition()

    except PySpin.SpinnakerException as ex:
        print 'Error: %s' % ex
        result = False

    print ''
    return result
def main():
    """
    Example entry point; please see Enumeration example for more in-depth
    comments on preparing and cleaning up the system.

    :return: True if successful, False otherwise.
    :rtype: bool
    """
    # Since this application saves images in the current folder
    # we must ensure that we have permission to write to this folder.
    # If we do not have permission, fail right away.
    try:
        test_file = open('test.txt', 'w+')
    except IOError:
        print 'Unable to write to current directory. Please check permissions.'
        raw_input('Press Enter to exit...')
        return False

    # Probe file is only for the permission check; remove it immediately.
    test_file.close()
    os.remove(test_file.name)

    result = True

    # Retrieve singleton reference to system object
    system = PySpin.System.GetInstance()

    # Get current library version
    version = system.GetLibraryVersion()
    print 'Library version: %d.%d.%d.%d' % (version.major, version.minor, version.type, version.build)

    # Retrieve list of cameras from the system
    cam_list = system.GetCameras()

    num_cameras = cam_list.GetSize()
    print 'Number of cameras detected: %d' % num_cameras

    # Finish if there are no cameras
    if num_cameras == 0:
        # Clear camera list before releasing system
        cam_list.Clear()

        # Release system instance
        system.ReleaseInstance()

        print 'Not enough cameras!'
        raw_input('Done! Press Enter to exit...')
        return False

    # Run example on each camera
    for cam in cam_list:
        # Accumulate success across cameras: any failure makes result False.
        result &= run_single_camera(cam)

        # Release reference to camera
        # NOTE: Unlike the C++ examples, we cannot rely on pointer objects being automatically
        # cleaned up when going out of scope.
        # The usage of del is preferred to assigning the variable to None.
        del cam

    # Clear camera list before releasing system
    cam_list.Clear()

    # Release system instance
    system.ReleaseInstance()

    raw_input('Done! Press Enter to exit...')
    return result


if __name__ == '__main__':
    main()
| [
"benjamin_wilks@brown.edu"
] | benjamin_wilks@brown.edu |
fd1883d0963f4f3e119e56d8f842fab3aca4366b | 3caa46fd763aafd94ec46a1faf9d0aa9fa858217 | /back_end/urls.py | f1a765723ad5bc56a15c75e5d3a80434260383a7 | [] | no_license | Kazathur92/pokeMaster-Back-End | 2e2a02c06baa908289577b5a5c6c85379f1e8dfa | 7ad8809609c4c7ecf4d9c28f778c2dc9a0ccd915 | refs/heads/master | 2020-04-24T14:19:33.567387 | 2019-03-20T15:34:25 | 2019-03-20T15:34:25 | 172,017,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | """back_end URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework.authtoken.views import obtain_auth_token
from django.conf.urls import url, include
from pokeMaster import views
# app_name = "pokeMaster"
# URL routing: Django admin, the pokeMaster app's own routes, and the API
# endpoints for registration and DRF token authentication.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('pokeMaster.urls')),
    url(r'^api/v1/register/', views.register_user),
    url(r'^api/v1/api-token-auth/', obtain_auth_token),  # POST username/password -> auth token
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| [
"Gabmin21@hotmail.com"
] | Gabmin21@hotmail.com |
099cc4fda3d3ac97352d0deffc1bdb6a06428dc2 | 6c28060d11ec001b48a091760d0f883b23a72eaa | /notification/context_processor.py | cd66be1f866e7ca6946f8fafb29de4b9f29741eb | [] | no_license | s-soroosh/rose | 8b37904781d382fbac58fbaf9668391dddee2fc7 | 1f7ab356656696de06c56f8a86808ae59474c649 | refs/heads/master | 2021-05-26T18:22:37.349231 | 2014-07-02T07:25:54 | 2014-07-02T07:25:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from assign.models import Assign
__author__ = 'soroosh'
def notification(request):
    """Context processor: expose the number of assignments still pending for
    the logged-in user as ``notifications_count`` in every template context."""
    pending = Assign.objects.filter(target__id=request.user.id, status='pending')
    return {'notifications_count': pending.count()}
| [
"soroosh.sarabadani@gmail.com"
] | soroosh.sarabadani@gmail.com |
d954695fe65b1d734375cf15c176583be91d2ad8 | 8287c322bcbb39a6ead7c911a85b8bd4fa3ce429 | /sparkFigure3.py | 286a85cb81b7c91fb2677541112c02f50b230421 | [] | no_license | YaminArefeen/sparkIsmrm2021 | 7e100f2ebbc171512746c5f4f26c68598119e04a | 594032c8af31d43de61ced63da8bcc029af92a54 | refs/heads/main | 2023-02-15T03:02:26.422529 | 2021-01-05T17:16:25 | 2021-01-05T17:16:25 | 319,468,953 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,738 | py | #Code used to generate Figure 3 in the ISMRM abstract "Extending Scan-specific Artifact Reduction in K-space (SPARK) to Advanced Encoding and Reconstruction Schemes" (Yamin Arefeen et al.). Note, results may not be exact, as the initialization of neural network weights is not standardized across different runs of the experiment.
#The script takes the following steps
# -Load the grappa reconstructed kspace, kspace where the ACS was undersampled at 2 x 2 acceleration and then recnostructed using an external reference, and object with the parameters for the reconstruction (acceleration factors both for undersampling and in the acs region, acs size, etc)
# -Reformat the appropriate kspace (acs kspace and grappa kspace) to be inputted into the SPARK model
# -Train a set of real and imaginary models for each coil
# -Apply the kspace correction through the SPARK model
# -Save the reesults for future comparison
import time
print('Importing libraries, defining helper functions, and loading the dataset... ',end='')
start = time.time()
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import scipy as sp
import cupy as cp
from utils import cfl
from utils import signalprocessing as sig
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#DEFINING HELPER FUNCTIONS AND SPARK MODEL
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def fft3(x):
    """Centered FFT applied along the last three axes (in order -3, -2, -1)."""
    for ax in (-3, -2, -1):
        x = sig.fft(x, ax)
    return x
def ifft3(x):
    """Centered inverse FFT applied along the last three axes (in order -3, -2, -1)."""
    for ax in (-3, -2, -1):
        x = sig.ifft(x, ax)
    return x
class SPARK_3D_net(nn.Module):
    """Residual 3-D CNN mapping split real/imag multi-coil k-space (2*coils
    input channels) to a one-channel k-space error estimate.

    forward() returns both the full-FOV output and the crop over the ACS
    region, which is the only part with ground truth to train against.

    Fix: convolution padding was hard-coded to 1, which only preserves the
    spatial size for kernelsize == 3; it is now kernelsize // 2 so any odd
    kernel keeps output aligned with input (default behavior unchanged).
    """

    def __init__(self, coils, kernelsize, acsx, acsy, acsz):
        super().__init__()
        # Index ranges of the fully-sampled ACS block used for the loss crop.
        self.acsx = acsx
        self.acsy = acsy
        self.acsz = acsz
        pad = kernelsize // 2  # "same" padding for odd kernel sizes
        # First residual branch
        self.conv1 = nn.Conv3d(coils*2, coils*2, kernelsize, padding=pad, bias=False)
        self.conv2 = nn.Conv3d(coils*2, coils, 1, padding=0, bias=False)
        self.conv3 = nn.Conv3d(coils, coils*2, kernelsize, padding=pad, bias=False)
        # Second residual branch
        self.conv4 = nn.Conv3d(coils*2, coils*2, kernelsize, padding=pad, bias=False)
        self.conv5 = nn.Conv3d(coils*2, coils, 1, padding=0, bias=False)
        self.conv6 = nn.Conv3d(coils, coils*2, kernelsize, padding=pad, bias=False)
        # Reduction head down to a single output channel
        self.conv7 = nn.Conv3d(coils*2, coils, kernelsize, padding=pad, bias=False)
        self.conv8 = nn.Conv3d(coils, coils//4, 1, padding=0, bias=False)
        self.conv9 = nn.Conv3d(coils//4, 1, kernelsize, padding=pad, bias=False)

    def nl(self, inp):
        # Piecewise-linear activation: identity on [-1, 1], slope 1.5 above 1
        # and slope 0.5 below -1.
        return inp + F.relu((inp-1)/2) + F.relu((-inp-1)/2)

    def forward(self, x):
        """Return (full k-space correction, ACS-cropped correction for the loss)."""
        y = self.nl(self.conv1(x))
        y = self.nl(self.conv2(y))
        y = self.nl(self.conv3(y))
        y = x + y  # residual connection 1

        z = self.nl(self.conv4(y))
        z = self.nl(self.conv5(z))
        z = self.nl(self.conv6(z))
        out = z + y  # residual connection 2

        out = self.conv9(self.nl(self.conv8(self.nl(self.conv7(out)))))

        # Crop to the ACS block; only this region has ground truth to train on.
        loss_out = out[:, :, self.acsx[0]:self.acsx[-1]+1,
                             self.acsy[0]:self.acsy[-1]+1,
                             self.acsz[0]:self.acsz[-1]+1]
        return out, loss_out
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Setting Parameters and loading the dataset
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Loading acs reconstructed kspace, grappa recon, and parmaters for BRAIN dataset
kspace = np.transpose(cfl.readcfl('data/kspaceAcsReconFig3'),(3,0,1,2))
kspaceGrappa = np.transpose(cfl.readcfl('data/kspaceGrappaFig3'),(3,0,1,2))
for3dspark = sp.io.loadmat('data/forSparkFig3.mat')['for3Dspark'][0][0]
[C,M,N,P] = kspace.shape
Rx = for3dspark[0][0][0]
Ry = for3dspark[0][0][1]
Rz = for3dspark[0][0][2]
acsx = for3dspark[1][0][0]
acsy = for3dspark[1][0][2]
acsz = for3dspark[1][0][2]
#Defining some SPARK parameters
normalizationflag = 1 #If we want to normalize datasets befoe training SPARK model
measuredReplace = 1 #If we want to replace measured data (as well as ACS)
iterations = 1000 #Number of iterations to train each spark network
learningRate = .002 #Learning rate to train each spark network
kernelsize = 3 #3 x 3 x 3 convolutional kernels in the SPARK network
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
print('Elapsed Time is %.3f seconds' % (time.time()-start))
print('GRAPPA Parameters: ')
print(' Dimensions: %d x %d x %d x %d' %(C,M,N,P))
print(' Acceleration: %d x %d x %d' % (Rx,Ry,Rz))
print(' ACS Sizes: %d x %d x %d' % (acsx,acsy,acsz))
print('SPARK Parameters: ')
print(' Iterations: %d' % iterations)
print(' Stepsize: %.3f' % learningRate)
print(' Kernel: %d' % kernelsize)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Generating Zerofilled ACS, grappa recon, and reformatting kspace for SPARK
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print('Generating zerofilled ACS, grappa replaced recon, and reformatting kspace for SPARK... ',end='')
start = time.time()
#-Generating zero-filled ACS kspace from the reconstructed ACS kspace. This will be used as the reference to train SPARK
acsregionX = np.arange(M//2 - acsx // 2,M//2 + acsx//2)
acsregionY = np.arange(N//2 - acsy // 2,N//2 + acsy//2)
acsregionZ = np.arange(P//2 - acsz // 2,P//2 + acsz//2)
kspaceAcsZerofilled = np.zeros(kspace.shape,dtype = complex)
kspaceAcsZerofilled[:,acsregionX[0]:acsregionX[acsx-1]+1,acsregionY[0]:acsregionY[acsy-1]+1,acsregionZ[0]:acsregionZ[acsz-1]+1] = \
kspace[:,acsregionX[0]:acsregionX[acsx-1]+1,acsregionY[0]:acsregionY[acsy-1]+1,acsregionZ[0]:acsregionZ[acsz-1]+1]
#-Generating ACS replaced GRAPPA recon for comparisons later on
tmp = np.copy(kspaceGrappa)
tmp[:,acsregionX[0]:acsregionX[acsx-1]+1,acsregionY[0]:acsregionY[acsy-1]+1,acsregionZ[0]:acsregionZ[acsz-1]+1] = \
kspace[:,acsregionX[0]:acsregionX[acsx-1]+1,acsregionY[0]:acsregionY[acsy-1]+1,acsregionZ[0]:acsregionZ[acsz-1]+1]
#-Reformatting kspace for SPARK
kspaceAcsCrop = kspace[:,acsregionX[0]:acsregionX[acsx-1]+1,acsregionY[0]:acsregionY[acsy-1]+1,acsregionZ[0]:acsregionZ[acsz-1]+1]
kspaceAcsGrappa = kspaceGrappa[:,acsregionX[0]:acsregionX[acsx-1]+1,acsregionY[0]:acsregionY[acsy-1]+1,acsregionZ[0]:acsregionZ[acsz-1]+1]
kspaceAcsDifference = kspaceAcsCrop - kspaceAcsGrappa
acs_difference_real = np.real(kspaceAcsDifference)
acs_difference_imag = np.imag(kspaceAcsDifference)
kspace_grappa = np.copy(kspaceGrappa)
kspace_grappa_real = np.real(kspace_grappa)
kspace_grappa_imag = np.imag(kspace_grappa)
kspace_grappa_split = np.concatenate((kspace_grappa_real, kspace_grappa_imag), axis=0)
#Normalizing data if specified
chan_scale_factors_real = np.zeros(C,dtype = 'float')
chan_scale_factors_imag = np.zeros(C,dtype = 'float')
if(normalizationflag):
scale_factor_input = 1/np.amax(np.abs(kspace_grappa_split))
kspace_grappa_split *= scale_factor_input
for c in range(C):
if(normalizationflag):
scale_factor_real = 1/np.amax(np.abs(acs_difference_real[c,:,:,:]))
scale_factor_imag = 1/np.amax(np.abs(acs_difference_imag[c,:,:,:]))
else:
scale_factor_real = 1
scale_factor_imag = 1
chan_scale_factors_real[c] = scale_factor_real
chan_scale_factors_imag[c] = scale_factor_imag
acs_difference_real[c,:,:,:] *= scale_factor_real
acs_difference_imag[c,:,:,:] *= scale_factor_imag
acs_difference_real = np.expand_dims(np.expand_dims(acs_difference_real,axis=1), axis=1)
acs_difference_imag = np.expand_dims(np.expand_dims(acs_difference_imag,axis=1), axis=1)
kspace_grappa_split = torch.unsqueeze(torch.from_numpy(kspace_grappa_split),axis = 0)
kspace_grappa_split = kspace_grappa_split.to(device, dtype=torch.float)
acs_difference_real = torch.from_numpy(acs_difference_real)
acs_difference_real = acs_difference_real.to(device, dtype=torch.float)
acs_difference_imag = torch.from_numpy(acs_difference_imag)
acs_difference_imag = acs_difference_imag.to(device, dtype=torch.float)
print('Elapsed Time is %.3f seconds' % (time.time()-start))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Training the SPARK Networks
#~~~~~~~~~~~~~~~~~~~~~~~~~~~
#-Training the real spark networks
real_models = {}
real_model_names = []
criterion = nn.MSELoss()
realLoss = np.zeros((iterations,C))
for c in range(0,C):
model_name = 'model'+ 'C' + str(c) + 'r'
model = SPARK_3D_net(coils=C,kernelsize=kernelsize,acsx=acsregionX,acsy=acsregionY,acsz=acsregionZ)
model.to(device)
print('Training {}'.format(model_name))
start = time.time()
optimizer = optim.Adam(model.parameters(),lr=learningRate)
running_loss = 0
for epoch in range(iterations):
optimizer.zero_grad()
_,loss_out = model(kspace_grappa_split)
loss = criterion(loss_out,acs_difference_real[c,:,:,:,:,:])
loss.backward()
optimizer.step()
running_loss = loss.item()
realLoss[epoch,c] = running_loss;
if(epoch == 0):
print(' Starting Loss: %.10f' % running_loss)
real_model_names.append(model_name)
real_models.update({model_name:model})
print(' Ending Loss: %.10f' % (running_loss))
print(' Training Time: %.3f seconds' % (time.time() - start))
#-Training the imaginary spark networks
imag_models = {}
imag_model_names = []
criterion = nn.MSELoss()
imagLoss = np.zeros((iterations,C))
for c in range(0,C):
model_name = 'model'+ 'C' + str(c) + 'i'
model = SPARK_3D_net(coils=C,kernelsize=kernelsize,acsx=acsregionX,acsy=acsregionY,acsz=acsregionZ)
model.to(device)
print('Training {}'.format(model_name))
start = time.time()
optimizer = optim.Adam(model.parameters(),lr=learningRate)
running_loss = 0
for epoch in range(iterations):
optimizer.zero_grad()
_,loss_out = model(kspace_grappa_split)
loss = criterion(loss_out,acs_difference_imag[c,:,:,:,:,:])
loss.backward()
optimizer.step()
running_loss = loss.item()
imagLoss[epoch,c] = running_loss;
if(epoch == 0):
print(' Starting Loss: %.10f' % running_loss)
imag_model_names.append(model_name)
imag_models.update({model_name:model})
print(' Ending Loss: %.10f' % (running_loss))
print(' Training Time: %.3f seconds' % (time.time() - start))
#~~~~~~~~~~~~~~~~~~~~~~
#Performing corrections
#~~~~~~~~~~~~~~~~~~~~~~
# Apply each coil's trained real/imag network to the full GRAPPA k-space and
# add the (de-normalized) predicted error back onto the GRAPPA reconstruction.
print('Performing coil-by-coil correction... ', end = '')
start = time.time()

kspaceCorrected = np.zeros((C,M,N,P),dtype = complex)

for c in range(0,C):
    #Perform reconstruction coil by coil
    model_namer = 'model' + 'C' + str(c) + 'r'
    model_namei = 'model' + 'C' + str(c) + 'i'

    real_model = real_models[model_namer]
    imag_model = imag_models[model_namei]

    # Full-FOV network outputs (index [0] of the (out, loss_out) tuple).
    correctionr = real_model(kspace_grappa_split)[0].cpu().detach().numpy()
    correctioni = imag_model(kspace_grappa_split)[0].cpu().detach().numpy()

    # Undo the per-coil normalization and add the complex correction to GRAPPA.
    kspaceCorrected[c,:,:,:] = correctionr[0,0,:,:,:]/chan_scale_factors_real[c] + \
        1j * correctioni[0,0,:,:,:] / chan_scale_factors_imag[c] + kspaceGrappa[c,:,:,:]

print('Elapsed Time is %.3f seconds' % (time.time()-start))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Perofrming ACS replacement and ifft/rsos coil combine reconstruction
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print('Performing ACS replacement, ifft, and rsos coil combination... ', end = '')
start = time.time()

#ACS replaced: re-impose the measured calibration block on the corrected k-space
kspaceCorrectedReplaced = np.copy(kspaceCorrected)
kspaceCorrectedReplaced[:,acsregionX[0]:acsregionX[acsx-1]+1,acsregionY[0]:acsregionY[acsy-1]+1,acsregionZ[0]:acsregionZ[acsz-1]+1] = \
    kspace[:,acsregionX[0]:acsregionX[acsx-1]+1,acsregionY[0]:acsregionY[acsy-1]+1,acsregionZ[0]:acsregionZ[acsz-1]+1]

#Sampled Data replacement: also re-impose the acquired (undersampled) samples
if(measuredReplace):
    kspaceCorrectedReplaced[:,::Rx,::Ry,::Rz] = kspace[:,::Rx,::Ry,::Rz]

#Perform IFFT and coil combine (root-sum-of-squares over the coil axis, -4)
truth = for3dspark[2]
grappa = sig.rsos(ifft3(tmp),-4)  # tmp = GRAPPA k-space with ACS replaced
spark = sig.rsos(ifft3(kspaceCorrectedReplaced),-4)

print('Elapsed Time is %.3f seconds' % (time.time()-start))
#~~~~~~~~~~~~~~~~~~
#Saving the Results
#~~~~~~~~~~~~~~~~~~
print('Saving results... ', end = '')
start = time.time()

# Bundle reconstructions, acquisition parameters, and training loss curves
# into one .mat file for later comparison.
results = {'groundTruth': np.squeeze(truth),
           'grappa': np.squeeze(grappa),
           'spark': np.squeeze(spark),
           'Ry': Ry,
           'Rz': Rz,
           'acsy': acsy,
           'acsz': acsz,
           'Iterations': iterations,
           'learningRate': learningRate,
           'realLoss':realLoss,
           'imagLoss':imagLoss}

sp.io.savemat('figure3results.mat', results, oned_as='row')

print('Elapsed Time is %.3f seconds' % (time.time()-start))
| [
"noreply@github.com"
] | YaminArefeen.noreply@github.com |
0ffffe8d297ffb2e1f1c45a9839eb983154cc594 | bc7b8f90f35ef1fb6f4accf1e8258a502df31a5a | /unitframe/unitframe.py | 68137035c1e4a7296190d3ba44c4bf2a9740128d | [
"MIT"
] | permissive | daviskirk/unitframe | 97f6e4bf7d0d6d226bf34279bc51d32acdec0c1d | 3021aa1d1a1f369360f16880e177f3fa44f29a77 | refs/heads/master | 2021-01-10T19:38:46.858405 | 2015-05-06T19:14:17 | 2015-05-06T19:14:17 | 34,793,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,874 | py | #!/usr/bin/env python
"""
Units
"""
import pandas as pd
import numpy as np
from collections import Sequence, Mapping, OrderedDict, Iterable
from numbers import Number
from functools import partial
import operator
from copy import deepcopy
from pint import UnitRegistry, UndefinedUnitError, DimensionalityError
UREG = UnitRegistry()
Q_ = UREG.Quantity
def to_unit(q):
    """Reduce *q* to its bare unit, discarding any magnitude.

    Accepts None (-> 1, dimensionless), a unit string parsable by pint,
    a pint Quantity, or a plain number (-> treated as dimensionless).
    Raises TypeError for anything else.
    """
    if q is None:
        return 1
    if isinstance(q, str):
        q = Q_(q)
    u = 1
    if isinstance(q, Q_):
        # Rebuild the unit from its (name, exponent) pairs; this drops the
        # magnitude of q and keeps only the composed unit.
        for k, v in q.units.items():
            u *= Q_(k)**v
    elif isinstance(q, Number):
        pass  # plain numbers are dimensionless
    else:
        raise TypeError(('Incorrect unit initialization. '
                        '{} of type {} can not be converted to unit').format(
                            q, type(q)))
    return u
class UnitSeries(pd.Series):
    """pandas Series subclass carrying a pint unit alongside float64 data.

    Binary arithmetic is unit-aware: '+'/'-' operate on Quantity-wrapped
    values (so pint enforces unit compatibility), while '*', '/' and '**'
    also update the stored unit of the result.
    """

    # Attribute names pandas propagates onto derived objects.
    _metadata = ['_unit']

    def __init__(self,*args,**kwargs):
        # Pop our custom 'unit' kwarg before delegating to pd.Series.
        if 'unit' in kwargs:
            unit = kwargs['unit']
            del kwargs['unit']
        else:
            unit = 1
        # Data is always stored as float64; note this silently overrides any
        # dtype the caller supplied.
        kwargs['dtype'] = np.double
        super().__init__(*args,**kwargs)
        if not self.dtype == np.double:
            raise TypeError('dtypes must all be doubles')
        self.unit = unit

    @property
    def _constructor(self):
        # Make pandas operations (slicing, copy, ...) return UnitSeries.
        return UnitSeries

    @property
    def unit(self):
        # The pint unit attached to this series; 1 means dimensionless.
        return self._unit

    @unit.setter
    def unit(self, unit):
        # Normalize anything unit-like (None, str, Quantity, number) via to_unit.
        if unit is None:
            unit = 1
        else:
            unit = to_unit(unit)
        self._unit = unit

    def do_op(self, other, op):
        """Apply binary operator *op* to self and *other* with unit handling.

        Returns a new UnitSeries; '+'/'-' keep the unit, other operators may
        change it.
        """
        series = self.copy()
        # add/sub never change the unit; mul/div/pow can.
        is_unit_change_possible = op not in (operator.add, operator.sub)
        if hasattr(other, '_unit'):
            # other is another UnitSeries: compute on Quantity-wrapped arrays.
            v = op(self.values*self.unit, other.values*other.unit)
            # NOTE(review): assigning a pint Quantity into a float ndarray --
            # presumably the magnitude is stored; verify pint's behavior here.
            series.values[:] = v
            if is_unit_change_possible and isinstance(v, Q_):
                series._unit = to_unit(v)
        elif isinstance(other, Q_):
            # NOTE(review): a Series is 1-D, so self.shape[1] raises
            # IndexError; this branch looks copied from a DataFrame variant
            # and the loop recomputes the same full-array op -- confirm intent.
            for i in range(self.shape[1]):
                v = op(self.values*self.unit, other)
                series.values[:] = v
                if is_unit_change_possible and isinstance(v, Q_):
                    series._unit = to_unit(v)
        else:
            if op == operator.pow:
                # Exponent must be a plain scalar so the unit can be raised too.
                if not isinstance(other, Number):
                    raise ValueError('Only scalar numbers are supported for ** operator')
                series._unit = series._unit**other
            series.values[:] = op(series.values, other)
        return series

    def __add__(self, other):
        return self.do_op(other, operator.add)

    def __sub__(self, other):
        return self.do_op(other, operator.sub)

    def __mul__(self, other):
        return self.do_op(other, operator.mul)

    def __truediv__(self, other):
        return self.do_op(other, operator.truediv)

    def __pow__(self, other):
        return self.do_op(other, operator.pow)

    def to(self, unit):
        """Convert the stored data to *unit* in place (like Quantity.to)."""
        unit = to_unit(unit)
        try:
            # NOTE(review): pd.Series.values is a read-only property in
            # current pandas; this assignment likely raises AttributeError
            # rather than converting -- 'self.values[:] = ...' may be the
            # intended in-place form. Verify against the pandas version used.
            self.values = (self.values * self._unit).to(unit)
        except TypeError:
            # Converting to dimensionless (unit == 1) is treated as a no-op.
            if isinstance(unit, Number) and unit == 1:
                pass
            else:
                raise
        self._unit = unit

    def min(self, *args, **kwargs):
        # Reductions return a pint Quantity: bare value times the series unit.
        return super().min(*args, **kwargs)*self._unit

    def max(self, *args, **kwargs):
        return super().max(*args, **kwargs)*self._unit

    def mean(self, *args, **kwargs):
        return super().mean(*args, **kwargs)*self._unit

    def std(self, *args, **kwargs):
        return super().std(*args, **kwargs)*self._unit

    def __str__(self):
        # Temporarily append ', Unit: [...]' to the name so the unit appears
        # in the standard pandas rendering, then restore the original name.
        old_name = self.name
        if self.name is None:
            self.name = ''
        try:
            unit_str = ', Unit: [{}]'.format(self._unit.units)
        except AttributeError:
            # Dimensionless series store the plain number 1, which has no
            # '.units' attribute.
            if self._unit == 1:
                unit_str = ', Unit: []'
            else:
                raise
        self.name += unit_str
        s = super().__str__()
        self.name = old_name
        return s
class UnitFrame(pd.DataFrame):
_metadata = ['_units']
def __init__(self,*args,**kwargs):
if 'units' in kwargs:
units = kwargs['units']
del kwargs['units']
else:
units = 1
kwargs['dtype'] = np.double
super().__init__(*args,**kwargs)
if not all(self.dtypes == np.double):
raise TypeError('dtypes must all be doubles')
self.units = units
@property
def units(self):
return self._units
@units.setter
def units(self, units):
if units is None:
units = 1
elif isinstance(units, Mapping):
# for dict like
units = [to_unit(units[k]) for k in self.columns]
elif isinstance(units, (Sequence, np.ndarray, pd.Series)):
# for lists, tuples and array likes
units = [to_unit(units[i]) for i,k in enumerate(self.columns)]
else:
units = to_unit(units)
self._units = pd.Series(units,index=self.columns, dtype=object)
@property
def _constructor(self):
def tmp_constr(*args, **kwargs):
if 'units' not in kwargs:
kwargs['units'] = self._units
return UnitFrame(*args, **kwargs)
return tmp_constr
def copy(self, deep=True):
"""
Make a copy of this UnitFrame object
Parameters
----------
deep : boolean, default True
Make a deep copy, i.e. also copy data
Returns
-------
copy : UnitFrame
"""
# FIXME: this will likely be unnecessary in pandas >= 0.13
data = self._data
if deep:
data = data.copy()
return UnitFrame(data).__finalize__(self)
def do_op(self, other, op):
df = self.copy()
is_unit_change_possible = op not in (operator.add, operator.sub)
if hasattr(other, '_units'):
for i in range(self.shape[1]):
v = op(self.values[:,i]*self.units.iat[i], (other.values[:,i])*other.units.iat[i])
df.values[:,i] = v
if is_unit_change_possible and isinstance(v, Q_):
df._units.iat[i] = to_unit(v)
elif isinstance(other, Q_):
for i in range(self.shape[1]):
v = op(self.values[:,i]*self.units.iat[i], other)
df.values[:,i] = v
if is_unit_change_possible and isinstance(v, Q_):
df._units.iat[i] = to_unit(v)
else:
if op == operator.pow:
if not isinstance(other, Number):
raise ValueError('Only scalar numbers are supported for ** operator')
for i in range(self.shape[1]):
df._units.iat[i] = df._units.iat[i]**other
try:
df.values[:] = op(df.values, other)
except Exception:
if isinstance(other, pd.Series):
raise ValueError(
'UnitFrame cannot use operation {} on {}'.format(
op, type(other)))
else:
raise
return df
def __add__(self, other):
return self.do_op(other, operator.add)
def __sub__(self, other):
return self.do_op(other, operator.sub)
def __mul__(self, other):
return self.do_op(other, operator.mul)
def __truediv__(self, other):
return self.do_op(other, operator.truediv)
def __pow__(self, other):
return self.do_op(other, operator.pow)
def to(self, unit_dict):
for k,v in unit_dict.items():
v = to_unit(v)
if not isinstance(v, Number):
self[k] = (self[k].values * self._units[k]).to(v)
self._units[k] = v
def __str__(self):
if hasattr(self, '_units'):
s = self.rename(columns={
c:str(c) + ' [{}]'.format(u.units)
for c, u in self._units.items() if isinstance(u, Q_)
})
else:
s=self
return super(UnitFrame,s).__str__()
def __setitem__(self, key, value):
self._units[key] = to_unit(value)
super().__setitem__(key, value)
def __getitem__(self, key):
item = super().__getitem__(key)
if item.ndim == 1:
item = UnitSeries(item, unit=self._units[key])
elif isinstance(key, slice):
item._units = self.units
return item
def min(self, **kwargs):
vals = super().min(axis=0, **kwargs)
return pd.Series((v*u for v, u in zip(vals.tolist(), self._units)),
index=vals.index)
def max(self, **kwargs):
vals = super().max(axis=0, **kwargs)
return pd.Series((v*u for v, u in zip(vals.tolist(), self._units)),
index=vals.index)
def mean(self, **kwargs):
vals = super().mean(axis=0, **kwargs)
return pd.Series((v*u for v, u in zip(vals.tolist(), self._units)),
index=vals.index)
def std(self, **kwargs):
vals = super().std(axis=0, **kwargs)
return pd.Series((v*u for v, u in zip(vals.tolist(), self._units)),
index=vals.index)
def to_df(self):
return pd.DataFrame(self)
def _get_demo_ddf():
return UnitFrame([[4,5],[6,7],[13.2,1.2]], index=list('ABC'), units=['m','kg'], columns=list('ab'))
if __name__ == '__main__':
rr = UnitFrame([[4,5],[6,7],[13.2,1.2]], index=list('ABC'), units=['m','kg'], columns=list('ab'))
print(rr)
b = rr + rr
print(b)
c = rr + 100
print(c)
print('mult')
print(rr)
b = rr * rr
print(b)
c = rr * 100
print(c)
print('div')
print(rr)
b = rr / rr
print(b)
c = rr / 100
print(c)
| [
"davis.kirkendall@rwth-aachen.de"
] | davis.kirkendall@rwth-aachen.de |
0621261bead3ecfcb35630fd2ffb1926684431d1 | 0e647273cffc1fb6cbd589fa3c7c277b221ba247 | /configs/hpt-pretrain/chexpert-r18/no_basetrain/5000-iters.py | eea6cd8ca1b09c010a379fe19bbf022ffb5a8f90 | [
"Apache-2.0"
] | permissive | Berkeley-Data/OpenSelfSup | e9976bf011b69ebf918506ba184f464b1073ec13 | 221191b88d891de57725b149caf237ffef72e529 | refs/heads/master | 2023-05-12T07:34:52.268476 | 2021-04-08T00:58:37 | 2021-04-08T00:58:37 | 343,654,823 | 0 | 1 | Apache-2.0 | 2021-04-08T00:58:37 | 2021-03-02T05:20:27 | Python | UTF-8 | Python | false | false | 206 | py | _base_="../base-chexpert-r18-config.py"
# this will merge with the parent
# epoch related
total_iters=5000
checkpoint_config = dict(interval=total_iters)
checkpoint_config = dict(interval=total_iters//2)
| [
"taeil.goh@gmail.com"
] | taeil.goh@gmail.com |
cb18c48983d5d4f173a0a471bfd9297f0638df6e | ed98ce07c46a24fc884e6dddcd3e7b85ffdbcbba | /imgDownload.py | 97f16060893b1b20a64d69c8fc00f4e148b185e8 | [] | no_license | moumita-das/tensorflowExp | 0f1e2f57582d4b8eaaac9547ed59f8cafa2dc448 | 54380fa51e148e4ec4bd72698af2fb5d630c66eb | refs/heads/master | 2021-05-16T05:07:28.266160 | 2017-10-11T22:30:35 | 2017-10-11T22:30:35 | 106,287,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,692 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import json
import urllib2
import sys
import time
# adding path to geckodriver to the OS environment variable
#os.environ["PATH"] += os.pathsep + os.getcwd()
dir_path = os.path.dirname(os.path.realpath(__file__))
chrome_dir_path=os.path.join(dir_path,"/chrome_dir/chromedriver")
download_path = "dataset/"
def main():
search_file=open('search_items.txt','r')
search_arr=search_file.read().splitlines()
for item in range(len(search_arr)):
searchtext = str(search_arr[item])
num_requested = 400 #number of requested images of each category
number_of_scrolls = num_requested / 400 + 1
# number_of_scrolls * 400 images will be opened in the browser
if not os.path.exists(download_path + searchtext.replace(" ", "_")):
os.makedirs(download_path + searchtext.replace(" ", "_"))
url = "https://www.google.co.in/search?q="+searchtext+"&source=lnms&tbm=isch"
driver = webdriver.Chrome(executable_path=chrome_dir_path)
driver.get(url)
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
extensions = {"jpg", "jpeg", "png", "gif"}
img_count = 0
downloaded_img_count = 0
for _ in xrange(number_of_scrolls):
for __ in xrange(10):
# multiple scrolls needed to show all 400 images
driver.execute_script("window.scrollBy(0, 1000000)")
time.sleep(0.2)
# to load next 400 images
time.sleep(0.5)
try:
driver.find_element_by_xpath("//input[@value='Show more results']").click()
except Exception as e:
print "Less images found:", e
break
# imges = driver.find_elements_by_xpath('//div[@class="rg_meta"]') # not working anymore
imges = driver.find_elements_by_xpath('//div[contains(@class,"rg_meta")]')
print "Total images:", len(imges), "\n"
for img in imges:
img_count += 1
img_url = json.loads(img.get_attribute('innerHTML'))["ou"]
img_type = json.loads(img.get_attribute('innerHTML'))["ity"]
print "Downloading image", img_count, ": ", img_url
try:
if img_type not in extensions:
img_type = "jpg"
req = urllib2.Request(img_url, headers=headers)
raw_img = urllib2.urlopen(req).read()
f = open(download_path+searchtext.replace(" ", "_")+"/"+str(downloaded_img_count)+"."+img_type, "wb")
f.write(raw_img)
f.close
downloaded_img_count += 1
except Exception as e:
print "Download failed:", e
finally:
print
if downloaded_img_count >= num_requested:
break
print "Total downloaded: ", downloaded_img_count, "/", img_count
driver.quit()
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | moumita-das.noreply@github.com |
4b22bc1672d0cb5edf940c5940d9748062ae83bf | 4c117ea3617a576ddd07d8ea8aaab1a925fc402f | /bin/Race/Statistic/StatPlotRace.py | 4c58180d8fb9441a99b11f09fe6b364418a4f2eb | [] | no_license | 452990729/Rep-seq | 7be6058ba3284bea81282f2db7fd3bd7895173ba | e217b115791e0aba064b2426e4502a5c1b032a94 | refs/heads/master | 2021-12-11T14:27:46.912144 | 2019-06-04T03:49:40 | 2019-06-04T03:49:40 | 190,124,555 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,436 | py | #!/usr/bin/env python
import re
from glob import glob
import os
import sys
import random
from copy import deepcopy
from collections import Counter
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def get_cmap(n, name='hsv'):
'''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.
'''
cap = plt.cm.get_cmap(name, n+1)
list_tmp = [cap(i) for i in range(n)]
random.shuffle(list_tmp)
return list_tmp
def PlotCDR3(list_path, xlb, tp):
paths = []
if tp == 'n':
for path in list_path:
paths += glob(path+'/*.nCDR3.len.stat')
st = 'Distribution of CDR3 Length(Nucleotide)'
sf = 'CDR3LengthOfNucleotide.png'
ylim = 90
elif tp == 'a':
for path in list_path:
paths += glob(path+'/*.aCDR3.len.stat')
st = 'Distribution of CDR3 Length(Amino)'
sf = 'CDR3LengthOfAmino.png'
ylim = 30
label = []
list_np = []
medians = []
fig, axes = plt.subplots()
for path in paths:
np_tmp = np.loadtxt(path, dtype='S10')
np_in = np_tmp[:, 1].astype('int')
list_np.append(np_in)
medians.append(np.median(np_in))
label.append(re.split('\.', os.path.basename(path))[0])
vplot = axes.violinplot(list_np, showmeans=False,\
showmedians=False, showextrema=False, widths=0.2)
bplot = axes.boxplot(list_np, vert=True, patch_artist=True,\
showfliers=False, widths=0.03, medianprops={'linestyle': 'None'})
inds = np.arange(1, len(medians)+1)
axes.scatter(inds, medians, marker='o', color='white', s=30, zorder=3)
for patch in bplot['boxes']:
patch.set_facecolor('black')
for patch, color in zip(vplot['bodies'], get_cmap(len(label))):
patch.set_color(color)
axes.set_xticks([y+1 for y in range(len(label))], )
axes.set_xlabel(xlb)
axes.set_ylabel('Length(bp)')
axes.set_xticklabels(label)
axes.set_ylim(0, ylim)
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
axes.set_title(st)
plt.savefig(sf)
def PlotCDR3Bar(list_path, tp, pathout):
paths = []
if tp == 'n':
for path in list_path:
paths += glob(path+'/*.nCDR3.len.stat')
st = 'Distribution of CDR3 Length(Nucleotide)'
sf = 'CDR3LengthOfNucleotide.png'
xlim = 120
elif tp == 'a':
for path in list_path:
paths += glob(path+'/*.aCDR3.len.stat')
st = 'Distribution of CDR3 Length(Amino)'
sf = 'CDR3LengthOfAmino.png'
xlim = 40
if len(list_path) == 2:
colors = ['g','r']
fig, axes = plt.subplots()
m = 0
labels = []
for path in sorted(paths):
np_tmp = np.loadtxt(path, dtype='S10')
np_in = np_tmp[:, 1].astype('int')
label = re.split('\.', os.path.basename(path))[0]
dict_tmp = dict(Counter(list(np_in)))
keys = sorted(dict_tmp.keys())
x = np.array(keys)
tl = len(np_in)
y = np.array([round(float(dict_tmp[i])*100/tl, 2) for i in keys])
axes.bar(x, y, width=0.8, align='center', color=colors[m], alpha=0.4, label=label)
m += 1
axes.legend(loc='upper right')
axes.set_xlim(0, xlim)
axes.set_xlabel('Length')
axes.set_ylabel('Pecentage (%)')
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
axes.set_title(st)
else:
colors = get_cmap(len(list_path))
if len(list_path)%2 == 0:
fig, axes = plt.subplots(nrows=len(list_path)/2, ncols=2, figsize=(10, len(list_path)), dpi=300)
handle = len(paths)/2
else:
fig, axes = plt.subplots(nrows=len(list_path)/2+1, ncols=2, figsize=(10, (len(list_path)/2+1)*2), dpi=300)
axes[-1,-1].spines['right'].set_visible(False)
axes[-1,-1].spines['left'].set_visible(False)
axes[-1,-1].spines['top'].set_visible(False)
axes[-1,-1].spines['bottom'].set_visible(False)
axes[-1,-1].set_xticks([])
axes[-1,-1].set_yticks([])
handle = len(paths)/2+1
m = 0
n = 0
c = 0
for path in sorted(paths):
np_tmp = np.loadtxt(path, dtype='S10')
np_in = np_tmp[:, 1].astype('int')
label = re.split('\.', os.path.basename(path))[0]
dict_tmp = dict(Counter(list(np_in)))
keys = sorted(dict_tmp.keys())
x = np.array(keys)
tl = len(np_in)
y = np.array([round(float(dict_tmp[i])*100/tl, 2) for i in keys])
axes[m, n].bar(x, y, width=0.8, align='center', color=colors[c],\
alpha=0.8, label=label)
axes[m, n].legend(loc='upper right')
axes[m, n].set_xlim(0, xlim)
# axes[m ,n].set_xlabel('Length')
axes[m, n].spines['right'].set_visible(False)
axes[m, n].spines['top'].set_visible(False)
c += 1
if c < handle:
m += 1
d = deepcopy(m)
# axes[m, n].set_ylabel('Pecentage (%)')
else:
m = c-d-1
n = 1
# axes[0, 0].set_ylabel('Pecentage (%)')
fig.subplots_adjust(hspace=0.4)
plt.savefig(os.path.join(pathout, sf))
def PlotVDJ(path_in, xlb):
fig, ax = plt.subplots(2,2)
axs = ax.flatten()
def PlotPie(np_in, ax_in, ns):
nps = np_in[:,1].astype('int')
porcent = 100.*nps/nps.sum()
patches, texts = ax_in.pie(nps, colors=get_cmap(len(np_in[:,0])),\
shadow=True, startangle=90)
labels = ['{0} - {1:1.2f} %'.format(i,j) for i,j in zip(np_in[:,0], porcent)]
if len(labels) <= 6:
ax_in.legend(patches, labels, loc='center left', bbox_to_anchor=(-0.9, 0.5),
fontsize=8)
else:
ax_in.legend(patches[:6], labels[0:], loc='center left', bbox_to_anchor=(-0.9, 0.5),
fontsize=8)
ax_in.set_title('Fraction of {}'.format(ns))
V = glob(path_in+'/*.V.stat')[0]
D = glob(path_in+'/*.D.stat')[0]
J = glob(path_in+'/*.J.stat')[0]
list_tmp = ['V', 'D', 'J']
name = re.split('\.', os.path.basename(V))[0]
dir_s = os.path.dirname(V)
i = 0
for path in [V,D,J]:
np_tmp = np.loadtxt(path, dtype='S10')
PlotPie(np_tmp, axs[i], list_tmp[i])
i += 1
axs[-1].axis('off')
fig.subplots_adjust(wspace=1)
fig.suptitle('Usage of VDJ genes ({})'.format(xlb))
plt.savefig(os.path.join(dir_s, 'FractionOfVDJOf{}.png'.format(xlb)), bbox_inches='tight')
def ReplaceLabel(array_in):
dict_tmp = {}
m = 0
n = 0
array_out = np.zeros(array_in.shape)
for i in array_in:
if i not in dict_tmp:
dict_tmp[i] = m
m += 1
array_out[n] = dict_tmp[i]
else:
array_out[n] = dict_tmp[i]
n += 1
return array_out
def PlotVJComb(path_in, xlb):
'''
plot 3d-hist of VJ combination
'''
fig = plt.figure(figsize=(20, 10), dpi=300)
ax = fig.add_subplot(111, projection='3d')
VJ = glob(path_in+'/*.VJCom.stat')[0]
name = re.split('\.', os.path.basename(VJ))[0]
dir_s = os.path.dirname(VJ)
np_tmp = np.loadtxt(VJ, dtype='S10')
list_s = []
ms = 0
for ay in np_tmp:
ms += 1
if ay[0] not in list_s:
list_s.append(ay[0])
if len(list_s) == 30:
break
np_tmp = np_tmp[:ms,:]
x = np_tmp[:,0]
xpos = ReplaceLabel(x)
y = np_tmp[:,1]
ypos = ReplaceLabel(y)
z = np.zeros(x.shape)
dx = 0.5*np.ones_like(z)
dy = dx.copy()
dz = np_tmp[:,2].astype('int')
col = get_cmap(len(set(list(ypos))))
colors = np.array([col[i] for i in ypos.astype('int')])
ax.bar3d(xpos, ypos, z, dx, dy, dz, color=colors, zsort='average',\
alpha=0.5)
ax.w_xaxis.set_ticks(xpos)
ax.w_xaxis.set_ticklabels(x, rotation=20, va='center', ha='right', fontsize=6)
ax.set_xlabel('V Gene')
ax.w_yaxis.set_ticks(ypos)
ax.w_yaxis.set_ticklabels(y)
ax.set_ylabel('J Gene')
ax.set_zlabel('Count')
ax.xaxis.labelpad=15
ax.yaxis.labelpad=15
ax.zaxis.labelpad=15
fig.suptitle('Distribution of VJ combination ({})'.format(xlb))
plt.savefig(os.path.join(dir_s,\
'DistributionOfVJCombinationOf{}.png'.format(xlb)), bbox_inches='tight')
def CountBar(np_in):
a,b,c,d,e,f,g,h = 0,0,0,0,0,0,0,0
tl = len(np_in)
for i in np_in:
if i <= 5:
a += 1
elif 5<i<=10:
b += 1
elif 10<i<=30:
c += 1
elif 30<i<=50:
d += 1
elif 50<i<=100:
e += 1
elif 100<i<=1000:
f += 1
elif 1000<i<=10000:
g += 1
elif i>10000:
h += 1
np_c = np.array([int(round(m, 2)*100) for m in [float(n)/tl for n in\
[a,b,c,d,e,f,g,h]]])
return np.array(['0','<5','5-10','10-30','30-50','50-100',\
'100-1000','1000-10000', '>10000']),np_c
def PlotConst(path_in, xlb):
path = glob(path_in+'/*_atleast-2_headers.tab')[0]
fig, axes = plt.subplots()
np_tmp = np.loadtxt(path, dtype='S10')
np_in = np_tmp[1:, 1].astype('int')
label, y = CountBar(np_in)
axes.bar(range(1,9),y)
axes.set_xlabel(xlb)
axes.set_ylabel('Percentage(%)')
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
axes.set_xticklabels(label, fontsize=7)
axes.set_title('Distribution of CONSCOUNT')
plt.savefig('DistributionOfCONSCOUNTOf{}.png'.format(xlb))
def main():
if sys.argv[1] == 'PlotCDR3Bar':
list_path = re.split(',', sys.argv[2])
sample = sys.argv[3]
PlotCDR3Bar(list_path, sample, sys.argv[4])
elif sys.argv[1] == 'PlotVDJ':
PlotVDJ(sys.argv[2], sys.argv[3])
elif sys.argv[1] == 'PlotVJComb':
PlotVJComb(sys.argv[2], sys.argv[3])
elif sys.argv[1] == 'PlotConst':
PlotConst(sys.argv[2], sys.argv[3])
if __name__ == '__main__':
main()
| [
"452990729@qq.com"
] | 452990729@qq.com |
ca610dd346c5bd9069afea75db9310d0c62e1ae5 | 82150be796c525c18e4cc6d3293b5330887914d9 | /NanoAODAnalyzer/pyroot/NtupleAnalyzer/Nesan/ZprimeToTT_read_helper.py | 7361544347b1a46d73ff16ad89fd4685c5624541 | [] | no_license | Ismah214/JetTaggingAnalysis | 1c0c6f1a62480458e21830a9d67682f410bd40f3 | 1bdda858ae61535aaac274ac43192443f1611f7e | refs/heads/master | 2023-03-17T23:03:54.254140 | 2021-03-02T05:08:10 | 2021-03-02T05:08:10 | 343,637,419 | 0 | 0 | null | 2021-03-02T04:28:36 | 2021-03-02T03:47:12 | null | UTF-8 | Python | false | false | 3,152 | py | import sys
import os
import glob
import ROOT
import plotter
from collections import OrderedDict
from array import array
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Event
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import InputTree
ROOT.gROOT.SetBatch()
#### check if gp -> W -> qq in AK8 ###
#def decayToWAK8(gp):
# if len(gp.dauIdx) == 0:
# raise ValueError('Particle has no daughters!')
# for idx in gp.dauIdx:
# dp=particles[idx]
# dpart=getFinal(dp)
# if abs(dpart.pdgId) == 24 and inAK8(dpart) and decayTo2QAK8(dpart):
# return True
# return False
#### check if gp -> b in AK8 ###
#def decayToBAK8(gp):
# if len(gp.dauIdx) == 0:
# raise ValueError('Particle has no daughters!')
# for idx in gp.dauIdx:
# dpart=particles[idx]
# if abs(dpart.pdgId) == 5 and inAK8(dpart):
# return True
# return False
### check if gp decays to at least 2 quarks in AK8 ###
#def decayTo2QAK8(gp):
# if len(gp.dauIdx) == 0:
# raise ValueError('Particle has no daughters!')
# for idx in gp.dauIdx:
# dpart=particles[idx]
# if abs(dpart.pdgId) < 6 and inAK8(dpart):
# gp.dauIdx.reverse()
# for idx in gp.dauIdx:
# dpart=particles[idx]
# if abs(dpart.pdgId)<6 and inAK8(dpart):
# return True
# return False
#looping over all particles and storing how many quark daughters each particle produces
def isHadronic(gp):
dautemp=[x for x in gp.dauIdx if abs(particles[x].pdgId)<6 and abs(particles[x].eta)<2.4]
if len(dautemp)==2: #fullhadron
return 0
elif len(dautemp)==1: #semilept
return 1
elif len(dautemp)==0: #fulllept
return 2
def BeginLeg(ll):
## LEGEND POSITION ##
legpos = "Right"
if legpos == "Left":
xLat = 0.13
elif legpos == "Right":
xLat = 0.65 # move left and right
else:
xLat = 0.2
yLat = 0.85 # move legend up and down, the larger( about 1 already at the edge of canvas) the higher it goes
xLeg = xLat
yLeg = yLat
leg_g = 0.04 * ll # num of legend entries
leg = ROOT.TLegend( xLeg+0.05, yLeg - leg_g, xLeg+0.15, yLeg )
leg.SetNColumns(1)
leg.SetFillStyle(0)
leg.SetTextFont(43)
leg.SetTextSize(14)
leg.SetBorderSize(0)
return leg
## define dauIdx for the mother particles ###
def AssignDauIdx(gp):
for idx, dp in enumerate(gp):
if not hasattr(dp, 'dauIdx'):
dp.dauIdx = []
if dp.genPartIdxMother >= 0: #access particles with a mother Id i.e. all daughter particles of some mother
mom = gp[dp.genPartIdxMother] #the mother particle is accessed using the genpartIdxMother and stored in mom
if not hasattr(mom, 'dauIdx'):
mom.dauIdx = [idx] #feed the index of the daughter to the mother particle
else:
mom.dauIdx.append(idx) | [
"nsubrama@lxplus753.cern.ch"
] | nsubrama@lxplus753.cern.ch |
157d369702bec630b730984870fff4996b38d54e | b28df8f2cd9a4b4fe274eb1688e7410ae19f9da1 | /kwippy/models/login.py | f89b8a0d9b01ef11b043d89d16a026f3e3f39269 | [] | no_license | kwippy-com/kwippycore | ba2d8b584e2171fd5322446df409e6983e23409b | d0647405cf77c4490cb40194b35e385955d56707 | refs/heads/master | 2020-06-14T14:56:35.169865 | 2014-12-08T16:44:20 | 2014-12-08T16:44:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from django.db import models
from django.contrib.auth.models import *
class Login(models.Model):
user = models.ForeignKey(User)
login_at = models.DateTimeField(auto_now_add=True)
def __unicode__(self) :
return '%s' % (self.user)
class Meta:
app_label="kwippy"
| [
"dipankarsarkar@gmail.com"
] | dipankarsarkar@gmail.com |
8ea3e10135ca0157f0da80e4b57c7db8fec6cd25 | 94fac88a1487fee0e44385fbb5617228e1179169 | /run/run_all.py | d77000090fe5c205ca0a726b8352960476381372 | [] | no_license | juxiaona/api_auto | ef835f710c4e3fcbd21f8cd5ef9f5fea66d72ef9 | 65a51dfd621cac31684381e9d1170b40287fe1e1 | refs/heads/master | 2021-05-16T05:18:09.085475 | 2018-04-14T03:04:18 | 2018-04-14T03:04:18 | 106,382,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | import sys
sys.path.append('../')
from common.testrunner import TestRunner
from common.sendmail import SendMail
if __name__ == '__main__':
run_all=TestRunner("../test_cases","API Auto Test Report","Test case execution")
run_all.run()
| [
"juxiaona@xxkuaipao.com"
] | juxiaona@xxkuaipao.com |
e0b8f7d22e753dfc6148fb80be247be49b79fd3e | ce44586089be9707221e8928265e255c47fea2bc | /bookmarks/api/serializers.py | 7f8f65dbb153c252179a6d5a09c29a50937ad3f5 | [
"MIT"
] | permissive | weTeams/linkding | 8ac8a2d63d4334c31ffe63d3acbc28ec891992eb | 1dabd0266b84a184c684eb32dff4d654a5195820 | refs/heads/master | 2023-08-12T12:49:36.982660 | 2021-10-16T03:49:56 | 2021-10-16T03:49:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,230 | py | from rest_framework import serializers
from bookmarks.models import Bookmark, Tag, build_tag_string
from bookmarks.services.bookmarks import create_bookmark, update_bookmark
from bookmarks.services.tags import get_or_create_tag
class TagListField(serializers.ListField):
child = serializers.CharField()
class BookmarkSerializer(serializers.ModelSerializer):
class Meta:
model = Bookmark
fields = [
'id',
'url',
'title',
'description',
'website_title',
'website_description',
'tag_names',
'date_added',
'date_modified'
]
read_only_fields = [
'website_title',
'website_description',
'date_added',
'date_modified'
]
# Override optional char fields to provide default value
title = serializers.CharField(required=False, allow_blank=True, default='')
description = serializers.CharField(required=False, allow_blank=True, default='')
# Override readonly tag_names property to allow passing a list of tag names to create/update
tag_names = TagListField(required=False, default=[])
def create(self, validated_data):
bookmark = Bookmark()
bookmark.url = validated_data['url']
bookmark.title = validated_data['title']
bookmark.description = validated_data['description']
tag_string = build_tag_string(validated_data['tag_names'], ' ')
return create_bookmark(bookmark, tag_string, self.context['user'])
def update(self, instance: Bookmark, validated_data):
instance.url = validated_data['url']
instance.title = validated_data['title']
instance.description = validated_data['description']
tag_string = build_tag_string(validated_data['tag_names'], ' ')
return update_bookmark(instance, tag_string, self.context['user'])
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ['id', 'name', 'date_added']
read_only_fields = ['date_added']
def create(self, validated_data):
return get_or_create_tag(validated_data['name'], self.context['user'])
| [
"noreply@github.com"
] | weTeams.noreply@github.com |
cce00240af79b52f5debad52fa91b451574aaca4 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/primat.py | 30563ebedac47163924a1ceba50956c489a8723a | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 508 | py | ii = [('CookGHP3.py', 2), ('CoolWHM2.py', 1), ('SadlMLP.py', 1), ('CookGHP.py', 2), ('ShawHDE.py', 1), ('LeakWTI2.py', 1), ('LeakWTI3.py', 4), ('ClarGE2.py', 3), ('ClarGE.py', 9), ('DaltJMA.py', 47), ('WestJIT2.py', 1), ('DibdTRL2.py', 1), ('WadeJEB.py', 6), ('FerrSDO2.py', 1), ('NewmJLP.py', 2), ('LeakWTI4.py', 9), ('LeakWTI.py', 4), ('SoutRD.py', 1), ('MereHHB3.py', 1), ('MackCNH.py', 1), ('WestJIT.py', 1), ('MackCNH2.py', 2), ('WilbRLW3.py', 1), ('BrewDTO.py', 2), ('ClarGE3.py', 2), ('TaylIF.py', 3)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
030b53ffcf1f68a4c58612e806754f9d93b449cf | a93fb346096f131ecfc37c1b6a2a2ce1b5f3ec1e | /machine_learning2/main.py | eb6ee6b4810379e4be5878b69e055605b7a75745 | [] | no_license | AndreiCatalinN/university | a0bed78e4ed3b79453e9cc22b0b38ab529a8acf2 | 7d18c56f12bebbce7c42c016ca2831bba98c945b | refs/heads/master | 2021-08-08T01:19:52.823214 | 2020-07-08T17:48:26 | 2020-07-08T17:48:26 | 199,484,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import pandas as pd
def traders_by_county(df):
counties = df['county'].unique()
for county in counties:
fuel_stops = df['trading_name'][df['county'] == county]
print(county, ' : ', fuel_stops.count())
def traders_by_county2(df):
data = df.groupby(['county']).count()
print(data)
def clear_data(df):
return df
def main():
df = pd.read_csv("./register-marked-fuel-traders.csv")
df = clear_data(df)
traders_by_county2(df)
return 0
if __name__ == '__main__':
main() | [
"c16733435@mydit.ie"
] | c16733435@mydit.ie |
05d41fd3cc414a5c7cebfedef4c6663551478993 | bc2f5e603a9e5542ecd3e23186e8f75b9b831be5 | /demo/backtest/dynamicIndicatorDemo.py | 05a009625a967e990d490b10c86067abc93ad3ed | [] | no_license | Gahyu96/xuefu | ba7335828432e8962f33d49e655cdeeb03f8e8ec | fa0414e15d0b2dba6f699ba40dbef21a2d7b9f38 | refs/heads/master | 2023-08-31T17:26:55.549193 | 2023-08-21T07:24:29 | 2023-08-21T07:24:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,021 | py | # -*- coding: utf-8 -*-
"""
Created on 2017-07-20
1.这个demo包含两部分,主要演示动态计算指标,如日线的macd,其实在当日的过程中他的数据是不断变化的,macd 12,26这个指标使用的是前N-1日的
数据再加上当日该5分钟的close数据合并出来12,16日数据,当当日结束的时候,5分钟计算出来的数据和日线级别使用当日收盘价计算出的结果相等。
2.这里的例子使用布林带,使用的方法为原始的talib指标
3.pyalgotrade里面的dataSeries中获取数据list 代码为self.day_feed[instrument].getCloseDataSeries()._SequenceDataSeries__values.data()
4.里面附带写了下ATR通道计算例子。
5.本指标为分钟线和日线混合例子
测试bolling线
"""
import pandas as pd
import dynamicIndicatorDemo_run as mdd
from cnx import pyalg_utils
from cnx import dataFramefeed
from pyalgotrade import plotter, bar
from pyalgotrade.tools import resample
from pyalgotrade.stratanalyzer import returns, sharpe
def bolling_trans_data():
dat = pd.read_csv("../../api/stock/csv/600281SH_min.csv", index_col=['datetime'], encoding='gbk')
feed = dataFramefeed.Feed(frequency=bar.Frequency.MINUTE)
feed.addBarsFromDataFrame("orcl", dat)
resample.resample_to_csv(feed, bar.Frequency.MINUTE * 5,"../../api/stock/csv/600281SH_5min.csv") # 这样resample的数据是对的
def bolling(code='600281'):
try:
dat = pd.read_csv("../../api/stock/csv/%sSH_5min.csv" % code, encoding='gbk')
dat = dat.fillna(method='pad')
dat['Adj Close'] = dat['Close']
dat = dat.rename(columns={'Open': 'open', 'High': 'high', 'Volume': 'volume', 'Close': 'close', 'Low': 'low'})
dat.index = dat['Date Time']
dat.index.name = 'date'
except:
from api.stock.lib import gm_api as gm
dat = gm.gm_api().get_hist_data(code, '2015-01-01', ktype='5')
print dat
dat['Adj Close'] = dat['close']
# dat = dat.ix[17000:18000,:]
feed = dataFramefeed.Feed(frequency=bar.Frequency.MINUTE)
feed.addBarsFromDataFrame("orcl", dat)
myStrategy = mdd.bolling_backtest(feed, 'orcl')
sharpeRatioAnalyzer = sharpe.SharpeRatio()
myStrategy.attachAnalyzer(sharpeRatioAnalyzer)
returnsAnalyzer = returns.Returns()
myStrategy.attachAnalyzer(returnsAnalyzer)
plt = plotter.StrategyPlotter(myStrategy, True, True, True)
plt.getOrCreateSubplot("returns").addDataSeries("Simple returns", returnsAnalyzer.getReturns())
ds = pyalg_utils.dataSet(myStrategy) # 抽取交易数据集语句,若使用系统自带画图功能则不需要该项
myStrategy.run()
plt.plot()
print u'最大回撤:\t%f\t 交易笔数:\t%d\t 盈利笔数:\t%d\n' % (ds.getMaxDrawDown(), ds.getCount(), ds.getProfitableCount())
print u'累计收益:\t%f\t 夏普比率:\t%f\t' % (returnsAnalyzer.getCumulativeReturns()[-1], ds.getSharpeRatio())
if __name__ == '__main__':
#bolling_trans_data()
bolling('600281')
| [
"root123"
] | root123 |
f4cbb4ee12d7c959f1b26caaa16f6fa14aa4c28c | 933105e728d41c6e9ad2e0b354e443d804b7d4dd | /stain_selection_gui.py | 8734ac15e21d072fbdce72883e51d38d646cd804 | [] | no_license | reillyp/Stain-Selection | f2ecf58bb807b1eed42c254fb7c2e12dc2610297 | ec9155525da50816c6c4fb7022efea3904fa2137 | refs/heads/main | 2023-01-01T12:07:46.654344 | 2020-10-26T19:06:38 | 2020-10-26T19:06:38 | 307,396,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,957 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.22
# in conjunction with Tcl version 8.6
# Sep 19, 2019 03:55:29 PM CDT platform: Windows NT
# This is Rev 2 of Stain Selection GUI
import sys
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import stain_selection_gui_support
def vp_start_gui():
    '''Starting point when module is the main routine.

    Creates the Tk root, lets the support module initialise its Tk
    variables, builds the main window, registers it with the support
    module, then blocks in the Tk event loop until the window closes.
    '''
    global val, w, root
    root = tk.Tk()
    stain_selection_gui_support.set_Tk_var()
    top = Toplevel1 (root)
    stain_selection_gui_support.init(root, top)
    root.mainloop()
w = None
def create_Toplevel1(root, *args, **kwargs):
    '''Starting point when module is imported by another program.

    Same setup as vp_start_gui(), but the caller owns the event loop:
    a Toplevel is created under the caller's root, and both the window
    and the Toplevel1 controller instance are returned.
    '''
    global w, w_win, rt
    rt = root
    w = tk.Toplevel (root)
    stain_selection_gui_support.set_Tk_var()
    top = Toplevel1 (w)
    stain_selection_gui_support.init(w, top, *args, **kwargs)
    return (w, top)
def destroy_Toplevel1():
    '''Tear down the window created by create_Toplevel1() and clear the
    module-level handle so a new window can be created later.'''
    global w
    w.destroy()
    w = None
class Toplevel1:
    # PAGE-generated view class: builds every widget of the main window in
    # __init__ and wires the buttons to callbacks in
    # stain_selection_gui_support. Entry widgets are bound to the Tk
    # variables created by stain_selection_gui_support.set_Tk_var().
    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        # Shared palette used throughout the generated configure() calls.
        _bgcolor = '#d9d9d9' # X11 color: 'gray85'
        _fgcolor = '#000000' # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#ececec' # Closest X11 color: 'gray92'
        font9 = "-family {Arial} -size 12" # NOTE(review): defined but never used below
        # Main window geometry ("WxH+X+Y"), title and colors.
        top.geometry("802x654+306+124")
        top.title("Stain Selection Rev 2")
        top.configure(background="#d9d9d9")
        top.configure(highlightbackground="#d9d9d9")
        top.configure(highlightcolor="black")
        # Button: pick the input image file (handled by the support module).
        self.ButtonInputImg = tk.Button(top)
        self.ButtonInputImg.place(relx=0.075, rely=0.031, height=30, width=141)
        self.ButtonInputImg.configure(activebackground="#ececec")
        self.ButtonInputImg.configure(activeforeground="#000000")
        self.ButtonInputImg.configure(background="#d9d9d9")
        self.ButtonInputImg.configure(disabledforeground="#a3a3a3")
        self.ButtonInputImg.configure(font="-family {Arial} -size 12")
        self.ButtonInputImg.configure(foreground="#000000")
        self.ButtonInputImg.configure(highlightbackground="#d9d9d9")
        self.ButtonInputImg.configure(highlightcolor="black")
        self.ButtonInputImg.configure(pady="0")
        self.ButtonInputImg.configure(text='''Select Input Image''')
        self.ButtonInputImg.bind('<ButtonRelease-1>',lambda e:stain_selection_gui_support.btn_select_input_img(e))
        # Caption for the lower HSV threshold entry row.
        self.LabelLowHSV = tk.Label(top)
        self.LabelLowHSV.place(relx=0.075, rely=0.168, height=24, width=285)
        self.LabelLowHSV.configure(activebackground="#f9f9f9")
        self.LabelLowHSV.configure(activeforeground="black")
        self.LabelLowHSV.configure(anchor='w')
        self.LabelLowHSV.configure(background="#d9d9d9")
        self.LabelLowHSV.configure(disabledforeground="#a3a3a3")
        self.LabelLowHSV.configure(font="-family {Arial} -size 12")
        self.LabelLowHSV.configure(foreground="#000000")
        self.LabelLowHSV.configure(highlightbackground="#d9d9d9")
        self.LabelLowHSV.configure(highlightcolor="black")
        self.LabelLowHSV.configure(justify='right')
        self.LabelLowHSV.configure(text='''Enter Lower Threshold Values (0-255):''')
        # Caption for the upper HSV threshold entry row.
        self.LabelUpperHSV = tk.Label(top)
        self.LabelUpperHSV.place(relx=0.075, rely=0.306, height=24, width=275)
        self.LabelUpperHSV.configure(activebackground="#f9f9f9")
        self.LabelUpperHSV.configure(activeforeground="black")
        self.LabelUpperHSV.configure(anchor='w')
        self.LabelUpperHSV.configure(background="#d9d9d9")
        self.LabelUpperHSV.configure(disabledforeground="#a3a3a3")
        self.LabelUpperHSV.configure(font="-family {Arial} -size 12")
        self.LabelUpperHSV.configure(foreground="#000000")
        self.LabelUpperHSV.configure(highlightbackground="#d9d9d9")
        self.LabelUpperHSV.configure(highlightcolor="black")
        self.LabelUpperHSV.configure(justify='right')
        self.LabelUpperHSV.configure(text='''Enter Upper Threshold Values (0-255):''')
        # Label that displays the currently selected input image path.
        self.LabelInputImg = tk.Label(top)
        self.LabelInputImg.place(relx=0.075, rely=0.092, height=24, width=685)
        self.LabelInputImg.configure(activebackground="#f9f9f9")
        self.LabelInputImg.configure(activeforeground="black")
        self.LabelInputImg.configure(anchor='w')
        self.LabelInputImg.configure(background="#d9d9d9")
        self.LabelInputImg.configure(disabledforeground="#a3a3a3")
        self.LabelInputImg.configure(font="-family {Arial} -size 12")
        self.LabelInputImg.configure(foreground="#000000")
        self.LabelInputImg.configure(highlightbackground="#d9d9d9")
        self.LabelInputImg.configure(highlightcolor="black")
        self.LabelInputImg.configure(text='''Input Image:''')
        # Caption for the area-threshold entry.
        self.LabelAreaThresh = tk.Label(top)
        self.LabelAreaThresh.place(relx=0.075, rely=0.52, height=24, width=165)
        self.LabelAreaThresh.configure(activebackground="#f9f9f9")
        self.LabelAreaThresh.configure(activeforeground="black")
        self.LabelAreaThresh.configure(anchor='w')
        self.LabelAreaThresh.configure(background="#d9d9d9")
        self.LabelAreaThresh.configure(disabledforeground="#a3a3a3")
        self.LabelAreaThresh.configure(font="-family {Arial} -size 12")
        self.LabelAreaThresh.configure(foreground="#000000")
        self.LabelAreaThresh.configure(highlightbackground="#d9d9d9")
        self.LabelAreaThresh.configure(highlightcolor="black")
        self.LabelAreaThresh.configure(justify='right')
        self.LabelAreaThresh.configure(text='''Enter Area Threshold:''')
        # Button: pick the output directory.
        self.ButtonIOutDir = tk.Button(top)
        self.ButtonIOutDir.place(relx=0.075, rely=0.627, height=30, width=141)
        self.ButtonIOutDir.configure(activebackground="#ececec")
        self.ButtonIOutDir.configure(activeforeground="#000000")
        self.ButtonIOutDir.configure(background="#d9d9d9")
        self.ButtonIOutDir.configure(disabledforeground="#a3a3a3")
        self.ButtonIOutDir.configure(font="-family {Arial} -size 12")
        self.ButtonIOutDir.configure(foreground="#000000")
        self.ButtonIOutDir.configure(highlightbackground="#d9d9d9")
        self.ButtonIOutDir.configure(highlightcolor="black")
        self.ButtonIOutDir.configure(pady="0")
        self.ButtonIOutDir.configure(text='''Select Output Dir''')
        self.ButtonIOutDir.bind('<ButtonRelease-1>',lambda e:stain_selection_gui_support.btn_select_output_dir(e))
        # Label that displays the currently selected output directory.
        self.LabelOutDir = tk.Label(top)
        self.LabelOutDir.place(relx=0.075, rely=0.688, height=24, width=685)
        self.LabelOutDir.configure(activebackground="#f9f9f9")
        self.LabelOutDir.configure(activeforeground="black")
        self.LabelOutDir.configure(anchor='w')
        self.LabelOutDir.configure(background="#d9d9d9")
        self.LabelOutDir.configure(disabledforeground="#a3a3a3")
        self.LabelOutDir.configure(font="-family {Arial} -size 12")
        self.LabelOutDir.configure(foreground="#000000")
        self.LabelOutDir.configure(highlightbackground="#d9d9d9")
        self.LabelOutDir.configure(highlightcolor="black")
        self.LabelOutDir.configure(text='''Output Dir:''')
        # Main action button: run the stain selection.
        self.ButtonSelectStains = tk.Button(top)
        self.ButtonSelectStains.place(relx=0.274, rely=0.765, height=30
                , width=158)
        self.ButtonSelectStains.configure(activebackground="#ececec")
        self.ButtonSelectStains.configure(activeforeground="#000000")
        self.ButtonSelectStains.configure(background="#d9d9d9")
        self.ButtonSelectStains.configure(disabledforeground="#a3a3a3")
        self.ButtonSelectStains.configure(font="-family {Arial} -size 12 -weight bold")
        self.ButtonSelectStains.configure(foreground="#000000")
        self.ButtonSelectStains.configure(highlightbackground="#d9d9d9")
        self.ButtonSelectStains.configure(highlightcolor="black")
        self.ButtonSelectStains.configure(pady="0")
        self.ButtonSelectStains.configure(text='''SELECT STAINS''')
        self.ButtonSelectStains.bind('<ButtonRelease-1>',lambda e:stain_selection_gui_support.btn_select_stains(e))
        # Button: quit the application.
        self.ButtonExit = tk.Button(top)
        self.ButtonExit.place(relx=0.037, rely=0.856, height=44, width=87)
        self.ButtonExit.configure(activebackground="#ececec")
        self.ButtonExit.configure(activeforeground="#000000")
        self.ButtonExit.configure(background="#d9d9d9")
        self.ButtonExit.configure(disabledforeground="#a3a3a3")
        self.ButtonExit.configure(font="-family {Arial} -size 16 -weight bold")
        self.ButtonExit.configure(foreground="#000000")
        self.ButtonExit.configure(highlightbackground="#d9d9d9")
        self.ButtonExit.configure(highlightcolor="black")
        self.ButtonExit.configure(pady="0")
        self.ButtonExit.configure(text='''EXIT''')
        self.ButtonExit.bind('<Button-1>',lambda e:stain_selection_gui_support.btn_exit(e))
        # Menu bar: description / instructions / quit commands.
        self.menubar = tk.Menu(top,font=('Arial',12,),bg=_bgcolor,fg=_fgcolor)
        top.configure(menu = self.menubar)
        self.menubar.add_command(
                activebackground="#ececec",
                activeforeground="#000000",
                background="#d9d9d9",
                command=stain_selection_gui_support.prog_descript,
                font="TkDefaultFont",
                foreground="#000000",
                label="Program Description")
        self.menubar.add_separator(
                background="#d9d9d9")
        self.menubar.add_command(
                activebackground="#ececec",
                activeforeground="#000000",
                background="#d9d9d9",
                command=stain_selection_gui_support.prog_instruct,
                font="TkDefaultFont",
                foreground="#000000",
                label="Program Instructions")
        self.menubar.add_separator(
                background="#d9d9d9")
        self.menubar.add_command(
                activebackground="#ececec",
                activeforeground="#000000",
                background="#d9d9d9",
                command=stain_selection_gui_support.quit,
                font="TkDefaultFont",
                foreground="#000000",
                label="Quit")
        # Entries for the lower H/S/V thresholds (bound to support-module vars).
        self.EntryLowHue = tk.Entry(top)
        self.EntryLowHue.place(relx=0.087, rely=0.245, height=22, relwidth=0.125)
        self.EntryLowHue.configure(background="white")
        self.EntryLowHue.configure(disabledforeground="#a3a3a3")
        self.EntryLowHue.configure(font="-family {Arial} -size 12")
        self.EntryLowHue.configure(foreground="#000000")
        self.EntryLowHue.configure(highlightbackground="#d9d9d9")
        self.EntryLowHue.configure(highlightcolor="black")
        self.EntryLowHue.configure(insertbackground="black")
        self.EntryLowHue.configure(selectbackground="#c4c4c4")
        self.EntryLowHue.configure(selectforeground="black")
        self.EntryLowHue.configure(textvariable=stain_selection_gui_support.low_hue)
        self.EntryLowSat = tk.Entry(top)
        self.EntryLowSat.place(relx=0.237, rely=0.245, height=22, relwidth=0.125)
        self.EntryLowSat.configure(background="white")
        self.EntryLowSat.configure(disabledforeground="#a3a3a3")
        self.EntryLowSat.configure(font="-family {Arial} -size 12")
        self.EntryLowSat.configure(foreground="#000000")
        self.EntryLowSat.configure(highlightbackground="#d9d9d9")
        self.EntryLowSat.configure(highlightcolor="black")
        self.EntryLowSat.configure(insertbackground="black")
        self.EntryLowSat.configure(selectbackground="#c4c4c4")
        self.EntryLowSat.configure(selectforeground="black")
        self.EntryLowSat.configure(textvariable=stain_selection_gui_support.low_sat)
        self.EntryLowVal = tk.Entry(top)
        self.EntryLowVal.place(relx=0.387, rely=0.245, height=22, relwidth=0.125)
        self.EntryLowVal.configure(background="white")
        self.EntryLowVal.configure(disabledforeground="#a3a3a3")
        self.EntryLowVal.configure(font="-family {Arial} -size 12")
        self.EntryLowVal.configure(foreground="#000000")
        self.EntryLowVal.configure(highlightbackground="#d9d9d9")
        self.EntryLowVal.configure(highlightcolor="black")
        self.EntryLowVal.configure(insertbackground="black")
        self.EntryLowVal.configure(selectbackground="#c4c4c4")
        self.EntryLowVal.configure(selectforeground="black")
        self.EntryLowVal.configure(textvariable=stain_selection_gui_support.low_val)
        # Entries for the upper H/S/V thresholds.
        self.EntryUpHue = tk.Entry(top)
        self.EntryUpHue.place(relx=0.087, rely=0.382,height=22, relwidth=0.125)
        self.EntryUpHue.configure(background="white")
        self.EntryUpHue.configure(disabledforeground="#a3a3a3")
        self.EntryUpHue.configure(font="-family {Arial} -size 12")
        self.EntryUpHue.configure(foreground="#000000")
        self.EntryUpHue.configure(highlightbackground="#d9d9d9")
        self.EntryUpHue.configure(highlightcolor="black")
        self.EntryUpHue.configure(insertbackground="black")
        self.EntryUpHue.configure(selectbackground="#c4c4c4")
        self.EntryUpHue.configure(selectforeground="black")
        self.EntryUpHue.configure(textvariable=stain_selection_gui_support.up_hue)
        self.EntryUpSat = tk.Entry(top)
        self.EntryUpSat.place(relx=0.237, rely=0.382,height=22, relwidth=0.125)
        self.EntryUpSat.configure(background="white")
        self.EntryUpSat.configure(disabledforeground="#a3a3a3")
        self.EntryUpSat.configure(font="-family {Arial} -size 12")
        self.EntryUpSat.configure(foreground="#000000")
        self.EntryUpSat.configure(highlightbackground="#d9d9d9")
        self.EntryUpSat.configure(highlightcolor="black")
        self.EntryUpSat.configure(insertbackground="black")
        self.EntryUpSat.configure(selectbackground="#c4c4c4")
        self.EntryUpSat.configure(selectforeground="black")
        self.EntryUpSat.configure(textvariable=stain_selection_gui_support.up_sat)
        self.EntryUpVal = tk.Entry(top)
        self.EntryUpVal.place(relx=0.387, rely=0.382,height=22, relwidth=0.125)
        self.EntryUpVal.configure(background="white")
        self.EntryUpVal.configure(disabledforeground="#a3a3a3")
        self.EntryUpVal.configure(font="-family {Arial} -size 12")
        self.EntryUpVal.configure(foreground="#000000")
        self.EntryUpVal.configure(highlightbackground="#d9d9d9")
        self.EntryUpVal.configure(highlightcolor="black")
        self.EntryUpVal.configure(insertbackground="black")
        self.EntryUpVal.configure(selectbackground="#c4c4c4")
        self.EntryUpVal.configure(selectforeground="black")
        self.EntryUpVal.configure(textvariable=stain_selection_gui_support.up_val)
        # Entries for the Gaussian-blur kernel size and the area threshold.
        self.EntryGaussianBlur = tk.Entry(top)
        self.EntryGaussianBlur.place(relx=0.087, rely=0.474, height=22
                , relwidth=0.125)
        self.EntryGaussianBlur.configure(background="white")
        self.EntryGaussianBlur.configure(disabledforeground="#a3a3a3")
        self.EntryGaussianBlur.configure(font="-family {Arial} -size 12")
        self.EntryGaussianBlur.configure(foreground="#000000")
        self.EntryGaussianBlur.configure(insertbackground="black")
        self.EntryGaussianBlur.configure(textvariable=stain_selection_gui_support.gaussian_blur)
        self.EntryGaussianBlur.configure(width=104)
        self.EntryAreaThresh = tk.Entry(top)
        self.EntryAreaThresh.place(relx=0.087, rely=0.566, height=22
                , relwidth=0.125)
        self.EntryAreaThresh.configure(background="white")
        self.EntryAreaThresh.configure(disabledforeground="#a3a3a3")
        self.EntryAreaThresh.configure(font="-family {Arial} -size 12")
        self.EntryAreaThresh.configure(foreground="#000000")
        self.EntryAreaThresh.configure(highlightbackground="#d9d9d9")
        self.EntryAreaThresh.configure(highlightcolor="black")
        self.EntryAreaThresh.configure(insertbackground="black")
        self.EntryAreaThresh.configure(selectbackground="#c4c4c4")
        self.EntryAreaThresh.configure(selectforeground="black")
        self.EntryAreaThresh.configure(textvariable=stain_selection_gui_support.area_thresh)
        self.EntryAreaThresh.configure(width=100)
        # Column headers above the lower-threshold entries.
        self.LabelHueLow = tk.Label(top)
        self.LabelHueLow.place(relx=0.087, rely=0.199, height=24, width=34)
        self.LabelHueLow.configure(activebackground="#f9f9f9")
        self.LabelHueLow.configure(activeforeground="black")
        self.LabelHueLow.configure(anchor='w')
        self.LabelHueLow.configure(background="#d9d9d9")
        self.LabelHueLow.configure(disabledforeground="#a3a3a3")
        self.LabelHueLow.configure(font="-family {Arial} -size 12")
        self.LabelHueLow.configure(foreground="#000000")
        self.LabelHueLow.configure(highlightbackground="#d9d9d9")
        self.LabelHueLow.configure(highlightcolor="black")
        self.LabelHueLow.configure(text='''Hue''')
        self.LabelSatLow = tk.Label(top)
        self.LabelSatLow.place(relx=0.237, rely=0.199, height=24, width=77)
        self.LabelSatLow.configure(activebackground="#f9f9f9")
        self.LabelSatLow.configure(activeforeground="black")
        self.LabelSatLow.configure(anchor='w')
        self.LabelSatLow.configure(background="#d9d9d9")
        self.LabelSatLow.configure(disabledforeground="#a3a3a3")
        self.LabelSatLow.configure(font="-family {Arial} -size 12")
        self.LabelSatLow.configure(foreground="#000000")
        self.LabelSatLow.configure(highlightbackground="#d9d9d9")
        self.LabelSatLow.configure(highlightcolor="black")
        self.LabelSatLow.configure(text='''Saturation''')
        self.LabelValueLow = tk.Label(top)
        self.LabelValueLow.place(relx=0.387, rely=0.199, height=24, width=46)
        self.LabelValueLow.configure(activebackground="#f9f9f9")
        self.LabelValueLow.configure(activeforeground="black")
        self.LabelValueLow.configure(anchor='w')
        self.LabelValueLow.configure(background="#d9d9d9")
        self.LabelValueLow.configure(disabledforeground="#a3a3a3")
        self.LabelValueLow.configure(font="-family {Arial} -size 12")
        self.LabelValueLow.configure(foreground="#000000")
        self.LabelValueLow.configure(highlightbackground="#d9d9d9")
        self.LabelValueLow.configure(highlightcolor="black")
        self.LabelValueLow.configure(text='''Value''')
        # Column headers above the upper-threshold entries.
        self.LabelUpSat = tk.Label(top)
        self.LabelUpSat.place(relx=0.237, rely=0.336, height=24, width=84)
        self.LabelUpSat.configure(activebackground="#f9f9f9")
        self.LabelUpSat.configure(activeforeground="black")
        self.LabelUpSat.configure(anchor='w')
        self.LabelUpSat.configure(background="#d9d9d9")
        self.LabelUpSat.configure(disabledforeground="#a3a3a3")
        self.LabelUpSat.configure(font="-family {Arial} -size 12")
        self.LabelUpSat.configure(foreground="#000000")
        self.LabelUpSat.configure(highlightbackground="#d9d9d9")
        self.LabelUpSat.configure(highlightcolor="black")
        self.LabelUpSat.configure(text='''Saturation''')
        self.LabelUpVal = tk.Label(top)
        self.LabelUpVal.place(relx=0.387, rely=0.336, height=24, width=54)
        self.LabelUpVal.configure(activebackground="#f9f9f9")
        self.LabelUpVal.configure(activeforeground="black")
        self.LabelUpVal.configure(anchor='w')
        self.LabelUpVal.configure(background="#d9d9d9")
        self.LabelUpVal.configure(disabledforeground="#a3a3a3")
        self.LabelUpVal.configure(font="-family {Arial} -size 12")
        self.LabelUpVal.configure(foreground="#000000")
        self.LabelUpVal.configure(highlightbackground="#d9d9d9")
        self.LabelUpVal.configure(highlightcolor="black")
        self.LabelUpVal.configure(text='''Value''')
        self.LabelUpHue = tk.Label(top)
        self.LabelUpHue.place(relx=0.087, rely=0.336, height=24, width=34)
        self.LabelUpHue.configure(activebackground="#f9f9f9")
        self.LabelUpHue.configure(activeforeground="black")
        self.LabelUpHue.configure(anchor='w')
        self.LabelUpHue.configure(background="#d9d9d9")
        self.LabelUpHue.configure(disabledforeground="#a3a3a3")
        self.LabelUpHue.configure(font="-family {Arial} -size 12")
        self.LabelUpHue.configure(foreground="#000000")
        self.LabelUpHue.configure(highlightbackground="#d9d9d9")
        self.LabelUpHue.configure(highlightcolor="black")
        self.LabelUpHue.configure(text='''Hue''')
        # Caption for the Gaussian-blur entry.
        self.LabelBlur = tk.Label(top)
        self.LabelBlur.place(relx=0.075, rely=0.428, height=24, width=245)
        self.LabelBlur.configure(anchor='w')
        self.LabelBlur.configure(background="#d9d9d9")
        self.LabelBlur.configure(disabledforeground="#a3a3a3")
        self.LabelBlur.configure(font="-family {Arial} -size 12")
        self.LabelBlur.configure(foreground="#000000")
        self.LabelBlur.configure(justify='left')
        self.LabelBlur.configure(text='''Gaussian Blur (ODD Integers Only)''')
        self.LabelBlur.configure(width=245)
if __name__ == '__main__':
    # Run standalone: build the GUI and enter the Tk event loop.
    vp_start_gui()
| [
"noreply@github.com"
] | reillyp.noreply@github.com |
4d415dba725c748bc58a7e00c0dbecaa6786ce03 | 0f67aafb602a50376790c86e3369652ca4dd9190 | /src/main.py | 5983bd81176f22c758a59ab28669b14231512805 | [] | no_license | danilojpl/Q-Learning | 1eec4834f604f577b52b1e62e5f697aed579cefe | 020490057068b5ea6f6eb4314e3077333c44612b | refs/heads/main | 2023-05-06T03:39:05.734396 | 2021-05-29T17:29:26 | 2021-05-29T17:29:26 | 370,101,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from config import GRID
from treinar import Treinar
from agente import estadoTerminal
from grid import melhorCaminho
def escolher():
    """Prompt the user for a grid position until a free (non-terminal) spot
    is chosen; returns the chosen (row, column) pair as the raw input strings."""
    while True:
        x = input("linha: ")
        y = input("coluna: ")
        if not estadoTerminal(int(x), int(y), GRID):
            return x, y
        print ("vaga ocupada, digite novamente: ")
# Train the Q-learning agent on the grid and compute the reward table.
aprender = Treinar()
aprender.criarDicionario(GRID)
recompensas = aprender.calcPontuacao(GRID)
# Ask where the car is parked, then repeatedly display the best path until
# the user chooses to stop (prompts are in Portuguese).
print ("digite a linha e coluna onde o carro está estacionado")
x, y = escolher()
while True:
    melhorCaminho(recompensas,int(x),int(y))
    print ("deseja testar em um novo local?:\ndigite 1 para sim ou qualquer outra tecla para finalizar o programa: ")
    resp = input()
    if (resp == "1"):
        x, y = escolher()
    else:
        break
| [
"danilo.duartejp@outlook.com"
] | danilo.duartejp@outlook.com |
fbd5a0f6abb602e65d216522cbe43a9702dbb210 | 5bf614a818d6b62cfa7f6ee67a90082e2cf92824 | /python/tools/camera_stats/server/utils/mongo_handle.py | 51638d395c67b12da1dd92ea30726921b25231cc | [] | no_license | Jerry315/scripts | d28f37fe976e922a22699c3f9e778475f518d9ac | 6ba57724311a465e733f21bed3578dcd246a4ac5 | refs/heads/master | 2022-12-06T13:15:31.039207 | 2020-08-29T02:18:26 | 2020-08-29T02:18:26 | 291,177,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,068 | py | # -*- coding: utf-8 -*-
import json
import time
from schematics.models import Model
from pymongo import ReturnDocument, MongoClient
from settings import config
# print config
mongo_config = config['mongodb']
db_session = MongoClient(mongo_config['url'], connect=False,)
db = db_session[mongo_config['db']]
class BaseModel(Model):
    """Light ODM layer coupling a schematics Model with a pymongo collection.

    Subclasses must set ``__database__`` (a pymongo Database) and
    ``__collection__`` (the collection name); every classmethod below
    operates on that collection.
    """
    __database__ = ''
    __collection__ = ''

    def __init__(self, **kwargs):
        super(BaseModel, self).__init__(**kwargs)
        # Per-instance handle to the backing collection.
        self._collection = self.__database__.get_collection(self.__collection__)

    def to_json(self):
        """Serialize this model to a JSON string."""
        return json.dumps(self.serialize())

    def to_dict(self, fields, func=None):
        """Return {field: value} for ``fields``; map each value through
        ``func`` when given."""
        dct = {}
        for field in fields:
            value = self.get(field)
            if func:
                value = func(value)
            dct[field] = value
        return dct

    @classmethod
    def next_id(cls):
        """Atomically allocate the next integer id for this collection via a
        per-collection counter document in the ``counters`` collection.

        (Was declared with ``self`` despite being a classmethod; renamed to
        ``cls`` — call sites are unaffected.)
        """
        filter = {'_id': cls.__collection__}
        update = {'$inc': {'seq': 1}}
        rv = cls.__database__.counters.find_one_and_update(
            filter, update, upsert=True, return_document=ReturnDocument.AFTER
        )
        return rv['seq']

    @classmethod
    def get_collection(cls):
        """Return the pymongo Collection this model is bound to."""
        return cls.__database__.get_collection(cls.__collection__)

    @classmethod
    def distinc(cls, field, filter=None):
        """Return the distinct values of ``field`` among matching documents.

        (Name is a typo of 'distinct' — kept for backward compatibility.)
        """
        collection = cls.get_collection()
        rv = collection.distinct(field, filter)
        return rv

    @classmethod
    def find_by_id(cls, _id, fields=None):
        """Fetch one document by _id and wrap it in the model, or None."""
        filter = {'_id': _id}
        if fields:
            fields = dict([(f, True) for f in fields])
        collection = cls.get_collection()
        record = collection.find_one(filter, fields)
        if not record:
            return None
        return cls(record)

    @classmethod
    def find_one(cls, filter, fields=None, add_empty=False, **kwargs):
        """Fetch one matching document wrapped in the model.

        Returns an empty model when nothing matches and ``add_empty`` is
        true, otherwise None.
        """
        if fields:
            fields = dict([(f, True) for f in fields])
        collection = cls.get_collection()
        record = collection.find_one(filter, fields, **kwargs)
        if not record:
            if add_empty:
                return cls({})
            return None
        return cls(record)

    @classmethod
    def find_last_one(cls, filter, time_filed, fileds=None, time_limit=None):
        """Return the newest raw document ordered by ``time_filed`` descending.

        ``time_limit`` (when given) restricts to create_time >= time_limit.
        Misspelled parameter names (``time_filed``/``fileds``) are kept since
        callers may pass them by keyword. Returns {} when nothing matches.
        NOTE(review): ``filter`` is mutated in place when time_limit is set.
        """
        if fileds:
            fileds = dict([(f, True) for f in fileds])
        collection = cls.get_collection()
        if time_limit:
            filter.update({'create_time': {'$gte': time_limit}})
        record = collection.find_one(
            filter, fileds, sort=[(time_filed, -1)]
        )
        if not record:
            return {}
        return record

    @classmethod
    def find(cls, filter, fields=None, **kwargs):
        """Return a pymongo cursor over the matching raw documents."""
        if fields:
            fields = dict([(f, True) for f in fields])
        collection = cls.get_collection()
        records = collection.find(filter, fields, **kwargs)
        return records

    @classmethod
    def find_by_ids(cls, ids, fields=None):
        """Return a cursor over documents whose _id is in ``ids``."""
        return cls._find_by_field_data('_id', ids, fields)

    @classmethod
    def find_one_and_update(cls, filter, updates, fields=None, upsert=False,
                            return_doc=ReturnDocument.BEFORE, set_on_insert=None):
        """Find one document and apply ``updates`` via $set, always stamping
        ``update_time``.

        BUGFIX: ``$setOnInsert`` used to be inserted *inside* the $set
        payload, which MongoDB rejects ($-prefixed field within $set); it is
        now a top-level update operator. ``updates`` is no longer mutated
        in place.
        """
        if fields:
            fields = dict([(f, True) for f in fields])
        set_fields = dict(updates)  # copy: do not mutate the caller's dict
        set_fields['update_time'] = time.time()
        update_doc = {'$set': set_fields}
        if set_on_insert:
            # $setOnInsert must sit next to $set, not inside it.
            update_doc['$setOnInsert'] = set_on_insert
        collection = cls.get_collection()
        record = collection.find_one_and_update(
            filter, update_doc, projection=fields, return_document=return_doc, upsert=upsert
        )
        if not record:
            return None
        return record

    @classmethod
    def _find_by_field_data(cls, field, data, fields=None):
        """Cursor over documents where ``field`` is in ``data``."""
        if fields:
            fields = dict([(f, True) for f in fields])
        filter = {field: {'$in': data}}
        collection = cls.get_collection()
        records = collection.find(filter, fields)
        return records

    @classmethod
    def _find_one_by_field_data(cls, field, data, fields=None):
        """One raw document where ``field`` is in ``data``, or None."""
        if fields:
            fields = dict([(f, True) for f in fields])
        filter = {field: {'$in': data}}
        collection = cls.get_collection()
        record = collection.find_one(filter, fields)
        if not record:
            return None
        return record

    @classmethod
    def insert(cls, doc):
        """Insert a single document.

        NOTE(review): Collection.insert() was removed in PyMongo 4; migrate
        to insert_one() when the driver is upgraded.
        """
        collection = cls.get_collection()
        collection.insert(doc)

    @classmethod
    def multi_insert(cls, docs):
        """Bulk-insert model instances as plain dicts."""
        collection = cls.get_collection()
        collection.insert_many([dict(doc.items()) for doc in docs])

    @classmethod
    def total(cls, filter={}):
        """Count matching documents.

        NOTE(review): Collection.count() was removed in PyMongo 4; migrate
        to count_documents(). The mutable default ``filter={}`` is kept for
        interface compatibility (it is never mutated here).
        """
        collection = cls.get_collection()
        return collection.count(filter)
class DeviceModel(BaseModel):
    # Camera statistics records live in the 'camera_stats' collection of the
    # MongoDB database configured in settings.
    __database__ = db
    __collection__ = 'camera_stats'
| [
"710899905@qq.com"
] | 710899905@qq.com |
bf0749712c8e51643cf50ef608885242ebd8e5df | 7c1f6ef28991c681bfd30d2059533ccf471110d3 | /myViewBox.py | b96c30ef780227c8e874dfcc69420ec478f475f9 | [] | no_license | lolizz00/RF_TOOL | e546da667cca38402654519eeca4a4f836a427b4 | a9c70cc93a64550f16ec33cddc00f615844c3979 | refs/heads/master | 2023-06-21T13:27:00.203422 | 2021-07-17T09:16:03 | 2021-07-17T09:16:03 | 360,627,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Point import Point
from pyqtgraph import functions as fn
from PyQt5.QtCore import pyqtSignal
__all__ = ['ViewBox']
class CustomViewBox(pg.ViewBox):
    """pyqtgraph ViewBox that emits ``sigRangeSelect`` (the visible range as
    a QRectF) after every wheel zoom, pan drag, or right-button rubber-band
    zoom."""

    sigRangeSelect = pyqtSignal(QtCore.QRectF)

    def __init__(self, *args, **kwds):
        pg.ViewBox.__init__(self, *args, **kwds)
        # Right-button drag draws a zoom rectangle.
        # (The original called setMouseMode twice with the same argument;
        # the redundant call has been dropped.)
        self.setMouseMode(self.RectMode)

    def wheelEvent(self, ev, axis=None):
        """Scale the view about the cursor position and emit the new range."""
        # BUGFIX: np.float was removed in NumPy 1.24; it was only an alias
        # for the builtin float, so dtype=float is behaviour-identical.
        mask = np.array(self.state['mouseEnabled'], dtype=float)
        if axis is not None and axis >= 0 and axis < len(mask):
            # Constrain zooming to the requested axis only.
            mv = mask[axis]
            mask[:] = 0
            mask[axis] = mv
        s = ((mask * 0.02) + 1) ** (ev.delta() * self.state['wheelScaleFactor'])  # actual scaling factor
        center = Point(fn.invertQTransform(self.childGroup.transform()).map(ev.pos()))
        self._resetTarget()
        self.scaleBy(s, center)
        self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
        ev.accept()
        # Report the resulting visible range as a rectangle.
        rect = QtCore.QRectF()
        rect.setCoords(self.state['viewRange'][0][0], self.state['viewRange'][1][0],
                       self.state['viewRange'][0][1], self.state['viewRange'][1][1])
        self.sigRangeSelect.emit(rect)

    def mouseDragEvent(self, ev, axis=None):
        """Pan on left/middle drag; rubber-band zoom on right drag.

        Emits ``sigRangeSelect`` with the new visible range in both cases.
        """
        ev.accept()
        pos = ev.pos()
        lastPos = ev.lastPos()
        dif = pos - lastPos
        dif = dif * -1
        # BUGFIX: dtype=float replaces the removed np.float alias (NumPy >= 1.24).
        mouseEnabled = np.array(self.state['mouseEnabled'], dtype=float)
        mask = mouseEnabled.copy()
        if axis is not None:
            mask[1 - axis] = 0.0
        if ev.button() & (QtCore.Qt.LeftButton | QtCore.Qt.MidButton):
            # Translate (pan) the view by the drag delta on the enabled axes.
            tr = dif * mask
            tr = self.mapToView(tr) - self.mapToView(Point(0, 0))
            x = tr.x() if mask[0] == 1 else None
            y = tr.y() if mask[1] == 1 else None
            self._resetTarget()
            if x is not None or y is not None:
                self.translateBy(x=x, y=y)
            self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
            rect = QtCore.QRectF()
            rect.setCoords(self.state['viewRange'][0][0], self.state['viewRange'][1][0],
                           self.state['viewRange'][0][1], self.state['viewRange'][1][1])
            self.sigRangeSelect.emit(rect)
        elif ev.button() & QtCore.Qt.RightButton:
            if ev.isFinish():
                # Drag finished: hide the rubber band and zoom to the selection.
                self.rbScaleBox.hide()
                ax = QtCore.QRectF(Point(ev.buttonDownPos(ev.button())), Point(pos))
                ax = self.childGroup.mapRectFromParent(ax)
                rect = ax
                self.showAxRect(ax)
                self.axHistoryPointer += 1
                self.axHistory = self.axHistory[:self.axHistoryPointer] + [ax]
                self.sigRangeSelect.emit(rect)
            else:
                # Drag in progress: just update the rubber-band rectangle.
                self.updateScaleBox(ev.buttonDownPos(), ev.pos())
| [
"timofei.volkov@mail.ru"
] | timofei.volkov@mail.ru |
f457f6e9c5266147f1d9d0e3d6d45eee4bff3159 | 17377d4a64ca42aee854097b26fa1ba088995a5a | /server.py | 3c0b309bc0ff0fc151d4e9af80326353aa59b7cf | [] | no_license | grecabral/mmserver | dce4153439b390c5ef50e8c56440c195172e100d | 4c2832987b32ce97f888eabc4116ecb114ad760a | refs/heads/master | 2022-12-18T06:12:43.753463 | 2017-04-10T18:09:30 | 2017-04-10T18:09:30 | 294,192,491 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,752 | py | from config import *
from server_config import *
import logging
import socket
import sys
from thread import *
from time import time
class clientWorker:
    """Serves one connected client on its own thread.

    Lifecycle: measure the average ping (pingLoop), ask for the client's
    LAN address (localIpLoop), then handle matchmaking commands until the
    client disconnects (mainLoop).
    """

    def __init__(self, clientSock):
        self.clientSock = clientSock
        self.pingAvgMS = -1   # average round trip in ms; -1 until measured
        self.localIP = ""     # LAN IP reported by the client in localIpLoop
        # External address/port of the client as seen by the server.
        clientTuple = clientSock.getpeername()
        self.externalIP = clientTuple[0]
        self.externalPort = clientTuple[1]
        self.str = "{0}:{1}".format(self.externalIP, self.externalPort)
        self.clientLogger = logging.getLogger("{0}".format(self.str))
        # Each client is served on its own thread.
        start_new_thread(self.mainLoop, ())

    def __str__(self):
        return self.str

    def __unicode__(self):
        return unicode(self.str)

    def pingLoop(self):
        """Exchange PING_QT ping messages and store the average RTT in ms."""
        # Timestamps of sent/received pings and their counters.
        s_time = []
        r_time = []
        s_qt = 0
        r_qt = 0
        # Send the first ping.
        s_time.append(time())
        self.clientLogger.warning('Sending ping({0})'.format(s_qt))
        self.clientSock.sendall(MSG_PING)
        s_qt += 1
        # Wait for each echo, record its arrival time, then send the next ping.
        self.clientLogger.warning(PING_LOOP)
        while True:
            try:
                data = self.clientSock.recv(BUFF)
                # Quit message or closed socket ends the handshake.
                if MSG_QUIT in data or not data:
                    self.clientSock.sendall(MSG_DC)
                    self.clientLogger.warning("PingLoop - Empty data or msg quit")
                    break
                self.clientLogger.warning("{}({})".format(data, len(data)))
                if MSG_PING in data:
                    self.clientLogger.warning('Received ping({0})'.format(r_qt))
                    r_time.append(time())
                    r_qt += 1
                # Once every ping came back, compute the average round trip.
                if r_qt == PING_QT:
                    pingAvg = 0
                    for x in xrange(0, PING_QT):
                        pingAvg += r_time[x] - s_time[x]
                    pingAvg /= PING_QT
                    self.pingAvgMS = int(round(pingAvg * 1000))
                    self.clientLogger.warning("Average ping = {0}s = {1}ms".format(pingAvg, self.pingAvgMS))
                    break
                # Keep sending until PING_QT pings are out.
                if s_qt < PING_QT:
                    s_time.append(time())
                    self.clientLogger.warning('Sending ping({0})'.format(s_qt))
                    self.clientSock.sendall(MSG_PING)
                    s_qt += 1
            except socket.error:
                # The client disconnected abruptly.
                self.clientLogger.warning("Forced Disconnection!")
                break

    def localIpLoop(self):
        """Ask the client for its LAN IP and store the reply in self.localIP."""
        self.clientLogger.warning("Sending Local IP Message")
        self.clientSock.sendall(MSG_LOCALIP)
        self.clientLogger.warning(LOCALIP_LOOP)
        while True:
            try:
                data = self.clientSock.recv(BUFF)
                if MSG_QUIT in data or not data:
                    self.clientSock.sendall(MSG_DC)
                    self.clientLogger.warning("LocalIPLoop - Empty data or msg quit")
                    break
                self.clientLogger.warning("{}({})".format(data, len(data)))
                if MSG_LOCALIP in data:
                    # Expected reply format: "<MSG_LOCALIP> <ip>".
                    ip = data.split()[1]
                    self.localIP = ip
                    self.clientLogger.warning(data)
                    self.clientLogger.warning("Senging done message")
                    self.clientSock.sendall("{} - {} = {}ms".format(MSG_DONE, MSG_PING_RESULT, self.pingAvgMS))
                    break
            except socket.error:
                self.clientLogger.warning("Forced Disconnection!")
                break

    def mainLoop(self):
        """Thread entry point: handshake, then serve commands until disconnect."""
        self.pingLoop()
        self.localIpLoop()
        # Client listener main loop.
        self.clientLogger.warning(MAIN_LOOP)
        self.clientSock.sendall('Welcome to the server. Type something and hit enter\n')
        while True:
            try:
                data = self.clientSock.recv(BUFF)
                self.clientLogger.warning("{}({})".format(data, len(data)))
                reply = 'I dont know this command = ' + data
                # Quit message or closed socket ends the session.
                if MSG_QUIT in data or not data:
                    self.clientSock.sendall(MSG_DC)
                    self.clientLogger.warning("MainLoop - Empty data or msg quit")
                    break
                if MSG_LOCALIP in data:
                    self.clientLogger.warning("LocalIP Response = {}".format(data))
                    # TO-DO
                # Refreshes ping on request.
                if MSG_PING in data:
                    self.pingLoop()
                # Matchmaking: pair this client with another player.
                if MSG_CONNECT in data:
                    self.clientLogger.warning("Searching for the best player to match")
                    bestMatch = None
                    # Pick the other connected player with the lowest average ping.
                    for ip, p_array in players.iteritems():
                        for op in p_array:
                            if op != self:
                                if not bestMatch:
                                    bestMatch = op
                                else:
                                    if op.pingAvgMS < bestMatch.pingAvgMS:
                                        bestMatch = op
                    if bestMatch:
                        self.clientLogger.warning("Found best match")
                        hostMessage = "{}".format(MSG_HOST)
                        # The player with the better (lower) ping hosts; the
                        # other side receives the host's address (LAN address
                        # when both sit behind the same external IP / NAT).
                        if bestMatch.pingAvgMS < self.pingAvgMS:
                            self.clientLogger.warning("Client")
                            if bestMatch.externalIP != self.externalIP:
                                clientMessage = "{}-{}".format(MSG_CLIENT, bestMatch.externalIP)
                            else:
                                clientMessage = "{}-{}".format(MSG_CLIENT, bestMatch.localIP)
                            reply = clientMessage
                            bestMatch.clientSock.sendall(hostMessage)
                        else:
                            self.clientLogger.warning("Host")
                            if bestMatch.externalIP != self.externalIP:
                                clientMessage = "{}-{}".format(MSG_CLIENT, self.externalIP)
                            else:
                                clientMessage = "{}-{}".format(MSG_CLIENT, self.localIP)
                            reply = hostMessage
                            bestMatch.clientSock.sendall(clientMessage)
                    else:
                        # No other player is trying to play online.
                        self.clientLogger.warning("No match found")
                        reply = MSG_NONEFOUND
                # List every other connected player and its measured ping.
                if MSG_LISTPLAYERS in data:
                    self.clientLogger.warning("Listing players to {}".format(self.str))
                    # BUGFIX: `players` maps external IP -> list of workers, so
                    # list(players) yielded IP strings and .remove(self) raised
                    # ValueError (killing this thread). Flatten the worker
                    # lists and drop this client instead.
                    otherPlayers = [op for p_array in players.itervalues()
                                    for op in p_array if op is not self]
                    if otherPlayers:
                        reply = "{} = \n".format(MSG_LISTPLAYERS)
                        for op in otherPlayers:
                            reply += "{} - ({}ms)\n".format(op.str, op.pingAvgMS)
                    else:
                        reply = MSG_NONEFOUND
                # Send the answer to the client.
                self.clientSock.sendall(reply)
            except socket.error:
                # The client disconnected abruptly.
                self.clientLogger.warning("Forced Disconnection!")
                break
        # Out of the loop: log, deregister and close the socket.
        logger.warning("{0} - Closed Connection".format(self))
        removeClient(self)
        self.clientSock.close()
# Basic logging config
logging.basicConfig(format='%(asctime)s - %(name)s > %(message)s', level=logging.DEBUG)
# Players registry: maps external IP (str) -> list of clientWorker instances
# sharing that address (several clients can sit behind the same NAT).
players = {}
# Add client to list
def addClient(clientWorker):
    """Register a newly connected client under its external IP and log the roster."""
    bucket = players.setdefault(clientWorker.externalIP, [])
    bucket.append(clientWorker)
    listClients()
# Remove client from list
def removeClient(clientWorker):
    """Remove a client worker from the global players registry and log the roster.

    Drops the per-IP bucket entirely once it becomes empty.
    """
    externalIP = clientWorker.externalIP
    if externalIP in players:
        # Bug fix: the previous code called list.remove() while iterating the
        # same list, which skips elements after a removal. Rebuild the bucket
        # without the departing worker instead.
        players[externalIP] = [p for p in players[externalIP] if p != clientWorker]
        if not players[externalIP]:
            players.pop(externalIP, None)
    listClients()
# List clients on list
def listClients():
    """Log a one-line-per-player summary of every connected client and its ping."""
    if not players:
        logger.warning("No players connected")
        return
    lines = ["Players connected:"]
    for ip, bucket in players.iteritems():
        for p in bucket:
            if p.pingAvgMS != -1:
                ping_text = "{}ms".format(p.pingAvgMS)
            else:
                ping_text = "Not measured yet"
            lines.append("{} ({})".format(p, ping_text))
    logger.warning("\n".join(lines))
if __name__=='__main__':
    # Sets main logger
    logger = logging.getLogger("{0} v{1}".format(NAME, VERSION))
    # Scan argv for an argument containing PORTCMD and use its suffix as the
    # listen port (e.g. "<PORTCMD>12345").
    for s in sys.argv:
        if PORTCMD in s:
            PORT = int(s[len(PORTCMD):])
            print 'Argument List:', PORT
    # NOTE(review): "s" is rebound here as the listening socket, shadowing the
    # argv string left over from the loop above - confusing but harmless.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    logger.warning('Socket created')
    # Bind socket to local host and port
    try:
        s.bind((HOST, PORT))
    except socket.error as msg:
        logger.warning('Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])
        sys.exit()
    logger.warning('Socket bind complete on host {0} and port {1}'.format(socket.gethostbyname(socket.gethostname()), PORT))
    # Start listening on socket
    s.listen(10)
    logger.warning('Socket now listening')
    # Function for handling connections. This will be used to create threads
    while True:
        # Wait to accept a connection - blocking call
        clientSock, addr = s.accept()
        logger.warning('Connected with ' + addr[0] + ':' + str(addr[1]))
        # Start new thread takes 1st argument as a function name to be run, second is the tuple of arguments to the function.
        # Add client to list if it is new
        client = clientWorker(clientSock)
        addClient(client)
    # NOTE(review): unreachable - the accept loop above never terminates.
    s.close()
| [
"csf2@cin.ufpe.br"
] | csf2@cin.ufpe.br |
5566d9fe68f4a8e90970c0c0c27916071980e61a | 2ec14fd1724fc8959e1d3a1b4d3f61d5c0cf6f48 | /test/functional/feature_uacomment.py | e8b6937d62c8a3a6b6640d2715077b8465f8deaf | [
"MIT"
] | permissive | vitae-labs/Vitae | 7ddf8142d1e663f406399ec17de1c7bbba5e32fd | fa301e714cb26e742cfe29164a25961f1ff6d52c | refs/heads/main | 2022-07-28T15:48:24.765770 | 2022-01-29T06:13:19 | 2022-01-29T06:13:19 | 451,559,855 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Copyright (c) 2020-2021 The Vitae Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
import re
from test_framework.test_framework import VitaeTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UacommentTest(VitaeTestFramework):
    """Functional test for the -uacomment command line option."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        self.log.info("test multiple -uacomment")
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert_equal(subversion[-12:-1], "(testnode0)")

        self.restart_node(0, ["-uacomment=foo"])
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert_equal(subversion[-17:-1], "(testnode0; foo)")

        self.log.info("test -uacomment max length")
        self.stop_node(0)
        expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
        self.nodes[0].assert_start_raises_init_error(["-uacomment=" + 'a' * 256], expected, match=ErrorMatch.FULL_REGEX)

        self.log.info("test -uacomment unsafe characters")
        for bad_char in ['/', ':', '(', ')', '₿', '🏃']:
            expected = r"Error: User Agent comment \(" + re.escape(bad_char) + r"\) contains unsafe characters."
            self.nodes[0].assert_start_raises_init_error(["-uacomment=" + bad_char], expected, match=ErrorMatch.FULL_REGEX)


if __name__ == '__main__':
    UacommentTest().main()
| [
"hemant.singh.leu@gmail.com"
] | hemant.singh.leu@gmail.com |
4f5bf9e4b0a644d5fc52e36b483901fb4267793f | fdec534f5ae2ab1fd6c14d84ce6f60bdc5fb9cc3 | /demo.py | df94b86d20c360b43344e316dee88cef9f13e1a5 | [] | no_license | nithinr07/neurosky-data-logger | 7e555cb9fd7f2919dd3db942f2d9446944f7f69b | 0436611110039b29fee44697e7ee78c62caa6acb | refs/heads/main | 2023-03-07T18:02:57.174583 | 2021-02-22T14:11:43 | 2021-02-22T14:11:43 | 341,221,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from NeuroSkyPy.NeuroSkyPy import NeuroSkyPy
from time import sleep
import os
# Pair the headset over Bluetooth RFCOMM (runs in the background) before
# opening the serial device. Requires sudo and a paired device with this MAC.
os.system("sudo rfcomm connect /dev/rfcomm0 C4:64:E3:E6:E3:7D 1 &")
# Give rfcomm a moment to establish the link before opening the port.
sleep(3)
neuropy = NeuroSkyPy("/dev/rfcomm0")
neuropy.start()
# Poll the headset forever, printing the latest readings ~5 times a second.
while(True):
    #neuropy.start()
    print("Attention = ", neuropy.attention)
    print("Loose Contact = ", neuropy.poorSignal)
    print("Meditation = ", neuropy.meditation)
    print()
    sleep(0.2) # Don't eat the CPU cycles
| [
"noreply@github.com"
] | nithinr07.noreply@github.com |
d09d651c8b884b3ed825d329a4531ec94b0b54d5 | ad71c89863122dfb4093db0d9f9c40d962d567ff | /Week 10/3-HorizontalHistogram.py | 487bedcb770ac0989049098209474854bb385e10 | [] | no_license | jacktnp/PSIT60 | 8958e7cca278c81d2c5d3af6956728c35425628d | b63c63d8d9c1e97ce66bbb0b884b1f19fecf7b6b | refs/heads/master | 2021-08-16T07:53:33.900161 | 2017-11-19T10:07:02 | 2017-11-19T10:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | """ PSIT Week 10
Wiput Pootong (60070090)
HorizontalHistogram
"""
def main():
    """ Display histogram of alphabets """
    # NOTE: any character that is not uppercase (digits, spaces, punctuation
    # included) lands in the "lower" table - same behaviour as before.
    text = input()
    upper = {}
    lower = {}
    for ch in text:
        table = upper if ch.isupper() else lower
        table[ch] = table.get(ch, 0) + 1
    # Lowercase/other characters first, then uppercase, each sorted.
    for table in (lower, upper):
        for ch in sorted(table):
            total = table[ch]
            pieces = []
            for idx in range(total):
                pieces.append("-")
                # Insert a separator after every 5 dashes, except at the end.
                if idx % 5 == 4 and idx != total - 1:
                    pieces.append("|")
            print("%s : %s" % (ch, "".join(pieces)))
main()
| [
"wiput.pootong@gmail.com"
] | wiput.pootong@gmail.com |
c17ad1ba1dfe17e3fa802c32622852702517642a | 3424161b573d2fe8873905d434d459a28336e87c | /head_soccer_06_3/source/database/mysql.py | fbb8e3d541273717b2f80f718259dc62c29cae0d | [] | no_license | newtonis/Head-Soccer-Network | 412f7717b97bcb2216bc8086ef131e9e9a4f3908 | fd76920c486fb4af903b0e92b0d014a7d254f124 | refs/heads/master | 2023-05-23T23:08:46.952852 | 2021-06-27T00:20:12 | 2021-06-27T00:20:12 | 30,889,769 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | __author__ = 'Dylan'
import _mysql
import time
try:
    # Global connection shared by every SQLEngine method below.
    # NOTE(review): database credentials are hard-coded in source control.
    con = _mysql.connect('db4free.net','grandt','1221dylan','headsoccerdb')
except:
    # Bare except: any failure (DNS, network, auth) puts the module in
    # offline mode with con = None; GetServers() checks for this.
    print "[Info] Not able to reach internet"
    con = None
class SQLEngine:
    """Thin wrapper around the shared _mysql connection for the Servers table.

    NOTE(review): every query below is built by string concatenation, so any
    quote character in name/ip corrupts the statement (SQL injection risk).
    Consider escaping inputs or using a parameterised client library.
    """
    def CheckDeadServers(self):
        # Drop every server whose last heartbeat is older than two minutes.
        actual = self.GetServers()
        for x in actual:
            if float(x["Created"]) < time.time() - 120:
                self.RemoveServer(x["IP"],x["Name"])
    def AddServer(self,name,ip):
        ### Add server to Servers database ###
        # Returns True if the server was inserted, False if the name existed.
        con.query("SELECT * FROM Servers WHERE Name = '"+name+"'")
        if con.store_result().num_rows() == 0:
            con.query("INSERT INTO Servers (Name,IP,Created) VALUES ('"+name+"','"+ip+"',"+str(time.time())+")")
            return True
        else:
            return False
    def RemoveServer(self,ip,name):
        ### Remove server from Servers database by IP
        con.query("DELETE FROM Servers WHERE IP = '"+ip+"' AND Name = '"+name+"'")
    def GetServers(self):
        ### Return list of servers ###
        # Offline mode: no connection was established at import time.
        if not con:
            return []
        con.query("SELECT * FROM Servers")
        res = con.store_result()
        servers = []
        for x in range(res.num_rows()):
            data = list(res.fetch_row())[0]
            servers.append({"Name":data[0],"IP":data[1],"Created":data[2]})
        return servers
    def UpdateServer(self,ip,name):
        # Heartbeat: refresh the Created timestamp; best effort, errors ignored.
        try:
            con.query("UPDATE Servers SET Created="+str(time.time())+" WHERE IP = '"+ip+"' AND Name = '"+name+"'")
        except:
            pass
# Module-level singleton used by importers.
MySQL = SQLEngine()
"newtonis.penguin@gmail.com"
] | newtonis.penguin@gmail.com |
7e4b7a5bdfb6d25d255b784d6a4bb658c8cefabc | 5b878283551105da0585bdd6633654f2a1f93727 | /run_dv_seq2seq.py | 6a34ab8ef4be73a8e89d2ea2b551f359b2e58502 | [
"MIT"
] | permissive | xuefei1/RWG_DV-Seq2Seq | 5d3e8489aaafa03f2d9133f3d31dd3c8cfaadbcc | 2bbcdbdb005839241538323281471183d5c1959b | refs/heads/master | 2020-05-22T22:10:26.612804 | 2019-11-19T16:15:00 | 2019-11-19T16:15:00 | 186,541,479 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,971 | py | import utils.model_utils as mutil
from utils.lang_utils import *
from data_loaders import DVSeq2SeqDataLoader
from model_dv_seq2seq import *
from data.read_data import *
from constants import *
from params import prepare_params, merge_params
from components import LRDecayOptimizer, get_masked_nll_criterion, get_nll_criterion
def main(params):
    """End-to-end driver for the DV-Seq2Seq model.

    Reads train/valid/test data, builds (or loads cached) source and target
    vocabularies, constructs data loaders, the model and its optimizer,
    optionally resumes from a checkpoint, trains, and finally beam-decodes
    the test set, writing results to <model_name>_test_results.txt.
    """
    params.model_name = "dv_seq2seq"
    mutil.DEVICE_STR_OVERRIDE = params.device_str
    # --- data ---------------------------------------------------------------
    data_train = read_col_word_delim_data("data/dv_seq2seq_train.txt")
    data_valid = read_col_word_delim_data("data/dv_seq2seq_valid.txt")
    data_test = read_col_word_delim_data("data/dv_seq2seq_test.txt")
    # Word->count maps; source counts come from the first two columns, target
    # counts from the third.
    w2c_src = build_w2c_from_seg_word_lists([t[0] + t[1] for t in data_train])
    w2c_tgt = build_w2c_from_seg_word_lists([t[2] for t in data_train], limit=40000) # limit output vocab size
    print("data_train len: {}".format(len(data_train)))
    print("data_valid len: {}".format(len(data_valid)))
    print("data_test len: {}".format(len(data_test)))
    print("src w2c len: {}".format(len(w2c_src)))
    print("tgt w2c len: {}".format(len(w2c_tgt)))
    # --- vocabularies (cached as pickles; w2v only loaded on cache miss) ----
    pre_built_w2v = None
    src_vocab_cache_file = "cache/dv_seq2seq_src_vocab.pkl"
    tgt_vocab_cache_file = "cache/dv_seq2seq_tgt_vocab.pkl"
    if os.path.isfile(src_vocab_cache_file):
        print("Loading src vocab from cache " + src_vocab_cache_file)
        with open(src_vocab_cache_file, "rb") as f:
            src_vocab = pickle.load(f)
    else:
        print("Building src vocab")
        if pre_built_w2v is None:
            pre_built_w2v = load_gensim_word_vec(params.word_vec_file,
                                                 cache_file=params.vocab_cache_file)
        # Source OOVs get random embeddings (rand_oov_embed=True).
        src_vocab = W2VTrainableVocab(w2c_src, pre_built_w2v, embedding_dim=params.word_embedding_dim, rand_oov_embed=True,
                                      special_tokens=(
                                          PAD_TOKEN,
                                          OOV_TOKEN,
                                      ), light_weight=True)
        with open(src_vocab_cache_file, "wb") as f:
            pickle.dump(src_vocab, f, protocol=4)
    params.src_vocab_size = len(src_vocab.w2i)
    print("src vocab size: ", params.src_vocab_size)
    if os.path.isfile(tgt_vocab_cache_file):
        print("Loading tgt vocab from cache " + tgt_vocab_cache_file)
        with open(tgt_vocab_cache_file, "rb") as f:
            tgt_vocab = pickle.load(f)
    else:
        print("Building tgt vocab")
        if pre_built_w2v is None:
            pre_built_w2v = load_gensim_word_vec(params.word_vec_file,
                                                 cache_file=params.vocab_cache_file)
        # Target vocab additionally carries SOS/EOS for decoding.
        tgt_vocab = W2VTrainableVocab(w2c_tgt, pre_built_w2v, embedding_dim=params.word_embedding_dim, rand_oov_embed=False,
                                      special_tokens=(
                                          PAD_TOKEN,
                                          OOV_TOKEN,
                                          SOS_TOKEN,
                                          EOS_TOKEN,
                                      ), light_weight=True)
        with open(tgt_vocab_cache_file, "wb") as f:
            pickle.dump(tgt_vocab, f, protocol=4)
    params.tgt_vocab_size = len(tgt_vocab.w2i)
    print("tgt vocab size: ", params.tgt_vocab_size)
    # Expose vocab lookup tables and special token indices on params so the
    # model/training code can reach them.
    params.src_w2i = src_vocab.w2i
    params.src_i2w = src_vocab.i2w
    params.tgt_w2i = tgt_vocab.w2i
    params.tgt_i2w = tgt_vocab.i2w
    params.w2i = tgt_vocab.w2i
    params.i2w = tgt_vocab.i2w
    params.pad_idx = tgt_vocab.pad_idx
    params.oov_idx = tgt_vocab.oov_idx
    params.sos_idx = tgt_vocab.w2i[SOS_TOKEN]
    params.eos_idx = tgt_vocab.w2i[EOS_TOKEN]
    # --- data loaders -------------------------------------------------------
    print("Preparing data loaders")
    train_loader = DVSeq2SeqDataLoader(params.batch_size, src_vocab, tgt_vocab, src_vocab, data_train)
    valid_loader = DVSeq2SeqDataLoader(params.batch_size, src_vocab, tgt_vocab, src_vocab, data_valid)
    test_loader = DVSeq2SeqDataLoader(params.batch_size, src_vocab, tgt_vocab, src_vocab, data_test)
    # Report data leakage between splits (informational only).
    print("{} overlapped train/test instances detected".format(len(train_loader.get_overlapping_data(test_loader))))
    print("{} overlapped train/valid instances detected".format(len(train_loader.get_overlapping_data(valid_loader))))
    print("{} overlapped valid/test instances detected".format(len(valid_loader.get_overlapping_data(test_loader))))
    # --- model, criteria, optimizer -----------------------------------------
    print("Initializing " + params.model_name)
    criterion_gen = get_masked_nll_criterion(len(tgt_vocab))
    criterion_cpy = get_nll_criterion()
    model = make_dv_seq2seq_model(src_vocab.w2v_mat if params.use_pretrained_embedding else None,
                                  tgt_vocab.w2v_mat if params.use_pretrained_embedding else None,
                                  params, len(src_vocab), len(tgt_vocab),
                                  same_word_embedding=params.same_word_embedding)
    model_opt = LRDecayOptimizer(
        torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=params.lrd_initial_lr,
                         betas=(params.adam_betas_1, params.adam_betas_2),
                         eps=params.adam_eps, weight_decay=params.adam_l2),
        initial_lr=params.lrd_initial_lr, shrink_factor=params.lrd_lr_decay_factor,
        min_lr=params.lrd_min_lr, past_scores_considered=params.lrd_past_lr_scores_considered,
        score_method="min", verbose=True, max_fail_limit=params.lrd_max_fail_limit)
    # --- optional checkpoint resume -----------------------------------------
    completed_epochs = 0
    best_eval_result = 0
    best_eval_epoch = 0
    past_eval_results = []
    if os.path.isfile(params.saved_model_file):
        print("Found saved model {}, loading".format(params.saved_model_file))
        sd = mutil.model_load(params.saved_model_file)
        saved_params = sd[CHKPT_PARAMS]
        params = merge_params(saved_params, params)
        model.load_state_dict(sd[CHKPT_MODEL])
        model_opt.load_state_dict(sd[CHKPT_OPTIMIZER])
        best_eval_result = sd[CHKPT_BEST_EVAL_RESULT]
        best_eval_epoch = sd[CHKPT_BEST_EVAL_EPOCH]
        past_eval_results = sd[CHKPT_PAST_EVAL_RESULTS]
        completed_epochs = sd[CHKPT_COMPLETED_EPOCHS]
    print(model)
    print("Model name: {}".format(params.model_name))
    n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Number of trainable parameters: {}".format(n_params))
    # --- training (skipped when a model exists and continue_training is off) -
    if not os.path.isfile(params.saved_model_file) or \
            (os.path.isfile(params.saved_model_file) and params.continue_training):
        print("Training")
        try:
            train_dv_seq2seq(params, model, train_loader, criterion_gen, criterion_cpy, model_opt,
                             completed_epochs=completed_epochs, best_eval_result=best_eval_result,
                             best_eval_epoch=best_eval_epoch, past_eval_results=past_eval_results,
                             eval_loader=valid_loader)
        except KeyboardInterrupt:
            # Allow Ctrl-C to stop training and fall through to evaluation.
            print("training interrupted")
    # --- test-set beam decoding ---------------------------------------------
    if len(test_loader) > 0:
        fn = params.saved_models_dir + params.model_name + "_best.pt"
        exclude_tokens = [SOS_TOKEN, EOS_TOKEN, PAD_TOKEN, "", " "]
        if os.path.isfile(fn):
            sd = mutil.model_load(fn)
            completed_epochs = sd[CHKPT_COMPLETED_EPOCHS]
            model.load_state_dict(sd[CHKPT_MODEL])
            print("Loaded best model after {} epochs of training".format(completed_epochs))
        with torch.no_grad():
            model.eval()
            write_line_to_file("input|pred|truth|", f_path=params.model_name + "_test_results.txt")
            for batch in tqdm(test_loader, mininterval=2, desc="Test", leave=False, ascii=True):
                beam_rvs = dv_seq2seq_beam_decode_batch(model, batch, params.sos_idx, tgt_vocab.i2w,
                                                        eos_idx=params.eos_idx,
                                                        len_norm=params.bs_len_norm, gamma=params.bs_div_gamma,
                                                        max_len=params.max_decoded_seq_len,
                                                        beam_width=params.beam_width_test)
                for bi in range(batch[DK_BATCH_SIZE]):
                    msg_str = "".join(batch[DK_SRC_SEG_LISTS][bi])
                    truth_rsp_seg = [w for w in batch[DK_TGT_SEG_LISTS][bi] if w not in exclude_tokens]
                    truth_rsp_str = " ".join(truth_rsp_seg)
                    truth_rsp_str = re.sub(" +", " ", truth_rsp_str)
                    # Top beam hypothesis, with special tokens stripped.
                    best_rv = [w for w in beam_rvs[bi][0][3] if w not in exclude_tokens] # word seg list
                    rsp = " ".join(best_rv)
                    write_line_to_file(msg_str + "|" + rsp + "|" + truth_rsp_str,
                                       params.model_name + "_test_results.txt")
if __name__ == "__main__":
    args = prepare_params()
    main(args)
    print("done")
| [
"xuefei1@ualberta.ca"
] | xuefei1@ualberta.ca |
637080b209cceba9f9dabed353282427b275870c | e42c3356609b94cf69ec58b977a33039884bf887 | /Qiskit/GHZ.py | ba9093e15fceeaedc2bc348430f784da446ff5cc | [] | no_license | arecibokck/Moonlight | dd146fcfad8dce3d501ab9dc41d8280898d9eb88 | 2e291c7fcee4d16d1578fe670de6de0504e4fcfe | refs/heads/master | 2020-12-25T17:38:14.472308 | 2020-12-19T10:38:58 | 2020-12-19T10:38:58 | 42,864,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | import qiskit as qk
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
from matplotlib import gridspec
# Define the Quantum and Classical Registers
q = qk.QuantumRegister(3)
c = qk.ClassicalRegister(3)
# Build the circuit
# (Hadamards/X/CNOTs prepare an entangled 3-qubit state, then a final layer of
# Hadamards before measurement - presumably a GHZ-style preparation, per the
# file name; confirm against the intended experiment.)
qcircuit = qk.QuantumCircuit(q, c)
qcircuit.h(q[0])
qcircuit.h(q[1])
qcircuit.x(q[2])
qcircuit.cx(q[1], q[2])
qcircuit.cx(q[0], q[2])
qcircuit.h(q[0])
qcircuit.h(q[1])
qcircuit.h(q[2])
qcircuit.measure(q, c)
# Execute the circuit
backend_sim = qk.BasicAer.get_backend('qasm_simulator')
shots_num = 1000
job = qk.execute(qcircuit,backend_sim,shots=shots_num)
result = job.result()
data = result.get_counts(qcircuit)
# Print the result
print(data)
#Draw and Save the circuit
diagram = qcircuit.draw(output = "mpl")
diagram.savefig('GHZState.png', dpi = 100)
fig = plt.figure(figsize = (15,5))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
#Show Circuit
a = fig.add_subplot(gs[0])
a.set_title('Quantum Circuit')
a.set_xticks([])
a.set_yticks([])
img = mpimg.imread('./GHZState.png')
imgplot = plt.imshow(img)
#Plot Histogram
a = fig.add_subplot(gs[1])
a.set_title('Simulation Result')
#plt.xlabel('States', fontsize=11)
plt.ylabel('Probability', fontsize=11)
# Normalise counts to probabilities for the bar chart.
dk = list(data.keys())
dv = list(data.values())
dv = [x / shots_num for x in dv]
index = np.arange(len(dk))
plt.xticks(index, dk, fontsize=11, rotation=30)
plt.bar(index, dv)
# Annotate each bar with its probability value.
for i in range(len(dk)):
    plt.text(x = index[i]-0.15 , y = dv[i]+0.005, s = str(dv[i]), size = 11)
plt.show()
fig.savefig('Sim_Result.png')
| [
"arecibokck@gmail.com"
] | arecibokck@gmail.com |
9263ed5ca2681646dd28b49829187f3ea45292f5 | d3d9dfb2e8221c24f1de3a102edafc0f312561f7 | /tests/obst/noguess/script | 19068f5a601f2d68443a454df6b60edb2f0f991a | [] | no_license | stebulus/gmwbot | b099e0ed5271ac17777f04d950c3266866a5db70 | 6bd880f37b9060424e08058c1e3adf7da9f3d780 | refs/heads/master | 2020-06-05T07:21:32.266566 | 2014-08-12T00:35:36 | 2014-08-12T00:38:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | #!/usr/bin/env python
from string import ascii_lowercase
import gmwbot
# All 26 letters with uniform weight 1 and zero gap weights.
words = list(ascii_lowercase)
g = gmwbot.obstguesser(words, [1]*len(words), [0]*(len(words)+1))
# A ('p', 'r') query has a word strictly between the bounds, so a guess exists.
print g('p', 'r')
try:
    # ('p', 'q') presumably leaves no guessable word between the bounds, so
    # the guesser is expected to raise NoGuessError - TODO confirm semantics.
    print g('p', 'q')
except gmwbot.NoGuessError, e:
    print type(e), e
# Repeat the same two queries with the sjtbot2 variant of the guesser.
g = gmwbot.obstguesser_sjtbot2(words, [1]*len(words), [0]*(len(words)+1))
print g('p', 'r')
try:
    print g('p', 'q')
except gmwbot.NoGuessError, e:
    print type(e), e
| [
"steven@amotlpaa.org"
] | steven@amotlpaa.org | |
5b2bb6915ae85da79bb36109143b4c62c8f98224 | 0077f37ce48e2b15d7b16454f25fe984972ea1e9 | /alexSite/utils.py | fba2bbf3bffe178d114e2902c465df5106240eb9 | [] | no_license | alex6xu/alexSite | 7e39375cad28c31daffe894055b2df36d3602e8c | a68a8f5bbe6fb0d4492638997c5cfe02772ef42a | refs/heads/master | 2020-07-05T18:09:40.341479 | 2019-07-20T15:19:51 | 2019-07-20T15:19:51 | 66,235,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | from typing import (
Any,
Dict,
Mapping,
List,
Tuple,
Match,
Callable,
Type,
Sequence,
)
class ObjectDict(Dict[str, Any]):
    """A dict whose entries can also be read and written as attributes."""

    def __getattr__(self, name: str) -> Any:
        # Only invoked for names not found via normal attribute lookup.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name: str, value: Any) -> None:
        # Every attribute assignment is stored as a dictionary entry.
        self[name] = value
"permike@163.com"
] | permike@163.com |
8a7206749b4cbf2bdd38e1d693156ba7dbd8f1cd | 5e12fa68b2c3342d51204903f50fd69708ed095f | /313E/Assignment1b.py | 2a2de1d3df074bfb4b413053bf228844fea13398 | [] | no_license | xlyds/python-misc | 657be4b380869e22c99172d6e5b86f815e561ab6 | e2f8dc7ab22f3d69e2ac186977b43c1e5cbc87b9 | refs/heads/master | 2021-11-10T17:09:51.670255 | 2014-01-11T19:11:48 | 2014-01-11T19:11:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | # Files: Assignment1a.py
#
# Description: Draws a house
#
# Student's Name: Zach Tidwell
#
# Student's UT EID: zt659
#
# Course Name: CS 313E
#
# Unique Number: 52295
#
# Date Created: 9/14/11
#
# Date Last Modified:9/14/11
###############################################
from turtle import *
import time
def Kochcurve(turtle, x):
    # x means length
    # Draw a Koch curve of the given length with the supplied turtle.
    # Base case: segments shorter than 3 pixels are drawn as straight lines.
    if x<3:
        turtle.forward(x)
        return
    else:
        # Recursive case: replace the segment with the classic Koch motif -
        # four sub-segments of one third the length with 60/120 degree turns.
        Kochcurve(turtle, x/3.0)
        turtle.left(60)
        Kochcurve(turtle, x/3.0)
        turtle.right(120)
        Kochcurve(turtle, x/3.0)
        turtle.left(60)
        Kochcurve(turtle, x/3.0)
#creates the outer flake
def Kochflake1(turtle, x):
    """Draw a closed Koch snowflake of side length x, turning left between sides."""
    sides = 3
    while sides:
        Kochcurve(turtle, x)
        turtle.left(120)
        sides -= 1
#creates the center flake
def Kochflake2(turtle, x):
    """Draw a closed Koch snowflake of side length x, turning right (mirrored orientation)."""
    sides = 3
    while sides:
        Kochcurve(turtle, x)
        turtle.right(120)
        sides -= 1
# Draw two snowflakes: a large purple one (right-turning) and a smaller red
# one (left-turning), then wait for a click to close the window.
ttl = Turtle()
ttl.speed(0)  # fastest drawing speed
ttl.up()
ttl.goto(-150,125)
ttl.down()
ttl.color('purple')
Kochflake2(ttl, 300)
ttl.up()
ttl.goto(-50,0)
ttl.down()
ttl.color('red')
Kochflake1(ttl,100)
ttl.hideturtle()
exitonclick()
"zftidwell@gmail.com"
] | zftidwell@gmail.com |
dd79bea0677916fb08bd396f3d4f6a716264d86c | 7152978f66223889e6285b7b05847b6388ad35d2 | /day-07/question-060.py | fbad141ae3ee9e41fb05e3401a2eb71601df53eb | [] | no_license | martinleeq/python-100 | 8a65acf3ad8544bfe0abcdbc5bfcc9ee5231c2c4 | 695e5d2fba1d6174767b381bda4a24b2776f2711 | refs/heads/master | 2023-02-05T04:28:32.703367 | 2020-12-28T03:53:17 | 2020-12-28T03:53:17 | 322,327,007 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | """
问题60
f(n)=f(n-1)+100, 当n >0
f(0)=0, n=0
当给定一个n时,计算f(n)的值.
"""
while True:
n = int(input('Input: '))
if n == 0:
print(0)
else:
fn, fn1 = 0, 0
for i in range(1, n + 1):
fn = fn1 + 100
fn1 = fn
print(fn)
| [
"martinleeq@hotmail.com"
] | martinleeq@hotmail.com |
a97af72b318fdcde986eef86fb95de738a1d1561 | cc4d19b685d77f9cdfb2a34492dd5ef84f0ab49e | /manage.py | 685fa7a26023ff3b0874b82bdbc0241d5683ec28 | [] | no_license | Zoli1212/videosharing | 1eb424525b97286650e771f1b5e13b515d716fb2 | 7e1b2504f0faeab953ac4a48379af464dba35f57 | refs/heads/master | 2023-01-03T01:17:47.226223 | 2020-10-30T17:10:40 | 2020-10-30T17:10:40 | 308,698,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: configure the settings module and dispatch to Django."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'videosharing.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"43321960+Zoli1212@users.noreply.github.com"
] | 43321960+Zoli1212@users.noreply.github.com |
a1c8c70909a8fc1c7ae9cd1d3c59271506fb61df | cb73fe89463892c8c147c6995e220f5b1635fabb | /AtCoder Beginner Contest 174/q3.py | ac39b97c6a5853fd19f555ce923192f306771a27 | [] | no_license | Haraboo0814/AtCoder | 244f6fd17e8f6beee2d46fbfaea6a8e798878920 | 7ad794fd85e8d22d4e35087ed38f453da3c573ca | refs/heads/master | 2023-06-15T20:08:37.348078 | 2021-07-17T09:31:30 | 2021-07-17T09:31:30 | 254,162,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | import sys
from io import StringIO
import unittest
def resolve():
k = int(input())
x = 7 % k
for i in range(1, k + 1):
if x == 0:
print(i)
return
x = (x * 10 + 7) % k
print(-1)
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """101"""
output = """4"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """2"""
output = """-1"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """999983"""
output = """999982"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
| [
"harada-kyohei-fj@ynu.jp"
] | harada-kyohei-fj@ynu.jp |
48664102dbf2fccbd738d7f41fde8645c5e6df09 | 8a90c218278329b824dbb1c9dedfa7549d04340a | /python/src/lib/python/pelix/ipopo/handlers/constants.py | ab0c376510101e4442007fb5af64f4d972c73ced | [
"Apache-2.0"
] | permissive | cohorte/cohorte-runtime | 8155084a24c6212d407efc19f7c786cda3938c9a | 686556cdde20beba77ae202de9969be46feed5e2 | refs/heads/master | 2021-10-24T02:35:04.294753 | 2019-03-21T13:08:41 | 2019-03-21T13:08:41 | 24,407,102 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,688 | py | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
iPOPO handlers constants and base classes
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.7
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 7)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Service specification / property name constants used when registering
# handler factories with the framework.
SERVICE_IPOPO_HANDLER_FACTORY = 'ipopo.handler.factory'
"""
iPOPO handler factory service specification. Those services should implement
the methods of HandlerFactory.
"""
PROP_HANDLER_ID = 'ipopo.handler.id'
""" Service property: the ID of the iPOPO handler factory """
# ------------------------------------------------------------------------------
# Handler "kind" identifiers returned by Handler.get_kinds().
KIND_PROPERTIES = 'properties'
"""
Represents the 'properties' kind of handler, which manipulates the component
to notify property changes.
"""
KIND_DEPENDENCY = 'dependency'
"""
Represents the 'dependency' kind of handler.
Those handlers must implement the following methods:
* get_bindings(): Retrieves the list of bound service references
* is_valid(): Returns True if the dependency is in a valid state
"""
KIND_SERVICE_PROVIDER = 'service_provider'
"""
Represents the 'service_provider' kind of handler.
Those handlers must implement the following method:
* get_service_reference(): Retrieves the reference of the provided service
(a ServiceReference object).
It should also implement the following ones:
* on_controller_changer(): Called when a component controller has been
modified. The publication of a service might be stopped if its controller is
set to False.
* on_property_change(): Called when a component property has been modified.
The provided service properties should be modified accordingly.
"""
# ------------------------------------------------------------------------------
class HandlerFactory(object):
    """Abstract base for services that build iPOPO handlers for a component."""

    def get_handlers(self, component_context, instance):
        """
        Prepares handlers for the given component.

        :param component_context: The ComponentContext bean
        :param instance: The component instance
        :return: The list of handlers associated to the given component
        """
        # Base implementation provides no handlers.
        return None
# ------------------------------------------------------------------------------
class Handler(object):
    """Abstract base class for iPOPO component handlers.

    Every method is a no-op default; concrete handlers override only the
    hooks they need.
    """

    def get_kinds(self):
        """Return a tuple of the kinds of this handler, or None."""
        return None

    def manipulate(self, stored_instance, component_instance):
        """Manipulate the associated component instance."""
        pass

    def check_event(self, event):
        """Return True if the given service event must be handled, False to ignore it."""
        return True

    def is_valid(self):
        """Return True if this handler is in a valid state.

        All handlers must be valid for a component to be validated.
        """
        return True

    def on_controller_change(self, name, value):
        """Notified when the controller ``name`` takes the new ``value``."""
        pass

    def on_property_change(self, name, old_value, new_value):
        """Notified when the component property ``name`` changes from ``old_value`` to ``new_value``."""
        pass

    def start(self):
        """Called once, after the component has been manipulated by all handlers."""
        pass

    def stop(self):
        """Called once, just after the component has been killed."""
        pass

    def clear(self):
        """Called after the component was killed and all handlers stopped; release resources here."""
        pass

    def pre_validate(self):
        """Called just before a component is validated."""
        pass

    def post_validate(self):
        """Called just after a component has been validated."""
        pass

    def pre_invalidate(self):
        """Called just before a component is invalidated."""
        pass

    def post_invalidate(self):
        """Called just after a component has been invalidated."""
        pass
# ------------------------------------------------------------------------------
class ServiceProviderHandler(Handler):
    """Abstract handler for components that publish a service."""

    def get_service_reference(self):
        """Return the ServiceReference of the service provided by this handler."""
        return None
# ------------------------------------------------------------------------------
class DependencyHandler(Handler):
    """Abstract handler for injected service dependencies."""

    def get_field(self):
        """Return the name of the field where to inject the dependency."""
        return None

    def try_binding(self):
        """Force the handler to try to bind to existing services."""
        pass

    def get_bindings(self):
        """Return the list of ServiceReference objects currently bound."""
        return None

    def get_value(self):
        """Return the value to inject."""
        return None
| [
"dbassem@gmail.com"
] | dbassem@gmail.com |
06aedfb3216e2eaac1c2235aa66ef85e9bef765f | f9318e10a48e9e87f7c1537c8f2e647dda677e4c | /octopus/modules/account/account.py | f4cf289eb6fd78bae031846b8ce19af629b05fda | [
"Apache-2.0"
] | permissive | JiscPER/magnificent-octopus | 250579020ead7a8d8424e96a8c3e811b12cc4d90 | e1be9a5f779a38122c5ad9d1d5365bb9a1dc2cf1 | refs/heads/master | 2023-07-20T00:27:26.018800 | 2016-01-22T16:07:24 | 2016-01-22T16:07:24 | 46,710,009 | 0 | 1 | NOASSERTION | 2023-07-10T11:22:43 | 2015-11-23T09:29:26 | Python | UTF-8 | Python | false | false | 10,571 | py | import uuid, json
from flask import Blueprint, request, url_for, flash, redirect, make_response
from flask import render_template, abort
from flask.ext.login import login_user, logout_user, current_user, login_required
from octopus.core import app
from octopus.lib.webapp import ssl_required, request_wants_json, flash_with_url, is_safe_url
from octopus.lib import mail
from octopus.modules.account.factory import AccountFactory
from octopus.modules.account import exceptions
blueprint = Blueprint('account', __name__)
@app.login_manager.user_loader
def load_account_for_login_manager(userid):
    """Flask-Login user loader: return the Account for a session user id.

    :param userid: the id stored in the session cookie
    :return: the Account object, or None if no such account exists
    """
    # AccountFactory is already imported at module level; the previous
    # function-local re-import was redundant and has been removed.
    acc = AccountFactory.get_model().pull(userid)
    return acc
def get_redirect_target(form=None):
    """Work out where to send the user after login.

    Checks the form's "next" field first, then the "next" query parameter,
    accepting only targets that pass the open-redirect safety check. Falls
    back to the configured ACCOUNT_LOGIN_REDIRECT_ROUTE.

    :param form: an optional form object with a "next" field
    :return: a safe redirect target URL
    """
    form_target = ''
    if form and hasattr(form, 'next') and getattr(form, 'next'):
        form_target = form.next.data
    for target in form_target, request.args.get('next', ''):
        if not target:
            continue
        # Bug fix: the previous test was "target == is_safe_url(target)",
        # which compares a string to a boolean and could never be true, so
        # the "next" redirect was always ignored (assumes is_safe_url returns
        # a boolean, as in the standard Flask snippet - confirm).
        if is_safe_url(target):
            return target
    return url_for(app.config.get("ACCOUNT_LOGIN_REDIRECT_ROUTE", "index"))
def _do_login(user):
return login_user(user, remember=True)
def _do_logout():
logout_user()
@blueprint.route('/login', methods=['GET', 'POST'])
@ssl_required
def login():
# current_info = {'next': request.args.get('next', '')}
fc = AccountFactory.get_login_formcontext(request.form)
if request.method == 'POST':
if fc.validate():
password = fc.form.password.data
email = fc.form.email.data
Account = AccountFactory.get_model()
try:
user = Account.pull_by_email(email)
except exceptions.NonUniqueAccountException:
flash("Permanent Error: unable to log you in with these credentials - please contact an administrator", "error")
return fc.render_template()
if user is not None:
if not user.can_log_in():
flash('Invalid credentials', 'error')
return fc.render_template()
if user.check_password(password):
inlog = _do_login(user)
if not inlog:
flash("Problem logging in", "error")
return fc.render_template()
else:
flash('Welcome back.', 'success')
return redirect(get_redirect_target(form=fc.form))
else:
flash('Incorrect username/password', 'error')
return fc.render_template()
else:
flash('Incorrect username/password', 'error')
return fc.render_template()
else:
flash('Invalid credentials', 'error')
return fc.render_template()
@blueprint.route('/logout')
@ssl_required
def logout():
_do_logout()
flash('You are now logged out', 'success')
return redirect(url_for(app.config.get("ACCOUNT_LOGOUT_REDIRECT_ROUTE", "index")))
@blueprint.route('/<username>', methods=['GET', 'POST', 'DELETE'])
@login_required
@ssl_required
def username(username):
Account = AccountFactory.get_model()
acc = Account.pull(username)
if acc is None:
try:
acc = Account.pull_by_email(username)
except exceptions.NonUniqueAccountException:
flash("Permanent Error: these user credentials are invalid - please contact an administrator", "error")
return redirect(url_for(("logut")))
if acc is None:
abort(404)
# actions on this page are only availble to the actual user, or a user with the edit-users role
if current_user.id != acc.id or not current_user.has_role(app.config.get("ACCOUNT_EDIT_USERS_ROLE")):
abort(401)
# if this is a request for the user page, just render it
if request.method == "GET":
fc = AccountFactory.get_user_formcontext(acc)
return fc.render_template()
is_delete = request.method == "DELETE" or (request.method == "POST" and request.values.get("submit", False) == "Delete")
if is_delete:
# validate the delete
if not current_user.check_password(request.values.get("password")):
flash("Incorrect password", "error")
fc = AccountFactory.get_user_formcontext(acc=acc)
return fc.render_template()
# if the password validates, go ahead and do it
acc.remove() # Note we don't use the DAO's delete method - this allows the model to decide the delete behaviour
_do_logout()
flash('Account {x} deleted'.format(x=username), "success")
return redirect(url_for(app.config.get("ACCOUNT_LOGOUT_REDIRECT_ROUTE", "index")))
if request.method == "POST":
fc = AccountFactory.get_user_formcontext(acc=acc, form_data=request.form)
# attempt to validate the form
if not fc.validate():
flash("There was a problem when submitting the form", "error")
return fc.render_template()
# if the form validates, then check the legality of the submission
try:
fc.legal()
except exceptions.AccountException as e:
flash(e.message, "error")
return fc.render_template()
# if we get to here, then update the user record
fc.finalise()
# tell the user that everything is good
flash("Account updated", "success")
# end with a redirect because some details have changed
return redirect(url_for("account.username", username=fc.target.email))
@blueprint.route('/forgot', methods=['GET', 'POST'])
@ssl_required
def forgot():
if request.method == "GET":
fc = AccountFactory.get_forgot_formcontext()
return fc.render_template()
if request.method == 'POST':
fc = AccountFactory.get_forgot_formcontext(form_data=request.form)
# attempt to validate the form
if not fc.validate():
flash("There was a problem when submitting the form", "error")
return fc.render_template()
# call finalise on the context, to trigger the reset process
try:
fc.finalise()
except exceptions.NonUniqueAccountException:
flash("Permanent Error: cannot reset password for this account - please contact an administrator", "error")
return fc.render_template()
except exceptions.AccountNotFoundException:
flash('Your account email address is not recognised.', 'error')
return fc.render_template()
except exceptions.AccountException:
flash("Unable to reset the password for this account", "error")
return fc.render_template()
# if we get to here, reset was successful, so we should redirect the user
return redirect(url_for(app.config.get("ACCOUNT_FORGOT_REDIRECT_ROUTE", "account.forgot_pending")))
@blueprint.route("/forgot-pending", methods=["GET"])
@ssl_required
def forgot_pending():
return render_template("account/forgot_pending.html")
@blueprint.route("/reset/<reset_token>", methods=["GET", "POST"])
@ssl_required
def reset(reset_token):
Account = AccountFactory.get_model()
acc = Account.get_by_reset_token(reset_token)
if acc is None:
abort(404)
if not acc.can_log_in():
abort(404)
if request.method == "GET":
fc = AccountFactory.get_reset_formcontext(acc)
return fc.render_template()
elif request.method == "POST":
fc = AccountFactory.get_reset_formcontext(acc, request.form)
if not fc.validate():
flash("There was a problem with your form", "error")
return fc.render_template()
# if the form is good, finalise the user's password change
fc.finalise()
# log the user in
_do_login(acc)
flash("Password has been reset and you have been logged in", "success")
return redirect(url_for(app.config.get("ACCOUNT_LOGIN_REDIRECT_ROUTE", "index")))
@blueprint.route('/')
@login_required
@ssl_required
def index():
if not app.config.get("ACCOUNT_LIST_USERS", False):
abort(404)
if not current_user.has_role(app.config.get("ACCOUNT_LIST_USERS_ROLE", "list_users")):
abort(401)
return render_template('account/users.html')
@blueprint.route('/register', methods=['GET', 'POST'])
@ssl_required
def register():
# access to registration may not be for the public
if current_user.is_anonymous() and not app.config.get("ACCOUNT_ALLOW_REGISTER", False):
abort(404)
if request.method == "GET":
fc = AccountFactory.get_register_formcontext()
return fc.render_template()
elif request.method == "POST":
fc = AccountFactory.get_register_formcontext(request.form)
if not fc.validate():
flash("There was a problem with your form", "error")
return fc.render_template()
# if the form validates, then check the legality of the submission
try:
fc.legal()
except exceptions.AccountException as e:
flash(e.message, "error")
return fc.render_template()
# if we get to here, then create the user record
fc.finalise()
# tell the user that everything is good
flash("Account created - activation token sent", "success")
# redirect to the appropriate next page
return redirect(url_for(app.config.get("ACCOUNT_REGISTER_REDIECT_ROUTE")))
@blueprint.route("/activate/<activation_token>", methods=["GET", "POST"])
@ssl_required
def activate(activation_token):
account = AccountFactory.get_model().get_by_activation_token(activation_token)
if account is None:
abort(404)
if not account.can_log_in():
abort(404)
if request.method == "GET":
fc = AccountFactory.get_activate_formcontext(account)
return fc.render_template()
elif request.method == "POST":
fc = AccountFactory.get_activate_formcontext(account, request.form)
if not fc.validate():
flash("There was a problem with your form", "error")
return fc.render_template()
# if the form is good, finalise the user's password change
fc.finalise()
# log the user in
_do_login(account)
flash("Your account has been activated and you have been logged in", "success")
return redirect(url_for(app.config.get("ACCOUNT_LOGIN_REDIRECT_ROUTE", "index")))
| [
"richard@cottagelabs.com"
] | richard@cottagelabs.com |
c3ff731e7c2a53423e89b6edc552126dacdd17d8 | 826f97c4c2d94503ff156a5fc33b60ef981509d8 | /oj/settings.py | 46335f3c2ad1048c138dbe3713c2685e0e73ece1 | [
"MIT"
] | permissive | geek-Xie/OnlineJudge | ee0efb2b4b290cbfccf26de1177073cde7750995 | 5ed43489d39c4feea1ec688d36a557352e388e5f | refs/heads/master | 2023-04-09T20:32:21.058744 | 2021-04-21T15:04:41 | 2021-04-21T15:04:41 | 357,925,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,544 | py | """
Django settings for oj project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import raven
from copy import deepcopy
from utils.shortcuts import get_env
production_env = get_env("OJ_ENV", "dev") == "production"
if production_env:
from .production_settings import *
else:
from .dev_settings import *
with open(os.path.join(DATA_DIR, "config", "secret.key"), "r") as f:
SECRET_KEY = f.read()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Applications
VENDOR_APPS = [
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_dramatiq',
'django_dbconn_retry',
]
if production_env:
VENDOR_APPS.append('raven.contrib.django.raven_compat')
LOCAL_APPS = [
'account',
'announcement',
'conf',
'problem',
'contest',
'utils',
'submission',
'options',
'judge',
]
INSTALLED_APPS = VENDOR_APPS + LOCAL_APPS
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'account.middleware.APITokenAuthMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'account.middleware.AdminRoleRequiredMiddleware',
'account.middleware.SessionRecordMiddleware',
# 'account.middleware.LogSqlMiddleware',
)
ROOT_URLCONF = 'oj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': [],
'DIRS': ['/home/ojadmin/OJSource/OnlineJudgeFE/dist'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'oj.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'account.User'
TEST_CASE_DIR = os.path.join(DATA_DIR, "test_case")
LOG_PATH = os.path.join(DATA_DIR, "log")
AVATAR_URI_PREFIX = "/public/avatar"
AVATAR_UPLOAD_DIR = f"{DATA_DIR}{AVATAR_URI_PREFIX}"
UPLOAD_PREFIX = "/public/upload"
UPLOAD_DIR = f"{DATA_DIR}{UPLOAD_PREFIX}"
# STATICFILES_DIRS = [os.path.join(DATA_DIR, "public")]
STATICFILES_DIRS = [os.path.join(DATA_DIR, "/home/ojadmin/OJSource/OnlineJudgeFE/dist/static")]
LOGGING_HANDLERS = ['console', 'sentry'] if production_env else ['console']
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '[%(asctime)s] - [%(levelname)s] - [%(name)s:%(lineno)d] - %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
'formatter': 'standard'
}
},
'loggers': {
'django.request': {
'handlers': LOGGING_HANDLERS,
'level': 'ERROR',
'propagate': True,
},
'django.db.backends': {
'handlers': LOGGING_HANDLERS,
'level': 'ERROR',
'propagate': True,
},
'dramatiq': {
'handlers': LOGGING_HANDLERS,
'level': 'DEBUG',
'propagate': False,
},
'': {
# 'handlers': LOGGING_HANDLERS,
'handlers': ['console'],
'level': 'WARNING',
'propagate': True,
}
},
}
REST_FRAMEWORK = {
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
REDIS_URL = "redis://%s:%s" % (REDIS_CONF["host"], REDIS_CONF["port"])
def redis_config(db):
def make_key(key, key_prefix, version):
return key
return {
"BACKEND": "utils.cache.MyRedisCache",
"LOCATION": f"{REDIS_URL}/{db}",
"TIMEOUT": None,
"KEY_PREFIX": "",
"KEY_FUNCTION": make_key
}
CACHES = {
"default": redis_config(db=1)
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
DRAMATIQ_BROKER = {
"BROKER": "dramatiq.brokers.redis.RedisBroker",
"OPTIONS": {
"url": f"{REDIS_URL}/4",
},
"MIDDLEWARE": [
# "dramatiq.middleware.Prometheus",
"dramatiq.middleware.AgeLimit",
"dramatiq.middleware.TimeLimit",
"dramatiq.middleware.Callbacks",
"dramatiq.middleware.Retries",
# "django_dramatiq.middleware.AdminMiddleware",
"django_dramatiq.middleware.DbConnectionsMiddleware"
]
}
DRAMATIQ_RESULT_BACKEND = {
"BACKEND": "dramatiq.results.backends.redis.RedisBackend",
"BACKEND_OPTIONS": {
"url": f"{REDIS_URL}/4",
},
"MIDDLEWARE_OPTIONS": {
"result_ttl": None
}
}
RAVEN_CONFIG = {
'dsn': 'https://b200023b8aed4d708fb593c5e0a6ad3d:1fddaba168f84fcf97e0d549faaeaff0@sentry.io/263057'
}
IP_HEADER = "HTTP_X_REAL_IP"
ALLOWED_HOSTS=['*'] | [
"504428538@qq.com"
] | 504428538@qq.com |
85467f0e87ac145734cbd7cf82630fdc79c4cc99 | ec41aaf927a8c9e2f69ac6a2c5111beb447a71cf | /log_analysis.py | 54cb355c10891efd5b2a9087822d3c8c09e3923b | [
"MIT"
] | permissive | rr2674/log_analysis | 6450f13ec19a41ad58f379174ea7c293d2dbb3d4 | e17428bdf2c56414b4e31bc53efa785fa856550c | refs/heads/master | 2020-04-21T15:28:42.571933 | 2019-02-10T01:20:04 | 2019-02-10T01:20:04 | 169,670,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | #!/usr/bin/env python3
import psycopg2
def do_query(db, query):
cur = db.cursor()
cur.execute(query)
rows = cur.fetchall()
cur.close()
return rows
def most_popular_three_articles(db):
"""
Q: Which articles have been accessed the most?
This function will display a sorted list with the most popular articles
at the top.
"""
query = r"""
select title, count(*) as views
from articles, log
where slug = substring(path,'\/article\/(.*)')
group by title
order by views desc
limit 3;
"""
print('\nWhich articles have been accessed the most?\n')
for row in do_query(db, query):
print('\t"{}" -- {:,} views'.format(row[0], row[1]))
def most_popular_article_authors(db):
"""
Who are the most popular article authors of all time?
This function will display a sorted list with the most popular author
at the top.
"""
query = r"""
select name, count(*) as views
from authors, articles, log
where articles.slug = substring(log.path,'\/article\/(.*)')
and authors.id = articles.author
group by name
order by views desc;
"""
print('\nWho are the most popular article authors of all time?\n')
for row in do_query(db, query):
print('\t{} -- {:,} views'.format(row[0], row[1]))
def daily_error_gt_1pct(db):
"""
On which days did more than 1% of requests lead to errors?
"""
query = """
select day, round(error_pct,2) as error_pct
from ( select day,
( ( sum(occurance) filter(where status != '200 OK')
/ sum(occurance) ) * 100 ) as error_pct
from ( select to_char(time, 'Month DD, YYYY') as day,
status,
count(*) as occurance
from log
group by day, status
order by day, occurance desc ) as subq1
group by day ) as subq
where subq.error_pct > 1;
"""
print('\nOn which days did more than 1% of requests lead to errors?\n')
for row in do_query(db, query):
print('\t{} -- {}% errors'.format(row[0], row[1]))
if __name__ == '__main__':
db = psycopg2.connect("dbname=news")
most_popular_three_articles(db)
most_popular_article_authors(db)
daily_error_gt_1pct(db)
db.close()
| [
"rr2674@att.com"
] | rr2674@att.com |
11bb0dda8e1159f8ca4f83f7f6caee2f1410bc1f | d56885aa49e6541932b1dba223f3a6aa1601ea5d | /TorrentDriveAdjuster.py | ec43eabb62f09c911199eee1d2d4facbe82d7d33 | [
"MIT"
] | permissive | stevin05/SonarrDiskMultiplexer | 6e11cea91e9da7b55318c11b85b93cbdf3993a34 | 21a74883aef176ab4275644c75ca5be4f1faec9a | refs/heads/master | 2022-04-21T17:08:15.617696 | 2020-04-18T01:45:37 | 2020-04-18T01:45:37 | 256,647,795 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,832 | py | import os
import sys
from pathlib import Path
import qbittorrentapi
import logging
import requests
# Prerequisites:
# 1. Install python
# 2. Make sure python is in the system path (a check box on the installation)
# 3. Install pip so you can grab the required packages
# 4. pip install requests
# 5. pip install qbittorrentapi
# 6. qbittorrent 'Downloads' settings:
# a. Default Torrent Management Mode: Manual
# b. Keep incomplete torrents in: <UNCHECKED>
# c. Default Save Path -> One of the Downloads directories from step 7
# 7. Same directory structure on every drive. Something like:
# X:/Downloads <Set one of these as the default in qbittorrent>
# X:/TV Shows
# Y:/Downloads
# Y:/TV Shows
# Z:/Downloads
# Z:/TV Shows
# 8. Sonarr -> Settings -> Connect -> Custom Script -> Path to this script. Check 'On Grab'
# 9. Set environment variables 'SONARR_API_KEY', 'QBT_USER', 'QBT_PORT', and 'QBT_PASSWORD' to match your API key
# in RADARR and web UI settings in qbittorrent. Or just set them into the variables below.
# Sonarr will find shows, start downloading them into qbittorrent's default directory and
# then call this script. This script will pause the torrent, move it to the same drive that
# the TV show would have eventually been copied to, and then resume it. When the download is
# complete, it's already on the correct drive and can now be hard linked instead of copied.
sonarr_api_key = os.environ['SONARR_API_KEY']
qbt_user = os.environ['QBT_USER']
qbt_password = os.environ['QBT_PASSWORD']
qbt_port = os.environ['QBT_PORT']
log_file = 'C:\V2\Scripts\TorrentDriveAdjuster.log'
# Some helper logging to figure out what our script parameters are
logging.basicConfig(
filename=log_file,
level=logging.WARNING,
format='%(asctime)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
for env, value in os.environ.items():
if env.startswith('SONARR'):
logging.debug(f'[{env}: {value}]')
event_type = os.environ['SONARR_EVENTTYPE']
if event_type != 'Grab':
exit(0)
# An example of the environment variables set for downloading a season of IT Crowd
#4/15/2020 8:24:20 PM - Standard - [SONARR_DOWNLOAD_CLIENT: qBittorrent]
#4/15/2020 8:24:20 PM - Standard - [SONARR_DOWNLOAD_ID: 8DE2A43452F6AAFF0C08B33AF0EF78AE05CD9591]
#4/15/2020 8:24:20 PM - Standard - [SONARR_EVENTTYPE: Grab]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_ABSOLUTEEPISODENUMBERS: 1,2,3,4,5,6]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_EPISODEAIRDATES: 2006-02-03,2006-02-03,2006-02-10,2006-02-17,2006-02-24,2006-03-03]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_EPISODEAIRDATESUTC: 2/3/2006 12:00:00 AM,2/3/2006 12:25:00 AM,2/10/2006 12:00:00 AM,2/17/2006 12:00:00 AM,2/24/2006 12:00:00 AM,3/3/2006 12:00:00 AM]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_EPISODECOUNT: 6]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_EPISODENUMBERS: 1,2,3,4,5,6]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_EPISODETITLES: Yesterday's Jam|Calamity Jen|Fifty-Fifty|The Red Door|The Haunting of Bill Crouse|Aunt Irma Visits]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_INDEXER: BroadcastheNet]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_QUALITY: DVD]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_QUALITYVERSION: 1]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_RELEASEGROUP: iNGOT]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_SEASONNUMBER: 1]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_SIZE: 1464000512]
#4/15/2020 8:24:20 PM - Standard - [SONARR_RELEASE_TITLE: The.IT.Crowd.S01.DVDRip.Xvid-iNGOT]
#4/15/2020 8:24:20 PM - Standard - [SONARR_SERIES_ID: 6]
#4/15/2020 8:24:20 PM - Standard - [SONARR_SERIES_IMDBID: tt0487831]
#4/15/2020 8:24:20 PM - Standard - [SONARR_SERIES_TITLE: The IT Crowd]
#4/15/2020 8:24:20 PM - Standard - [SONARR_SERIES_TVDBID: 79216]
#4/15/2020 8:24:20 PM - Standard - [SONARR_SERIES_TVMAZEID: 539]
#4/15/2020 8:24:20 PM - Standard - [SONARR_SERIES_TYPE: Standard]
# Start by making sure we can talk to qbittorrent
qbt_client = qbittorrentapi.Client(host=f'localhost:{qbt_port}', username=qbt_user, password=qbt_password)
try:
qbt_client.auth_log_in()
except qbittorrentapi.LoginFailed as e:
print(e)
# Grab the torrent object from qbittorrent so we can operate on it
torrents = qbt_client.torrents_info(hashes=[os.environ['SONARR_DOWNLOAD_ID'].lower()],limit=1)
# Make sure we found something
if len(torrents) != 1:
msg = f"Couldn't relocate torrent for: {os.environ['SONARR_RELEASE_TITLE']}"
print(msg)
logging.error(msg)
exit(1)
torrent = torrents[0]
# Pause the torrent. We hopefully catch this so early that the file is essentially empty.
try:
torrent.pause()
except qbittorrentapi.APIError as e:
msg = str(e)
print(msg)
logging.error(msg)
# Grab the full series info from sonarr. We weren't told where the show should go in our parameters.
series_id = os.environ['SONARR_SERIES_ID']
sonarr_series = requests.get(
f'http://localhost:8989/api/series/{series_id}',
headers={'X-Api-Key': sonarr_api_key},
)
new_location = None
old_location = None
if sonarr_series.status_code == 200:
drive = Path(sonarr_series.json()['path']).parts[0]
old_location = Path(torrent.save_path)
old_parts = list(old_location.parts)
old_parts[0] = drive
new_location = Path(*old_parts)
else:
msg = f'Failed to relocate: {os.environ["SONARR_RELEASE_TITLE"]}'
print(msg)
logging.error(msg)
exit(1)
try:
if new_location is not None:
print(f'Moving {os.environ["SONARR_RELEASE_TITLE"]} from {old_location} to {new_location}')
torrent.set_location(location=new_location)
torrent.resume()
except qbittorrentapi.APIError as e:
msg = str(e)
print(msg)
logging.error(msg)
| [
"63887600+stevin05@users.noreply.github.com"
] | 63887600+stevin05@users.noreply.github.com |
f480fc8f2b9e68eea63991f2c1e899917ba3f613 | c5148bc364dac753c0872bd5676027a30b260486 | /build/lib/biosteam/units/decorators/_design.py | bd20530d6c47562af76bc4bada9d063ee6920593 | [
"MIT",
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ecoent/biosteam | 86f47c713a2cae5d6261b6c2c7734ccf7a90fb4e | f1371386d089df3aa8ce041175f210c0318c1fe0 | refs/heads/master | 2021-02-24T14:10:23.158984 | 2020-03-05T03:43:17 | 2020-03-05T03:43:17 | 245,433,768 | 1 | 0 | NOASSERTION | 2020-03-06T13:59:27 | 2020-03-06T13:59:26 | null | UTF-8 | Python | false | false | 4,647 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 6 17:19:41 2019
@author: yoelr
"""
__all__ = ('design', 'add_design')
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 21:18:50 2019
@author: yoelr
"""
from thermosteam.base import stream_units_of_measure
__all__ = ('design',)
# %% Design Center class
def _design(self):
D = self.design_results
U = self._units
for i, j in self._design_basis_: D[i] = j(self, U[i])
class DesignCenter:
"""Create a DesignCenter object that manages all design basis functions. When called, it returns a Unit class decorator that adds a design item to the given Unit class."""
__slots__ = ('design_basis_functions',)
def __init__(self):
self.design_basis_functions = {}
def define(self, design_basis):
"""Define a new design basis.
Parameters
----------
design_basis : function
Should accept the unit_object and the units_of_measure and return design basis value.
.. Note::
Design basis is registered with the name of the design basis function.
"""
name = design_basis.__name__.replace('_', ' ').capitalize()
functions = self.design_basis_functions
if name in functions: raise ValueError(f"design basis '{name}' already implemented")
functions[name] = design_basis
return design_basis
def __call__(self, name, units, fsize=None):
"""Return a Unit class decorator that adds a size/design requirement to the class.
Parameters
----------
name : str
Name of design item.
units : str
Units of measure of design item.
fsize : function
Should return design item given the Unit object. If None, defaults to function predefined for given name and units.
"""
return lambda cls: self._add_design2cls(cls, name, units, fsize)
def _add_design2cls(self, cls, name, units, fsize):
"""Add size/design requirement to class.
Parameters
----------
cls : Unit class.
name : str
Name of design item.
units : str
Units of measure of design item.
fsize : function
Should return design item given the Unit object. If None, defaults to function predefined for given name and units.
Examples
--------
:doc:`Unit decorators`
"""
f = fsize or self.design_basis_functions[name.capitalize()]
# Make sure new _units dictionary is defined
if not cls._units:
cls._units = {}
elif '_units' not in cls.__dict__:
cls._units = cls._units.copy()
# Make sure design basis is not defined
if name in cls._units:
raise RuntimeError(f"design basis '{name}' already defined in class")
else:
cls._units[name] = units
# Add design basis
if cls._design is _design:
cls._design_basis_.append((name, f))
elif '_design' in cls.__dict__:
raise RuntimeError("'_design' method already implemented")
else:
cls._design_basis_ = [(name, f)]
cls._design = _design
return cls
def __contains__(self, basis):
return basis in self.__dict__
def __iter__(self):
yield from self.__dict__
def __repr__(self):
return f"<{type(self).__name__}: {', '.join(self)}>"
# %% Design factories
design = DesignCenter() #: Used to decorate classes with new design item
@design.define
def flow_rate(self, units):
if self._N_ins == 1:
return self._ins[0].get_total_flow(units)
elif self._N_outs == 1:
return self._outs[0].get_total_flow(units)
elif self._N_ins < self._N_outs:
return sum([i.get_total_flow(units) for i in self._ins])
else:
return sum([i.get_total_flow(units) for i in self._outs])
H_units = stream_units_of_measure['H']
@design.define
def duty(self, units):
self._duty = duty = self.H_out - self.H_in
self.heat_utilities[0](duty, self.ins[0].T, self.outs[0].T)
return H_units.conversion_factor(units) * duty
@design.define
def dry_flow_rate(self, units):
ins = self._ins
flow_in = sum([i.get_total_flow(units) for i in ins])
moisture = sum([i.get_flow(units, IDs='7732-18-5') for i in ins])
return flow_in - moisture
del flow_rate, duty, dry_flow_rate
| [
"yoelcortes@gmail.com"
] | yoelcortes@gmail.com |
d29c454867dbde6d3ef0ab9681683aa9fb155672 | cc2f58b01309b585e8ade99bc430d6f859f0e42d | /resources/independence_write_pyc.py | 2330cdda2dc931f81e5bfd6800fd6a6933f50636 | [] | no_license | orangepips/coding-the-matrix | 4e758f3047432054a0763639bef418690dea580e | 43ed47bf1febfcbc69fbdbcf9b4d2234512e4b1a | refs/heads/master | 2021-09-04T04:48:29.146868 | 2018-01-16T01:27:13 | 2018-01-16T01:27:13 | 109,063,250 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,331 | py | def main():
# del globals()['_get_module']
import imp
import base64
import marshal
magic = imp.get_magic()
if magic == b'\xee\x0c\r\n': # Python 3.4
pycData = b'7gwNCj16/FTtDQAA4wAAAAAAAAAAAAAAAAMAAABAAAAAc4YAAABkAABkAQBsAABaAABHZAIAZAMA\nhAAAZAMAgwIAWgEAZAQAZAUAZAYAhAEAWgIAZAcAZAgAhAAAWgMAZAkAZAoAhAAAWgQAZAsAZAwA\nhAAAWgUAZA0AZA4AhAAAWgYAZAAAZQUAXwcAZA8AZQUAXwgAZAAAZQYAXwcAZA8AZQYAXwgAZAEA\nUykQ6QAAAABOYwAAAAAAAAAAAAAAAAQAAABAAAAAc3kAAABlAABaAQBkAABaAgBlAwCDAABpAABk\nAQBkAgCEAgBaBABkAwBkBACEAABaBQBkBQBkBgCEAABaBgBkBwBkCACEAABaBwBkCQBkCgCEAABa\nCABkCwBkDACEAABaCQBkDQBkDgCEAABaCgBkDwBkEACEAABaCwBkEQBTKRLaBF9WZWNjAwAAAAAA\nAAADAAAAAgAAAEMAAABzFgAAAHwBAHwAAF8AAHwCAHwAAF8BAGQAAFMpAU4pAtoBRNoBZikD2gRz\nZWxmWgZsYWJlbHNaCGZ1bmN0aW9uqQByBgAAAPokLi4vcmVzb3VyY2VzL3ByaXZhdGUvaW5kZXBl\nbmRlbmNlLnB52ghfX2luaXRfXwUAAABzBAAAAAABCQF6DV9WZWMuX19pbml0X19jAgAAAAAAAAAC\nAAAAAgAAAEMAAABzHgAAAHwBAHwAAGoAAGsGAHIaAHwAAGoAAHwBABlTZAEAUykCTnIBAAAAKQFy\nBAAAACkC2gF22gFrcgYAAAByBgAAAHIHAAAA2gtfX2dldGl0ZW1fXwkAAABzAAAAAHoQX1ZlYy5f\nX2dldGl0ZW1fX2MDAAAAAAAAAAMAAAAEAAAAQwAAAHMVAAAAdAAABHwAAGoBAHwBADx9AgBkAABT\nKQFOKQJaCF9zZXRpdGVtcgQAAAApA3IJAAAAcgoAAADaA3ZhbHIGAAAAcgYAAAByBwAAANoLX19z\nZXRpdGVtX18KAAAAcwAAAAB6EF9WZWMuX19zZXRpdGVtX19jAQAAAAAAAAABAAAAAgAAAEMAAABz\nCAAAAGQCAHwAABRTKQNO6QEAAADp/////3IGAAAAKQFyCQAAAHIGAAAAcgYAAAByBwAAANoHX19u\nZWdfXwsAAABzAAAAAHoMX1ZlYy5fX25lZ19fYwIAAAAAAAAAAgAAAAUAAAADAAAAcywAAAB0AAB8\nAABqAQCHAABmAQBkAQBkAgCGAAB8AABqAgBqAwCDAABEgwEAgwIAUykDTmMBAAAAAAAAAAMAAAAE\nAAAAEwAAAHMjAAAAaQAAfAAAXRkAXAIAfQEAfQIAiAAAfAIAFHwBAJMCAHEGAFNyBgAAAHIGAAAA\nKQPaAi4wcgoAAADaAXgpAdoFYWxwaGFyBgAAAHIHAAAA+go8ZGljdGNvbXA+DAAAAHMCAAAACQB6\nIV9WZWMuX19ybXVsX18uPGxvY2Fscz4uPGRpY3Rjb21wPikEcgIAAAByAwAAAHIEAAAA2gVpdGVt\ncykCcgkAAAByEwAAAHIGAAAAKQFyEwAAAHIHAAAA2ghfX3JtdWxfXwwAAABzAAAAAHoNX1ZlYy5f\nX3JtdWxfX2MCAAAAAAAAAAIAAAAEAAAAAwAAAHNAAAAAdAAAiAAAdAEAgwIAcjgAdAIAhwAAhwEA\nZgIAZAEAZAIAhgAAiAEAagMAagQAgwAARIMBAIMBAFN0BQBTZAAAUykDTmMBAAAAAAAAAAIAAAAF\nAAAAEwAAAHMiAAAAZwAAfAAAXRgAfQEAiAEAfAEAGYgAAHwBABkUkQIAcQYAU3IGAAAAcgYAAAAp\nAnIRAAAAcgoAAAApAtoFb3RoZXJyBQAAAHIGAAAAcgcAAAD6CjxsaXN0Y29tcD4RAAAAcwIAAAAJ\nAHogX1ZlYy5fX211bF9fLjxsb2NhbHM+LjxsaX
N0Y29tcD4pBtoKaXNpbnN0YW5jZXICAAAA2gNz\ndW1yBAAAANoEa2V5c9oOTm90SW1wbGVtZW50ZWQpAnIFAAAAchcAAAByBgAAACkCchcAAAByBQAA\nAHIHAAAA2gdfX211bF9fDgAAAHMGAAAAAAIPASkCegxfVmVjLl9fbXVsX19jAgAAAAAAAAACAAAA\nBQAAAAMAAABzRwAAAHQAAIgAAGoBAIcAAIcBAGYCAGQBAGQCAIYAAHQCAIgAAGoDAGoEAIMAAIMB\nAGoFAIgBAGoDAGoEAIMAAIMBAESDAQCDAgBTKQNOYwEAAAAAAAAAAgAAAAUAAAATAAAAcyUAAABp\nAAB8AABdGwB9AQCIAAB8AQAZiAEAfAEAGRd8AQCTAgBxBgBTcgYAAAByBgAAACkCchEAAAByCgAA\nACkC2gF1cgkAAAByBgAAAHIHAAAAchQAAAAVAAAAcwIAAAAJAHogX1ZlYy5fX2FkZF9fLjxsb2Nh\nbHM+LjxkaWN0Y29tcD4pBnICAAAAcgMAAADaA3NldHIEAAAAchsAAADaBXVuaW9uKQJyHgAAAHIJ\nAAAAcgYAAAApAnIeAAAAcgkAAAByBwAAANoHX19hZGRfXxUAAABzAAAAAHoMX1ZlYy5fX2FkZF9f\nYwIAAAAAAAAAAgAAAAIAAABDAAAAcwkAAAB8AAB8AQALF1MpAXo0UmV0dXJucyBhIHZlY3RvciB3\naGljaCBpcyB0aGUgZGlmZmVyZW5jZSBvZiBhIGFuZCBiLnIGAAAAKQLaAWHaAWJyBgAAAHIGAAAA\ncgcAAADaB19fc3ViX18XAAAAcwIAAAAAAnoMX1ZlYy5fX3N1Yl9fTikM2ghfX25hbWVfX9oKX19t\nb2R1bGVfX9oMX19xdWFsbmFtZV9fch8AAAByCAAAAHILAAAAcg0AAAByEAAAAHIWAAAAch0AAABy\nIQAAAHIkAAAAcgYAAAByBgAAAHIGAAAAcgcAAAByAgAAAAQAAABzEAAAAAwBFQQMAQwBDAEMAgwH\nDAJyAgAAAGebK6GGm4QGPWMCAAAAAAAAAAUAAAAGAAAAQwAAAHNsAAAAZwAAfQIAeFkAfAAARF1R\nAH0DAHgoAHwCAERdIAB9BAB8AwB8AwB8BAAUfAQAfAQAFBt8BAAUGH0DAHEaAFd8AwB8AwAUfAEA\nawQAcg0AfAIAagAAfAMAgwEAAXENAHENAFd0AQB8AgCDAQBTKQFOKQLaBmFwcGVuZNoDbGVuKQXa\nAUxaA2Vwc1oJdnN0YXJsaXN0cgkAAABaBXZzdGFycgYAAAByBgAAAHIHAAAA2gdfUl9yYW5rGwAA\nAHMOAAAAAAEGAQ0BDQEeARAAFAFyKwAAAGMBAAAAAAAAAAYAAAAFAAAAAwAAAHO3AAAAdAAAdAEA\ndAIAiAEAgwEAgwEAgwEAfQEAZAEAfQIAeJIAiAEAZAEAGWoDAERdgwCJAACHAACHAQBmAgBkAgBk\nAwCGAAB8AQBEgwEAfQMAfAMAZwAAawMAciwAfAMAZAEAGX0EAHwBAGoEAHwEAIMBAAF8AgBkBAA3\nfQIAeDEAfAMAZAQAZAAAhQIAGURdHAB9BQCIAQB8BQAZiAEAfAQAGReIAQB8BQA8cYwAV3EsAHEs\nAFd8AgBTKQVOcgEAAABjAQAAAAAAAAACAAAABAAAABMAAABzKgAAAGcAAHwAAF0gAH0BAIgBAHwB\nABmIAAAZZAAAawMAcgYAfAEAkQIAcQYAUykBcgEAAAByBgAAACkCchEAAADaAXIpAtoBY9oHcm93\nbGlzdHIGAAAAcgcAAAByGAAAACcAAABzAgAAAAkAeh1fR0YyX3JhbmsuPGxvY2Fscz4uPGxpc3Rj\nb21wPnIOAAAAKQVyHwAAANoFcmFuZ2VyKQAAAHIDAAAA2gZyZW1vdmUpBnIuAAAAWglyb3dzX2xl\nZnRyLAAAAF
oRcm93c193aXRoX25vbnplcm9aBXBpdm90Wglyb3dfaW5kZXhyBgAAACkCci0AAABy\nLgAAAHIHAAAA2glfR0YyX3JhbmsjAAAAcxYAAAAAARgBBgEUARwBDAEKAQ0BCgEXASEBcjEAAABj\nAQAAAAAAAAAEAAAABQAAAEMAAABzdAAAAGQBAGQCAIQAAHwAAESDAQB9AQB4WgB8AABEXVIAfQIA\neEkAfAIAagAAagEAgwAARF04AH0DAHwDAGQDAGsDAHIwAHQCAHwDAHQDAGoEAIMCAHJeAHQFAHwB\nAIMBAFN0BgB8AQCDAQBTcTAAV3EaAFdkAwBTKQROYwEAAAAAAAAAAgAAAAUAAABTAAAAcyUAAABn\nAAB8AABdGwB9AQB0AAB8AQBqAQB8AQBqAgCDAgCRAgBxBgBTcgYAAAApA3ICAAAAcgMAAAByBAAA\nACkCchEAAAByHgAAAHIGAAAAcgYAAAByBwAAAHIYAAAAMQAAAHMCAAAACQB6GV9yYW5rLjxsb2Nh\nbHM+LjxsaXN0Y29tcD5yAQAAACkHcgQAAADaBnZhbHVlc3IZAAAA2gNHRjJaA09uZXIxAAAAcisA\nAAApBHIqAAAAWgJMY3IJAAAAchIAAAByBgAAAHIGAAAAcgcAAADaBV9yYW5rMAAAAHMQAAAAAAET\nAQ0BFgEMARIBCgESAXI0AAAAYwEAAAAAAAAAAQAAAAQAAABDAAAAczQAAAB0AAB8AAB0AQB0AgBm\nAgCDAgBzGwB0AwCCAQB0BAAEagUAZAEANwJfBQB0BgB8AACDAQBTKQJhcAIAAEZpbmRzIHRoZSBy\nYW5rIG9mIGEgbGlzdCBvciBzZXQgb2YgdmVjdG9ycy4KCiAgICBBcmdzOgogICAgICAgIEw6IEEg\nbGlzdCBvciBzZXQgb2YgdmVjdG9ycy4KCiAgICBSZXR1cm5zOgogICAgICAgIHg6IEEgbm9ubmVn\nYXRpdmUgaW50ZWdlci4gIFRoZSByYW5rIG9mIEwuCgogICAgUmFpc2VzOgogICAgICAgIEFzc2Vy\ndGlvbkVycm9yOiBBbiBlcnJvciBvY2N1cnMgd2hlbiBMIGlzIG5vdCBhIGxpc3Qgb3Igc2V0LgoK\nICAgIEV4YW1wbGU6CiAgICA+Pj4gZnJvbSB2ZWMgaW1wb3J0IFZlYwogICAgPj4+IGEwID0gVmVj\nKHsnYScsICdiJywgJ2MnLCAnZCd9LCB7J2EnOiAxfSkKICAgID4+PiBhMSA9IFZlYyh7J2EnLCAn\nYicsICdjJywgJ2QnfSwgeydiJzogMX0pCiAgICA+Pj4gYTIgPSBWZWMoeydhJywgJ2InLCAnYycs\nICdkJ30sIHsnYyc6IDF9KQogICAgPj4+IGEzID0gVmVjKHsnYScsICdiJywgJ2MnLCAnZCd9LCB7\nJ2EnOiAxLCAnYyc6IDN9KQogICAgPj4+IHJhbmsoW2EwLCBhMSwgYTJdKQogICAgMwogICAgPj4+\nIHJhbmsoe2EwLCBhMiwgYTN9KQogICAgMgogICAgPj4+IHJhbmsoe2EwLCBhMSwgYTN9KQogICAg\nMwogICAgPj4+IHJhbmsoW2EwLCBhMSwgYTIsIGEzXSkKICAgIDMKICAgIHIOAAAAKQdyGQAAANoE\nbGlzdHIfAAAA2g5Bc3NlcnRpb25FcnJvctoEcmFua9oJX19jYWxsc19fcjQAAAApAXIqAAAAcgYA\nAAByBgAAAHIHAAAAcjcAAAA6AAAAcwYAAAAAGxsBDwFyNwAAAGMBAAAAAAAAAAEAAAAEAAAAQwAA\nAHNAAAAAdAAAfAAAdAEAdAIAZgIAgwIAcxsAdAMAggEAdAQABGoFAGQBADcCXwUAdAYAfAAAgwEA\ndAcAfAAAgwEAawIAUykCYe0CAABEZXRlcm1pbmVzIGlmIGEgbGlzdCBvciBz
ZXQgb2YgdmVjdG9y\ncyBhcmUgbGluZWFybHkgaW5kZXBlbmRlbnQuCgogICAgQXJnczoKICAgICAgICBMOiBBIGxpc3Qg\nb3Igc2V0IG9mIHZlY3RvcnMuCgogICAgUmV0dXJuczoKICAgICAgICB4OiBBIGJvb2xlYW4uICBU\ncnVlIGlmIHRoZSB2ZWN0b3JzIGluIEwgYXJlIGxpbmVhcmx5IGluZGVwZW5kZW50LiAgRmFsc2UK\nICAgICAgICBvdGhlcndpc2UuCgogICAgUmFpc2VzOgogICAgICAgIEFzc2VydGlvbkVycm9yOiBB\nbiBlcnJvciBvY2N1cnMgd2hlbiBMIGlzIG5vdCBhIGxpc3Qgb3Igc2V0LgoKICAgIEV4YW1wbGU6\nCiAgICA+Pj4gZnJvbSB2ZWMgaW1wb3J0IFZlYwogICAgPj4+IGEwID0gVmVjKHsnYScsICdiJywg\nJ2MnLCAnZCd9LCB7J2EnOiAxfSkKICAgID4+PiBhMSA9IFZlYyh7J2EnLCAnYicsICdjJywgJ2Qn\nfSwgeydiJzogMX0pCiAgICA+Pj4gYTIgPSBWZWMoeydhJywgJ2InLCAnYycsICdkJ30sIHsnYyc6\nIDF9KQogICAgPj4+IGEzID0gVmVjKHsnYScsICdiJywgJ2MnLCAnZCd9LCB7J2EnOiAxLCAnYyc6\nIDN9KQogICAgPj4+IGlzX2luZGVwZW5kZW50KFthMCwgYTEsIGEyXSkKICAgIFRydWUKICAgID4+\nPiBpc19pbmRlcGVuZGVudCh7YTAsIGEyLCBhM30pCiAgICBGYWxzZQogICAgPj4+IGlzX2luZGVw\nZW5kZW50KHthMCwgYTEsIGEzfSkKICAgIFRydWUKICAgID4+PiBpc19pbmRlcGVuZGVudChbYTAs\nIGExLCBhMiwgYTNdKQogICAgRmFsc2UKICAgIHIOAAAAKQhyGQAAAHI1AAAAch8AAAByNgAAANoO\naXNfaW5kZXBlbmRlbnRyOAAAAHI0AAAAcikAAAApAXIqAAAAcgYAAAByBgAAAHIHAAAAcjkAAABZ\nAAAAcwYAAAAAHBsBDwFyOQAAAFoMaW5zdHJ1bWVudGVkKQlyMwAAAHICAAAAcisAAAByMQAAAHI0\nAAAAcjcAAAByOQAAAHI4AAAA2gtfX3ZlcnNpb25fX3IGAAAAcgYAAAByBgAAAHIHAAAA2gg8bW9k\ndWxlPgIAAABzFAAAAAwCExcPCAwNDAoMHwwgCQEJAQkB\n'
offset = 12
elif magic == b'\x17\r\r\n': # Python 3.5
pycData = b'Fw0NCmOrjFjtDQAA4wAAAAAAAAAAAAAAAAMAAABAAAAAc4YAAABkAABkAQBsAABaAABHZAIAZAMA\nhAAAZAMAgwIAWgEAZAQAZAUAZAYAhAEAWgIAZAcAZAgAhAAAWgMAZAkAZAoAhAAAWgQAZAsAZAwA\nhAAAWgUAZA0AZA4AhAAAWgYAZAAAZQUAXwcAZA8AZQUAXwgAZAAAZQYAXwcAZA8AZQYAXwgAZAEA\nUykQ6QAAAABOYwAAAAAAAAAAAAAAAAQAAABAAAAAc3kAAABlAABaAQBkAABaAgBlAwCDAABpAABk\nAQBkAgCEAgBaBABkAwBkBACEAABaBQBkBQBkBgCEAABaBgBkBwBkCACEAABaBwBkCQBkCgCEAABa\nCABkCwBkDACEAABaCQBkDQBkDgCEAABaCgBkDwBkEACEAABaCwBkEQBTKRLaBF9WZWNjAwAAAAAA\nAAADAAAAAgAAAEMAAABzFgAAAHwBAHwAAF8AAHwCAHwAAF8BAGQAAFMpAU4pAtoBRNoBZikD2gRz\nZWxmWgZsYWJlbHNaCGZ1bmN0aW9uqQByBgAAAPokLi4vcmVzb3VyY2VzL3ByaXZhdGUvaW5kZXBl\nbmRlbmNlLnB52ghfX2luaXRfXwUAAABzBAAAAAABCQF6DV9WZWMuX19pbml0X19jAgAAAAAAAAAC\nAAAAAgAAAEMAAABzHgAAAHwBAHwAAGoAAGsGAHIaAHwAAGoAAHwBABlTZAEAUykCTnIBAAAAKQFy\nBAAAACkC2gF22gFrcgYAAAByBgAAAHIHAAAA2gtfX2dldGl0ZW1fXwkAAABzAAAAAHoQX1ZlYy5f\nX2dldGl0ZW1fX2MDAAAAAAAAAAMAAAAEAAAAQwAAAHMVAAAAdAAABHwAAGoBAHwBADx9AgBkAABT\nKQFOKQJaCF9zZXRpdGVtcgQAAAApA3IJAAAAcgoAAADaA3ZhbHIGAAAAcgYAAAByBwAAANoLX19z\nZXRpdGVtX18KAAAAcwAAAAB6EF9WZWMuX19zZXRpdGVtX19jAQAAAAAAAAABAAAAAgAAAEMAAABz\nCAAAAGQCAHwAABRTKQNO6QEAAADp/////3IGAAAAKQFyCQAAAHIGAAAAcgYAAAByBwAAANoHX19u\nZWdfXwsAAABzAAAAAHoMX1ZlYy5fX25lZ19fYwIAAAAAAAAAAgAAAAUAAAADAAAAcywAAAB0AAB8\nAABqAQCHAABmAQBkAQBkAgCGAAB8AABqAgBqAwCDAABEgwEAgwIAUykDTmMBAAAAAAAAAAMAAAAE\nAAAAEwAAAHMjAAAAaQAAfAAAXRkAXAIAfQEAfQIAiAAAfAIAFHwBAJMCAHEGAFNyBgAAAHIGAAAA\nKQPaAi4wcgoAAADaAXgpAdoFYWxwaGFyBgAAAHIHAAAA+go8ZGljdGNvbXA+DAAAAHMCAAAACQB6\nIV9WZWMuX19ybXVsX18uPGxvY2Fscz4uPGRpY3Rjb21wPikEcgIAAAByAwAAAHIEAAAA2gVpdGVt\ncykCcgkAAAByEwAAAHIGAAAAKQFyEwAAAHIHAAAA2ghfX3JtdWxfXwwAAABzAAAAAHoNX1ZlYy5f\nX3JtdWxfX2MCAAAAAAAAAAIAAAAEAAAAAwAAAHNAAAAAdAAAiAAAdAEAgwIAcjgAdAIAhwAAhwEA\nZgIAZAEAZAIAhgAAiAEAagMAagQAgwAARIMBAIMBAFN0BQBTZAAAUykDTmMBAAAAAAAAAAIAAAAF\nAAAAEwAAAHMiAAAAZwAAfAAAXRgAfQEAiAEAfAEAGYgAAHwBABkUkQIAcQYAU3IGAAAAcgYAAAAp\nAnIRAAAAcgoAAAApAtoFb3RoZXJyBQAAAHIGAAAAcgcAAAD6CjxsaXN0Y29tcD4RAAAAcwIAAAAJ\nAHogX1ZlYy5fX211bF9fLjxsb2NhbHM+LjxsaX
N0Y29tcD4pBtoKaXNpbnN0YW5jZXICAAAA2gNz\ndW1yBAAAANoEa2V5c9oOTm90SW1wbGVtZW50ZWQpAnIFAAAAchcAAAByBgAAACkCchcAAAByBQAA\nAHIHAAAA2gdfX211bF9fDgAAAHMGAAAAAAIPASkCegxfVmVjLl9fbXVsX19jAgAAAAAAAAACAAAA\nBQAAAAMAAABzRwAAAHQAAIgAAGoBAIcAAIcBAGYCAGQBAGQCAIYAAHQCAIgAAGoDAGoEAIMAAIMB\nAGoFAIgBAGoDAGoEAIMAAIMBAESDAQCDAgBTKQNOYwEAAAAAAAAAAgAAAAUAAAATAAAAcyUAAABp\nAAB8AABdGwB9AQCIAAB8AQAZiAEAfAEAGRd8AQCTAgBxBgBTcgYAAAByBgAAACkCchEAAAByCgAA\nACkC2gF1cgkAAAByBgAAAHIHAAAAchQAAAAVAAAAcwIAAAAJAHogX1ZlYy5fX2FkZF9fLjxsb2Nh\nbHM+LjxkaWN0Y29tcD4pBnICAAAAcgMAAADaA3NldHIEAAAAchsAAADaBXVuaW9uKQJyHgAAAHIJ\nAAAAcgYAAAApAnIeAAAAcgkAAAByBwAAANoHX19hZGRfXxUAAABzAAAAAHoMX1ZlYy5fX2FkZF9f\nYwIAAAAAAAAAAgAAAAIAAABDAAAAcwkAAAB8AAB8AQALF1MpAXo0UmV0dXJucyBhIHZlY3RvciB3\naGljaCBpcyB0aGUgZGlmZmVyZW5jZSBvZiBhIGFuZCBiLnIGAAAAKQLaAWHaAWJyBgAAAHIGAAAA\ncgcAAADaB19fc3ViX18XAAAAcwIAAAAAAnoMX1ZlYy5fX3N1Yl9fTikM2ghfX25hbWVfX9oKX19t\nb2R1bGVfX9oMX19xdWFsbmFtZV9fch8AAAByCAAAAHILAAAAcg0AAAByEAAAAHIWAAAAch0AAABy\nIQAAAHIkAAAAcgYAAAByBgAAAHIGAAAAcgcAAAByAgAAAAQAAABzEAAAAAwBFQQMAQwBDAEMAgwH\nDAJyAgAAAGebK6GGm4QGPWMCAAAAAAAAAAUAAAAGAAAAQwAAAHNpAAAAZwAAfQIAeFYAfAAARF1O\nAH0DAHgoAHwCAERdIAB9BAB8AwB8AwB8BAAUfAQAfAQAFBt8BAAUGH0DAHEaAFd8AwB8AwAUfAEA\nawQAcg0AfAIAagAAfAMAgwEAAXENAFd0AQB8AgCDAQBTKQFOKQLaBmFwcGVuZNoDbGVuKQXaAUxa\nA2Vwc1oJdnN0YXJsaXN0cgkAAABaBXZzdGFycgYAAAByBgAAAHIHAAAA2gdfUl9yYW5rGwAAAHMO\nAAAAAAEGAQ0BDQEeARAAEQFyKwAAAGMBAAAAAAAAAAYAAAAFAAAAAwAAAHO0AAAAdAAAdAEAdAIA\niAEAgwEAgwEAgwEAfQEAZAEAfQIAeI8AiAEAZAEAGWoDAERdgACJAACHAACHAQBmAgBkAgBkAwCG\nAAB8AQBEgwEAfQMAfAMAZwAAawMAciwAfAMAZAEAGX0EAHwBAGoEAHwEAIMBAAF8AgBkBAA3fQIA\neC4AfAMAZAQAZAAAhQIAGURdHAB9BQCIAQB8BQAZiAEAfAQAGReIAQB8BQA8cYwAV3EsAFd8AgBT\nKQVOcgEAAABjAQAAAAAAAAACAAAABAAAABMAAABzKgAAAGcAAHwAAF0gAH0BAIgBAHwBABmIAAAZ\nZAAAawMAcgYAfAEAkQIAcQYAUykBcgEAAAByBgAAACkCchEAAADaAXIpAtoBY9oHcm93bGlzdHIG\nAAAAcgcAAAByGAAAACcAAABzAgAAAAkAeh1fR0YyX3JhbmsuPGxvY2Fscz4uPGxpc3Rjb21wPnIO\nAAAAKQVyHwAAANoFcmFuZ2VyKQAAAHIDAAAA2gZyZW1vdmUpBnIuAAAAWglyb3dzX2xlZnRyLAAA\nAFoRcm93c1
93aXRoX25vbnplcm9aBXBpdm90Wglyb3dfaW5kZXhyBgAAACkCci0AAAByLgAAAHIH\nAAAA2glfR0YyX3JhbmsjAAAAcxYAAAAAARgBBgEUARwBDAEKAQ0BCgEXAR4BcjEAAABjAQAAAAAA\nAAAEAAAABQAAAEMAAABzdAAAAGQBAGQCAIQAAHwAAESDAQB9AQB4WgB8AABEXVIAfQIAeEkAfAIA\nagAAagEAgwAARF04AH0DAHwDAGQDAGsDAHIwAHQCAHwDAHQDAGoEAIMCAHJeAHQFAHwBAIMBAFN0\nBgB8AQCDAQBTcTAAV3EaAFdkAwBTKQROYwEAAAAAAAAAAgAAAAUAAABTAAAAcyUAAABnAAB8AABd\nGwB9AQB0AAB8AQBqAQB8AQBqAgCDAgCRAgBxBgBTcgYAAAApA3ICAAAAcgMAAAByBAAAACkCchEA\nAAByHgAAAHIGAAAAcgYAAAByBwAAAHIYAAAAMQAAAHMCAAAACQB6GV9yYW5rLjxsb2NhbHM+Ljxs\naXN0Y29tcD5yAQAAACkHcgQAAADaBnZhbHVlc3IZAAAA2gNHRjJaA09uZXIxAAAAcisAAAApBHIq\nAAAAWgJMY3IJAAAAchIAAAByBgAAAHIGAAAAcgcAAADaBV9yYW5rMAAAAHMQAAAAAAETAQ0BFgEM\nARIBCgESAXI0AAAAYwEAAAAAAAAAAQAAAAQAAABDAAAAczQAAAB0AAB8AAB0AQB0AgBmAgCDAgBz\nGwB0AwCCAQB0BAAEagUAZAEANwJfBQB0BgB8AACDAQBTKQJhcAIAAEZpbmRzIHRoZSByYW5rIG9m\nIGEgbGlzdCBvciBzZXQgb2YgdmVjdG9ycy4KCiAgICBBcmdzOgogICAgICAgIEw6IEEgbGlzdCBv\nciBzZXQgb2YgdmVjdG9ycy4KCiAgICBSZXR1cm5zOgogICAgICAgIHg6IEEgbm9ubmVnYXRpdmUg\naW50ZWdlci4gIFRoZSByYW5rIG9mIEwuCgogICAgUmFpc2VzOgogICAgICAgIEFzc2VydGlvbkVy\ncm9yOiBBbiBlcnJvciBvY2N1cnMgd2hlbiBMIGlzIG5vdCBhIGxpc3Qgb3Igc2V0LgoKICAgIEV4\nYW1wbGU6CiAgICA+Pj4gZnJvbSB2ZWMgaW1wb3J0IFZlYwogICAgPj4+IGEwID0gVmVjKHsnYScs\nICdiJywgJ2MnLCAnZCd9LCB7J2EnOiAxfSkKICAgID4+PiBhMSA9IFZlYyh7J2EnLCAnYicsICdj\nJywgJ2QnfSwgeydiJzogMX0pCiAgICA+Pj4gYTIgPSBWZWMoeydhJywgJ2InLCAnYycsICdkJ30s\nIHsnYyc6IDF9KQogICAgPj4+IGEzID0gVmVjKHsnYScsICdiJywgJ2MnLCAnZCd9LCB7J2EnOiAx\nLCAnYyc6IDN9KQogICAgPj4+IHJhbmsoW2EwLCBhMSwgYTJdKQogICAgMwogICAgPj4+IHJhbmso\ne2EwLCBhMiwgYTN9KQogICAgMgogICAgPj4+IHJhbmsoe2EwLCBhMSwgYTN9KQogICAgMwogICAg\nPj4+IHJhbmsoW2EwLCBhMSwgYTIsIGEzXSkKICAgIDMKICAgIHIOAAAAKQdyGQAAANoEbGlzdHIf\nAAAA2g5Bc3NlcnRpb25FcnJvctoEcmFua9oJX19jYWxsc19fcjQAAAApAXIqAAAAcgYAAAByBgAA\nAHIHAAAAcjcAAAA6AAAAcwYAAAAAGxsBDwFyNwAAAGMBAAAAAAAAAAEAAAAEAAAAQwAAAHNAAAAA\ndAAAfAAAdAEAdAIAZgIAgwIAcxsAdAMAggEAdAQABGoFAGQBADcCXwUAdAYAfAAAgwEAdAcAfAAA\ngwEAawIAUykCYe0CAABEZXRlcm1pbmVzIGlmIGEgbGlzdCBvciBzZXQgb2Yg
dmVjdG9ycyBhcmUg\nbGluZWFybHkgaW5kZXBlbmRlbnQuCgogICAgQXJnczoKICAgICAgICBMOiBBIGxpc3Qgb3Igc2V0\nIG9mIHZlY3RvcnMuCgogICAgUmV0dXJuczoKICAgICAgICB4OiBBIGJvb2xlYW4uICBUcnVlIGlm\nIHRoZSB2ZWN0b3JzIGluIEwgYXJlIGxpbmVhcmx5IGluZGVwZW5kZW50LiAgRmFsc2UKICAgICAg\nICBvdGhlcndpc2UuCgogICAgUmFpc2VzOgogICAgICAgIEFzc2VydGlvbkVycm9yOiBBbiBlcnJv\nciBvY2N1cnMgd2hlbiBMIGlzIG5vdCBhIGxpc3Qgb3Igc2V0LgoKICAgIEV4YW1wbGU6CiAgICA+\nPj4gZnJvbSB2ZWMgaW1wb3J0IFZlYwogICAgPj4+IGEwID0gVmVjKHsnYScsICdiJywgJ2MnLCAn\nZCd9LCB7J2EnOiAxfSkKICAgID4+PiBhMSA9IFZlYyh7J2EnLCAnYicsICdjJywgJ2QnfSwgeydi\nJzogMX0pCiAgICA+Pj4gYTIgPSBWZWMoeydhJywgJ2InLCAnYycsICdkJ30sIHsnYyc6IDF9KQog\nICAgPj4+IGEzID0gVmVjKHsnYScsICdiJywgJ2MnLCAnZCd9LCB7J2EnOiAxLCAnYyc6IDN9KQog\nICAgPj4+IGlzX2luZGVwZW5kZW50KFthMCwgYTEsIGEyXSkKICAgIFRydWUKICAgID4+PiBpc19p\nbmRlcGVuZGVudCh7YTAsIGEyLCBhM30pCiAgICBGYWxzZQogICAgPj4+IGlzX2luZGVwZW5kZW50\nKHthMCwgYTEsIGEzfSkKICAgIFRydWUKICAgID4+PiBpc19pbmRlcGVuZGVudChbYTAsIGExLCBh\nMiwgYTNdKQogICAgRmFsc2UKICAgIHIOAAAAKQhyGQAAAHI1AAAAch8AAAByNgAAANoOaXNfaW5k\nZXBlbmRlbnRyOAAAAHI0AAAAcikAAAApAXIqAAAAcgYAAAByBgAAAHIHAAAAcjkAAABZAAAAcwYA\nAAAAHBsBDwFyOQAAAFoMaW5zdHJ1bWVudGVkKQlyMwAAAHICAAAAcisAAAByMQAAAHI0AAAAcjcA\nAAByOQAAAHI4AAAA2gtfX3ZlcnNpb25fX3IGAAAAcgYAAAByBgAAAHIHAAAA2gg8bW9kdWxlPgIA\nAABzFAAAAAwCExcPCAwNDAoMHwwgCQEJAQkB\n'
offset = 12
elif magic == b'3\r\r\n': # Python 3.6
pycData = b'Mw0NCmOrjFjtDQAA4wAAAAAAAAAAAAAAAAMAAABAAAAAc1wAAABkAGQBbABaAEcAZAJkA4QAZAOD\nAloBZBBkBWQGhAFaAmQHZAiEAFoDZAlkCoQAWgRkC2QMhABaBWQNZA6EAFoGZABlBV8HZA9lBV8I\nZABlBl8HZA9lBl8IZAFTACkR6QAAAABOYwAAAAAAAAAAAAAAAAMAAABAAAAAc1QAAABlAFoBZABa\nAmUDgwBpAGYCZAFkAoQBWgRkA2QEhABaBWQFZAaEAFoGZAdkCIQAWgdkCWQKhABaCGQLZAyEAFoJ\nZA1kDoQAWgpkD2QQhABaC2QRUwApEtoEX1ZlY2MDAAAAAAAAAAMAAAACAAAAQwAAAHMQAAAAfAF8\nAF8AfAJ8AF8BZABTACkBTikC2gFE2gFmKQPaBHNlbGZaBmxhYmVsc1oIZnVuY3Rpb26pAHIGAAAA\n+iQuLi9yZXNvdXJjZXMvcHJpdmF0ZS9pbmRlcGVuZGVuY2UucHnaCF9faW5pdF9fBQAAAHMEAAAA\nAAEGAXoNX1ZlYy5fX2luaXRfX2MCAAAAAAAAAAIAAAACAAAAQwAAAHMYAAAAfAF8AGoAawZyFHwA\nagB8ARkAUwBkAVMAKQJOcgEAAAApAXIEAAAAKQLaAXbaAWtyBgAAAHIGAAAAcgcAAADaC19fZ2V0\naXRlbV9fCQAAAHMAAAAAehBfVmVjLl9fZ2V0aXRlbV9fYwMAAAAAAAAAAwAAAAQAAABDAAAAcxIA\nAAB0AAQAfABqAXwBPAB9AmQAUwApAU4pAloIX3NldGl0ZW1yBAAAACkDcgkAAAByCgAAANoDdmFs\ncgYAAAByBgAAAHIHAAAA2gtfX3NldGl0ZW1fXwoAAABzAAAAAHoQX1ZlYy5fX3NldGl0ZW1fX2MB\nAAAAAAAAAAEAAAACAAAAQwAAAHMIAAAAZAJ8ABQAUwApA07pAQAAAOn/////cgYAAAApAXIJAAAA\ncgYAAAByBgAAAHIHAAAA2gdfX25lZ19fCwAAAHMAAAAAegxfVmVjLl9fbmVnX19jAgAAAAAAAAAC\nAAAABQAAAAMAAABzIAAAAHQAfABqAYcAZgFkAWQChAh8AGoCagODAEQAgwGDAlMAKQNOYwEAAAAA\nAAAAAwAAAAQAAAATAAAAcxoAAABpAHwAXRJcAn0BfQKIAHwCFAB8AZMCcQRTAHIGAAAAcgYAAAAp\nA9oCLjByCgAAANoBeCkB2gVhbHBoYXIGAAAAcgcAAAD6CjxkaWN0Y29tcD4MAAAAcwIAAAAGAHoh\nX1ZlYy5fX3JtdWxfXy48bG9jYWxzPi48ZGljdGNvbXA+KQRyAgAAAHIDAAAAcgQAAADaBWl0ZW1z\nKQJyCQAAAHITAAAAcgYAAAApAXITAAAAcgcAAADaCF9fcm11bF9fDAAAAHMAAAAAeg1fVmVjLl9f\ncm11bF9fYwIAAAAAAAAAAgAAAAQAAAADAAAAczAAAAB0AIgAdAGDAnIodAKHAIcBZgJkAWQChAiI\nAWoDagSDAEQAgwGDAVMAdAVTAGQAUwApA05jAQAAAAAAAAACAAAABQAAABMAAABzHAAAAGcAfABd\nFH0BiAF8ARkAiAB8ARkAFACRAnEEUwByBgAAAHIGAAAAKQJyEQAAAHIKAAAAKQLaBW90aGVycgUA\nAAByBgAAAHIHAAAA+go8bGlzdGNvbXA+EQAAAHMCAAAABgB6IF9WZWMuX19tdWxfXy48bG9jYWxz\nPi48bGlzdGNvbXA+KQbaCmlzaW5zdGFuY2VyAgAAANoDc3VtcgQAAADaBGtleXPaDk5vdEltcGxl\nbWVudGVkKQJyBQAAAHIXAAAAcgYAAAApAnIXAAAAcgUAAAByBwAAANoHX19tdWxfXw4AAABzBgAA\nAAACCgEeAnoMX1ZlYy5fX211bF9fYwIAAAAAAA
AAAgAAAAUAAAADAAAAczIAAAB0AIgAagGHAIcB\nZgJkAWQChAh0AogAagNqBIMAgwFqBYgBagNqBIMAgwFEAIMBgwJTACkDTmMBAAAAAAAAAAIAAAAF\nAAAAEwAAAHMeAAAAaQB8AF0WfQGIAHwBGQCIAXwBGQAXAHwBkwJxBFMAcgYAAAByBgAAACkCchEA\nAAByCgAAACkC2gF1cgkAAAByBgAAAHIHAAAAchQAAAAVAAAAcwIAAAAGAHogX1ZlYy5fX2FkZF9f\nLjxsb2NhbHM+LjxkaWN0Y29tcD4pBnICAAAAcgMAAADaA3NldHIEAAAAchsAAADaBXVuaW9uKQJy\nHgAAAHIJAAAAcgYAAAApAnIeAAAAcgkAAAByBwAAANoHX19hZGRfXxUAAABzAAAAAHoMX1ZlYy5f\nX2FkZF9fYwIAAAAAAAAAAgAAAAIAAABDAAAAcwoAAAB8AHwBCwAXAFMAKQF6NFJldHVybnMgYSB2\nZWN0b3Igd2hpY2ggaXMgdGhlIGRpZmZlcmVuY2Ugb2YgYSBhbmQgYi5yBgAAACkC2gFh2gFicgYA\nAAByBgAAAHIHAAAA2gdfX3N1Yl9fFwAAAHMCAAAAAAJ6DF9WZWMuX19zdWJfX04pDNoIX19uYW1l\nX1/aCl9fbW9kdWxlX1/aDF9fcXVhbG5hbWVfX3IfAAAAcggAAAByCwAAAHINAAAAchAAAAByFgAA\nAHIdAAAAciEAAAByJAAAAHIGAAAAcgYAAAByBgAAAHIHAAAAcgIAAAAEAAAAcxAAAAAIARAECAEI\nAQgBCAIIBwgCcgIAAADnmyuhhpuEBj1jAgAAAAAAAAAFAAAABgAAAEMAAABzVgAAAGcAfQJ4SHwA\nRABdQH0DeCR8AkQAXRx9BHwDfAN8BBQAfAR8BBQAGwB8BBQAGAB9A3EUVwB8A3wDFAB8AWsEcgp8\nAmoAfAODAQEAcQpXAHQBfAKDAVMAKQFOKQLaBmFwcGVuZNoDbGVuKQXaAUxaA2Vwc1oJdnN0YXJs\naXN0cgkAAABaBXZzdGFycgYAAAByBgAAAHIHAAAA2gdfUl9yYW5rGwAAAHMOAAAAAAEEAQoBCgEc\nAQwADgFyLAAAAGMBAAAAAAAAAAYAAAAFAAAAAwAAAHOMAAAAdAB0AXQCiAGDAYMBgwF9AWQBfQJ4\ncogBZAEZAGoDRABdZIkAhwCHAWYCZAJkA4QIfAFEAIMBfQN8A2cAawNyIHwDZAEZAH0EfAFqBHwE\ngwEBAHwCZAQ3AH0CeCh8A2QEZACFAhkARABdGH0FiAF8BRkAiAF8BBkAFwCIAXwFPABxaFcAcSBX\nAHwCUwApBU5yAQAAAGMBAAAAAAAAAAIAAAAEAAAAEwAAAHMgAAAAZwB8AF0YfQGIAXwBGQCIABkA\nZABrA3IEfAGRAnEEUwApAXIBAAAAcgYAAAApAnIRAAAA2gFyKQLaAWPaB3Jvd2xpc3RyBgAAAHIH\nAAAAchgAAAAnAAAAcwIAAAAGAHodX0dGMl9yYW5rLjxsb2NhbHM+LjxsaXN0Y29tcD5yDgAAACkF\nch8AAADaBXJhbmdlcioAAAByAwAAANoGcmVtb3ZlKQZyLwAAAFoJcm93c19sZWZ0ci0AAABaEXJv\nd3Nfd2l0aF9ub256ZXJvWgVwaXZvdFoJcm93X2luZGV4cgYAAAApAnIuAAAAci8AAAByBwAAANoJ\nX0dGMl9yYW5rIwAAAHMWAAAAAAEQAQQBEAEUAQgBCAEKAQgBEgEcAXIyAAAAYwEAAAAAAAAABAAA\nAAUAAABDAAAAc1gAAABkAWQChAB8AEQAgwF9AXhEfABEAF08fQJ4NnwCagBqAYMARABdKH0DfANk\nA2sDciR0AnwDdANqBIMCckR0BXwBgwFTAHQGfAGDAVMAcSRXAHEUVwBkA1MAKQROYwEAAAAAAAAA\nAgAAAAUAAA
BTAAAAcxoAAABnAHwAXRJ9AXQAfAFqAXwBagKDApECcQRTAHIGAAAAKQNyAgAAAHID\nAAAAcgQAAAApAnIRAAAAch4AAAByBgAAAHIGAAAAcgcAAAByGAAAADEAAABzAgAAAAYAehlfcmFu\nay48bG9jYWxzPi48bGlzdGNvbXA+cgEAAAApB3IEAAAA2gZ2YWx1ZXNyGQAAANoDR0YyWgNPbmVy\nMgAAAHIsAAAAKQRyKwAAAFoCTGNyCQAAAHISAAAAcgYAAAByBgAAAHIHAAAA2gVfcmFuazAAAABz\nEAAAAAABDgEKARABCAEMAQgBEAFyNQAAAGMBAAAAAAAAAAEAAAAEAAAAQwAAAHMoAAAAdAB8AHQB\ndAJmAoMCcxJ0A4IBdAQEAGoFZAE3AAIAXwV0BnwAgwFTACkCYXACAABGaW5kcyB0aGUgcmFuayBv\nZiBhIGxpc3Qgb3Igc2V0IG9mIHZlY3RvcnMuCgogICAgQXJnczoKICAgICAgICBMOiBBIGxpc3Qg\nb3Igc2V0IG9mIHZlY3RvcnMuCgogICAgUmV0dXJuczoKICAgICAgICB4OiBBIG5vbm5lZ2F0aXZl\nIGludGVnZXIuICBUaGUgcmFuayBvZiBMLgoKICAgIFJhaXNlczoKICAgICAgICBBc3NlcnRpb25F\ncnJvcjogQW4gZXJyb3Igb2NjdXJzIHdoZW4gTCBpcyBub3QgYSBsaXN0IG9yIHNldC4KCiAgICBF\neGFtcGxlOgogICAgPj4+IGZyb20gdmVjIGltcG9ydCBWZWMKICAgID4+PiBhMCA9IFZlYyh7J2En\nLCAnYicsICdjJywgJ2QnfSwgeydhJzogMX0pCiAgICA+Pj4gYTEgPSBWZWMoeydhJywgJ2InLCAn\nYycsICdkJ30sIHsnYic6IDF9KQogICAgPj4+IGEyID0gVmVjKHsnYScsICdiJywgJ2MnLCAnZCd9\nLCB7J2MnOiAxfSkKICAgID4+PiBhMyA9IFZlYyh7J2EnLCAnYicsICdjJywgJ2QnfSwgeydhJzog\nMSwgJ2MnOiAzfSkKICAgID4+PiByYW5rKFthMCwgYTEsIGEyXSkKICAgIDMKICAgID4+PiByYW5r\nKHthMCwgYTIsIGEzfSkKICAgIDIKICAgID4+PiByYW5rKHthMCwgYTEsIGEzfSkKICAgIDMKICAg\nID4+PiByYW5rKFthMCwgYTEsIGEyLCBhM10pCiAgICAzCiAgICByDgAAACkHchkAAADaBGxpc3Ry\nHwAAANoOQXNzZXJ0aW9uRXJyb3LaBHJhbmvaCV9fY2FsbHNfX3I1AAAAKQFyKwAAAHIGAAAAcgYA\nAAByBwAAAHI4AAAAOgAAAHMGAAAAABsSAQ4BcjgAAABjAQAAAAAAAAABAAAABAAAAEMAAABzMAAA\nAHQAfAB0AXQCZgKDAnMSdAOCAXQEBABqBWQBNwACAF8FdAZ8AIMBdAd8AIMBawJTACkCYe0CAABE\nZXRlcm1pbmVzIGlmIGEgbGlzdCBvciBzZXQgb2YgdmVjdG9ycyBhcmUgbGluZWFybHkgaW5kZXBl\nbmRlbnQuCgogICAgQXJnczoKICAgICAgICBMOiBBIGxpc3Qgb3Igc2V0IG9mIHZlY3RvcnMuCgog\nICAgUmV0dXJuczoKICAgICAgICB4OiBBIGJvb2xlYW4uICBUcnVlIGlmIHRoZSB2ZWN0b3JzIGlu\nIEwgYXJlIGxpbmVhcmx5IGluZGVwZW5kZW50LiAgRmFsc2UKICAgICAgICBvdGhlcndpc2UuCgog\nICAgUmFpc2VzOgogICAgICAgIEFzc2VydGlvbkVycm9yOiBBbiBlcnJvciBvY2N1cnMgd2hlbiBM\nIGlzIG5vdCBhIGxpc3Qgb3Igc2V0LgoKICAgIEV4YW1wbGU6CiAgICA+Pj4g
ZnJvbSB2ZWMgaW1w\nb3J0IFZlYwogICAgPj4+IGEwID0gVmVjKHsnYScsICdiJywgJ2MnLCAnZCd9LCB7J2EnOiAxfSkK\nICAgID4+PiBhMSA9IFZlYyh7J2EnLCAnYicsICdjJywgJ2QnfSwgeydiJzogMX0pCiAgICA+Pj4g\nYTIgPSBWZWMoeydhJywgJ2InLCAnYycsICdkJ30sIHsnYyc6IDF9KQogICAgPj4+IGEzID0gVmVj\nKHsnYScsICdiJywgJ2MnLCAnZCd9LCB7J2EnOiAxLCAnYyc6IDN9KQogICAgPj4+IGlzX2luZGVw\nZW5kZW50KFthMCwgYTEsIGEyXSkKICAgIFRydWUKICAgID4+PiBpc19pbmRlcGVuZGVudCh7YTAs\nIGEyLCBhM30pCiAgICBGYWxzZQogICAgPj4+IGlzX2luZGVwZW5kZW50KHthMCwgYTEsIGEzfSkK\nICAgIFRydWUKICAgID4+PiBpc19pbmRlcGVuZGVudChbYTAsIGExLCBhMiwgYTNdKQogICAgRmFs\nc2UKICAgIHIOAAAAKQhyGQAAAHI2AAAAch8AAAByNwAAANoOaXNfaW5kZXBlbmRlbnRyOQAAAHI1\nAAAAcioAAAApAXIrAAAAcgYAAAByBgAAAHIHAAAAcjoAAABZAAAAcwYAAAAAHBIBDgFyOgAAAFoM\naW5zdHJ1bWVudGVkKQFyKAAAACkJcjQAAAByAgAAAHIsAAAAcjIAAAByNQAAAHI4AAAAcjoAAABy\nOQAAANoLX192ZXJzaW9uX19yBgAAAHIGAAAAcgYAAAByBwAAANoIPG1vZHVsZT4CAAAAcxQAAAAI\nAg4XCggIDQgKCB8IIAYBBgEGAQ==\n'
offset = 12
else:
raise ImportError("Unsupported Python version")
pycData = base64.decodebytes(pycData)
assert pycData[:4] == magic, "Bad magic number"
# try:
# return marshal.loads(pycData[offset:])
# except ValueError:
# raise ImportError("Could not read .pyc data")
output_file = open('independence_written.pyc', 'wb')
output_file.write(pycData)
output_file.close()
if __name__ == "__main__":
    # Decode and write the interpreter-version-matched .pyc payload to disk.
    main()
| [
"orangepips@gmail.com"
] | orangepips@gmail.com |
e4ba016e8bb3ad6ec871723925cf265fadfb6d51 | 152bdbeb3057a084401df3c3cf83394f09decad7 | /APAC-server.py | a94fe989fb0c45d3f6f04e97f6faefafae9057b5 | [] | no_license | ramadas88/1-Ansible-Master | fbfd955ba860fd6d9cae1dbccfd74438814bfcd3 | fdce7d78d0d38b5f9b460dd7674b154339d45504 | refs/heads/master | 2020-06-03T11:14:06.338454 | 2019-07-21T09:13:33 | 2019-07-21T09:13:33 | 191,546,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | import pandas as pd
# Python 2 script: builds a demo pandas DataFrame (unused) and prints pieces
# of each line of an alert-check CSV.
File_Name = "SpecificAlertCheck_2019-6-11_1617.csv"
# List of Tuples
students = [ ('jack', 34, 'Sydeny' , 'Australia') ,
             ('Riti', 30, 'Delhi' , 'India' ) ,
             ('Vikas', 31, 'Mumbai' , 'India' ) ,
             ('Neelu', 32, 'Bangalore' , 'India' ) ,
             ('John', 16, 'New York' , 'US') ,
             ('Mike', 17, 'las vegas' , 'US') ]
infile = open ( File_Name , 'r' )  # NOTE(review): file handle is never closed
dfObj = pd.DataFrame(students, columns = ['Name' , 'Age', 'City' , 'Country'], index=['a', 'b', 'c' , 'd' , 'e' , 'f'])
#print dfObj
for line in infile:
    linenew = line.split('\n')
    # NOTE(review): splitting a single line on '\n' yields at most two
    # elements, so linenew[2] raises IndexError on every iteration —
    # presumably split(',') (a CSV field split) was intended; verify.
    linenew2 = linenew[2]
    #if line[1]:
    #    print line
    print linenew2
    #exit(20)
"ramadas88@gmail.com"
] | ramadas88@gmail.com |
a65c18a1683bd7269c63d79bd52543077f0c8c1a | fe1650eef4fc338ae31e206d6f0ce706bf943ba7 | /VideoFusionGPU/frame/__init__.py | fe51af8832f1117bcd3350f4d26a087a82cf8765 | [] | no_license | davidliyutong/360ParallelConvert | 1341c8046a49a179eb2038dee80c33cc27b0c082 | 42c4fb46878f0355f39a3975309a98ffd2028bdd | refs/heads/master | 2022-11-10T21:50:44.453403 | 2020-07-05T15:34:24 | 2020-07-05T15:34:24 | 277,330,157 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | import numpy as np
import numba.cuda as cuda
from ..utils import sphere2cube, project
class frame_processor(object):
    """Base class holding the CUDA launch configuration and device/host
    buffers for projecting an input frame into a 2:1 panoramic view.

    The input frame is expected to be a horizontal strip of six square
    faces (width == 6 * height) — presumably a cube map, given the
    ``sphere2cube`` helper imported by this module (TODO confirm).
    Subclasses implement :meth:`render`.
    """
    def __init__(self, frame_shape, view_shape=(1024, 2048, 3), block=(16, 16, 1)):
        # frame_shape: (edge, 6*edge[, channels]) input strip of six faces.
        # view_shape: (height, 2*height, channels) output panorama.
        # block: CUDA block dimensions; x and y must match and divide the
        #   view height evenly.
        self.valid = False
        self.view_shape = view_shape
        self.block_dim = block
        # Validate the shape contracts described above; any failure leaves
        # self.valid == False.
        assert block[0] == block[1]
        assert view_shape[0] * 2 == view_shape[1]
        assert view_shape[0] % block[0] == 0
        assert frame_shape[1] == 6 * frame_shape[0]
        self.valid = True
        self.frame_shape = frame_shape
        self.height = view_shape[0]
        self.width = view_shape[1]
        self.depth = view_shape[2]
        self.edge = frame_shape[0]
        # Grid sized so grid_dim * block_dim covers the full view in x/y.
        self.grid_dim = (int(self.view_shape[0] / self.block_dim[0]), int(self.view_shape[1] / self.block_dim[1]), 1)
        self.function = project
        self.stream0 = cuda.stream()
        self.stream1 = cuda.stream()
        # Pre-allocated reusable buffers: device-side input/output and a
        # host-side output array for copy-back.
        self.imgIn_gpu = cuda.device_array(shape=self.frame_shape, dtype=np.uint8, stream=self.stream0)
        self.imgOut_gpu = cuda.device_array(shape=self.view_shape, dtype=np.uint8, stream=self.stream1)
        self.imgOut_cpu = np.zeros(shape=self.view_shape, dtype=np.uint8)
    def render(self, frame):
        """Project *frame* into the view; implemented by subclasses."""
        pass
class frame_from_horizon(frame_processor):
    """Frame processor whose input frames are uploaded from host memory."""
    def __init__(self, frame_shape, view_shape=(1024, 2048, 3), block=(16, 16, 1)):
        super(frame_from_horizon, self).__init__(frame_shape, view_shape, block)
    def render(self, frame: np.ndarray):
        """Upload *frame*, run the projection kernel, and return the result.

        The returned array is the instance's reusable host buffer, so it is
        overwritten by the next call to ``render``.
        """
        # (Removed a stray no-op statement, `cuda.from_cuda_array_interface`,
        # which accessed an attribute without calling it.)
        self.imgIn_gpu = cuda.to_device(frame)
        # Launch the projection kernel over the full output view.
        self.function[self.grid_dim, self.block_dim](self.imgOut_gpu, self.imgIn_gpu, self.height, self.width, self.edge)
        self.imgOut_gpu.copy_to_host(self.imgOut_cpu)
        cuda.synchronize()
        return self.imgOut_cpu
| [
"32391509+davidliyutong@users.noreply.github.com"
] | 32391509+davidliyutong@users.noreply.github.com |
4c351b046695fbd164014df647be4c83f03eaa06 | 5ee0b8b4d3c2b30d2674cc28079f56fe191f2176 | /tests/integration/Common_test.py | 31fd20b9a2d95db7bc23e874d929fa99dea45f87 | [
"MIT"
] | permissive | dorrella/widget-rest | 280d22ff7d622c857570d8a1b76f352fc46d95eb | 6874fa1487cd1967707790ea365e4ace6f28f175 | refs/heads/main | 2023-06-12T03:10:30.193899 | 2021-07-03T19:30:59 | 2021-07-03T20:08:51 | 382,124,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import os
from pathlib import Path
from tornado.testing import AsyncHTTPTestCase
from WebApp.App import make_test_app
# Absolute directory containing this test module (symlinks resolved).
dir_path = Path(__file__).resolve().parent
# The widget fixture JSON files live two levels up, under json/.
json_path = dir_path / "../../json"
class TestMainHandlerSetup(AsyncHTTPTestCase):
    """Base test case that builds the test application and can seed it
    with widget fixtures on request."""

    def get_app(self):
        """Return a fresh test application for Tornado's test harness."""
        return make_test_app()

    def populate_db(self):
        """Seed the database with three widget fixtures.

        Each test starts with an empty database; this POSTs the contents
        of test1/test2/test3.json to the /widget endpoint.
        """
        for stem in ("test1", "test2", "test3"):
            fixture = json_path / (stem + ".json")
            with open(fixture, "r") as handle:
                payload = handle.read()
            self.fetch("/widget", method="POST", body=payload)
| [
"alexdorrell9@gmail.com"
] | alexdorrell9@gmail.com |
a54031a091dc8ef18fbd886a0d9a5de58b59c0fa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03326/s432961114.py | 004e333b7354de53b77847c25af19fb0cad2aa12 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | n,m = map(int, input().split())
ab = [list(map(int, input().split())) for _ in range(n)]
import itertools
nums = [0,1]
ll = list(itertools.product(nums,repeat=3))
res = 0
for i in ll:
temp =[]
for j in range(n):
if i[0]==0:
x = ab[j][0]
else:
x = -ab[j][0]
if i[1]==0:
y = ab[j][1]
else:
y = -ab[j][1]
if i[2] ==0:
z = ab[j][2]
else:
z = -ab[j][2]
temp.append(x+y+z)
tempp =list(sorted(temp,reverse=True))
res = max(res,sum(tempp[:m]))
print(res)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a511cf8f8dd6979263066a3d62441c835ddf669f | f090e7883e90fd96a4feab83703d702965efbf4a | /Blake/Chapter 1 - Python Basics/hello.py | d9ddb18ff18d1f6c03d68fd0a72001b19c9ca87c | [] | no_license | Kelley12/LearningPython | a83d09b155c6d79111f2830b44b8e49e6bbc1f2c | 0ffcd1541d5b152b3359bca679e577365825d7e0 | refs/heads/master | 2021-01-20T14:03:44.707519 | 2018-09-07T21:06:42 | 2018-09-07T21:06:42 | 90,556,594 | 0 | 0 | null | 2017-08-23T12:37:26 | 2017-05-07T19:31:40 | Python | UTF-8 | Python | false | false | 344 | py | # This is the hello world program from Chapter 1
print('Hellow world!')
print('What is your name?')
name = input()
print('It is nice to meet you, ' + name)
print('The length of your name is ' + str(len(name)))
print('What is your age?')
age = input()
print('In case you didn\'t know, you will be ' + str(int(age) + 1) + ' on your birthday.')
| [
"blakekelley127@gmail.com"
] | blakekelley127@gmail.com |
6f0fb9c49884e780a516de1d650e4a68ef9638bb | 8937c4d452c98699610923f76a395a2247f576df | /demo/download_demo_data.py | 849791f18789882a992f77b515e00bad9d872f31 | [] | no_license | mistycheney/MouseBrainAtlas | 812b204af06ed303f3c12d5c81edef50c8d9d1ed | bffbaa1ede9297084e64fc197716e63d5cb54275 | refs/heads/master | 2020-04-11T13:44:09.632311 | 2018-11-20T22:32:15 | 2018-11-20T22:32:15 | 20,377,173 | 3 | 9 | null | 2017-03-15T19:39:27 | 2014-06-01T12:42:08 | Jupyter Notebook | UTF-8 | Python | false | false | 3,974 | py | #! /usr/bin/env python
import sys, os
sys.path.append(os.path.join(os.environ['REPO_DIR'], 'utilities'))
from utilities2015 import *
from metadata import *
from data_manager import *
import argparse
# Command-line interface: a single optional flag for where to put the data.
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='This script downloads input data for demo.')

parser.add_argument("-d", "--demo_data_dir", type=str, help="Directory to store demo input data", default='demo_data')
args = parser.parse_args()

# demo_data_dir = '/home/yuncong/Brain/demo_data/'
def download_to_demo(fp):
    """Download *fp* from the public S3 bucket into the demo data dir.

    *fp* is a path relative to the bucket root; the same relative layout is
    mirrored under ``args.demo_data_dir``. Returns the local file path.
    """
    dest_root = args.demo_data_dir
    base_url = 'https://s3-us-west-1.amazonaws.com/mousebrainatlas-data/'
    local_fp = os.path.join(dest_root, fp)
    # -N: only re-download when the remote copy is newer than the local one.
    execute_command('wget -N -P \"%s\" \"%s\"' % (os.path.dirname(local_fp), base_url + fp))
    return local_fp
##### For registration demo. #####
fp = DataManager.get_sorted_filenames_filename(stack='DEMO999')
rel_fp = relative_to_local(fp, local_root=DATA_ROOTDIR)
download_to_demo(rel_fp)
# The anchor filename is needed below to resolve anchor-dependent paths.
fp = DataManager.get_anchor_filename_filename(stack='DEMO999')
rel_fp = relative_to_local(fp, local_root=DATA_ROOTDIR)
anchor_fp_demo = download_to_demo(rel_fp)
anchor_fn = DataManager.load_data(anchor_fp_demo, filetype='anchor')
fp = DataManager.get_section_limits_filename_v2(stack='DEMO999', anchor_fn=anchor_fn)
rel_fp = relative_to_local(fp, local_root=DATA_ROOTDIR)
download_to_demo(rel_fp)
fp = DataManager.get_cropbox_filename_v2(stack='DEMO999', prep_id=2, anchor_fn=anchor_fn)
rel_fp = relative_to_local(fp, local_root=DATA_ROOTDIR)
download_to_demo(rel_fp)
download_to_demo(os.path.join('CSHL_simple_global_registration', 'DEMO999_T_atlas_wrt_canonicalAtlasSpace_subject_wrt_wholebrain_atlasResol.bp'))
# Download subject detection maps
for name_s in ['3N_R', '4N_R', '12N']:
    fp = DataManager.get_score_volume_filepath_v3(stack_spec={'name':'DEMO999', 'detector_id':799, 'resolution':'10.0um', 'vol_type':'score'}, structure=name_s)
    rel_fp = relative_to_local(fp, local_root=ROOT_DIR)
    download_to_demo(rel_fp)
    fp = DataManager.get_score_volume_origin_filepath_v3(stack_spec={'name':'DEMO999', 'detector_id':799, 'resolution':'10.0um', 'vol_type':'score'}, structure=name_s, wrt='wholebrain')
    rel_fp = relative_to_local(fp, local_root=ROOT_DIR)
    download_to_demo(rel_fp)
# Download atlas
for name_s in ['3N_R', '4N_R', '3N_R_surround_200um', '4N_R_surround_200um','12N', '12N_surround_200um']:
    fp = DataManager.get_score_volume_filepath_v3(stack_spec={'name':'atlasV7', 'resolution':'10.0um', 'vol_type':'score'}, structure=name_s)
    rel_fp = relative_to_local(fp, local_root=ROOT_DIR)
    download_to_demo(rel_fp)
    fp = DataManager.get_score_volume_origin_filepath_v3(stack_spec={'name':'atlasV7', 'resolution':'10.0um', 'vol_type':'score'}, structure=name_s, wrt='canonicalAtlasSpace')
    rel_fp = relative_to_local(fp, local_root=ROOT_DIR)
    download_to_demo(rel_fp)
##### For visualization demo. #####
# Download images
for sec in range(221, 238):
    fp = DataManager.get_image_filepath_v2(stack='DEMO999', prep_id=2, resol='raw', version='NtbNormalizedAdaptiveInvertedGammaJpeg', section=sec)
    rel_fp = relative_to_local(fp, local_root=DATA_ROOTDIR)
    download_to_demo(rel_fp)
# Intensity volume plus its origin file.
fp = DataManager.get_original_volume_filepath_v2(stack_spec={'name':'DEMO999', 'resolution':'10.0um', 'vol_type':'intensity', 'prep_id':'wholebrainWithMargin'}, structure=None)
rel_fp = relative_to_local(fp, local_root=ROOT_DIR)
download_to_demo(rel_fp)
fp = DataManager.get_original_volume_origin_filepath_v3(stack_spec={'name':'DEMO999', 'resolution':'10.0um', 'vol_type':'intensity', 'prep_id':'wholebrainWithMargin'}, structure=None)
rel_fp = relative_to_local(fp, local_root=ROOT_DIR)
download_to_demo(rel_fp)
download_to_demo(os.path.join('CSHL_simple_global_registration', 'DEMO999_registered_atlas_structures_wrt_wholebrainXYcropped_xysecTwoCorners.json'))
"cyc3700@gmail.com"
] | cyc3700@gmail.com |
7977bb1ce1d9a579b1e649b597c8cf721be29e0f | d61f93228649430cf38dd6b9cb0c66e870a10a9d | /scripts/x86_example.2.py | 1a02b428954b9f038027a5c8d3dcc1cc0ee77f2b | [] | no_license | kr1tzy/unicorn_scripts | adf22da64d995aa37f75fbfd0062db1ac2b5df7e | 509504e92cae4cdc195d3af02c9a42de52f4764f | refs/heads/master | 2023-03-17T20:44:00.270903 | 2021-03-16T04:57:39 | 2021-03-16T04:57:39 | 210,704,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | """
Code + Stack
rax starts with 0x1 (set on line 41)
rbx starts with 0x2 (set on line 42)
rcx starts with 0x3 (set on line 43)
```
push rcx [push rcx onto the stack]
pop rax rax: 0x3
add rax, rbx rax: 0x5
```
"""
from unicorn import *
from unicorn.x86_const import *
CODE = b"\x51\x58\x48\x01\xD8"
BASE = 0x004000000
STACK_ADDR = 0x00200000
STACK_SIZE = 1024*1024
try:
print("-" * 32)
print("Emulating x86_64")
print(" - yes stack")
print(" - no data")
mu = Uc(UC_ARCH_X86, UC_MODE_64)
# Map out memory
mu.mem_map(BASE, 1024*1024)
mu.mem_map(STACK_ADDR, STACK_SIZE)
# Write the code to the base
mu.mem_write(BASE, CODE)
# Set the starting value of the stack pointer
mu.reg_write(UC_X86_REG_RSP, STACK_ADDR + STACK_SIZE)
# Set the registers
mu.reg_write(UC_X86_REG_RAX, 0x1)
mu.reg_write(UC_X86_REG_RBX, 0x2)
mu.reg_write(UC_X86_REG_RCX, 0x3)
# Emulate
mu.emu_start(BASE, BASE + len(CODE))
# Read values
r_rax = mu.reg_read(UC_X86_REG_RAX)
r_rbx = mu.reg_read(UC_X86_REG_RBX)
r_rcx = mu.reg_read(UC_X86_REG_RCX)
print("-" * 32)
print("Result")
print(f" - rax: {r_rax}")
print(f" - rbx: {r_rbx}")
print(f" - rcx: {r_rcx}")
except Exception as e:
print(f"err: {e}")
| [
"noahkritz@gmail.com"
] | noahkritz@gmail.com |
e5d11a77ce1c989514ae0215922f88d8f5fb9851 | 137b2969323f7fb20cf4b72bf24ff98339a410c8 | /tutorial/2/cal_pi_python.py | c9c4f802a962d65871cd0ef97a9fb7dd63ca74cd | [] | no_license | kangheeyong/STUDY-make-lda-lib | c91b6d9a5bff7b25a0c00b63e61f93fbd60d2a8e | 0f658032cf63922a0a5fc2fb5f34b8f22e97c94f | refs/heads/master | 2021-02-12T06:27:27.900312 | 2020-04-03T10:56:21 | 2020-04-03T10:56:21 | 244,568,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | # calc_pi.py
def recip_square(i):
return 1. / i ** 2
def approx_pi(n=10000000):
val = 0.
for k in range(1, n + 1):
val += recip_square(k)
return (6 * val) ** .5
if __name__ == '__main__':
approx_pi()
| [
"cagojeiger@naver.com"
] | cagojeiger@naver.com |
36c3cc1e845f99a9dc4bf7fe62ea02ee0001f7ec | cf4b4d6130a535093ce16a6fb55f98ba270e6b68 | /utils/helper.py | 5d09ffdff05ad53ec133d06b5036365caeeb09ef | [] | no_license | RafaelMaldivas/Mercadinho | 98786e611b6d7982b04137ba5aa0ae26a8bd3221 | 5dd88070043c5bae7e78681f577bf1026c614e61 | refs/heads/master | 2023-09-03T14:10:54.750014 | 2021-11-03T03:51:09 | 2021-11-03T03:51:09 | 424,084,233 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | def formata_float_str_moeda(valor: float) -> str:
return f' R$ {valor:,.2f} '
def formata_qtd_gramas(valor: int) -> str:
return f'{valor} gr'
def formata_qtd_ml(valor: int) -> str:
return f'{valor} mL'
| [
"rafaelmaldivas@gmail.com"
] | rafaelmaldivas@gmail.com |
5ca60b7b3e609676fcc11e9e3be95cb89d25509d | 24e9c44e8c6ed93c1d977b32e1b5db125c3ef99f | /FinalProject/FinalProject/spiders/FinalProject.py | 542dee394d515e011ebb16e5a9a6269fa1571151 | [] | no_license | MariaMasood-1/IDCE302-01-S21_Final_Project | 0e55a9fbb71c4ae1051e86f52c1e40149453855b | f91221845b0d239e87280a7cfc4cf6452769490b | refs/heads/main | 2023-04-01T12:39:24.626173 | 2021-04-09T16:00:54 | 2021-04-09T16:00:54 | 355,973,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | # Created by: Maria Masood
# Date: 04/07/2021
# Description: built a spider to scrape the https://coronalenergy.com/solar-portfolio/?tag=Utility%20Scale to get
#the list of solar projects in United states
#importing libraries
import scrapy
from ..items import FinalprojectItem
class Final_Project(scrapy.Spider): #inheriting from already written class in spider
name = 'projects'
start_urls = [
'https://coronalenergy.com/solar-portfolio/?tag=Utility%20Scale'
]
# give a list of url that we want to scrape
# the class spider that we inherit expect two variable assignment that is name and url
def parse(self, response): #parse is called whenever the crawler(spider) has successfully crawled the url
items = FinalprojectItem()
content_information = response.css('div.col.col-1')
for data in content_information: #looping for all the title values
title = data.css('h4.textdiv_title::text').extract() #extracting title from the url using css selectors
items['title'] = title
yield items #the source code from the url will be held in response. response contains the source code of our website
| [
"mariamasood@Marias-MacBook-Air.local"
] | mariamasood@Marias-MacBook-Air.local |
879b16bbb01c016b6e984839fa4a918be7adb5cf | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Stack/largestRectangleinHistogram.py | 39ee39bff0455b2a950049c9ecd1446682f4e05b | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 641 | py | """
LeetCode Problem: 84. Largest Rectangle in Histogram
Link: https://leetcode.com/problems/largest-rectangle-in-histogram/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(N)
Space Complexity: O(N)
"""
class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
heights.append(0)
stack = [-1]
ans = 0
for i in range(len(heights)):
while heights[i] < heights[stack[-1]]:
h = heights[stack.pop()]
w = i - stack[-1] - 1
ans = max(ans, h * w)
stack.append(i)
return ans | [
"adibshakib@gmail.com"
] | adibshakib@gmail.com |
d565798d45b6dc2a1ddcc9972a30cb2209295433 | 4ba7037a9102727c02f6ec967c1f2c745dd7056b | /crescente_decrescente.py | 075b9760726f425adf880a61b4a2ff83164b4e4a | [
"MIT"
] | permissive | caique-alencar/coursera-python | 546b7a331a179ce148780ee7ae71ff1634525b0b | 57e669f8b7b4b79a4cb10ebd968df47fd6f93877 | refs/heads/master | 2020-04-23T11:48:32.510949 | 2019-08-28T22:04:30 | 2019-08-28T22:04:30 | 171,148,645 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # Função que verifica se os números estão em ordem decrescente
def ordem_decrescente:
decrescente = True
anterior = int(input("Digite o primeiro número da sequência: "))
valor = 1
while valor != 0 and decrescente:
valor = int(input("Digite o próximo número da sequência: "))
if valor > anterior:
decrescente = False
anterior = valor
if decrescente:
print("A sequência está em ordem decrescente!")
else:
print("A sequência não está em ordem decrescente!")
# Função que verifica se os números estão em ordem crescente
def verifica_ordem_crescente(x, y, z):
if x < y < z:
return "Ordem crescente"
else:
return "Não está em ordem crescente"
| [
"caiquealeencar@gmail.com"
] | caiquealeencar@gmail.com |
796354a3a2fb83e7672227b9ac336b3519d3659f | 0c02c3a602c264e3203f4279fc3487abcf97d537 | /Helper/CNN/convolution.py | e5b1957f9c78f7db96d1fc5bc99c3f58ae92ac8d | [] | no_license | suhaneshivam/Image-classification | ff2a43b06647ad00f13c9524366cafea8703f301 | b81d6631141bb824b0a1a8fdd9ee07973a7f0c2c | refs/heads/main | 2023-04-24T17:10:02.638080 | 2021-05-08T18:15:52 | 2021-05-08T18:15:52 | 364,997,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import cv2
import numpy
def Conv2d(image ,kernal):
(kh ,kw) = kernal.shape[:2]
(ih ,iw) = image.shape[:2]
pad = (kh -1) //2
output = np.zeros((ih ,iw) ,dtype = "float32")
image = cv2.copyMakeBorder(image ,pad ,pad ,pad ,pad ,cv2.BORDER_REPLICATE)
for y in range(pad ,ih + pad):
for x in range(pad , iw + pad):
roi = image[y-pad:y+pad+1 ,x-pad:x+pad+1 ]
value = (roi * kernal).sum()
output[y-pad ,x-pad] = value
return output
| [
"suhaneshivam82@gmail.com"
] | suhaneshivam82@gmail.com |
7c0f136ffcd56de3056adde9a1237a0f37c7128a | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/storagesync/v20190601/cloud_endpoint.py | 5c78675b69b623e9ab26c75f703f023c90e7a642 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,839 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['CloudEndpoint']
class CloudEndpoint(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_file_share_name: Optional[pulumi.Input[str]] = None,
cloud_endpoint_name: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_resource_id: Optional[pulumi.Input[str]] = None,
storage_account_tenant_id: Optional[pulumi.Input[str]] = None,
storage_sync_service_name: Optional[pulumi.Input[str]] = None,
sync_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Cloud Endpoint object.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] azure_file_share_name: Azure file share name
:param pulumi.Input[str] cloud_endpoint_name: Name of Cloud Endpoint object.
:param pulumi.Input[str] friendly_name: Friendly Name
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] storage_account_resource_id: Storage Account Resource Id
:param pulumi.Input[str] storage_account_tenant_id: Storage Account Tenant Id
:param pulumi.Input[str] storage_sync_service_name: Name of Storage Sync Service resource.
:param pulumi.Input[str] sync_group_name: Name of Sync Group resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['azure_file_share_name'] = azure_file_share_name
if cloud_endpoint_name is None:
raise TypeError("Missing required property 'cloud_endpoint_name'")
__props__['cloud_endpoint_name'] = cloud_endpoint_name
__props__['friendly_name'] = friendly_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['storage_account_resource_id'] = storage_account_resource_id
__props__['storage_account_tenant_id'] = storage_account_tenant_id
if storage_sync_service_name is None:
raise TypeError("Missing required property 'storage_sync_service_name'")
__props__['storage_sync_service_name'] = storage_sync_service_name
if sync_group_name is None:
raise TypeError("Missing required property 'sync_group_name'")
__props__['sync_group_name'] = sync_group_name
__props__['backup_enabled'] = None
__props__['last_operation_name'] = None
__props__['last_workflow_id'] = None
__props__['name'] = None
__props__['partnership_id'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storagesync/latest:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20170605preview:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180402:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180701:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20181001:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190201:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190301:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20191001:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20200301:CloudEndpoint")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(CloudEndpoint, __self__).__init__(
'azure-nextgen:storagesync/v20190601:CloudEndpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'CloudEndpoint':
"""
Get an existing CloudEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return CloudEndpoint(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="azureFileShareName")
def azure_file_share_name(self) -> pulumi.Output[Optional[str]]:
"""
Azure file share name
"""
return pulumi.get(self, "azure_file_share_name")
@property
@pulumi.getter(name="backupEnabled")
def backup_enabled(self) -> pulumi.Output[str]:
"""
Backup Enabled
"""
return pulumi.get(self, "backup_enabled")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly Name
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="lastOperationName")
def last_operation_name(self) -> pulumi.Output[Optional[str]]:
"""
Resource Last Operation Name
"""
return pulumi.get(self, "last_operation_name")
@property
@pulumi.getter(name="lastWorkflowId")
def last_workflow_id(self) -> pulumi.Output[Optional[str]]:
"""
CloudEndpoint lastWorkflowId
"""
return pulumi.get(self, "last_workflow_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partnershipId")
def partnership_id(self) -> pulumi.Output[Optional[str]]:
"""
Partnership Id
"""
return pulumi.get(self, "partnership_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
CloudEndpoint Provisioning State
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="storageAccountResourceId")
def storage_account_resource_id(self) -> pulumi.Output[Optional[str]]:
"""
Storage Account Resource Id
"""
return pulumi.get(self, "storage_account_resource_id")
@property
@pulumi.getter(name="storageAccountTenantId")
def storage_account_tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
Storage Account Tenant Id
"""
return pulumi.get(self, "storage_account_tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
e5fc77321af5eda8618f5073fec8b70ecdefc885 | 3bc2bd8137a6d32925a38ea6929be60cb977b2a6 | /dags/fncore/tasks/neo4j_writer.py | 1730a85dae045804a6a3b16ae6f15a0501614e95 | [] | no_license | shermanelee92/hadoop-spark-hive | 0d076997a25206ec939a2cfe0975b1f1acada9b0 | f8b7e4c96fbdb19011c789f5ed3a8633eeefc042 | refs/heads/master | 2022-11-15T00:30:50.970724 | 2019-12-17T09:31:45 | 2019-12-17T09:31:45 | 212,046,752 | 1 | 1 | null | 2022-11-11T07:26:18 | 2019-10-01T08:26:57 | Python | UTF-8 | Python | false | false | 15,712 | py | # coding=utf-8
"""
This module defines the airflow function to insert the data into the neo4j
database.
For each node list, the graph specification is parsed to identify which fields
to hide (not to insert into the database), which fields that should be indexed
for searching (and is replicated as a `_label` field), and the corresponding
friendly names for the fields (as the data may contain non-readable names). The
primary key field is also replicated as `_node_id` field. `_node_id` and
`_label` together are used in searching for the suggestions for the front end.
A label `_searchable` for the nodes is created to enable searching across
different types of entities. Correspondingly, the `_canonical_id` field is
indexed and constrained to be unique with respect to the `_searchable` label.
Similarly, the `_node_id` and `_label` fields are also indexed to enable for
faster searching.
The nodes are keyed by the `_canonical_id` field, while the edges /
relationships are keyed by `_canonical_id_source`, `_canonical_id_target` and
the field specified by the graph specification (if any).
The database is imported into the HDFS at a location specified by the
environment variable ``PIPELINE_DATA_PATH`` and in a sub-directory
corresponding to the graph name. Within the sub-directory, it is further split
into the directories:
- `tables`: which contains the imported tables from the database
- `node_list`: which contains the tables corresponding to each node list as
specified in the graph specification
- `edge_list`: which contains the tables corresponding to each edge list as
specified in the graph specification
- `node_list_resolved`: which contains the tables of the node lists with
entries referring to the same entity being resolved to have the same
canonical id
- `edge_list_resolved`: which contains the tables of the edge lists with
entries referring to the same entity being resolved to have the same
canonical id
Each node / edge list is saved with an unique uuid that is specified in the
graph specification, and contains the fields as specified in the graph
specification.
Please refer to the documentation for more information regarding the graph
specifications
"""
# pylint: disable=import-error
# pylint: disable=no-name-in-module
import logging
import os
from pyspark.sql import SQLContext
from pyspark.sql.functions import upper
from fncore.utils.graph_specification import (get_fields_with_property,
get_friendly_name_mapping)
from fncore.utils.hdfs import get_hdfs_info
from fncore.utils.neo4j_conf import get_neo4j_context
from fncore.utils.neo4j_tools import (commit_graphdb, create_index,
create_uniqueness_constraint,
generate_merge_edge_statement,
generate_merge_node_statement,
get_indexes)
from fncore.utils.spark_tools import get_spark_context
# pylint: disable=too-many-arguments
def push_edges(conf, tags, keyname, rows, mapping, hiddenfields, noappendfields):
"""
Given a neo4j configurations, insert the edges specified into
the neo4j database
:param conf: dictionary containing the configuration parameters of the
connection to the neo4j database
:type conf: dict
:param tags: a string describing the type of the edges to be inserted
:type tags: str
:param keyname: a string describing the property to use as primary key for
the edge
:type keyname: str
:param rows: a spark dataframe row describing the edges to be inserted.
The row must contain `_canonical_id_source` and
`_canonical_id_target` which contains the canonical ids of the
source and target nodes respectively
:type rows: list(pyspark.sql.Row)
:param mapping: a dictionary mapping from the field names (key) to the
friendly names (value)
:type mapping: dict
:param hiddenfields: fields of the data that should not be inserted into
the database
:type hiddenfields: list
:param noappendfields: fields of the data that will be overwritten with new
values when the function encounters another entry of an entity that
already exist in the database
:type noappendfields: list
:return: Does not return anything
:rtype: None
"""
# pylint: disable=too-many-locals
# Converts the rows to a set of edges to be added into the Neo4j database
batchsize = conf.get('max_batchsize', 500)
maxretry = conf.get('max_retries', 5)
statements = []
with get_neo4j_context(conf['uri']) as neo_ctx:
count = batchsize
nodes_used = {}
for row in rows:
source_id = row['_canonical_id_source']
target_id = row['_canonical_id_target']
rowdict = row.asDict()
for key, value in rowdict.items():
rowdict[key] = unicode(value)
statement = generate_merge_edge_statement(rowdict,
hiddenfields,
noappendfields,
mapping,
keyname,
tags)
if source_id in nodes_used or target_id in nodes_used:
count = batchsize
nodes_used = {}
commit_graphdb(neo_ctx, statements, maxretry)
statements = []
statements.append((rowdict, statement))
nodes_used[source_id] = 1
nodes_used[target_id] = 1
count -= 1
if count <= 0:
count = batchsize
nodes_used = {}
commit_graphdb(neo_ctx, statements, maxretry)
statements = []
commit_graphdb(neo_ctx, statements, maxretry)
# pylint: disable=too-many-locals
def push_nodes(conf, tags, rows, mapping, hiddenfields, noappendfields):
"""
Given a neo4j configurations, insert the nodes specified into
the neo4j database
:param conf: dictionary containing the configuration parameters of
the connection to the neo4j database
:type conf: dict
:param tags: a list of strings describing the type of the nodes to be
inserted
:type tags: list
:param rows: a spark dataframe row describing the nodes to be inserted.
The row must contain `_canonical_id` which contains the canonical
ids of the nodes
:type rows: list(pyspark.sql.Row)
:param mapping: a dictionary mapping from the field names (key) to the
friendly names (value)
:type mapping: dict
:param hiddenfields: fields of the data that should not be inserted into the
database
:type hiddenfields: list
:param noappendfields: fields of the data that will be overwritten with new
values when the function encounters another entry of an entity that
already exist in the database
:type noappendfields: list
:return: Does not return anything
:rtype: None
"""
# Assumptions
# Configuration providing neo4j server address and
# authentication is provided, spark sql rows will be passed in
# Notes:
# Possible points of failure include
# - not able to get the context
# - wrong configuration
# - neo4j server down
# - host network down
# - authentication error
# - tags does not exist [CHECKED]
# - unable to start neo4j cypher transaction
# - constrained property value does not exist [CHECKED]
# - Node properties with spaces [CHECKED]
# - Node properties that are numbers
# - Empty node property name [CHECKED]
# - Empty node property value [CHECKED]
# - Empty tag value, or value is not a string [CHECKED]
# - Fail to commit transaction
# - nodes exist and are locked for modifications
batchsize = conf.get('max_batchsize', 500)
maxretry = conf.get('max_retries', 5)
statements = []
with get_neo4j_context(conf['uri']) as neo_ctx:
count = batchsize
for row in rows:
rowdict = row.asDict()
for key, value in rowdict.items():
rowdict[key] = unicode(value)
statement = generate_merge_node_statement(rowdict,
hiddenfields,
noappendfields,
mapping,
tags)
statements.append((rowdict, statement))
count -= 1
if count <= 0:
count = batchsize
commit_graphdb(neo_ctx, statements, maxretry)
statements = []
commit_graphdb(neo_ctx, statements, maxretry)
def write_neo4j_nodes(graph_specification, spark_config):
"""
Given the graph specification, spark and neo4j configurations, insert the
nodes in the data (specified in the graph specification) into the neo4j
database. This is used as an airflow task
:param graph_specification: graph specification in dictionary format
:type graph_specification: dict
:param spark_config: Spark config.
:type spark_config: fncore.utils.spark_tools.SparkConfFactory
:return: Does not return anything
:rtype: None
"""
# pylint: disable=too-many-locals
data_format, graph_data_path = get_hdfs_info(graph_specification)
# Use graph specification's neo4j connection
neo_config = {
'uri': graph_specification['graph_uri'],
'max_retries': 5,
'max_batchsize': 20000
}
with get_spark_context(spark_config.create()) as spark_ctx:
sql_context = SQLContext(spark_ctx)
# create and save node lists
node_list = graph_specification.get('node_lists')
count = 0
if node_list:
for node_kind in node_list:
count += 1
logging.info(
"%d/%d: %s",
count,
len(node_list),
node_kind['safe_name']
)
# Load in the node list with duplicates dropped
# and invalid entries
data = sql_context\
.read.format(data_format)\
.option('header', 'true')\
.option('inferschema', 'true')\
.load(os.path.join(graph_data_path['node_list_resolved'],
node_kind['safe_name']))\
.dropna(how='any', subset=['_canonical_id'])
# Get the friendly name mapping
mapping = get_friendly_name_mapping(node_kind)
# Get the hidden fields
hiddenfields = get_fields_with_property(
node_kind, prop='hidden')
# Get the labelled fields
labelledfields = get_fields_with_property(
node_kind, prop='use_as_label')
indexfields = ['_label' if k == 0 else '_label_' + str(k)
for k in range(len(labelledfields))]
labelledfields.append(
node_kind['index_column'].get('safe_name'))
indexfields.append('_node_id')
# Drop invalid data in the fields that need to be indexed
data = data.dropna(how='any', subset=labelledfields)
# Ignore node id and label fields
noappendfields = indexfields + [labelledfields[-1]]
# Update the data frame to have the labels
for oldfield, newfield in zip(labelledfields, indexfields):
data = data.withColumn(newfield, upper(data[oldfield]))
# Setup the node constraints and indices on the labels
tags = node_kind['tags'] + ['_searchable']
with get_neo4j_context(neo_config['uri']) as neo_ctx:
for tag in tags:
create_uniqueness_constraint(neo_ctx, tag, '_canonical_id')
already_indexed = get_indexes(neo_ctx, '_searchable')
for curindex in indexfields:
if curindex not in already_indexed:
create_index(neo_ctx, '_searchable', curindex)
data.foreachPartition(
lambda x, t=tags, m=mapping, h=hiddenfields, n=noappendfields:
push_nodes(neo_config, t, x, m, h, n)
)
def write_neo4j_edges(graph_specification, spark_config):
"""
Given the graph specification, spark and neo4j configurations, insert the
edges in the data (specified in the graph specification) into the neo4j
database. This is used as an airflow task
:param graph_specification: graph specification in dictionary format
:type graph_specification: dict
:param spark_config: Spark config.
:type spark_config: fncore.utils.spark_tools.SparkConfFactory
:return: Does not return anything
:rtype: None
"""
# pylint: disable=too-many-locals
data_format, graph_data_path = get_hdfs_info(graph_specification)
# Use graph specification's neo4j connection
neo_config = {
'uri': graph_specification['graph_uri'],
'max_retries': 5,
'max_batchsize': 20000
}
with get_spark_context(spark_config.create()) as spark_ctx:
sql_context = SQLContext(spark_ctx)
# create and save edge lists
edge_list = graph_specification.get('edge_lists')
count = 0
if edge_list:
for edge_kind in edge_list:
count += 1
logging.info("# " +
str(count) +
"/" +
str(len(edge_list)) +
": " +
edge_kind['safe_name'])
# Load in the edge list with duplicates dropped
data = sql_context\
.read.format(data_format)\
.load(os.path.join(graph_data_path['edge_list_resolved'],
edge_kind['safe_name']))
# Get the friendly name mapping
mapping = get_friendly_name_mapping(edge_kind)
# Get the hidden fields
hiddenfields = get_fields_with_property(
edge_kind, prop='hidden')
# Drops duplicates
keyname = None
keylist = ['_canonical_id_source', '_canonical_id_target']
if 'index_column' in edge_kind:
keyname = edge_kind['index_column']\
.get('safe_name', None)
if keyname:
keylist.append(keyname)
data = data.dropDuplicates(keylist)\
.dropna(how='any', subset=keylist)
data = data.repartition(1000)
logging.info("Count: " + str(data.count()))
# Insert the edges into the Neo4j database
tags = edge_kind['tags'] if 'tags' in edge_kind else 'related'
data.foreachPartition(
lambda x, t=tags, key=keyname, m=mapping, h=hiddenfields, n=keylist:
push_edges(neo_config, t, key, x, m, h, n)
)
| [
"shermaneleeqianhui@gmail.com"
] | shermaneleeqianhui@gmail.com |
d21803cc3e6025eb6eac8884a3604a3dfc0f2cfe | b0d7d91ccb7e388829abddb31b4aa04a2f9365cd | /archive-20200922/binary-search/first_bad_version2.py | 62556ed0f8ac0ef51c7ea5298bc5dd50a4ec3b3f | [] | no_license | clarkngo/python-projects | fe0e0aa02896debe82d1e9de84b1ae7d00932607 | 139a20063476f9847652b334a8495b7df1e80e27 | refs/heads/master | 2021-07-02T10:45:31.242041 | 2020-10-25T08:59:23 | 2020-10-25T08:59:23 | 188,570,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | # https://leetcode.com/problems/first-bad-version/submissions/
# You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version of your product fails the quality check. Since each version is developed based on the previous version, all the versions after a bad version are also bad.
# Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the following ones to be bad.
# You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to find the first bad version. You should minimize the number of calls to the API.
# Example:
# Given n = 5, and version = 4 is the first bad version.
# call isBadVersion(3) -> false
# call isBadVersion(5) -> true
# call isBadVersion(4) -> true
# Then 4 is the first bad version.
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
import bisect
class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
self.__getitem__ = isBadVersion
return bisect.bisect_left(self, True, 1, n)
import unittest
a = Solution()
class Test(unittest.TestCase):
def test(self):
self.assertEqual(a.firstBadVersion()) | [
"clarkngo@gmail.com"
] | clarkngo@gmail.com |
21418667a8fe05896963725a6a68685b6845f11a | 468a20df682ba43484f1953797f343011f77d7c1 | /app.py | 31669bddb87e95ebaf2f6c422b3575cae13d671e | [
"MIT"
] | permissive | hchen13/capshelper | 447006d363420e87a1ca4389ab1388b496495cdd | aea799c07064369642c3db557e939634c6c5da70 | refs/heads/master | 2020-03-21T21:09:30.904144 | 2018-06-29T03:20:30 | 2018-06-29T03:20:30 | 139,049,101 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | import json
import os
from butler import Butler
from butler.visualize import *
from downloader import *
from settings import ROOT_DIR
butler = Butler()
downloader = Downloader()
main_coins = 'USDT BTC ETH'.split(" ")
def update_watchlist():
watchlist = []
for counter in main_coins:
coins = downloader.get_top_coins(counter, limit=20)
watchlist += coins
return list(set(watchlist))
def get_watchlist(from_cache=True):
cache_file = "watchlist.json"
cache_path = os.path.join(ROOT_DIR, cache_file)
if from_cache and os.path.exists(cache_path):
return json.load(open(cache_path, 'r'))
watchlist = update_watchlist()
json.dump(watchlist, open(cache_path, 'w'))
return watchlist
def collect(base, counter):
base = base.upper()
counter = counter.upper()
ts = butler.latest_timestamp(base, counter)
if ts is None:
ts = datetime(2017, 2, 1, 0, 0).timestamp()
data = downloader.get_candlesticks(base, counter, start=ts)
if len(data):
butler.save_candlesticks(data)
butler.update_indicators(base, counter)
def single_run():
watchlist = get_watchlist(from_cache=True)
for counter in main_coins:
for base in watchlist:
counter = counter.upper()
base = base.upper()
if base == counter:
continue
collect(base, counter)
def prepare_train_data(path):
train_end = datetime(2018, 6, 1, 23, 59).timestamp()
valid_end = datetime(2018, 6, 10, 23, 59).timestamp()
test_end = datetime.now().timestamp()
butler.generate_train_files(path, 'train', end=train_end)
butler.generate_train_files(path, 'valid', start=train_end + 1, end=valid_end)
butler.generate_train_files(path, 'test', start=valid_end + 1, end=test_end)
if __name__ == '__main__':
# get_watchlist(False)
# single_run()
prepare_train_data('data/')
| [
"gradschool.hchen13@gmail.com"
] | gradschool.hchen13@gmail.com |
63e2616cad769cc31ffedd1ff5bb10567e0943e6 | 33312d4112c506194476c8f74c6f861b0aa2c36d | /setup.py | a26143240b12b3d22a006500d2580f19cbe7d105 | [
"MIT"
] | permissive | armenchester/check_spell | 33b26cda5abf33b762729d351389828c79b2aef0 | 3c23e8978a820f8dfdbac70274c2c6e7effddf5f | refs/heads/master | 2020-05-03T05:48:08.883659 | 2019-03-29T18:39:11 | 2019-03-29T18:39:11 | 178,454,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import setuptools
setuptools.setup(
name='check_spell',
version='0.1',
description='Spell checker',
url='https://github.com/armenchester/check_spell',
author='Armen Khachatryan',
author_email='armen.khachatryan88@gmail.com',
license='MIT',
packages=['check_spell'],
include_package_data=True
)
| [
"armen.khachatryan@click2sure.co.za"
] | armen.khachatryan@click2sure.co.za |
d1a8bf25ea2b3f1f66c839d53aa183941896b335 | 8560321b3503881b1403d6afe4bd926a0e997765 | /Inheritance/ChineseChef.py | 36732249734f1d214467460608a466b4b80768cb | [] | no_license | marzan-666/Learn-Python-Giraffe-Academy | ea08fff94c5306c1cbc8b4d38a19bcc9c5d83a07 | dfe55342e5d5530056758fb6c71835b497e07c27 | refs/heads/main | 2023-01-13T16:18:31.154473 | 2020-10-20T07:32:24 | 2020-10-20T07:32:24 | 304,973,960 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | '''class ChineseChef:
def make_chicken(self):
print("The chef makes a chicken ")
def make_salad(self):
print("The chef makes a salad ")
def make_special_dish(self):
print("The chef makes am orange chicken ")
def make_fried_rice(self):
print("The chef makes fried rice ")'''
#--------------------------------------------Inheritance---------------------------------------------------
from Chef import Chef
class ChineseChef(Chef):
def make_fried_rice(self):
print("The chef makes fried rice ")
def make_special_dish(self):
print("The chef makes am orange chicken ") | [
"noreply@github.com"
] | marzan-666.noreply@github.com |
edd291d16f305f9817ef9f141bdbff8329a968ab | 3ccf564663ca3449b3f9969259e11af9cc822ee6 | /bin/deprecated/auto_submit.py | 4682c9d8bb3aaef063dbfdc46354578ea2480ae8 | [
"NCSA"
] | permissive | DarkEnergySurvey/opstoolkit | bf1e08456e197f9742e03b98fcaae1a2ffd4490c | a180dba181fcddf38134a120f9a7dc26c7bb27e9 | refs/heads/master | 2023-04-20T07:59:30.600730 | 2021-05-06T20:35:33 | 2021-05-06T20:35:33 | 279,697,107 | 0 | 0 | NCSA | 2021-05-06T20:35:34 | 2020-07-14T21:30:55 | Python | UTF-8 | Python | false | false | 1,395 | py | #! /usr/bin/env python
from sys import exit
from argparse import ArgumentParser
from despydb import DesDbi
from opstoolkit import common
# Stop current cron if still submitting previous jobs from previous cron
common.stop_if_already_running()
# Create command line arguments
parser = ArgumentParser(description=__doc__)
parser.add_argument('--section','-s',required=True,help = "e.g., db-desoper or db-destest")
parser.add_argument('--pipeline','-p',required=True,help = "e.g.,firstcut,supercal,precal,preskytmp,finalcut,coadd")
parser.add_argument('--kwargs','-k',required=False,help = "list of key/value pairs within string,e.g.,kwarg1=value1,kwarg2=value2")
parser.add_argument('--show_kwargs',required=False,action='store_true',help = "Displays available kwargs for pipeline.")
args = parser.parse_args()
# Dynamically load pipeline module
load_pipeline = common.load_pipeline(args.pipeline,args.section)
# If kwargs specified, register them with class
if args.kwargs:
kwargs_dict = {}
for item in args.kwargs.split(','):
k,v = item.split('=')
kwargs_dict[k]=v
for key,val in kwargs_dict.iteritems():
load_pipeline.__dict__[key] = val
# Print available kwargs
if args.show_kwargs:
print "Available kwargs..."
print load_pipeline.show_kwargs()
exit()
# Execute run function of pipeline class
run_pipeline = load_pipeline.run()
| [
"gruendl@illinois.edu"
] | gruendl@illinois.edu |
565eee867d5f4d638d1bf7d170afbac787e9b075 | 072ce9b6df838155351ebda46fa4d0640d1538fb | /instagram_scraper.py | 8da917f6c66587a228df5bc44a44955cce9f3e8d | [] | no_license | andrebto/scrapper | fc36af9f63f83fe6bed80c9a9f21484e1ae62f4d | 74420de75c8093fd1cca23ed20e1a7238baa6bb4 | refs/heads/master | 2020-07-30T02:49:10.834175 | 2019-09-22T03:49:02 | 2019-09-22T03:49:02 | 210,061,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,775 | py | import os
import time
import platform
import math
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from constants import *
class Instagram_scrapper():
def __init__(self):
self.add_executable_to_path()
self.driver = None
def add_executable_to_path(self): # To avoid install browsers drivers
# Add exec in SO path
if not os.getcwd() in os.get_exec_path():
os.environ["PATH"] = os.environ["PATH"] + ";" if platform.system() == "Windows" else os.environ["PATH"] + ":"
def wait_for_load(self, by=By.ID, name='react-root', timeout=10):
element_present = EC.presence_of_element_located((by, name))
WebDriverWait(self.driver, timeout).until(element_present)
def scrapping(self, query, limit=50):
self.driver = webdriver.Chrome()
self.query_tag(query, limit)
def query_tag(self, query, limit, timeout=4):
self.driver.get(SEARCH_TAG_URL.format(query['value']))
div_with_imgs = None
# Wait for the page to load completely.
try:
self.wait_for_load(By.ID, 'react-root')
div_with_imgs = self.driver.find_elements_by_class_name("_bz0w")
except TimeoutException:
try:
self.driver.find_element_by_css_selector(".p-error.dialog-404")
except:
print("Some error occurred while loading the page")
actions = ActionChains(self.driver)
actions.send_keys(Keys.SPACE).perform()
actions.send_keys(Keys.SPACE).perform()
actions.send_keys(Keys.SPACE).perform()
time.sleep(5)
num_scrolls = math.ceil((limit - len(div_with_imgs))/ IMGS_PER_SCROLL)
scroll = 0
last_height = self.driver.execute_script("return document.body.scrollHeight")
while True:
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(2.5)
new_height = self.driver.execute_script("return document.body.scrollHeight")
if new_height == last_height or num_scrolls == scroll: # If new_height == last_height, there is not more images to show
break
last_height = new_height
scroll = scroll + 1
div_with_imgs = self.driver.find_elements_by_class_name("_bz0w")
query = {
"value":"disney"
}
instagram = Instagram_scrapper()
instagram.scrapping(query)
| [
"andresouzabrito@MacBook-Air-de-Andre.local"
] | andresouzabrito@MacBook-Air-de-Andre.local |
f7eef4e3e9515bade5f8e15383707084ab7ec8db | e0e2340254885d5f79c10d077d2d8f608ca313f1 | /gym/migrations/0047_auto_20190422_1116.py | 8ac71e0103b85ca1850f3dfbb262bec75e2f352f | [] | no_license | i32agval/GymLuis | 87a8f278d137872cce009551032e6db0e4cec83f | 5f4ae324f70a01e53a951330b0a3350a774f5b36 | refs/heads/master | 2023-09-01T13:33:42.921528 | 2020-03-16T12:18:56 | 2020-03-16T12:18:56 | 182,527,515 | 0 | 0 | null | 2023-08-28T17:19:30 | 2019-04-21T12:01:44 | JavaScript | UTF-8 | Python | false | false | 2,413 | py | # Generated by Django 2.2 on 2019-04-22 11:16
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('gym', '0046_auto_20190411_1003'),
]
operations = [
migrations.AlterField(
model_name='exercise',
name='date',
field=models.DateField(default=datetime.datetime(2019, 4, 22, 11, 16, 42, 247769, tzinfo=utc)),
),
migrations.AlterField(
model_name='exercise',
name='weight',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='userimages',
name='biceps',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='userimages',
name='chest',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='userimages',
name='date',
field=models.DateField(default=datetime.datetime(2019, 4, 22, 11, 16, 42, 246636, tzinfo=utc)),
),
migrations.AlterField(
model_name='userimages',
name='gastrocnemius',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='userimages',
name='muscle_fat',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='userimages',
name='muscle_mass',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='userimages',
name='quadricep',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='userimages',
name='waist',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='userimages',
name='weight',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='weightdata',
name='date',
field=models.DateField(default=datetime.datetime(2019, 4, 22, 11, 16, 42, 246193, tzinfo=utc)),
),
migrations.AlterField(
model_name='weightdata',
name='weight',
field=models.IntegerField(max_length=10),
),
]
| [
"i32agval@gmail.com"
] | i32agval@gmail.com |
edfd582480b72d10d09c194a3b7b3e81e62b6e35 | 14b5679d88afa782dc5d6b35878ab043089a060a | /students/Siyang Liu/voicecontrol_changechoth/voice.py | 7236b542f70ca123741e24926ba00368fad76e9a | [] | no_license | mutiangua/EIS2020 | c541ef32623f67f9277945cd39cff3c02f06e4dd | 92aa2711b763a2c93be238825c445bf2db8da391 | refs/heads/master | 2022-11-18T05:21:47.567342 | 2020-07-11T10:11:21 | 2020-07-11T10:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,374 | py | import wave
from aip import AipSpeech
from xpinyin import Pinyin
import pyaudio
import os
import cv2
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 8000
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "audio.wav"
APP_ID = '19165946'
API_KEY = 'D7BqfDPOj9ORbG85FL5jOQjh'
SECRET_KEY = 'skL4Mag0dGquseo08RaVsDgM1ABMxGN7'
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
STATE = 0
TIME_START = 0
TIME_END = 0
num = 0
x = 0
y = 0
w = 0
h = 0
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
pd = cv2.imread('white.jpg')
pd1 = cv2.imread('yellow.jpg')
pd2 = cv2.imread('black.jpg')
img = cv2.imread('freedom.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
pass
head = img[y:(y + h), x:(x + w)]
head = cv2.resize(head, (130, 130), interpolation=cv2.INTER_CUBIC)
cv2.namedWindow('result')
def readFile(fileName):
with open(fileName, 'rb') as fp:
return fp.read()
def writeFile(fileName, result):
with open(fileName, 'wb') as fp:
fp.write(result)
def getBaiduText():
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
stream.start_stream()
print("* 开始录音......")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
print("* 正在识别......")
result = client.asr(readFile('audio.wav'), 'wav', 16000, {
'dev_pid': 1537,
})
if result["err_no"] == 0:
for t in result["result"]:
return t
else:
print("没有识别到语音\n")
return ""
def getBaiduVoice(text):
result = client.synthesis(text, 'zh', 6, {'vol': 5, 'per': 4, 'spd': 5})
if not isinstance(result, dict):
writeFile("back.mp3", result)
os.system("back.mp3")
def getPinYin(result):
pin = Pinyin()
return pin.get_pinyin(result)
def pic(pd4):
cv2.destroyWindow('result')
pd4[185:315, 315:445] = head[0:130, 0:130]
cv2.imshow('result', pd4)
def wakeUp(pinyin):
if getPinYin("白色衣服") in pinyin:
print("好的,白色衣服")
pic(pd)
elif getPinYin("黄色衣服") in pinyin:
print("好的,黄色衣服")
pic(pd1)
elif getPinYin("黑色衣服") in pinyin:
print("好的,黑色衣服")
pic(pd2)
def main():
pic(pd)
if cv2.waitKey(10) & 0xFF == 'q':
return
while True:
result = getBaiduText()
pinyin = getPinYin(result)
print("等待唤醒")
print(result)
wakeUp(pinyin)
if cv2.waitKey(10) & 0xFF == 'q':
break
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
os.system("back.mp3")
os.system("audio.wav")
os.system("rmdir /s/q __pycache__")
| [
"noreply@github.com"
] | mutiangua.noreply@github.com |
bc68b48bb109e91bc5ff876765120a0c7e48a010 | fdfc64c4cab8b8e48f0912d280f9278cd0c6b281 | /extolfirstblog/settings.py | 318e364db4f3226fc8250f341e928bed13a8cd3d | [] | no_license | Extollian/Extol1stproject | 789a7ee9a56721e908ebda21e0a25acd360ecde1 | 0206302955178956f11ef8f8e52094a69e368861 | refs/heads/master | 2021-01-20T00:05:00.047925 | 2017-04-22T14:18:15 | 2017-04-22T14:18:15 | 89,074,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | """
Django settings for extolfirstblog project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u0!zp=by4_9p$nq_fb5zew-8cax1c&moc_c!y@7w-4hl9y&)is'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'extolfirstblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'extolfirstblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"OLUWALONIMI EXTOL"
] | OLUWALONIMI EXTOL |
f424ec4d5925055402ce8f2cc7626e7474386291 | b28eaf11c4f3a97703d471c895fc1ee399e5d0d6 | /produc/views.py | 9eb3a9f61f63e7d4383a1ac27c08e9603b606837 | [] | no_license | AleksandrBagdasarov/test | e6c255cc82bb7b5290d1e08752a51cff6c2c7911 | 4942c3d8d347afa40ad1b28fd06b400b4dc62c97 | refs/heads/master | 2022-07-19T19:25:36.629495 | 2020-05-24T10:13:40 | 2020-05-24T10:13:40 | 264,422,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from .models import Choice, Question
from django.urls import reverse
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {
'latest_question_list' : latest_question_list,
}
return render(request, 'produc/index.html',context)
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'produc/detail.html', {'question':question})
def result(request, question_id):
response = 'You are looking at the results of questions %s'
return HttpResponse(response % question_id)
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
select_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'produc/detail.html', {
'question' : question,
'error_message' : 'You did not select a choice',
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('produc:results', args=(question_id,))) | [
"sashabagdasarov@gmail.com"
] | sashabagdasarov@gmail.com |
f702741ca387da78893353be313425648e5c6570 | e3a467cc02d787746298babd760503bb5f1be288 | /spistograms/functions/timeFractions_f.py | ea3ca1f92ded3f287faec4a97d8eba1866298140 | [
"MIT"
] | permissive | benholtzman/specufex_post | b623d6aa9d8764baaef89dd5e6485b4a344ea7b4 | 3af4b93832c27e565c915f069502b45af4a1b6b7 | refs/heads/main | 2023-06-20T11:44:01.649541 | 2021-07-24T19:42:19 | 2021-07-24T19:42:19 | 385,685,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,366 | py | import pandas as pd
import numpy as np
# import sys
# import scipy.io as sio
from datetime import date as dt # replace the obspy version of UTCDateTime with this.
#from obspy import UTCDateTime
# a timestamp is defined (POSIX time) as the number of seconds elapsed since 1st of January 1970
# take in a CATALOG with clustering data,
# add TIME to it-- in year frac
def calc_YearFloat_HourFloat(cat):
n_evts = len(cat)
hourFrac_vec = []
yearFrac_vec = []
for ind in range(n_evts):
dtobj = cat.DateObj.iloc[ind]
###print(dtobj)
tt = dtobj.timetuple()
#print(tt, type(tt))
dayofyear = tt.tm_yday
# print(dayofyear)
year = dtobj.year
hour = dtobj.hour
minu = dtobj.minute
sec = dtobj.second
microsec = dtobj.microsecond
# print(hour,minu,sec,microsec)
dayDec = (hour*60*60 + minu*60 + sec + microsec/1e6)/(24*60*60)
# calculate in seconds/seconds_in_day
hourDec = (minu*60 + sec + microsec/1e6 )/ (60*60) # seconds/seconds_in_hour
yearDec = (dayofyear+dayDec)/365.25
#delta = dayofyear/365.25 - yearFrac
hourFrac = hour+hourDec
yearFrac = year+yearDec
hourFrac_vec.append(hourFrac)
yearFrac_vec.append(yearFrac)
# print(yearFrac)
# print(delta)
# print(dayFrac)
# print(yearFloat)
# print(hourFloat)
return np.asarray(yearFrac_vec),np.asarray(hourFrac_vec)
# def calc_YearFrac_UTCts(cat):
# n_evts = len(cat)
# UTCtimestamp = np.zeros([n_evts])
# YearFrac = np.zeros([n_evts])
# for ind in np.arange(n_evts):
# yr=int(cat.year.iloc[ind]); mo=int(cat.month.iloc[ind]); dy=int(cat.day.iloc[ind])
# hr=int(cat.hour.iloc[ind]); mn=int(cat.mn.iloc[ind]); sc=int(cat.sec.iloc[ind])
# # split sec at '.' to get microsec.. later
# stamp = int(float(UTCDateTime(year=yr, month=mo, day=dy, hour=hr, minute=mn, second=sc)))
# UTCtimestamp[ind] = stamp
# yearfrac = yr + (float(UTCDateTime(year=yr, month=mo,day=dy,hour=hr,minute=mn, second=sc).julday/365.25))
# YearFrac[ind] = yearfrac
# return YearFrac, UTCtimestamp
# print('----------------')
# print(TimeStamp[1])
# print(type(TimeStamp[1]))
# print(len(TimeStamp))
# print(TimeStamp[0],TimeStamp[-1])
| [
"holtzman.ben@gmail.com"
] | holtzman.ben@gmail.com |
a32c453d00bce6981260df8196a4d274f334471c | 64cbac4b5381d9692da08ff42d958d1149afa47d | /items/good/christ.py | b1c37207827cae78a9d904f17bb4d3f3fd37270b | [] | no_license | kolobok001/Uirs | 3c4c78c255849a66da88a35263f7d18d1bf31c0c | b9ab1e672f3f60c62cef3d6879f3d8ceefa091b2 | refs/heads/master | 2020-04-10T05:50:56.215016 | 2019-06-19T16:39:41 | 2019-06-19T16:39:41 | 160,838,488 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from constants import *
name = 'Крестик'
description = (
'Нательный крестик, который я нашел в одной из этих дверей. '
'Не знаю, кто его носил, но он весь пропах вином.'
)
tags = [ 'wine' ]
price = 150
def on_pray(user, reply, god):
if god == JESUS_NUM:
user.gods_level[JESUS_NUM] += 1
| [
"noreply@github.com"
] | kolobok001.noreply@github.com |
5620b036428d4f45b653a56db980c5b73e788828 | c507f9e311ef811be0d8c2a29d54357c5e607cb4 | /tests/test_wildcharMatcher.py | 35a6c85b8ff282458c99e6eb0f538b57fbfd01ef | [] | no_license | arturstaszczyk/SimpleRegEx | 2040655474ac752bead9e68e2ade8a2411ed1629 | fc383936e2b1e5ac60b00f067390f61cd5173aeb | refs/heads/master | 2021-01-10T13:55:33.498359 | 2015-12-23T13:06:44 | 2015-12-23T13:06:44 | 48,392,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,970 | py | from unittest import TestCase
from matchers.wildcharmatcher import WildcharMatcher
from evaluator import Evaluator
from regexexceptions import BadCondition, BadMatcher, NoModifier
TOKEN = Evaluator.EVAL_TOKEN
MOD = Evaluator.EVAL_MODIFIER
COND = Evaluator.EVAL_CONDITION
WILD = Evaluator.EVAL_WILDCHAR
class TestWildcharMatcher(TestCase):
# bad data
def test_no_wildchar(self):
matcher = WildcharMatcher({TOKEN: '.a', COND: Evaluator.EVAL_CONDITION_ANY, MOD: '*', WILD: False})
self.assertRaises(BadMatcher, matcher.get_match_length, "text")
def test_bad_wildcahr_condition(self):
matcher = WildcharMatcher({TOKEN: '.a', COND: Evaluator.EVAL_CONDITION_ANY, MOD: '*', WILD: True})
self.assertRaises(BadCondition, matcher.get_match_length, "text")
def test_no_modifier(self):
matcher = WildcharMatcher({TOKEN: '.a', COND: Evaluator.EVAL_CONDITION_MATCH, MOD: None, WILD: True})
self.assertRaises(NoModifier, matcher.get_match_length, "text")
# condition = None
def test_wildchar(self):
matcher = WildcharMatcher({TOKEN: '.', COND: None, MOD: None, WILD: True})
output = matcher.get_match_length("text")
self.assertEquals(output, {"length": 1})
def test_wildchar_multiply(self):
matcher = WildcharMatcher({TOKEN: '.', COND: None, MOD: '*', WILD: True})
output = matcher.get_match_length("text")
self.assertEquals(output, {"length": 4})
def test_wildchar_one_or_more(self):
matcher = WildcharMatcher({TOKEN: '.', COND: None, MOD: '+', WILD: True})
output = matcher.get_match_length("text")
self.assertEquals(output, {"length": 4})
def test_wildchar_one_or_zero(self):
matcher = WildcharMatcher({TOKEN: '.', COND: None, MOD: '?', WILD: True})
output = matcher.get_match_length("text")
self.assertEquals(output, {"length": 1})
# condition = Matxh
def test_wildchar_match_multiply(self):
matcher = WildcharMatcher({TOKEN: '.a', COND: Evaluator.EVAL_CONDITION_MATCH, MOD: '*', WILD: True})
output = matcher.get_match_length("taken")
self.assertEquals(output, {"length": 2})
def test_wildchar_match_more_than_one(self):
matcher = WildcharMatcher({TOKEN: '.a', COND: Evaluator.EVAL_CONDITION_MATCH, MOD: '+', WILD: True})
output = matcher.get_match_length("tabaco")
self.assertEquals(output, {"length": 4})
def test_wildchar_match_one_or_zero(self):
matcher = WildcharMatcher({TOKEN: '.a', COND: Evaluator.EVAL_CONDITION_MATCH, MOD: '?', WILD: True})
output = matcher.get_match_length("tabaco")
self.assertEquals(output, {"length": 2})
#special cases
def test_special_case1(self):
matcher = WildcharMatcher({TOKEN: 'a.', COND: Evaluator.EVAL_CONDITION_MATCH, MOD: '+', WILD: True})
output = matcher.get_match_length("alabama")
self.assertEquals(output, {"length": 6}) | [
"astaszczyk@ganymede.eu"
] | astaszczyk@ganymede.eu |
e3f642375f744b339a22ae151e053401dd729665 | 2e39f0bdfec5f05c866309eb9f3afd30dec43cf2 | /afewords/www/afutils/security.py | 26596e68d82a5bacf56c77a1e20efad018c37a45 | [] | no_license | Earthson/afewords_base | 8955fd5b3e961f684d162e62254bc579d83cbe91 | 70ae766e98fb9ef0ce993593872a17baba21fac9 | HEAD | 2016-08-03T18:39:04.585819 | 2013-04-22T14:42:27 | 2013-04-22T14:42:27 | 6,057,704 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,382 | py | #coding=utf-8
import re
import hashlib
import string
import Image, ImageDraw, ImageFont, random
from tornado.escape import url_escape
from tornado.escape import xhtml_escape
def encrypt(pwd):
''' first use MD5, the use SHA1, result is 40 byte '''
result = hashlib.md5(pwd).hexdigest()
result = hashlib.sha1(result).hexdigest()
return result
def random_string(num):
''' num is the nums of random string '''
salt = ''.join(random.sample(string.ascii_letters + string.digits, num))
return salt
def is_email(email):
from afutils.mail_utils import validate_email
return validate_email(email)
def arg_escape(value):
if isinstance(value, basestring):
return xhtml_escape(value)
return value
def user_login(email, pwd):
'''return user obj if successfully login'''
from user import User
email = email.lower()
data = User.datatype.find_one({'email':email})
if not data:
return None, 1, 'User not exist'
usr = User(data)
pwd = encrypt(pwd)
if usr.password != pwd:
return None, 2, 'password error'
return usr, 0, ':)'
def create_vertify_code():
import StringIO
background = (random.randrange(230,255),random.randrange(230,255),random.randrange(230,255))
line_color = (random.randrange(0,255),random.randrange(0,255),random.randrange(0,255))
img_width = 90
img_height = 30
font_color = ['black','darkblue','darkred','red','blue','green']
font_size = 18
font = ImageFont.truetype(r'FreeSans.ttf',font_size)
#font = ImageFont(font_size)
#font = ImageFont.truetype("arial.ttf", 15)
#request.session['verify'] = ''
#新建画布
im = Image.new('RGB',(img_width,img_height),background)
draw = ImageDraw.Draw(im)
code = random_string(6)
#新建画笔
draw = ImageDraw.Draw(im)
for i in range(random.randrange(7,9)):
xy = (random.randrange(0,img_width),random.randrange(0,img_height),random.randrange(0,img_width),random.randrange(0,img_height))
draw.line(xy,fill=line_color,width=1)
#写入验证码文字
x = 4
for i in code:
y = random.randrange(0,10)
draw.text((x,y), i,font=font, fill=random.choice(font_color))
x += 14
del x
del draw
buf = StringIO.StringIO()
im.save(buf,'gif')
buf.closed
return [buf.getvalue(),"".join(code)]
| [
"Earthson.Lu@gmail.com"
] | Earthson.Lu@gmail.com |
8e47cc4ed2580732246bd353878e69eab4730874 | 2273a2090aa058a121a7e27977d76feba6a76a6a | /lstm/dataset.py | 8c86163f3f94e68fa1665971e34c4f7dabd2b701 | [] | no_license | f-grimaldi/language_modelling | a80c0170e0f51e1d85a24642496f4679f6f5ab7a | 57cd34083edd3ffc57db63c02fb94eb6a1104f84 | refs/heads/master | 2022-08-06T18:38:52.098710 | 2020-05-26T11:44:05 | 2020-05-26T11:44:05 | 267,021,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,958 | py | import re
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from functools import reduce
from torchvision import transforms
from torch import optim, nn
# -*- coding: utf-8 -*-
class LOTRDataset(Dataset):
def __init__(self, text, emb, transform=None):
# Extract the sonnets (divided by empty lines and roman numerals)
sentences = re.split('[.]', text)
sentences = [i for i in sentences if len(i.split()) > 16]
chapter_list = sentences
### Char to number
char_to_number = {key: value for key, value in zip(emb.index, emb.values)}
### Store data
self.corpus = text
self.chapter_list = chapter_list
self.transform = transform
self.emb = emb
self.char_to_number = char_to_number
#self.number_to_char = number_to_char
def __len__(self):
return len(self.chapter_list)
def __getitem__(self, idx):
# Get sonnet text
text = self.chapter_list[idx]
"""
if len(text.split()) < 9:
print(self.chapter_list[idx])
print(text)
"""
# Encode with numbers
encoded = encode_text(self.char_to_number, text, self.emb)
# Create sample
sample = {'text': text, 'encoded': encoded}
# Transform (if defined)
if self.transform:
sample = self.transform(sample)
return sample
def encode_text(char_to_number, text, emb):
encoded = [char_to_number[c] for c in re.findall(r"[\w']+|[.,!?;]", text) if c in emb.index]
return encoded
def decode_text(emb, encoded):
text = [emb.index[(emb == c).all(axis=1)][0] for c in encoded]
#text = [number_to_char[c] for c in encoded]
#text = reduce(lambda s1, s2: s1 + s2, text)
return text
class RandomCrop():
def __init__(self, crop_len):
self.crop_len = crop_len
def __call__(self, sample):
text = sample['text']
encoded = sample['encoded']
# Randomly choose an index
tot_words = len(text.split())
#start_words = np.random.randint(0, tot_words - self.crop_len)
start_words = 0
end_words = start_words + self.crop_len
new_text = ' '.join(text.split()[start_words:end_words])
#print(len(text.split()))
if len(new_text.split()) < self.crop_len:
print(len(new_text.split()))
print(new_text.split())
return {**sample,
'text': new_text,
'encoded': encoded[start_words: end_words]}
class OneHotEncoder():
def __init__(self, emb=None):
self.emb = emb
#self.alphabet_len = alphabet_len
def __call__(self, sample):
# Load encoded text with numbers
encoded = np.array(sample['encoded'])
# Create one hot matrix
#encoded_onehot = create_one_hot_matrix(encoded, self.alphabet_len)
encoded_onehot = encoded
return {**sample,
'encoded_onehot': encoded_onehot}
class ToTensor():
def __call__(self, sample):
# Convert one hot encoded text to pytorch tensor
encoded_onehot = torch.tensor(sample['encoded_onehot'])
return {'encoded_onehot': encoded_onehot}
def embedding_matrix(path, text, normalize = True):
emb = pd.read_csv(path, sep = ' ', quotechar=None, quoting=3, header=None)
emb.index = emb.iloc[:, 0]
emb.drop(columns=emb.columns[0], inplace=True)
corpus = set(word for word in text.split())
word_in_corpus = [i for i in emb.index if i in corpus]
emb = emb.loc[word_in_corpus, :]
emb = pd.DataFrame(np.round(emb.values, 4), index=emb.index)
if normalize:
emb = emb.apply(lambda x: x/np.linalg.norm(x), axis=1)
return emb
def clean_text(path):
with open(path, 'r') as file:
text = file.read()
text = text.lower()
text = text.replace('#', '')
text = text.replace('*', '')
text = text.replace('(', '')
text = text.replace(')', '')
text = text.replace('`', "'")
text = text.replace(')', '')
text = text.replace('–', ' ')
text = text.replace('-', ' ')
text = text.replace('—', ' ')
text = text.replace('»', '"')
text = text.replace('«', '"')
text = text.replace('_', ' ')
text = text.replace('’', "'")
text = text.replace('‘', "'")
text = text.replace('ó', 'o')
text = text.replace('{', '')
text = text.replace('}', '')
text = text.replace('µ', ' ')
text = text.replace('¤', '')
text = text.replace('¢', '')
text = text.replace('¢', '')
text = text.replace('®', '')
text = text.replace('¥', '')
text = text.replace('<br>', '')
text = text.replace('<h4>', '')
text = text.replace('</h4>', '')
text = text.replace('/', '')
text = text.replace('&', 'e')
text = text.replace('=', 'o')
text = text.replace('‚', ',')
return text
| [
"f.grimaldi.94@gmail.com"
] | f.grimaldi.94@gmail.com |
fd3bf6fb10856deeb0a7d4ebb4e2ea8acf3217fc | 72a30834234eee674a2e22e060dfb4457108e5f6 | /db_migrate.py | 9a04679fd0b8bc5d1827d66948f002cdb4aada9a | [] | no_license | wheresmyjetpack/krispr | 153bf29603695a5f842b7ddd4fdd2f2d977c6452 | 49d6136176730305d96bc8e6e2c64d82a81f8cd5 | refs/heads/master | 2016-09-06T19:26:41.981860 | 2015-07-27T17:49:18 | 2015-07-27T17:49:18 | 39,209,200 | 2 | 1 | null | 2015-07-17T00:25:14 | 2015-07-16T16:52:45 | Python | UTF-8 | Python | false | false | 858 | py | #!flask/bin/python
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
# Determine the schema version currently tracked in the migrate repository.
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
# Rebuild the model as of the old version inside a scratch module, then
# diff it against the live SQLAlchemy metadata to generate the upgrade script.
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
open(migration, 'wt').write(script)
# Apply the freshly written migration and report the new version.
# NOTE(review): Python 2 print statements — this script predates Python 3.
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print 'New migration saved as %s' % migration
print 'Current database version: %s' % v
| [
"wheresmyjetpack03@gmail.com"
] | wheresmyjetpack03@gmail.com |
4173480083053b81034cb8d69ad2bde659ed3885 | df4d6fa5191ec7a211ea5859d5996518e4059bf8 | /excel/settings.py | 15750fd29b28588815e8cdba87acfca0f4d6e1d7 | [] | no_license | Sainadh086/Auto-ML- | 664f9e953fa042d7f687a2df6116148e2d83525b | 67606ede063b98787a05def0da6e7891f0af479e | refs/heads/master | 2022-11-24T10:13:15.245881 | 2020-06-09T02:14:13 | 2020-06-09T02:14:13 | 264,955,086 | 0 | 0 | null | 2020-06-09T02:15:00 | 2020-05-18T13:54:57 | Python | UTF-8 | Python | false | false | 3,576 | py | """
Django settings for excel project.
Generated by 'django-admin startproject' using Django 1.11.26.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b#4xj(3v2(=_#bn$u#55rd8%@e!s-%niusss8==4hv9v^!k_98'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [u'127.0.0.1',
'varshanapp.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app'
]
MIDDLEWARE = [
#'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'excel.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'excel.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER' : 'postgres',
'PASSWORD' : '1234',
'HOST' : 'localhost',
}
}
"""
import dj_database_url
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES = {
'default' :{}
}
DATABASES['default'].update(prod_db)
# """
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Add configuration for static files storage using whitenoise
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| [
"varshan@pop-os.localdomain"
] | varshan@pop-os.localdomain |
81c71b9b6f028d9fb81d2e31bbe2c89201b54127 | 27a681a4ea9922fb2785046f50bb4f7e9ffc3478 | /checkout/admin.py | 003521233a0b3c69e7bdf05f0c67be6936d568ab | [] | no_license | Dgoodwin92/boutique_ado_v1 | ff7b7dc59da82bb336ded4a7db50f24cfde9f8b5 | 259bd164a33b5e065336dd84a9f552c59fc6c352 | refs/heads/main | 2023-08-18T03:21:42.026315 | 2021-09-18T21:04:19 | 2021-09-18T21:04:19 | 393,482,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | from django.contrib import admin
from .models import Order, OrderLineItem
class OrderLineItemAdminInline(admin.TabularInline):
    # Inline table of line items rendered inside the Order admin page;
    # the computed line total cannot be edited by hand.
    model = OrderLineItem
    readonly_fields = ('lineitem_total',)
class OrderAdmin(admin.ModelAdmin):
    # Order admin: line items shown inline; calculated totals and the
    # Stripe payment reference are read-only so they cannot drift from
    # the values computed at checkout.
    inlines = (OrderLineItemAdminInline,)
    readonly_fields = ('order_number', 'date',
                       'delivery_cost', 'order_total',
                       'grand_total', 'original_bag',
                       'stripe_pid')
    fields = ('order_number', 'user_profile', 'date', 'full_name',
              'email', 'phone_number', 'country',
              'postcode', 'town_or_city', 'street_address1',
              'street_address2', 'county', 'delivery_cost',
              'order_total', 'grand_total', 'original_bag',
              'stripe_pid')
    list_display = ('order_number', 'date', 'full_name',
                    'order_total', 'delivery_cost',
                    'grand_total',)
    # Newest orders first in the change list.
    ordering = ('-date',)
admin.site.register(Order, OrderAdmin)
"danielgoodwin85@gmail.com"
] | danielgoodwin85@gmail.com |
753871694ec48ad0edc66732a60278684671fbf7 | 67159562155f448aef7fd06557c540d8c712f0fe | /buttom.py | 052154c29c7643e2162046bd2f7dca06da3961df | [
"BSD-3-Clause"
] | permissive | mizunashi-sh/flappy-bird-remastered | dcb86bd0b203082362c60c21fa9bb898f809ce78 | 9fa876b25ba98fb576e19ad1bebdebc43fde11fb | refs/heads/master | 2021-06-21T12:33:15.990257 | 2021-01-11T06:13:25 | 2021-01-11T06:13:25 | 163,739,859 | 2 | 0 | null | 2021-01-11T06:13:26 | 2019-01-01T13:46:15 | Python | UTF-8 | Python | false | false | 668 | py | import pygame
class Buttom():
    """A clickable "play" button sprite."""
    def __init__(self, init_settings, screen):
        """Load the button image and position it on the screen."""
        # Keep references to the target surface and the global settings.
        self.screen = screen
        self.init_settings = init_settings
        # Load the sprite image and take its bounding rectangle.
        self.image = pygame.image.load('resources/sprites/button_play.png')
        self.rect = self.image.get_rect()
        # Anchor the button at 30% of the width and 50% of the height.
        screen_w = self.init_settings.screen_width
        screen_h = self.init_settings.screen_height
        self.rect.x = 0.3 * screen_w
        self.rect.y = 0.5 * screen_h
    def blitme(self):
        """Draw the button at its current rect."""
        self.screen.blit(self.image, self.rect)
| [
"noreply@github.com"
] | mizunashi-sh.noreply@github.com |
dc2dd5439cb5bc87b057d15c4313f5adde3c50df | cebf2e5276e6d064d0ec86beaf1129fe0d0fd582 | /days051-060/day059/capstone/blog.py | 5a570cc0b88eb25417e462a8cc83d88159821495 | [] | no_license | SheikhFahimFayasalSowrav/100days | 532a71c5c790bc28b9fd93c936126a082bc415f5 | 0af9f2f16044facc0ee6bce96ae5e1b5f88977bc | refs/heads/master | 2023-06-14T06:18:44.109685 | 2021-07-08T16:58:13 | 2021-07-08T16:58:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | import requests
from flask import Flask, render_template
app = Flask(__name__)
# All posts are fetched once at import time from the n:point JSON endpoint;
# the list is shared by every request (restart the app to pick up new posts).
posts = requests.get('https://api.npoint.io/a6ff5a040e0baf25233b').json()
@app.route('/')
def home():
    # Blog index listing every post.
    return render_template('index.html', posts=posts)
@app.route('/about')
def about():
    return render_template('about.html')
@app.route('/contact')
def contact():
    return render_template('contact.html')
@app.route('/post/<int:index>')
def post(index):
    # Detail page; `index` is a position in the posts list (IndexError for
    # out-of-range values), not a post id.
    return render_template('post.html', post=posts[index])
if __name__ == '__main__':
    app.run(debug=True)
| [
"pullynnhah@icloud.com"
] | pullynnhah@icloud.com |
2a0c9595b531f97a15f0c54fae714cd6d5289c02 | 2a45507fff25c42cad05b52d83d011fea0909be5 | /Algorithms/Dynamic Problems/Python/FlodyWarshall.py | 517051545da014551bfc3e4d0dffed6a2dec31db | [
"MIT"
] | permissive | Sharayu1071/Daily-Coding-DS-ALGO-Practice | dc8256e76d43952f679236df904f597908fbda13 | 2c424b33a1385085f97b98d6379d6cd9cc71b1bd | refs/heads/main | 2023-08-30T17:49:44.312613 | 2021-10-03T04:21:21 | 2021-10-03T04:21:21 | 412,973,714 | 3 | 0 | MIT | 2021-10-03T04:18:20 | 2021-10-03T04:18:19 | null | UTF-8 | Python | false | false | 1,410 | py | #Python3 mplementation of FloydWarshall Algorithm
# Number of vertices in the sample graph below (the functions now derive
# the size from the matrix itself; this constant is kept for the demo).
v=4
"""Define infinity as the large
enough value. This value will be
used for verticles not connected to each other """
# INF must exceed any real path weight in the graph for min() to work.
INF=99999
# Solves the all-pairs shortest path problem via the
# Floyd-Warshall algorithm.
def FloydWarshall(graph):
    """
    Compute all-pairs shortest distances with the Floyd-Warshall algorithm.

    Generalised: the size is taken from len(graph) instead of the
    module-level ``v``, so any square matrix works.  The input matrix is
    left untouched; the distance matrix is printed and also returned.
    """
    n = len(graph)
    # One fresh list per row so `graph` itself is never mutated.
    dist = [list(row) for row in graph]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                # Allow vertex k as an intermediate hop on the i->j path.
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    printSolution(dist)
    return dist
def printSolution(dist):
    """Pretty-print the distance matrix, showing INF for unreachable pairs."""
    print ("Following Matrix Shows the shortest distances between every pair of vertices")
    n = len(dist)
    for i in range(n):
        for j in range(n):
            if dist[i][j] == INF:
                print( "%7s" % ("INF"),end=" ")
            else:
                print( "%7d" % (dist[i][j]),end=" ")
            if j == n - 1:
                print()
# Demo: 4x4 adjacency matrix, INF marking the absence of a direct edge.
graph=[[0 ,5 ,INF ,10 ]
    ,[INF,0 ,3 ,INF]
    ,[INF,INF,0 ,1 ]
    ,[INF,INF,INF,0 ]]
FloydWarshall(graph)
| [
"noreply@github.com"
] | Sharayu1071.noreply@github.com |
52b7a607ea4c0ad4fa6ee11a821e1a7a54c5b1a7 | 22060deec5b7a7506fd5b0135cb0d2992b36c543 | /record_pset_solution.py | de8cf3651a336c41240e20163b0d344399c2e18b | [] | no_license | mrmwood/Reports_Project | 6eaa1ff92331878eeb474c32c3ef174afcf4ceed | 44e3bd69afea1de326347cb29ed3703d0266414a | refs/heads/master | 2022-12-29T06:22:37.368634 | 2020-09-17T04:52:54 | 2020-09-17T04:52:54 | 296,219,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | class Record:
def __init__(self,forename,surname,age,gender,cs_student):
self.__forename = forename
self.__surname = surname
if not isinstance(age, int):
raise TypeError("age must be an integer")
else:
self.__age = age
self.__gender = gender
if cs_student not in ["True","False"]:
raise ValueError("CS Student must be 'True' or 'False'")
else:
self.__cs_student = cs_student
    def get_forename(self):
        # Accessor for the name-mangled forename attribute.
        return self.__forename
def set_surname(self,surename):
self.__surname = surname
return self.__surname
    def get_surname(self):
        # Accessor for the name-mangled surname attribute.
        return self.__surname
    def set_age(self,age):
        # Mutator returning the stored value.  NOTE(review): unlike
        # __init__, no isinstance(age, int) check is performed here.
        self.__age = age
        return self.__age
    def get_forename(self):
        # NOTE(review): duplicate of get_forename defined earlier in the
        # class; this later definition silently overrides it (both bodies
        # are identical, so behavior is unaffected).
        return self.__forename
    def set_forename(self,forename):
        # Mutator returning the newly stored value.
        self.__forename = forename
        return self.__forename
# Smoke test: build a record, read the forename, then overwrite it
# (set_forename returns the new value, so this prints "Dave").
mark = Record("Mark","Wood",39,"M","True")
print(mark.get_forename())
print(mark.set_forename("Dave"))
| [
"Mark.Wood@dubaicollege.org"
] | Mark.Wood@dubaicollege.org |
5cad545e3cb672684d3ce2ff7e81c469d9491420 | 6eedf3d66addff978a1ee3c3f56dd30965074b45 | /OAuth2Info.py | 5d2df12c71c025ad6b28cb9c44c1102d82645202 | [] | no_license | Korpip/wagerbot-discord | f5406dfd15f63723b33ec59c76050a9e61529425 | a048a196e487ae9bf19abf47bacde591bdcbc7d4 | refs/heads/master | 2022-04-13T05:31:39.629810 | 2020-04-12T01:38:57 | 2020-04-12T01:38:57 | 254,988,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | OTOKEN="Nyour discord oath here"
# Placeholder — replace with the real bookie name before running the bot.
bookiename = 'YOUR BOOKIE NAME HERE'
| [
"noreply@github.com"
] | Korpip.noreply@github.com |
f79ae7bdacbd52e84966912adfca625191377984 | 994aa173fe6acc8a467d4ed638de8dba29896156 | /hw1.2.py | 94fdf660a57700237aa72d1f145f94875460d70e | [] | no_license | jtyso3/PythonLabs | 86da77d9ab42385c88ad8da4cef9b55f00d5213b | 7762b01ed293f11c54c3afb6a1e57e74153039bf | refs/heads/master | 2022-08-30T03:36:22.848175 | 2020-05-21T19:27:20 | 2020-05-21T19:27:20 | 265,932,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py |
# Three equivalent ways to interpolate a variable into a string.
user_name = 'Andy'
print('Hello {}, it\'s nice to meet you!'.format (user_name))# Python 3.x
print(f'Hello {user_name}, it\'s nice to meet you!')# Python 3.6+ f-string shortcut
print('Hello %s, it\'s nice to meet you!' % user_name) # Python 2.x method
#Shortcuts insert variable user_name
city = 'Minneapolis'
temp = 67
chance_precip = 40
parking = 20.00
guests = 1000
# Format-spec demo: '.' is the fill character, '<'/'>' set alignment,
# and the following number is the column width.
print(f'Welcome!\n{city} Hilton Today')
print('{0:.<18}{1:.>5d}f'.format('Temperature', temp))
print('{:.<18}${:.>5.2f}'.format('Parking', parking))
print('{:.<18}{:.>5d}%'.format('Chance Precip', chance_precip))
print('{:.<18} {:.>5,d}'.format('Guests', guests))
'''
\n= next line, {0:.<18}{1:.>5d}f'.format('Temperature', temp)) 0= placeholder, : = unknown,
. = optional fill character tab, 18 = unknown, 5 = column width, .2f = sets digits after decimal,
.format(parameters,of columns)
'''
# Interactive section: each prompt blocks until the user responds.
name1 = input('Please type your name here >>> ')
print('Hello, ' + name1 + '!')
number_of_cds = int(input('How many CDs do you own?'))
age = float(input('Enter your age - use decimal for partial year.'))
print( "hello" + str(3))
"noreply@github.com"
] | jtyso3.noreply@github.com |
2f5dbc4d6359237f2b2c11a3424d947433948987 | d9bdb48990671e63ca785a7f44cc8997ddd8ef43 | /orders/migrations/0006_order_shipping_address.py | 8292fded7fe8af23704f6e54c00351e2d4ddd667 | [] | no_license | nikitagriu-site/ecommerce | 1c537bfa021b330034204c6a6e5dd0d898087370 | eda0e3036f87539499feaa807594f52211a00643 | refs/heads/master | 2023-05-03T23:01:53.888850 | 2021-05-19T18:07:26 | 2021-05-19T18:07:26 | 363,171,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # Generated by Django 3.2.2 on 2021-05-19 16:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds an optional shipping_address FK to Order (nullable/blank so
    # existing orders stay valid); deleting the referenced address
    # cascades to the order per on_delete=CASCADE.
    dependencies = [
        ('addresses', '0002_alter_address_state'),
        ('orders', '0005_auto_20210519_1148'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='shipping_address',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.address'),
        ),
    ]
| [
"nikitagriu04@gmail.com"
] | nikitagriu04@gmail.com |
53b3d735d92adf064d111c3474102b7708bdbd5e | 027d6585e8d11f1c6c324bf3c916d27665df99be | /ge_kitchen/switch.py | be5017d69f6701fd5e23a0ab8cadc5945f3e5a5d | [
"MIT"
] | permissive | vinoboy/ha_components | 024d25d6e02046ae408e2241e628bbc705ebc6da | 296aeaf0e28b19b4d2345d12ce5597cb5c4eadce | refs/heads/master | 2023-08-18T02:03:53.513845 | 2020-12-20T00:13:28 | 2020-12-20T00:13:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,001 | py | """GE Kitchen Sensor Entities"""
import async_timeout
import logging
from typing import Callable, TYPE_CHECKING
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .binary_sensor import GeErdBinarySensor
from .const import DOMAIN
if TYPE_CHECKING:
from .update_coordinator import GeKitchenUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
class GeErdSwitch(GeErdBinarySensor, SwitchEntity):
    """Switches for boolean ERD codes.

    The switch state mirrors a boolean ERD register on the appliance;
    turning it on/off writes True/False to that register.
    """
    device_class = "switch"

    @property
    def is_on(self) -> bool:
        """Return True if switch is on."""
        return bool(self.appliance.get_erd_value(self.erd_code))

    async def async_turn_on(self, **kwargs):
        """Turn the switch on."""
        _LOGGER.debug(f"Turning on {self.unique_id}")
        await self.appliance.async_set_erd_value(self.erd_code, True)

    async def async_turn_off(self, **kwargs):
        """Turn the switch off."""
        # Bug fix: this previously logged "Turning on" (copy/paste error).
        _LOGGER.debug(f"Turning off {self.unique_id}")
        await self.appliance.async_set_erd_value(self.erd_code, False)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Callable):
    """Set up GE Kitchen switch entities for a config entry."""
    _LOGGER.debug('Adding GE Kitchen switches')
    coordinator: "GeKitchenUpdateCoordinator" = hass.data[DOMAIN][config_entry.entry_id]
    # This should be a NOP, but let's be safe: bound the wait for the
    # coordinator's initial refresh to 20 seconds.
    with async_timeout.timeout(20):
        await coordinator.initialization_future
    _LOGGER.debug('Coordinator init future finished')
    apis = list(coordinator.appliance_apis.values())
    _LOGGER.debug(f'Found {len(apis):d} appliance APIs')
    # Register only switch entities whose ERD code the appliance actually
    # reported.  NOTE(review): reaches into the private _property_cache —
    # confirm no public accessor exists.
    entities = [
        entity
        for api in apis
        for entity in api.entities
        if isinstance(entity, GeErdSwitch) and entity.erd_code in api.appliance._property_cache
    ]
    _LOGGER.debug(f'Found {len(entities):d} switches')
    async_add_entities(entities)
| [
"ajmarks@gmail.com"
] | ajmarks@gmail.com |
e2c8b9f9cd9488db86f12df603ef718af6cbff5c | b0e261e7d2450803a4683df86efb7ee6f5f48530 | /huffman.py | f15abb4faa8932f2e7cd8f92bfb0e83c08040ba3 | [] | no_license | kishorekdty/huffman-coding-in-python | 239a93a24be2c5a9074619ca8f2af57e8318ba88 | a6f0c9a0c6ef88c8f3c4f0082f831f29f5c78a76 | refs/heads/master | 2021-01-10T19:31:03.386305 | 2012-11-16T10:42:31 | 2012-11-16T10:42:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | def encode(string,tree):
return encode0(string,tree,tree,"")
def encode0(s, node, tree, op):
    """Helper for encode(): walk the tree from `node`, emitting '0' for a
    left step and '1' for a right step; reaching a leaf consumes one input
    symbol and restarts at the root.  `op` seeds the output.

    Rewritten iteratively: the original recursed once per emitted bit and
    hit Python's recursion limit on long inputs; bits are also collected
    in a list and joined once instead of quadratic string concatenation.
    """
    bits = [op]
    i = 0
    while i < len(s):
        if node.left is None:
            # Leaf reached: current symbol fully encoded; restart at root.
            i += 1
            node = tree
        elif s[i] in node.left.chars:
            node = node.left
            bits.append('0')
        else:
            node = node.right
            bits.append('1')
    return ''.join(bits)
def decode(string, tree):
    """Decode a Huffman bit-string back into text using `tree`."""
    return decode0(string, tree, tree, "")
def decode0(s, node, tree, op):
    """Helper for decode(): follow '0'/'1' bits down the tree; on a leaf,
    append its symbol and restart at the root.  When the bits run out the
    current node's chars are appended (the leaf of the final symbol).
    `op` seeds the output.

    Rewritten iteratively: the original recursed once per bit and hit the
    recursion limit on long bit-strings; output pieces are joined once
    instead of quadratic string concatenation.
    """
    pieces = [op]
    i = 0
    while i < len(s):
        if node.left is None:
            pieces.append(node.chars)
            node = tree
        elif s[i] == '0':
            node = node.left
            i += 1
        else:
            node = node.right
            i += 1
    pieces.append(node.chars)
    return ''.join(pieces)
def makeCodeTree(sample):
    # Build a Huffman tree for `sample`: create one leaf per symbol from
    # its frequency, then repeatedly merge the two lightest nodes.
    # NOTE(review): the list is fully re-sorted on every merge, which is
    # O(n^2 log n); kept as-is because the exact stable sort order decides
    # tie-breaking and therefore the shape of the resulting tree/codes.
    nodes=sorted([fork(i,j) for (i,j) in freq(sample).items()],key=lambda x:x.weight)
    while len(nodes) > 1:
        nodes=sorted(([fork(nodes[0].chars+nodes[1].chars,nodes[0].weight+nodes[1].weight,nodes[0],nodes[1])]+nodes[2:]),key=lambda x:x.weight)
    return(nodes[0])
class fork:
    """Huffman-tree node.

    chars  -- the symbols covered by this subtree (concatenated string)
    weight -- combined frequency of those symbols
    left / right -- child nodes, or None for a leaf
    """
    def __init__(self, chars, weight, left=None, right=None):
        self.chars = chars
        self.weight = weight
        self.left = left
        self.right = right

    def __repr__(self):
        # Debugging aid; children are omitted to keep the output short.
        return 'fork(%r, %r)' % (self.chars, self.weight)
def freq(s):
    """Return a dict mapping each symbol of `s` to its occurrence count."""
    counts = {}
    for symbol in s:
        counts[symbol] = counts.get(symbol, 0) + 1
    return counts
def makecodList(codeTree):
    """Return a {symbol: bit-string} lookup table for `codeTree`.

    dict() consumes the (symbol, code) pairs directly — identical to the
    original manual loop (a later duplicate key would win in both forms).
    """
    return dict(makecodList0(codeTree))
def makecodList0(tree):
    """Collect (symbol, code) pairs for every leaf below `tree`, prefixing
    '0' for the left branch and '1' for the right branch."""
    if tree.left is None:
        return [(tree.chars, '')]
    pairs = []
    for bit, child in (('0', tree.left), ('1', tree.right)):
        for symbol, suffix in makecodList0(child):
            pairs.append((symbol, bit + suffix))
    return pairs
def quickEncode(string, codList):
    """Encode `string` via the {symbol: bit-string} table `codList`.

    str.join builds the output in linear time (the original repeated
    concatenation was quadratic); still raises KeyError for symbols
    missing from the table, exactly as before.
    """
    return "".join(codList[symbol] for symbol in string)
def test():
    # Smoke test: build a tree over the lowercase alphabet, print the code
    # table and round-trip a sample string.
    # NOTE(review): Python 2 print statements — this module predates Python 3.
    print 'tree optimised for "qwertyuiopasdfghjklzxcvbnm" created'
    tree=makeCodeTree("qwertyuiopasdfghjklzxcvbnm")
    table=makecodList(tree)
    print "code table:" ,table
    print 'string: "asdf" code: ', encode("asdf",tree)
    print 'code: "11100111011001010011" string: ', decode("11100111011001010011",tree)
test()
| [
"kishorekdty@gmail.com"
] | kishorekdty@gmail.com |
62d1181857d13939454f6c06a4c3709e546fb9ad | cb332a1a858ab48d470fd15528e0fb1e822fa2ae | /experiments/main.py | 022d24d5f2926eca09a6422eff2b348c829e29c7 | [] | no_license | fKunstner/limitations-empirical-fisher | f4c531ce5609e42d8649aaa7b86c8b0b669ef4fc | 53344c835ef04386a4d7dfe0f80c32456219ab78 | refs/heads/master | 2021-08-18T04:18:28.205747 | 2020-06-02T07:00:15 | 2020-06-02T07:00:15 | 189,239,233 | 46 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,310 | py | import efplt
import argparse
def parse():
    """Parse command-line flags and enforce the cross-flag constraints."""
    parser = argparse.ArgumentParser(description='Experiment Runner')

    def register(group, flags):
        # All options are boolean store_true flags.
        for flag, text in flags:
            group.add_argument(flag, action="store_true", help=text)

    # Exactly one experiment must be selected.
    exp_group = parser.add_argument_group('Experiment selection').add_mutually_exclusive_group(required=True)
    register(exp_group, [
        ("-misspec", "Misspecification experiment"),
        ("-optim", "Optimization experiment"),
        ("-vecfield", "Vector field visualization"),
    ])

    action_group = parser.add_argument_group('Action selection', "At least one of [-run, -plot] is required")
    register(action_group, [
        ("-run", "Runs the experiment and save results as a .pk file"),
        ("-plot", "Plots the result from a .pk file (requires -save and/or -show)"),
        ("-appendix", "Also run/plot the experiments in the appendix"),
    ])

    plot_group = parser.add_argument_group('Plotting options', "At least one of [-save, -show] is required if plotting")
    register(plot_group, [
        ("-save", "Save the plots"),
        ("-show", "Show the plots"),
    ])

    args = parser.parse_args()
    # Constraints argparse cannot express on its own.
    if not (args.run or args.plot):
        parser.error('No action requested, add -run and/or -plot')
    if args.plot and not (args.show or args.save):
        parser.error("-plot requires -save and/or -show.")
    return args
def savefigs(figs, expname):
    """Write each figure to "<expname>-<index>.pdf" via efplt.save."""
    for index, figure in enumerate(figs):
        efplt.save(figure, expname + "-" + str(index) + ".pdf")
if __name__ == "__main__":
    args = parse()
    print("")
    # Exactly one experiment flag is set (argparse enforces the mutually
    # exclusive group), so exactly one of these imports binds `exp`.
    if args.vecfield:
        import vecfield.main as exp
        expname = "vecfield"
    if args.misspec:
        import misspec.main as exp
        expname = "misspec"
    if args.optim:
        import optim.main as exp
        expname = "optim"
    if args.run:
        if args.appendix:
            exp.run_appendix()
        else:
            exp.run()
    if args.plot:
        if args.appendix:
            figs = exp.plot_appendix()
        else:
            figs = exp.plot()
        if args.show:
            efplt.plt.show()
        if args.save:
            savefigs(figs, expname + ("-apx" if args.appendix else ""))
| [
"frederik.kunstner@gmail.com"
] | frederik.kunstner@gmail.com |
188071be1a6de7ac2995e9c25ac732518f7515bd | 287c79013134237aba524d42edcffae0024f208f | /src/condition/nested if.py | 08bb54dbe27b1fd6f82b60922d27464b62beacfc | [] | no_license | uCognitive/Python-Beginner | 284c7694492dfb8a2fb466fa60ae59987c1d0b59 | 074fef0b52254b05704e4efc63c5656edc5d317c | refs/heads/master | 2020-05-17T08:51:20.206070 | 2019-05-25T13:44:04 | 2019-05-25T13:44:04 | 183,617,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # user input
# Read three integers and print which one is strictly greatest.
num1 = int(input("Enter 1st number: "))
num2 = int(input("Enter 2nd number: "))
num3 = int(input("Enter 3rd number: "))
# NOTE(review): ties print nothing — equal inputs satisfy none of the
# strict comparisons below.
if num1 > num2: # num1 greater than num2...
    if num1 > num3: # ...and greater than num3
        # both conditions true
        print("Num1 is greatest")
if num2 > num1:# num2 greater than num1...
    if num2 > num3:# ...and greater than num3
        # both conditions true
        print("Num2 is greatest")
if num3 > num1: # num3 greater than num1...
    if num3 > num2: # ...and greater than num2
        # both conditions true
        print("Num3 is greatest")
"junaidbashir392@gmail.com"
] | junaidbashir392@gmail.com |
2aae9f58506d3fa32dc798f5db8b4868ac37ddcb | 2fb81bd0c3630e3abdfae15bd66e36e1c44bb133 | /P5/src/scripts/test/serialtest.py | f1a49d578680c4993d5c1c9353faa4378f5c57e9 | [] | no_license | Elyteaa/P5 | d5b444b934638d47d805e5fe30ba18e216908439 | 52085dd5f8e95fbd268a78fa812c2a2495d68753 | refs/heads/master | 2020-04-02T02:06:28.665287 | 2019-01-29T09:03:39 | 2019-01-29T09:03:39 | 153,892,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | #!/usr/bin/python
import serial
import codecs
import binascii
# Z1 mote serial-port parameters.
z1baudrate = 115200
z1port = '/dev/ttyUSB0'
z1serial = serial.Serial(port=z1port, baudrate=z1baudrate, bytesize=serial.EIGHTBITS, stopbits=serial.STOPBITS_ONE)
z1serial.timeout = None # set read timeout (None = block until data arrives)
if z1serial.is_open:
    # Read one byte at a time forever and hex-decode it.
    while True:
        size = z1serial.inWaiting()
        test = z1serial.read(1)
        test2 = codecs.decode(test.strip(), 'hex')
        # Earlier decoding attempts, kept for reference:
        #test2 = binascii.unhexlify(test.strip().decode('hex'))
        #print(test)
        #test2 = hex(int((test), 16))
        #test2 = hex(ord(test))
        #test2 = int(hex(test), 16)
        #test2 = int(test, 16)
        #test2 = test.int()
        #test2 = test.unpack("h", test)
        #test2 = test.encode(encoding='utf-8',errors='strict')
        print(type(test2))
        print(test2)
else:
    print('z1serial not open')
| [
"noreply@github.com"
] | Elyteaa.noreply@github.com |
f10d7ce8e9594a313ba184f1fd928cfaa91f49ac | 8d9ae946828be05121cc9fb9d3f1d8fad1df0694 | /NBAScraper/Schedule.py | c780e96d2c6f635a75bee8f4e3003c2fa124ac4b | [] | no_license | steven-wood/NNNBACrystalBall | 661c5873db4203e2e49a6ac8ad521f6cc23f87af | 35d6cd6cfa8c99bc0ead4d34d0cc766c2bfbd87f | refs/heads/master | 2022-07-04T10:02:54.752574 | 2020-05-10T20:03:25 | 2020-05-10T20:03:25 | 262,860,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | import csv
from . import Game
from nba_api.stats.endpoints import leaguegamefinder
def traverseSchedule(season, startDate, endDate, fileName):
    # Append one CSV row per regular-season game in the date window to
    # fileName, printing a running count.
    schedule = getSchedule(startDate, endDate)
    count = 0
    for i in schedule:
        count = count + 1
        # The file is re-opened and closed per game so each row is flushed
        # immediately and progress survives a mid-run crash.
        a_file = open(fileName, "a")
        writer = csv.writer(a_file)
        # NOTE(review): `from . import Game` binds a *module*; `Game(i, season)`
        # only works if that module re-exports a callable — confirm.
        writer.writerow(Game(i, season).getGameRow())
        a_file.close()
        print(count)
def getSchedule(dateFrom, dateTo):
    """Return the sorted, de-duplicated regular-season game ids between
    dateFrom and dateTo, as reported by the NBA stats API."""
    lgf = leaguegamefinder.LeagueGameFinder(date_to_nullable=dateTo, date_from_nullable=dateFrom)
    rows = lgf.get_dict()["resultSets"][0]["rowSet"]
    # Column 4 of each row is the game id.  A set comprehension both
    # de-duplicates and replaces the original O(n^2) insert(0, ...)
    # accumulation; the sorted() result is identical.
    game_ids = {row[4] for row in rows if isGameRegularSeason(row[4])}
    return sorted(game_ids)
def isGameRegularSeason(gid):
    """Return True if an NBA game id (str or int) denotes a regular-season
    game — presumably the season-type digit of the id is 2 (TODO confirm
    against the stats API id scheme).

    Integer floor division replaces the original int(int(gid)/10000000)
    float round-trip; results are identical for the non-negative ids the
    API produces, without float precision concerns.
    """
    return int(gid) // 10000000 == 2
# Scrape one season per call; seasons already collected are commented out.
#traverseSchedule("2014-15", "11/1/2014", "5/1/2015", "ResultsFIXED/2014-15Stats.csv")
traverseSchedule("2013-14", "11/1/2013", "5/1/2014", "Seasons/2013-14Stats.csv")
traverseSchedule("2012-13", "11/1/2012", "5/1/2013", "Seasons/2012-13Stats.csv")
# traverseSchedule("2011-12", "11/1/2011", "5/1/2012", "ResultsFIXED/2011-12Stats.csv")
traverseSchedule("2010-11", "11/1/2010", "5/1/2011", "Seasons/2010-11Stats.csv")
| [
"stevenator21@gmail.com"
] | stevenator21@gmail.com |
fe00f02925c3a8356e9a607e1d9a04b6a5640953 | 6e2f8342f9e090c6715b85782adb457cf315813f | /main_server.py | 5072ed7809e4e0f2fae4117add92aef2ee4ebd13 | [] | no_license | RedExtreme12/Crypt_Lab_5 | a19f8a8e81f1ab0fb83eb08441c013b08652c344 | 3f488e55db1eda75d5fc945745e8e081b2809b23 | refs/heads/master | 2022-09-02T20:20:14.216149 | 2020-05-20T20:12:42 | 2020-05-20T20:12:42 | 264,420,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | # because i want to call python server_main.py directly
import sys
import RSA
import os.path
from send_recv_file import *
sys.path.append("..")
from sockets_classes.server_socket import *
DESTINATION = 'server_recv'
def get_ip_address():
    """Return this computer's local IP address.

    Makes a dummy UDP "connection" to Google's public DNS — no packet is
    actually sent, but after connect() getsockname() reports the local
    interface address.  The socket is now closed afterwards; the original
    leaked it.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()
if __name__ == "__main__":
    # Bind on an ephemeral port and print it so the client can connect.
    server_socket = ServerSocket(0)
    print("Порт (ID): {}".format(server_socket.s.getsockname()[1]))
    client_socket = server_socket.wait_for_client()
    # Mutual RSA authentication: send our proof, then verify the client's.
    RSA.auth_init(server_socket.s.getsockname()[1], public_key_A, client_socket, '[A -> B]')
    RSA.auth_recv(private_key_B, client_socket, 'A')
    try:
        # Alternate receive/send until the transfer ends or the operator
        # types "quit".
        while True:
            if not recv_file(client_socket, DESTINATION):
                break
            print('File arrived')
            # Prompt until an existing file name (or "quit") is entered.
            while True:
                file_name = input('Enter file name or "quit" to quit: ')
                if file_name == 'quit':
                    break
                elif not os.path.exists(file_name):
                    print('Invalid file name, try again...')
                    continue
                else:
                    break
            if file_name == 'quit':
                break
            print('File sent')
            send_file(file_name, client_socket)
    except AttributeError:
        pass
    client_socket.close()
    server_socket.close()
| [
"red.extreme123456@gmail.com"
] | red.extreme123456@gmail.com |
7e93cf8234c7feb67186068121a3741c38d4401b | acb3e776cc16ce56286548c40086326671dfe6e9 | /Week5/app.py | 7a411b78f0f9dbef5b5051d92d18f80fb4848193 | [] | no_license | asherif844/machineLearningApps | f6f51184130870055f8e70a3d3d368c4ffecb82c | 3e7db36797dfdc7ebcc83270f059e767c1e66072 | refs/heads/master | 2022-11-24T08:49:55.624649 | 2020-01-23T15:22:20 | 2020-01-23T15:22:20 | 211,682,033 | 1 | 0 | null | 2022-11-22T02:40:35 | 2019-09-29T15:14:36 | Python | UTF-8 | Python | false | false | 486 | py | from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/send', methods= ['GET', 'POST'])
def send():
    # POST: echo the submitted age and gender back on age.html.
    # NOTE(review): the template variable is oddly named "jamaica" — it
    # carries the age value.
    if request.method == 'POST':
        age2 = request.form['age']
        gender2 = request.form['gender']
        return render_template('age.html', jamaica=age2, gender = gender2)
    return render_template('index.html')
@app.route('/')
def homepage():
    # Landing page.
    return render_template('homepage.html')
if __name__ == '__main__':
    app.run(debug=True)
"ahsherif@microsoft.com"
] | ahsherif@microsoft.com |
97fc8efdd37ac848777c754d0a0f562ef40e6043 | ec7002f525449dcd8eea8d9d440a6f852f0082f5 | /crowdfunding/projects/migrations/0015_project_is_open.py | ea164216cb7095786fa32e0bd35df29850b64a20 | [] | no_license | SamaraLove/drf | 26298b6dc9a1765469581a04113fd4faf7988271 | cf2aeebf396bbb683bca39246967d329ad6d0efd | refs/heads/master | 2022-12-27T05:10:20.810984 | 2020-10-07T17:09:49 | 2020-10-07T17:09:49 | 290,483,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # Generated by Django 3.0.8 on 2020-10-07 17:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Re-adds the is_open boolean to Project (it was removed by migration
    # 0014, per the dependency name below), defaulting to open.
    dependencies = [
        ('projects', '0014_remove_project_is_open'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='is_open',
            field=models.BooleanField(default=True),
        ),
    ]
| [
"samara.love@bhp.com"
] | samara.love@bhp.com |
04728dac8ac7baaf24256bca0d7bcd65d1fa24bd | ee244096ccf50cb55819c757d3a1d3ce0b180aec | /src/test/resources/PageObjectRepository/DEV4/MembershipPage.spec | 81bac25ca0fc9e1c4d1f76b9c75da1e554b1d1cc | [] | no_license | AlphaRahul16/netProject | db92ee1c0239b2f36d9bfbe23ff8a4c31b8f9856 | 27db560c979aaf7e9e8e6ccea6be378f030c6ec7 | refs/heads/master | 2021-05-14T10:52:50.856187 | 2018-01-05T19:44:32 | 2018-01-05T19:44:32 | 116,354,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,242 | spec | Page Title: MembershipPage
#Object Definitions
==============================================================================================================================================
hd_sideBar xpath //a[text()='${Query Membership}']
hd_sideBarOuter xpath //h3[text()='${sideBarName}']
hd_page xpath //span[@class='PageTitle']
list_existingQuery id SavedQueriesDropDownList
img_spinner css #__UPIMG
table_query id DisplayConditionsTable
btn_runQuery id SaveGoButton
img_firstInactiveRegularMember css #dgDynamicList > tbody > tr:nth-child(3) > td:nth-child(1) > a
link_randomMemberInList css #dgDynamicList > tbody > tr:nth-child(${randomNumber}) > td:nth-child(1) > a
link_customerName id F1_HYPERLINK_4
txt_memberStatus xpath //label[contains(text(),'member status')]/following-sibling::span
list_mbrStatus xpath //a[contains(text(),'Mbr Status')]/../../following-sibling::tr/td[6]
img_cross xpath //img[@title='${memberDetailName}']
btn_menuItems xpath //a[contains(@title,'${menuName}')]/i
img_orderEntry xpath //img[contains(@alt,'Order Entry')]
lnk_selectProduct id HYPERLINK_17
txt_menuItems id HYPERLINK_20
list_association id mbr_asn_code
list_memberType id mbr_mbt_key
list_memberPackage id mbr_pak_prd_key
list_jobTitle id mbr_ttl_key_ext
list_industry id mbr_sin_key_ext
inp_industryUpdateDate id mbr_sin_last_update_date_ext
inp_jobTitleUpdateDate id mbr_jfn_last_update_date_ext
btn_saveAndFinish id Bottom_0
txt_itemsAdded xpath //a[text()='${itemName}']
list_batch id inv_bat_key
list_PaymentType id inv_orig_trans_type
list_paymentMethod id pin_apm_key
inp_cardNumber id pin_cc_number
list_expireDate id pin_cc_expire
inp_cvvNumber id pin_cc_security_code
txt_rejoinDateForActive xpath //td[contains(text(),'Active')]/following-sibling::td[2]
img_ticked xpath //img[@id='F1_IMAGE_${index}']
list_billingAdd xpath //select[@id='inv_cxa_key']/option
btn_add id inv_cxa_key_oAddHyperLink
list_addressType id cxa_adt_key
chk_doNotValidate css #adr_no_validation_flag
inp_addressLine id adr_line1
inp_city id adr_city
inp_country id adr_county
list_state id adr_state
inp_postalCode id adr_post_code
inp_district id adr_county
inp_congressional id adr_cong_dist
inp_province id adr_intl_province
inp_mail id cxa_mail_stop
table_queryResult css #DataFormTable
btn_saveForBillingAdd id ButtonSave
table_lineItems id UPDATEGRIDCONTROL_DOD_Invoice_DetailGRIDDIV
frame_selectProduct id menu_a83665ae18eb43488c5d83ce5f6027f8
list_billAddress id inv_cxa_key
btn_add id inv_cxa_key_oAddHyperLink
list_addressType id cxa_adt_key
chk_doNotValidate css #adr_no_validation_flag
inp_addressLine id adr_line1
inp_city id adr_city
inp_country id adr_county
list_state id adr_state
inp_postalCode id adr_post_code
inp_district id adr_county
inp_congressional id adr_cong_dist
inp_province id adr_intl_province
inp_mail id cxa_mail_stop
btn_saveForBillingAdd id ButtonSave
table_lineItems id UPDATEGRIDCONTROL_DOD_Invoice_DetailGRIDDIV
frame_selectProduct id menu_a83665ae18eb43488c5d83ce5f6027f8
list_billAddress id inv_cxa_key
link_email id F1_HYPERLINK_0
txt_ContactId id F1_cst_id
txt_customerAddress id F1_cxa_mailing_label_html
txt_addressType id F1_cxa_adt_key_Display_Text_
label_listMemberShip id LabelDataFormHeader
list_memberStatus xpath //a[starts-with(text(),'Member Status')]/../../following-sibling::tr/td[10]
list_joindate xpath //td[contains(text(),'active')]/following-sibling::td[1]
txt_effectiveDateForActive xpath //td[starts-with(text(),'Active')]/following-sibling::td[3]
inp_enterDetails xpath //span[contains(text(),'${detailName}')]/../following-sibling::td/input
btn_go id ButtonSearch
btn_editContactInfo css #F1_HYPERLINK_1
inp_editEmail id eml_address
btn_editNameAndAddress css #F1_HYPERLINK_2
inp_firstName id ind_first_name
inp_lastName id ind_last_name
inp_middleName id ind_mid_name
txt_numberOfyears xpath //td[contains(text(),'Total Years of Service')]/following-sibling::td
btn_cancel id ButtonCancel
list_memberStatusRenewal id ValueDropDownList4
txt_renewalContactId id F1_cst_id
chk_advanceNew id ctl10
list_advanceNewDropDown xpath //span[contains(text(),'${headingName}')]/../following-sibling::td[1]/select
inp_advanceNewInput xpath //span[contains(text(),'${headingName}')]/../following-sibling::td[2]/input
list_advanceNewInput xpath //span[contains(text(),'${headingName}')]/../following-sibling::td[2]/select
link_subscriptionInSelectProduct id HYPERLINK_2
inp_prdCode id prd_code
inp_searchDisplayButton id Look_Up_prc_display_name
inp_displayName id prc_display_name
link_itemInLineItems xpath //table[@id='UPDATEGRIDCONTROL_DOD_Invoice_Detail_InternalUpdateGrid']//td[4]/a
lnk_pages xpath //tr[@class='pager']/td/a[${randomPages}]
txt_membershipProfileInfo xpath //label[contains(text(),'${memberInfo}:')]/following-sibling::span
txt_paymentStatus xpath //td[text()='Payment Status:']/following-sibling::td
txt_membershipProfileDetails xpath //label[contains(text(),'${memberInfo}')]/preceding-sibling::span
btn_memberShipSubInfo xpath //span[text()='${membershipName}']/preceding-sibling::a//i[starts-with(@class,'icon-chevron-')]
txt_productName xpath (//table[@class='table']//tr[2]/td[4])[1]
txt_invoiceId xpath (//table[@class='table']//tr[2]/td[12])[1]
txt_termStartDate xpath (//table[@class='table']//tr[2]/td[14])[1]
txt_termEndDate xpath (//table[@class='table']//tr[2]/td[15])[1]
txt_productNameOnPage xpath (//table[@class='table']//tr[3]/td[4])[1]
txt_invoiceIdOnPage xpath (//table[@class='table']//tr[3]/td[12])[1]
txt_termStartDateOnPage xpath (//table[@class='table']//tr[3]/td[14])[1]
txt_termEndDateOnPage xpath (//table[@class='table']//tr[3]/td[15])[1]
inp_customerId xpath //input[contains(@id,'QueryAsk')]
btn_askGo id ButtonAskGo
txt_recordNumberAtMemberQuery classname DataFormLabelRecordCount
txt_loadOnExistingQueryLabel id LoadQueryLabel
link_pagesAvailable classname DataFormChildDataGridPagerLink
lnk_invoice_number xpath //table[@id='dgDynamicList']/tbody/tr[not(@class)]/td[3][contains(text(),'${value}')]
lnk_first_invoice_number xpath (//table[@id='dgDynamicList']/tbody/tr[not(@class)]/td[3])[1]
txt_webLogin id F1_cst_web_login
btn_arrowRightCircle xpath (//i[@class='iconpro-circle-arrow-right'])[1]
link_tabsOnModule xpath //a[text()='${value}']
btn_tabs css .iconpad[src='../images/puzzle-icon.png']
inp_transactionDate id inv_trx_date
list_memberStatusInAddMembership id mbr_mbs_key
list_memberRenewalPackage id mbr_pak_prd_renewal_key
chk_complimentry id ACS_complimentary_checkbox
txt_totalPrice id mbr_total
list_complimentryRequest id ACS_complimentary_request_source
txt_priceOrderEntryLineItmes xpath //a[text()='${itemName}']/../following-sibling::td[9]/span
inp_sourceCode id mbr_src_code
list_chapter id mbr_chp_cst_key
link_invoiceListHeadings xpath //a[contains(text(),'${value}')]
link_addMemership xpath //a[text()='add membership']
chk_complimentry_Sub id ivd_complimentary_flag_ext
list_complimentryReq_Sub id ivd_a04_key_ext
txt_priceDetailsBelowLineItems id inv_${detailsName}
list_priceOrderEntryNetBalance xpath //*[contains(@id,'ivd_${netPriceName}')]
txt_memberType xpath //td[starts-with(text(),'${memberType}')]
txt_effectiveDateMemberType xpath //td[starts-with(text(),'${memberType}')]/following-sibling::td[4]
txt_expireDateMemberType xpath //td[starts-with(text(),'${memberType}')]/following-sibling::td[5]
txt_joinDateMemberType xpath //td[starts-with(text(),'${memberType}')]/following-sibling::td[2]
txt_effectiveDate_chapter xpath //td[starts-with(text(),'${memberType}')]/following-sibling::td[5]
txt_joinDate_chapter xpath //td[starts-with(text(),'${memberType}')]/following-sibling::td[3]
txt_expireDate_chapter xpath //td[starts-with(text(),'${memberType}')]/following-sibling::td[6]
btn_addBatch id inv_bat_key_oAddHyperLink
inp_addBatchName id bat_code
list_batchSecurityGroup id gsc_grp_key
drpdwn_memberType xpath //select[contains(@id,'QueryAsk')]
btn_detailsMenuAACT xpath //span[text()='${menuName}']/../a[1]
txt_termStartDaterenewal xpath (//th/a)[2]/../../following-sibling::tr[${rowNumber}]//td[14]
txt_termEndDaterenewal xpath (//th/a)[2]/../../following-sibling::tr[${rowNumber}]//td[15]
heading_queryAskAtRunTime xpath //span[text()='Query - Ask At Run-Time Values']
list_memberPackage1 css .DataFormDropDownList
btn_goPackage id ButtonAskGo
txt_memberInfo xpath //label[contains(text(),'${value}')]/following-sibling::span
btn_mydTransfer xpath //a[@id='F1_HYPERLINK_7']/img
txt_balanceAmount xpath //table[@id='TransferTable']//span
list_term id ord_ptr_key
list_newPackage id mbr_pak_prd_renewal_key
heading_transferPackage id ui-id-3
btn_transferNow id TransferMembershipButtonID
btn_gotorenewal xpath (//td[contains(text(),'Active Renewed-No Response')]/preceding-sibling::td[3]//i)[1]
txt_PaymentStatus xpath //td[contains(text(),'${productName}')]//following-sibling::td[1]
icon_up xpath //span[contains(text(),'${value}')]/preceding-sibling::a/i[@class='icon-chevron-up']
txt_productPackage xpath (//th/a)[2]/../../following-sibling::tr[1]//td[4]
| [
"A_Rawat@acs.org"
] | A_Rawat@acs.org |
7d695bf1f23deb8841ef7cd5a72adfc4b94ec1e7 | 98f6007aa57d6df27a61fbc513332a2beaf4883e | /Black Hat/TcpServer.py | ec3dc9b8a5aaa25f6d8a0672e8b59795d365f23c | [] | no_license | Juntaran/Python | bf73996aaccd4f81b8b01a61e8f934c469e9a3c0 | 3f070812d7c824cb082c162f72269e9c15e44643 | refs/heads/master | 2021-07-10T15:38:38.637914 | 2016-11-16T13:11:17 | 2016-11-16T13:11:17 | 57,065,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | import socket
import threading
bind_ip = "127.0.0.1"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
#最大连接数设置为5
server.listen(5)
print "[*] Listening on %s:%d" % (bind_ip, bind_port)
#客户处理线程
def handle_client(client_socket):
#打印客户端发送得到内容
request = client_socket.recv(1024)
print "[*] Received: %s" %request
#返还一个数据包
client_socket.send("ACK!")
client_socket.close()
while True:
client,addr = server.accept()
print "[*] Accept connection from: %s:%d" % (addr[0],addr[1])
#挂起客户端线程,处理传入的数据
client_handler = threading.Thread(target = handle_client, args = (client,))
client_handler.start() | [
"jacinthmail@gmail.com"
] | jacinthmail@gmail.com |
3db22d43778fbaf867ca5919c555e836ff02aa44 | 18f6708f6b42142f647d2f87b597147352b9a3eb | /leguess/eval/__init__.py | 544ea7a943b38d52d4dc376d2d7c7ecb8a72fd56 | [] | no_license | godpgf/leguess | a80acb272886ba3e2fee244bf30de4cc1ba1b44b | 66c75e31cac43ad5b874e995c74358aef5ad9821 | refs/heads/master | 2020-07-14T11:33:13.468119 | 2020-02-28T07:29:44 | 2020-02-28T07:29:44 | 205,310,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,445 | py | from leguess.db import *
from .hr import HR
import numpy as np
def eval_recm(hdfs_db, user_profile_db, test_act_list_path, recm_call_back, top_list, pred_act_type="download", pred_channel="jingxuan", exclude_act_type_list=["show"], offline_time=1*60*60, filter_item_set=None):
# 评估召回和排序的命中率---------------------------------------------------------------
offline_match_hr = HR(top_list)
offline_rank_hr = HR([top_list[0]])
online_match_hr = HR(top_list)
online_rank_hr = HR([top_list[0]])
default_match_hr = HR([top_list[0]])
# 读取测试数据
def _eval_recm(user, act_list, channel_list, org_list, timestamp_list):
item_size = 0
for act, channel in zip(act_list, channel_list):
if act.split('@')[1] == pred_act_type and channel == pred_channel and (filter_item_set is None or act.split("@")[0] in filter_item_set):
item_size += 1
for act, channel, org, timestamp in zip(act_list, channel_list, org_list, timestamp_list):
if act.split('@')[1] == pred_act_type and channel == pred_channel and (filter_item_set is None or act.split("@")[0] in filter_item_set):
# 开始召回
act_list, channel_list, org_list, timestamp_list = user_profile_db.get_act_list(user)
tag_list = user_profile_db.get_tag_list(user)
if act_list is None or len(act_list) == 0:
match_item, rank_percent = recm_call_back(user, act_list, channel_list, org_list, timestamp_list,
tag_list, timestamp, True)
default_match_hr.add_sample(match_item, act.split("@")[0], 1.0/item_size)
else:
if timestamp - timestamp_list[-1] > offline_time:
match_item, rank_percent = recm_call_back(user, act_list, channel_list, org_list,
timestamp_list,
tag_list, timestamp, True)
offline_match_hr.add_sample(match_item, act.split("@")[0], 1.0/item_size)
ids = np.argsort(-rank_percent)
offline_rank_hr.add_sample(match_item[ids], act.split("@")[0], 1.0 / item_size)
else:
match_item, rank_percent = recm_call_back(user, act_list, channel_list, org_list,
timestamp_list,
tag_list, timestamp, False)
online_match_hr.add_sample(match_item, act.split("@")[0], 1.0/item_size)
ids = np.argsort(-rank_percent)
online_rank_hr.add_sample(match_item[ids], act.split("@")[0], 1.0 / item_size)
user_profile_db.push_act(user, act, channel, org, timestamp)
hdfs_db.read_user_act_list(test_act_list_path, _eval_recm, exclude_act_type_list=exclude_act_type_list)
print("match[default,offline,online]------------------------------------------")
default_match_hr.print_eval()
offline_match_hr.print_eval()
online_match_hr.print_eval()
print("rank[offline,online]-------------------------------------------")
offline_rank_hr.print_eval()
online_rank_hr.print_eval()
| [
"yanyu4@Lenovo.com"
] | yanyu4@Lenovo.com |
da28ee6be543449127c1b128c2ce343dac71c3b9 | 3e59df4e7fd5e30e541175c4ebdf0f7dcce569f3 | /Analysis_Tools/Parsers/Current/Game_Predictor.py | e53980a7c312d8890bf0c6fceb7ed374f00b4e83 | [] | no_license | BrianGoodman95/NFL-Game-Predictor-V1 | 0fde6c3edb762fae3f4578e12ee5161ee3aed3bf | dd98a92a6d772e4487b5d650ef4356dca9cd7e1e | refs/heads/master | 2023-01-22T16:39:43.370694 | 2020-12-06T05:24:20 | 2020-12-06T05:24:20 | 262,611,954 | 0 | 0 | null | 2020-12-06T05:24:21 | 2020-05-09T16:27:06 | HTML | UTF-8 | Python | false | false | 13,699 | py | import pandas as pd
import time
import os
import requests
# from Analysis_Tools.Parsers import Latest_Data_Processor
try:
from Parsers.Current import Prediction_Helper
from importlib import reload
reload(Prediction_Helper)
except:
from Analysis_Tools.Parsers.Current import Prediction_Helper
'''
Goal is to collect all the data from the collection dictionary for the current week, save it, then calculate the data for the calculation dictionary, save it then append it to the total from previous weeks for a total season results
Each Week starting from week 5, will collect the data, save it, calculate the other data, save it and append to any previous saved data and save it as total
Weeks = [i for in range(min week, current_week+1)]
For week in weeks:
COLLECTION DATA
**From gamecollection.thisweekparsing / thisweekstats
Get schedule
Get This week teams and opponents
Get Home/Away, Short Week, Bye Week
If week < current week
Get Results (points for winner - points for loser)
**From gamecollection.betting_parsing:
Get Spreads
**from dvoa_collector.?
Get WDVOA stats
Save Data for the week
CALCULATION DATA
**From dvoacollector.setupmap
Get Map
Interpolate EGO
Make Prediction
Calculate EGO/Spread
If week < current week
Calculate EGO/Result Diff
Evaluate if prediction correct
Save Data for the week
If week > min week
Read previous Season Data
Append new data to read data
Save total data for season
'''
class NFL_Game_Predictor():
def __init__(self, project_path, current_week=16, current_season=2020, updateType='Season', Enable_Messaging=False):
self.time = time
self.Read_Previous_Data = True
self.Enable_Messaging = Enable_Messaging
#Dictionary of raw stats to collect
self.Collect_Data_Dict = {
'Team':[], 'Opponent':[], 'Week':[], 'Year':[], 'Home Team':[], 'WDVOA':[], 'Spread':[], 'Result':[]
}
#Dictionary of data to calculate
self.Calculate_Data_Dict = {
'EGO':[], 'EGO_Spread_Diff':[], 'EGO_Result_Diff': [], 'Correct':[]
}
#Collection parameters
min_season=current_season
self.min_week = 6
self.current_week = current_week
print(updateType)
if updateType == 'Historical':
Update_Weeks = [w for w in range(self.min_week,16)]
min_season = 2006
elif updateType == 'Season':
Update_Weeks = [w for w in range(self.min_week,self.current_week+1)]
elif updateType == 'Week':
Update_Weeks = [current_week-1, current_week]
else:
print('INVALID VALUE IN FIELD UPDATETYPE')
return
for self.season in range(min_season,current_season+1):
if self.season <= 2019: #Get weeks to go through for Past seasons
self.season_weeks = [w for w in range(self.min_week,16)]
else: #Weeks to go through for this season
self.season_weeks = [w for w in range(self.min_week,self.current_week+1)]
if updateType == 'Historical':
Update_Weeks = self.season_weeks
#Lists and DFs
self.Week_DF = pd.DataFrame()
self.All_Weeks_DFs = []
#Make save locations
self.project_path = project_path
self.raw_data_path = f'{project_path}/Raw Data/DVOA_Based/{self.season}'
self.Make_Folder(self.raw_data_path)
#Update any Data Needed
for week in Update_Weeks:
print(f'Analyzing {self.season}, Week: {week} Games ...')
#Get the Raw Data we need
self.Raw_Game_Data = self.Get_Game_Info(self.raw_data_path, week)
self.Processed_Game_Data = self.Process_Game_Info(self.raw_data_path, week, self.Raw_Game_Data)
self.Calculated_Game_Data = self.Calculate_Game_Info(self.raw_data_path, week, self.Processed_Game_Data)
self.Spread_Targets = self.Picking_Info(self.raw_data_path, week, self.Calculated_Game_Data)
#Save Final Copy for further analysis
week_dfs = []
for week in self.season_weeks:
df = pd.read_csv(f'{self.raw_data_path}/Week {week}/Calculated Game Data.csv')
week_dfs.append(df)
df = pd.concat(week_dfs) #Concat the list of dfs into a season df
df.to_csv(f'{self.raw_data_path}/Season Game Data.csv', index=False)
#Save Final Copy for further analysis
season_dfs = []
for season in range(min_season,self.season+1):
season_df = pd.read_csv(f'{project_path}/Raw Data/DVOA_Based/{season}/Season Game Data.csv')
season_dfs.append(season_df)
season_df = pd.concat(season_dfs) #Concat the list of dfs into a season df
season_df.to_csv(f'{project_path}/Raw Data/DVOA_Based/All Game Data.csv', index=False)
def Make_Folder(self, new_path):
data_exists = False
try:
os.mkdir(new_path)
except:
# print('folder already exists')
files = os.listdir(new_path)
if len(files) > 1:
data_exists = True
return data_exists
def Save_DF(self, df, path_name):
df.to_csv(path_name, index=False)
def Concat_and_Save(self, df_list, path):
df = pd.concat(df_list) #Concat the list of dfs into a season df
df.to_csv(path, index=False)
return df
def User_Message(self, message, sleep=1):
if self.Enable_Messaging:
print(message)
time.sleep(sleep)
def Picking_Info(self, raw_data_path, week, df):
#Save the df passed in for picking after
new_picks_df = df
self.User_Message('Determining Spread Targets For Each Game ...')
#Output the target spread range for each game
#sort by date so output is better
df = df.sort_values(by=['Day', 'Time'], ascending=[0,1])
#Only keep the Away Teams' rows
df = df[df.Home_Team == 0]
predictionDF = pd.DataFrame()
Teams = list(df['Team'])
Opponents = list(df['Opponent'])
Games = [f'{Teams[x]} @ {Opponents[x]}' for x in range(len(Teams))]
predictionDF['Game'] = Games
target_spreads = list(df['Target Spreads'])
refined_spreads = []
spreads = list(df['Betting Spread'])
for game in range(len(spreads)):
spread = spreads[game]
targets = target_spreads[game]
closest_target = 100 #some big number
refined_spreads.append('') #Add a placeholder for now
for target_range in targets:
for t in target_range: #Check each spread value there
target_to_spread_diff = abs(float(t)-float(spread))
if target_to_spread_diff < closest_target: #If closer to actual spread than last one, replace the target range!
closest_target = target_to_spread_diff
refined_spreads[game] = target_range
for pos, s in enumerate(spreads):
spreads[pos]=float(s)
predictionDF['EGO'] = list(df['EGO'])
predictionDF['Spread'] = spreads
predictionDF['Spread Target'] = refined_spreads
predictionDF['Pick'] = list(df['Pick'])
# print(predictionDF)
self.Save_DF(predictionDF, f'{raw_data_path}/Week {week}/Spread Targets.csv')
self.User_Message('Making Picks ...')
time.sleep(0.5)
#Get the Picks for the week
self.picks = list(set(list(df['Pick'])))
self.picks.remove("")
# print(self.picks)
time.sleep(1)
#Add the data for picks made for this week
All_Picks = []
try:
pick_df = pd.read_csv(f'{raw_data_path}/Week {week}/Picks.csv')
prev_picks = list(pick_df['Pick'])
except:
pick_df = new_picks_df
prev_picks = list(new_picks_df['Pick'])
All_Picks.append(pick_df)
# new_picks_df = new_picks_df.loc[new_picks_df['Team'].isin(self.picks)] #Keep data for games we're picking only
for team in self.picks: #For each team we've picked
if team in prev_picks: #Check if our picks already have that team
pass
else:
new_pick_df = new_picks_df.loc[new_picks_df['Team'] == team] #Save this pick
All_Picks.append(new_pick_df)
All_Picks_DF = pd.concat(All_Picks)
self.Save_DF(All_Picks_DF, f'{raw_data_path}/Week {week}/Weekly Picks.csv')
#Later, if week not current one, evaluate if Pick Right or Wrong
#Even more later, make fancier visual of current week predictions
#Even more later, make analysis showing season stats of picking
return predictionDF
def Calculate_Game_Info(self, raw_data_path, week, df):
#Get the Map
model_path = self.project_path.split('/Data')[0] + '/Models'
self.Map_DF = pd.read_csv(f'{model_path}/All Seasons Scores Grouped By WDVOA Diff.csv')
#Setup the Helper
EGO_Analyzer = Prediction_Helper.EGO_Prediction(self.project_path, self.Map_DF)
#Get the Map/Model
self.Map = EGO_Analyzer.Map
#Calculate what we need
self.Calculated_Data = EGO_Analyzer.Calculate_Data(df)
#Add to the DataFrame
for key, val in self.Calculated_Data.items():
df[key] = val
self.Save_DF(df, f'{raw_data_path}/Week {week}/Calculated Game Data.csv')
human_df = df.copy()
#Make Human Readable Version
self.User_Message(f'Summarizing Game Data For Week {week} ...')
Teams = list(df['Team'])
Opponents = list(df['Opponent'])
Games = [f'{Teams[x]} @ {Opponents[x]}' for x in range(len(Teams))]
human_df['Team'] = Games
human_df = human_df[human_df.Home_Team == 0]
human_df = human_df.rename(columns={"Team": "Game"})
del human_df['Opponent']
del human_df['Home_Team']
# print(human_df)
time.sleep(1)
self.Save_DF(human_df, f'{raw_data_path}/Week {week}/Final Game Data.csv')
return df
def Process_Game_Info(self, raw_data_path, week, df):
#Add Season to the Raw DF
df['Season'] = [f'{self.season}' for i in range(len(list(df['Team'])))]
#Get Vicotry Margin from the PtsW and ptsL cols
margins = []
for game in range(0,len(list(df['Points For']))): #For each game in the df
try:
margins.append(int(list(df['Points For'])[game]) - int(list(df['Points Against'])[game]))
except ValueError: #Nans since games not played yet
margins.append(0)
df['Scoring Margin'] = margins
#Make Spread to Result Diff Column
srds = []
for game in range(0,len(margins)):
srds.append(margins[game] + float(list(df['Betting Spread'])[game]))
df['SRD'] = srds
#Delete Date, Unnamed: 7, PtsW, PtsL, YdsW, TOW, YdsL, TOL
dropCols = ['Date', 'Unnamed: 7', 'Points For', 'Points Against', 'YdsW', 'TOW', 'YdsL', 'TOL']
for col in dropCols:
df.drop(f'{col}', axis=1, inplace=True)
#Re-Order Columns - Put Season column to begining
cols = list(df)
cols = cols[-3:-2] + cols[:-3] + cols[-2:]
df = df[cols]
self.Save_DF(df, f'{raw_data_path}/Week {week}/Processed Game Data.csv')
return df
def Get_Game_Info(self, raw_data_path, week):
#Make the folder for data
week_path = f'{raw_data_path}/Week {week}'
self.Make_Folder(week_path)
#WDVOA DATA
self.User_Message(f'Retrieving WDVOA Data for week {week} ...')
try: #IF CAN READ PREVIOUSLY SAVED DVOA DATA
# if week == 12:
# WDVOA_DF = pd.read_csv(f'{raw_data_path}/Week {week-1}/DVOA Data.csv')
# else:
WDVOA_DF = pd.read_csv(f'{week_path}/DVOA Data.csv')
# self.Save_DF(WDVOA_DF, f'{week_path}/DVOA Data.csv')
except: #IF NEED TO GET NEW DVOA DATA
game_info_collector = Prediction_Helper.Game_Info_Parser(week, self.season)
WDVOA_DF = game_info_collector.WDVOA_DF
# print(WDVOA_DF)
self.Save_DF(WDVOA_DF, f'{week_path}/DVOA Data.csv')
#Schedule+Spread DATA
self.User_Message(f'Retrieving Scheudle and Scores for week {week} ...')
#Get the schedule for the week
game_info_collector = Prediction_Helper.Game_Info_Parser(week, self.season)
Week_DF = game_info_collector.Week_Sched_DF
# print(Week_DF)
#Get the spreads for the week
self.User_Message(f'Retrieving Spreads for week {week} ...')
spread_collector = Prediction_Helper.Spread_Parser(week, self.current_week, raw_data_path)
Spread_DF = spread_collector.parser_df
# print(Spread_DF)
# Update Names of Teams to match the WDVOA team names
self.User_Message(f'Combining Retrieved Data ...')
raw_dfs = [WDVOA_DF, Week_DF, Spread_DF]
team_matcher = Prediction_Helper.Team_Matching(raw_data_path, raw_dfs)
Combined_Raw_DF = team_matcher.Combined_Raw_DF
Game_Info_DF = Combined_Raw_DF
self.Save_DF(Game_Info_DF, f'{week_path}/Raw Game Data.csv')
# print(Game_Info_DF)
return Game_Info_DF
# NFL_DATA = NFL_Game_Predictor(latest_week)
| [
"bgoodman1995@gmail.com"
] | bgoodman1995@gmail.com |
4eaa041d0256e39539ed200f4a816597c3d3edad | ccbb5c8f53448af1a4721dbbfd06fc1ee72f58a9 | /setup.py | 4d756203887506c35187f9a7a08ac108a4b197af | [
"BSD-2-Clause"
] | permissive | jorisvandenbossche/spatialpandas | ed7c05e2d3e2c2223fdcbeaa78279edf200c5a80 | b63ebe619b8b8692fe282662725d23a50007acd9 | refs/heads/master | 2020-09-07T22:55:03.581677 | 2019-11-04T22:50:48 | 2019-11-04T22:50:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from setuptools import setup, find_packages
setup(name='spatialpandas',
packages=find_packages(exclude=('tests',)),
install_requires=['pandas', 'dask', 'numba', 'numpy'],
tests_require=['pytest', 'hypothesis'])
| [
"jon.mease@gmail.com"
] | jon.mease@gmail.com |
4656f2feeb7c89ebd8df602f1b94004133bf7360 | 0be92b186c2b72238d421dd48fb636e946c504c4 | /learning_templates/basic_app/urls.py | bb81c751224399d04cb0fc8f788f9a1a4156bd2b | [] | no_license | Dsignmind/django_templatesandfilters | 9077dde525078db951dcffa98a0104449957825e | 52753f8e5fc573ba0b4cc0546a17af0e089a7d85 | refs/heads/master | 2020-03-15T02:31:31.207269 | 2018-05-03T00:28:31 | 2018-05-03T00:28:31 | 131,920,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from django.urls import path
from basic_app import views
#Template tagging
app_name = 'basic_app'
urlpatterns = [
path('relative/', views.relative, name='relative'),
path('other/', views.other, name='other'),
] | [
"dsignmind@gmail.com"
] | dsignmind@gmail.com |
24e78f3d711a2a56ad26099cf4a41d3680d679cf | 5393cfde9689ba3d0ae286aa7058f0726f523502 | /not_used/SMC_supreme/distribution/poisson.py | aeee80cb4bcca9ff6fe83ddd93cb0f5083cf1aa7 | [] | no_license | amoretti86/PSVO | ee8fffa7c178390d894e658e83dc42f8043dadb0 | ef71cb518073707a68de08ac89ddf21d2d978af4 | refs/heads/master | 2021-08-18T09:57:45.477907 | 2020-04-02T17:15:52 | 2020-04-02T17:15:52 | 151,204,035 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | py | import numpy as np
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from distribution.base import distribution
class poisson(distribution):
# multivariate poisson distribution
def __init__(self, transformation):
self.transformation = transformation
def sample(self, Input):
assert isinstance(Input, np.ndarray), "Input for poisson must be np.ndarray, {} is given".format(type(Input))
def safe_softplus(x, limit=30):
x[x < limit] = np.log(1.0 + np.exp(x[x < limit]))
return x
lambdas = safe_softplus(self.transformation.transform(Input))
return np.random.poisson(lambdas)
class tf_poisson(distribution):
# multivariate poisson distribution, can only be used as emission distribution
def __init__(self, transformation, name='tf_poisson'):
self.transformation = transformation
self.name = name
def get_poisson(self, Input):
with tf.variable_scope(self.name):
lambdas, _ = self.transformation.transform(Input)
lambdas = tf.nn.softplus(lambdas) + 1e-6
poisson = tfd.MultivariateNormalDiag(lambdas,
validate_args=True,
allow_nan_stats=False)
return poisson
def log_prob(self, Input, output, name=None):
poisson = self.get_poisson(Input)
with tf.variable_scope(name or self.name):
return poisson.log_prob(output)
def mean(self, Input, name=None):
poisson = self.get_poisson(Input)
with tf.variable_scope(name or self.name):
return poisson.mean()
| [
"zzwang@umich.edu"
] | zzwang@umich.edu |
7fb0fdcff4227dc74d10c6bffc07eb836805e31f | f2889a13368b59d8b82f7def1a31a6277b6518b7 | /661.py | 020da6437c8f01ec4483274e593f6429a27683c4 | [] | no_license | htl1126/leetcode | dacde03de5c9c967e527c4c3b29a4547154e11b3 | c33559dc5e0bf6879bb3462ab65a9446a66d19f6 | refs/heads/master | 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | class Solution:
def imageSmoother(self, img: List[List[int]]) -> List[List[int]]:
r, c = len(img), len(img[0])
ans = [[0] * c for _ in range(r)]
for i in range(r):
for j in range(c):
t = size = 0
for x in range(-1, 2, 1):
for y in range(-1, 2, 1):
if 0 <= i + x < r and 0 <= j + y < c:
t += img[i + x][j + y]
size += 1
ans[i][j] = t // size
return ans
| [
"tlhuang@tlhuang.net"
] | tlhuang@tlhuang.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.