blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6251d68f138a9b14476759cfdce97fd773872ec8 | 12123592a54c4f292ed6a8df4bcc0df33e082206 | /py3/pgms/sec8/Extend/ctypes/convs.py | 616830fa1e5821bc2a04d9cde8ded2752fa0ab67 | [] | no_license | alvinooo/advpython | b44b7322915f832c8dce72fe63ae6ac7c99ef3d4 | df95e06fd7ba11b0d2329f4b113863a9c866fbae | refs/heads/master | 2021-01-23T01:17:22.487514 | 2017-05-30T17:51:47 | 2017-05-30T17:51:47 | 92,860,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | #!/usr/bin/env python3
# convs.py - ctypes argument/return type conversions demo.
# Shows how to declare a C function's signature so ctypes marshals
# Python floats to C doubles instead of the default int handling.
from ctypes import *
# Load the shared library from the current working directory
# (assumes mylib.so was built beforehand -- TODO confirm build step).
mylib = CDLL("./mylib.so")
# C prototype: double mult(double, double)
mylib.mult.argtypes = (c_double, c_double)
mylib.mult.restype = c_double
# call C mult() function; without the declarations above the result
# would be truncated/misinterpreted as an int
print(mylib.mult(2.5, 3.5))
#####################################
#
# $ convs.py
# 8.75
#
| [
"alvin.heng@teradata.com"
] | alvin.heng@teradata.com |
7f6063443980fa7eed25912a3dd0b1c477be9440 | 9940a37b520a57ed00e85e2d54991b117ce1a646 | /leetcode/python/binary_tree_postorder_traversal.py | 0dd3ecf67a7204502c52faa593b7ddf21fa20923 | [] | no_license | rioshen/Problems | e36e528f49912ff233f5f5052a7ca8ac95be4f0a | 1269b05a51e834e620d0adf4c3a10fe1a917b458 | refs/heads/master | 2020-12-03T19:37:38.281260 | 2015-04-09T17:46:59 | 2015-04-09T17:46:59 | 29,544,782 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | #!/usr/bin/env python
class Solution:
def postorderTraversal(self, root):
if not root:
return []
result, stack = [], []
prev, curr = None, root
while curr or stack:
if curr:
stack.append(curr)
curr = curr.left
else:
parent = stack[-1]
if parent not in (prev, None):
result.append(parent.val)
curr = parent.right
else:
| [
"rioxshen@gmail.com"
] | rioxshen@gmail.com |
49377535fc22c5ee93cad59f6aa76251d7a6bdee | 451c79cfe8cbf4a9e4301c8b8c8c4c99768274c1 | /posenet_pytorch_evaluate.py | d0b7a83e5d6017047b66c3a55a5b58e5cb4a9eeb | [] | no_license | wang422003/Code_Master_Thesis | 0662f41e24cb169b2e62215898037cae70e37cef | 62735f606b2220003b5d1aed0cb2f1330c4453d7 | refs/heads/master | 2022-04-17T18:43:41.462140 | 2020-04-16T14:31:52 | 2020-04-16T14:31:52 | 256,230,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,481 | py | import os
import argparse
from pytorch_data import get_loader
from posenet_torch_solver import SolverPoseNetEvaluate
from torch.backends import cudnn
def main(config):
    """Entry point: build the data loader, then train or evaluate.

    Args:
        config: argparse.Namespace with the options defined in __main__.
    """
    cudnn.benchmark = True  # let cuDNN auto-tune kernels for fixed input sizes
    loader = get_loader(config.model, config.image_path, config.metadata_path,
                        config.mode, config.batch_size, config.shuffle)
    solver = SolverPoseNetEvaluate(loader, config)
    if config.mode == 'train':
        solver.train()
    elif config.mode == 'test':
        solver.evaluate()
def str2bool(value):
    """Parse a command-line boolean.

    `type=bool` is a classic argparse pitfall: bool('False') is True, so any
    non-empty string (including 'False') used to enable these flags. This
    converter accepts the usual spellings of true/false explicitly.

    Raises:
        argparse.ArgumentTypeError: for unrecognized spellings.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got {!r}'.format(value))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='test', choices=['train', 'test'])
    # boolean flags use str2bool: with type=bool, "--bayesian False" parsed as True
    parser.add_argument('--bayesian', type=str2bool, default=False, help='Bayesian Posenet, True or False')
    parser.add_argument('--sequential_mode', type=str, default=None,
                        choices=[None, 'model', 'fixed_weight', 'batch_size', 'learning_rate', 'beta'])
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--sx', type=float, default=0.0)
    parser.add_argument('--sq', type=float, default=0.0)
    parser.add_argument('--learn_beta', type=str2bool, default=False)
    parser.add_argument('--dropout_rate', type=float, default=0.5, help='range 0.0 to 1.0')
    parser.add_argument('--shuffle', type=str2bool, default=True)
    parser.add_argument('--fixed_weight', type=str2bool, default=False)
    parser.add_argument('--model', type=str, default='Resnet', choices=['Googlenet', 'Resnet'])
    parser.add_argument('--pretrained_model', type=str, default=None)
    # parser.add_argument('--image_path', type=str, default='/mnt/data2/image_based_localization/posenet/KingsCollege')
    # parser.add_argument('--image_path', type=str, default='/mnt/data2/complex_urban/urban08')
    parser.add_argument('--image_path', type=str, default='/mnt/data2/NCLT')
    # parser.add_argument('--metadata_path', type=str,
    #                     default='/mnt/data2/image_based_localization/posenet/KingsCollege/dataset_test.txt')
    parser.add_argument('--metadata_path', type=str, default='/mnt/data2/NCLT/test.txt')
    # parser.add_argument('--metadata_path', type=str,
    #                     default='/mnt/data2/complex_urban/urban08/image_convert/test.txt')
    # Training settings
    parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 1 2 3')
    # parser.add_argument('--dataset', type=str, default='Oxford', choices=['NCLT', 'VKITTI', 'Oxford', 'QUT'])
    parser.add_argument('--num_epochs', type=int, default=80)
    parser.add_argument('--num_epochs_decay', type=int, default=10)
    parser.add_argument('--num_iters', type=int, default=200000)  # 200000
    parser.add_argument('--num_iters_decay', type=int, default=100000)
    parser.add_argument('--batch_size', type=int, default=3)  # 16
    parser.add_argument('--num_workers', type=int, default=1)
    # Test settings
    parser.add_argument('--test_model', type=str, default='99')
    parser.add_argument('--save_result', type=str2bool, default=True)
    # Misc
    parser.add_argument('--use_tensorboard', type=str2bool, default=True)
    # Step size
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--sample_step', type=int, default=1000)
    parser.add_argument('--model_save_step', type=int, default=1000)
    config = parser.parse_args()
    main(config)
    print("Evaluation is finished!")
| [
"noreply@github.com"
] | wang422003.noreply@github.com |
627323e27a09dc5c117409415c9142032f3337b7 | bbf0f7cc8afd4d8de241211617051923d540e701 | /Licenta2020CrivoiAndrei/Piano Follower/PianoManagerToplevel.py | 4adbe94d5c44342bb36ff34349d2a46a88f6d352 | [] | no_license | Crivoi/PianoFollower | f1c0e11a9cd7a23ec11018676ebadd9fce4fb2b1 | 90601bef5969f188d9d53fdceaa061147233f786 | refs/heads/master | 2022-03-31T18:56:40.816825 | 2020-02-12T16:39:58 | 2020-02-12T16:39:58 | 238,552,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | try:
import Tkinter as tk
import TkFileDialog
except ImportError:
import tkinter as tk
from tkinter import filedialog
from DefaultToplevel import DefaultToplevel
from MidiManager import MidiManager
class PianoManagerToplevel(DefaultToplevel):
    """Toplevel window offering two conversions of the recorded MIDI data:
    export to a .mid file and conversion of a .mid file to a score stream.
    """

    def __init__(self, parent):
        # MidiManager wraps the raw recording; created before the base
        # class builds the UI (DefaultToplevel presumably calls init_ui --
        # TODO confirm).
        self.midi_object = MidiManager('../recordings/midi_rec.txt')
        super().__init__(parent)

    def init_ui(self):
        """Build the two-button menu bar."""
        menu_frame = tk.Frame(self, bd=3, bg='darkgray', height=self.height, width=self.width)
        menu_frame.place(x=0, y=0)
        midi_btn = tk.Button(menu_frame, text='Convert to Midi File', bd=2, command=lambda: self.convert_to_midi())
        score_btn = tk.Button(menu_frame, text='Convert to Score File', bd=2, command=lambda: self.convert_to_score())
        midi_btn.place(relx=0, relwidth=1.0 / 2.0, relheight=1.0)
        score_btn.place(relx=1.0 / 2.0, relwidth=1.0 / 2.0, relheight=1.0)

    def convert_to_midi(self):
        """Ask for a destination path and save the recording as a MIDI file."""
        self.midi_object.midi_msg_to_mido_msg()
        file = filedialog.asksaveasfile(initialdir="/", title="Save file",
                                        filetypes=(('midi files', '*.mid'),
                                                   ('all files', '*.*')))
        # asksaveasfile returns None when the user cancels the dialog;
        # the old code then crashed on file.name.
        if file is None:
            return
        self.midi_object.save_midi(file.name)

    def convert_to_score(self):
        """Ask for a MIDI file and convert it into a music stream."""
        file_path = filedialog.askopenfilename(initialdir='/', title='Open Midi',
                                               filetypes=(('midi files', '*.mid'),
                                                          ('all files', '*.*')))
        # askopenfilename returns '' on cancel; skip conversion in that case.
        if not file_path:
            return
        self.midi_object.convert_midi_to_stream(file_path)
| [
"andrei.crivoi1997@gmail.com"
] | andrei.crivoi1997@gmail.com |
4dd4ef0331f98f983cbcd659ff8e1be4bc3723c5 | f382030d3e054490c46da8e6a2a86c3f13149cfb | /Test/Profile/views.py | 9cc36837f5b8a7537160d20e7a7b36544613373c | [] | no_license | Astily/TestTask_Django | 048c2c4c48daacfbf93f39ce74b57833e3538ba2 | ccaf2d6703b7a6a86afe7169c12ad1f62e0b8fbd | refs/heads/master | 2020-04-03T17:13:23.593912 | 2018-10-31T14:54:34 | 2018-10-31T14:54:34 | 155,435,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.http import Http404
from django.urls import reverse
from .forms import CustomUserForm
from .models import CustomUser, IpUsers
def index(request):
    """Placeholder landing view for this app section."""
    message = "You're at the polls index. Section in development"
    return HttpResponse(message)
def profile(request, profile_id):
    """Render the public profile page for the given user id.

    Raises Http404 when no CustomUser with that pk exists. Uses the
    already-imported get_object_or_404 shortcut instead of the previous
    hand-rolled try/except, keeping the module's import list consistent.
    """
    profile_obj = get_object_or_404(CustomUser, pk=profile_id)
    return render(request, 'profile/profile.html',
                  {'profile': profile_obj, 'edit': reverse('edit')})
@login_required
def edit_profile(request):
    """Let the logged-in user edit their own profile.

    On a valid POST, records the client IP alongside the user and
    redirects back to the profile page; otherwise re-renders the form.
    """
    current_user = request.user
    if request.method != "POST":
        # GET: show the form pre-filled with the current profile data.
        return render(request, 'profile/edit.html',
                      {'form': CustomUserForm(instance=current_user)})
    form = CustomUserForm(request.POST, instance=current_user)
    if form.is_valid():
        post = form.save(commit=False)
        # NOTE(review): REMOTE_ADDR is preferred over X-Forwarded-For here;
        # behind a proxy that yields the proxy's address -- confirm intent.
        ip = request.META.get('REMOTE_ADDR', '') or request.META.get('HTTP_X_FORWARDED_FOR', '')
        user_ip = IpUsers(user=post, ip=ip)
        user_ip.save()
        post.save()
        return redirect('profile', profile_id=current_user.pk)
    # Invalid POST: re-render with the bound form so errors are shown.
    return render(request, 'profile/edit.html', {'form': form})
def home(request):
    """Send authenticated users to their profile page, others to login."""
    if not request.user.is_authenticated:
        return redirect('login')
    return redirect('profile', profile_id=request.user.pk)
| [
"nikita@novikoff.pp.ua"
] | nikita@novikoff.pp.ua |
86b6e7d3332dc5f4925a2df4a4b96fb09ce525fd | 5eb8c1f285837846a8fad3c08b7e9d89019ea2f8 | /character.py | ff633ed151b34f4aa96727b54fc995962db94d70 | [
"MIT"
] | permissive | RitikShah/hopper | cdd9e516eae39bd3d078aa30055433e32d384528 | 85503f9fae11124d40a7c1433102a7bdf56ba420 | refs/heads/master | 2020-03-07T21:06:07.870001 | 2018-04-02T18:33:58 | 2018-04-02T18:34:26 | 127,717,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | from color import *
import random
from entity import *
import pygame
class Character(Entity):
def __init__(self, posx, posy):
super().__init__(posx, posy, 5)
self.grav = 1
self.xspeed = 5
self.floorlevel = 100
self.jumpheight = 15
self.jumps = 0
self.maxjumps = 1
self.spacehold = False
def update(self, key):
# Key Listener
if key[pygame.K_SPACE] and not(self.spacehold):
if self.pos['y'] == winh-self.floorlevel: # Ground Level
self.jumps = self.maxjumps
self.velocity['y'] = -self.jumpheight
self.spacehold = True
elif self.jumps > 0: # Double Jump
self.jumps -= 1
self.velocity['y'] = -self.jumpheight
self.spacehold = True
elif not(key[pygame.K_SPACE]):
self.spacehold = False
if key[pygame.K_LEFT]:
self.pos['x'] -= self.xspeed
elif key[pygame.K_RIGHT]:
self.pos['x'] += self.xspeed
if self.pos['x'] > winw-self.size['width']:
self.pos['x'] = winw-self.size['width']
if self.pos['x'] < 0:
self.pos['x'] = 0
# Gravity
self.pos['y'] += self.velocity['y']
self.velocity['y'] += self.grav
if self.pos['y'] > winh-self.floorlevel:
self.pos['y'] = winh-self.floorlevel
self.velocity['y'] = 0
super().update()
def reset(self, posx, posy):
self.__init__(posx, posy)
| [
"shah10.ritik@gmail.com"
] | shah10.ritik@gmail.com |
d710f047198a283c91cddbb86877ac0c4ecc62e7 | 4b670f0efaadb56b1770bc387e07a6d1439ab1b6 | /GUI/PlottingWidgets.py | cc8685b662ee49a0ffb775f3eac16877a05bb679 | [] | no_license | xyt556/Scattering_Analysis | f2e8e18fb353a36b73ec7792428c3ac9e91152de | 4a98b34ec8ac122da5077e08432782c591857f56 | refs/heads/master | 2021-01-21T15:03:59.213412 | 2017-04-14T07:55:27 | 2017-04-14T07:55:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,376 | py | import matplotlib as mpl
mpl.use("Qt5Agg")
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib import lines
import logging
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets, Qt
from ..SWAS_Sequence import SWAS_Sequence
from ..SAS import SmallAngleScattering
from ..WAS import WideAngleScattering
from ..SWAS import SWAS
from ..SWAS_Sequence import SWAS_Sequence
plot_types = ('SAS','WAS','SWAS','SAS_Fit','WAS_Fit','SWAS_Fit')
class CentralWidget(QtWidgets.QWidget):
    """Central panel: a stack of plot pages plus a combo box to switch
    between them.

    Depending on whether the selected object is a SmallAngleScattering,
    WideAngleScattering, SWAS or SWAS_Sequence, add_plot/add_fit choose the
    right page widget (SinglePlot, DoublePlot, MultiplePlot, FitPlot) and
    register it both in the stacked widget and in the drop-down list.
    """
    def __init__(self, parent, scattering_object = None, **kwargs):
        """Build the stacked-widget/toolbar layout.

        Args:
            parent: parent Qt widget.
            scattering_object: kept for interface compatibility; unused here.
            verbose (bool, keyword-only): enable DEBUG-level logging.
        """
        # 'verbose' must be popped before **kwargs reaches QWidget.__init__,
        # which rejects unknown keywords. The original also used
        # `self.logging`, raising AttributeError on every construction.
        if kwargs.pop('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        super(CentralWidget, self).__init__(parent, **kwargs)
        self.layout = QtWidgets.QVBoxLayout(self)
        self.layout.setContentsMargins(0,0,0,0)
        self.stackedWidget = QtWidgets.QStackedWidget(self)
        self.toolBar = QtWidgets.QToolBar(self)
        self.selectPlot = QtWidgets.QComboBox(self.toolBar)
        self.selectPlot.activated.connect(self.selected_plot)
        self.toolBar.addWidget(self.selectPlot)
        self.layout.addWidget(self.stackedWidget)
        self.layout.addWidget(self.toolBar)
        self.setLayout(self.layout)

    def _register(self, widget, label):
        # Keep the stacked widget and the combo box in sync: the page index
        # returned by addWidget is reused as the combo-box row.
        currPos = self.stackedWidget.addWidget(widget)
        self.selectPlot.insertItem(currPos, label)

    def add_plot(self, plotData, plotLayout = 'individual', **kwargs):
        """Add a new page plotting the current selection.

        Args:
            plotData(dict): describes what to plot:
                'Object': one of the existing scattering objects.
                'SAS': list of selected SAS curves (SWAS/SWAS_Sequence only).
                'WAS': list of selected WAS curves (SWAS/SWAS_Sequence only).
            If both 'SAS' and 'WAS' are non-empty but differ, nothing is
            plotted and an error is logged.
            plotLayout(str): kept for interface compatibility.
        """
        obj = plotData['Object']
        if isinstance(obj, SWAS_Sequence):
            if plotData['SAS'] or plotData['WAS']:
                # Plot both the SAS and WAS patterns
                if plotData['SAS'] == plotData['WAS']:
                    if len(plotData['SAS']) == 1:
                        logging.debug('plotting single SWAS')
                        swas = obj[plotData['SAS'][0]]
                        self._register(DoublePlot(self.stackedWidget, plotData = swas, plot_type = 'SWAS'),
                                       '{}_plot'.format(swas.sampleName))
                    else:
                        logging.debug('plotting multiple SWAS')
                        # 'SWAS_Sequence' (not 'SWAS') is the plot_type that
                        # MultiplePlot.plot_data dispatches on; the old value
                        # produced an empty page.
                        self._register(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'SWAS_Sequence'),
                                       '{}_plot'.format(obj.sampleName))
                # Plot only SAS data
                elif not plotData['WAS']:
                    if len(plotData['SAS']) == 1:
                        logging.debug('plotting single SAS')
                        sas = obj[plotData['SAS'][0]].SAS
                        self._register(SinglePlot(self.stackedWidget, plotData = sas, plot_type = 'SAS'),
                                       '{}_plot'.format(sas.sampleName))
                    else:
                        logging.debug('plotting multiple SAS')
                        self._register(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'SAS'),
                                       '{}_plot'.format(obj.sampleName))
                # Plot only WAS data
                elif not plotData['SAS']:
                    if len(plotData['WAS']) == 1:
                        logging.debug('plotting single WAS')
                        # was previously reading '.SAS' from the SWAS element
                        was = obj[plotData['WAS'][0]].WAS
                        self._register(SinglePlot(self.stackedWidget, plotData = was, plot_type = 'WAS'),
                                       '{}_plot'.format(was.sampleName))
                    else:
                        logging.debug('plotting multiple WAS')
                        self._register(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'WAS'),
                                       '{}_plot'.format(obj.sampleName))
                else:
                    logging.error('Cannot understand how/what to plot with the given selection')
                    return
        elif isinstance(obj, SWAS):
            if plotData['SAS'] and plotData['WAS']:
                logging.debug('plotting SWAS')
                self._register(DoublePlot(self.stackedWidget, plotData = obj, plot_type = 'SWAS'),
                               '{}_plot'.format(obj.sampleName))
            elif plotData['SAS']:
                logging.debug('plotting single SAS')
                self._register(SinglePlot(self.stackedWidget, plotData = obj.SAS, plot_type = 'SAS'),
                               '{}_plot'.format(obj.SAS.sampleName))
            elif plotData['WAS']:
                logging.debug('plotting single WAS')
                self._register(SinglePlot(self.stackedWidget, plotData = obj.WAS, plot_type = 'WAS'),
                               '{}_plot'.format(obj.WAS.sampleName))
        elif isinstance(obj, SmallAngleScattering):
            # was `isinstance(..., SAS)` -> NameError (only SmallAngleScattering
            # is imported); a plain SAS object also has no '.SAS' attribute,
            # so it is passed directly.
            logging.debug('plotting single SAS')
            self._register(SinglePlot(self.stackedWidget, plotData = obj, plot_type = 'SAS'),
                           '{}_plot'.format(obj.sampleName))
        elif isinstance(obj, WideAngleScattering):
            logging.debug('plotting single WAS')
            self._register(SinglePlot(self.stackedWidget, plotData = obj, plot_type = 'WAS'),
                           '{}_plot'.format(obj.sampleName))
        else:
            logging.info('No recognizable element selected')
            return

    def add_fit(self, plotData, fitType, **kwargs ):
        '''Add a page showing the fitted data for `fitType`.

        Args:
            plotData(dict): same structure as in add_plot.
            fitType(string): the name of the fitting method to display.
        '''
        obj = plotData['Object']
        if isinstance(obj, SWAS_Sequence):
            if not plotData['WAS']:
                if len(plotData['SAS']) == 1:
                    logging.debug('plotting single SAS')
                    sas = obj[plotData['SAS'][0]].SAS
                    self._register(FitPlot(self.stackedWidget, plotData = sas, plot_type = 'SAS', fitType = fitType),
                                   '{}_fit'.format(sas.sampleName))
                else:
                    logging.debug('plotting multiple SAS')
                    self._register(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'SAS', fitType = fitType),
                                   '{}_fit'.format(obj.sampleName))
            elif not plotData['SAS']:
                if len(plotData['WAS']) == 1:
                    logging.debug('plotting single WAS')
                    # was previously reading '.SAS' from the SWAS element
                    was = obj[plotData['WAS'][0]].WAS
                    self._register(FitPlot(self.stackedWidget, plotData = was, plot_type = 'WAS'),
                                   '{}_plot'.format(was.sampleName))
                else:
                    logging.debug('plotting multiple WAS')
                    self._register(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'WAS'),
                                   '{}_plot'.format(obj.sampleName))
            else:
                logging.error('Cannot understand how/what to plot with the given selection')
                return
        elif isinstance(obj, SWAS):
            if not plotData['WAS']:
                self._register(FitPlot(self.stackedWidget, plotData = obj.SAS, plot_type = 'SAS', fitType = fitType),
                               '{}_fit'.format(obj.SAS.sampleName))
            elif not plotData['SAS']:
                # was previously passing '.SAS' together with plot_type 'WAS'
                self._register(FitPlot(self.stackedWidget, plotData = obj.WAS, plot_type = 'WAS'),
                               '{}_plot'.format(obj.WAS.sampleName))
        elif isinstance(obj, SmallAngleScattering):
            self._register(FitPlot(self.stackedWidget, plotData = obj, plot_type = 'SAS', fitType = fitType),
                           '{}_fit'.format(obj.sampleName))
        elif isinstance(obj, WideAngleScattering):
            self._register(FitPlot(self.stackedWidget, plotData = obj, plot_type = 'WAS', fitType = fitType),
                           '{}_fit'.format(obj.sampleName))
        else:
            logging.info('No recognizable element selected')
            return

    def selected_plot(self):
        """Show the stacked-widget page matching the combo-box selection."""
        selIndx = self.selectPlot.currentIndex()
        self.stackedWidget.setCurrentIndex(selIndx)
        logging.debug('changed index to {}'.format(selIndx))
class SingleCanvasPlot(FigureCanvas):
    '''Drawing canvas holding a single axes on one figure. Used for plain
    SAS or WAS curves.
    '''
    def __init__(self, parent=None, figsize = (5,4), **kwargs):
        '''Initiates the canvas.

        Args:
            parent (QtWidget): the parent widget of the canvas. Defaults to None.
            figsize (tuple of int): figure size as (width, height) in inches.
                Defaults to (5,4).
            verbose (bool, keyword-only): enable DEBUG-level logging.
        '''
        # The original called `self.logging.basicConfig` before the instance
        # existed, which raised AttributeError; use the logging module.
        if kwargs.get('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        self.fig = Figure(figsize=figsize)
        self.axes = self.fig.add_subplot(111)
        super(SingleCanvasPlot, self).__init__(self.fig)
        self.setParent(parent)
        # Let the canvas grow with the surrounding layout.
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        self.updateGeometry()
class SinglePlot(QtWidgets.QWidget):
    '''Widget wrapping a SingleCanvasPlot together with a matplotlib
    navigation toolbar.
    '''
    def __init__(self, parent=None, **kwargs):
        '''Keyword args:
            plotData: a scattering object; if given it is plotted immediately.
            verbose (bool): enable DEBUG-level logging.
        '''
        # `self.logging` never existed (AttributeError); use the module.
        if kwargs.get('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        super(SinglePlot, self).__init__(parent)
        self.scatteringObject = kwargs.get('plotData', None)
        self.plot_layout = QtWidgets.QVBoxLayout(self)
        self.plot_canvas = SingleCanvasPlot(self, figsize = (5,4))
        self.navi_toolbar = NavigationToolbar(self.plot_canvas, self)
        self.plot_layout.addWidget(self.plot_canvas)  # the matplotlib canvas
        self.plot_layout.addWidget(self.navi_toolbar)
        self.setLayout(self.plot_layout)
        # If the widget is created with an object it can directly be plotted
        if self.scatteringObject is not None:
            self.plot_data()

    def plot_data(self):
        """Delegate plotting to the scattering object on this widget's axes."""
        self.scatteringObject.plot_data(ax = self.plot_canvas.axes)

    def remove_line(self, line):
        '''Remove a specific Line2D from the axes given its handle. Useful to
        drop vertical guide lines drawn for fitting limits.
        '''
        if isinstance(line, lines.Line2D):
            if line in self.plot_canvas.axes.lines:
                self.plot_canvas.axes.lines.remove(line)
            else:
                logging.error('{} is not in the axes'.format(line))

    def axvline(self, x):
        '''Draw a vertical guide line at x on the widget's axes.'''
        self.plot_canvas.axes.axvline(x, color = 'k', linewidth = 2)

    def cla(self):
        '''Clear the widget's axes.'''
        self.plot_canvas.axes.cla()

    def redraw(self):
        '''Redraw the canvas after artists were added or removed.'''
        self.plot_canvas.draw()

    def ax_x_lim(self):
        '''Return the x limits of the canvas' axes.'''
        return self.plot_canvas.axes.get_xlim()

    def ax_y_lim(self):
        '''Return the y limits of the canvas' axes.'''
        return self.plot_canvas.axes.get_ylim()
class DoubleCanvasPlot(FigureCanvas):
    '''Drawing canvas with two axes on the same figure. Used to show SAS and
    WAS data side by side, or a scattering curve next to its fit result
    (e.g. the size distribution).
    '''
    def __init__(self, parent=None, figsize = (5,4), rows = 1, cols = 2, rowSpan = 1, colSpan = 1, **kwargs):
        '''Initiates the canvas.

        Args:
            parent (QtWidget): the parent widget. Defaults to None.
            figsize (tuple of int): figure size (width, height). Defaults to (5,4).
            rows (int): number of grid rows for the two axes. Defaults to 1.
            cols (int): number of grid columns for the two axes. Defaults to 2.
            rowSpan (int): rows spanned by the first axes; the remaining rows
                go to the second axes. Defaults to 1.
            colSpan (int): cols spanned by the first axes; the remaining cols
                go to the second axes. Defaults to 1.
            verbose (bool, keyword-only): enable DEBUG-level logging.
        '''
        # `self.logging` raised AttributeError before; use the module.
        if kwargs.get('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        # First axes occupies the top-left rowSpan x colSpan cells; the
        # second takes whatever rows/cols remain in that direction.
        firstAx = [[0,rowSpan],[0,colSpan]]
        if rowSpan == rows:
            secondAx = [[0,rowSpan]]
        else:
            secondAx = [[rowSpan,rows]]
        if colSpan == cols:
            secondAx.append([0,colSpan])
        else:
            secondAx.append([colSpan,cols])
        self.fig = Figure(figsize=figsize)
        gs = mpl.gridspec.GridSpec(rows,cols)
        self.axes1 = self.fig.add_subplot(gs[slice(*firstAx[0]),slice(*firstAx[1])])
        self.axes2 = self.fig.add_subplot(gs[slice(*secondAx[0]),slice(*secondAx[1])])
        super(DoubleCanvasPlot, self).__init__(self.fig)
        self.setParent(parent)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        self.updateGeometry()
class DoublePlot(QtWidgets.QWidget):
    '''Widget wrapping a DoubleCanvasPlot together with a matplotlib
    navigation toolbar.
    '''
    def __init__(self, parent=None, **kwargs):
        '''Keyword args:
            plotData: scattering object; if given it is plotted immediately.
            fitType (str): name of a fit to plot instead of the raw data.
            verbose (bool): enable DEBUG-level logging.
        '''
        # `self.logging` raised AttributeError before; use the module.
        if kwargs.get('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        super(DoublePlot, self).__init__(parent)
        self.scatteringObject = kwargs.get('plotData', None)
        # BUG FIX: fitType was read below as an undefined local name, raising
        # NameError whenever a scattering object was supplied.
        self.fitType = kwargs.get('fitType', None)
        self.plot_layout = QtWidgets.QVBoxLayout(self)
        self.plot_canvas = DoubleCanvasPlot(self, figsize = (5,4))
        self.navi_toolbar = NavigationToolbar(self.plot_canvas, self)
        self.plot_layout.addWidget(self.plot_canvas)  # the matplotlib canvas
        self.plot_layout.addWidget(self.navi_toolbar)
        self.setLayout(self.plot_layout)
        if self.scatteringObject is not None and self.fitType is not None:
            self.plot_fit()
        elif self.scatteringObject is not None:
            self.plot_data()

    def cla(self):
        '''Clear both axes of the figure.'''
        self.plot_canvas.axes1.cla()
        self.plot_canvas.axes2.cla()

    def remove_line(self, line):
        '''Remove the given Line2D from whichever of the two axes holds it.'''
        if isinstance(line, lines.Line2D):
            if line in self.plot_canvas.axes1.lines:
                self.plot_canvas.axes1.lines.remove(line)
            elif line in self.plot_canvas.axes2.lines:
                self.plot_canvas.axes2.lines.remove(line)
            else:
                logging.error('{} was not found in either axis'.format(line))

    def plot_data(self):
        '''Delegate plotting of the data to the scattering object, using the
        two available axes.
        '''
        self.scatteringObject.plot_data(axs = [self.plot_canvas.axes1, self.plot_canvas.axes2])

    def plot_fit(self):
        '''Delegate plotting of the fit to the scattering object.
        NOTE(review): unlike plot_data, no axes are forwarded here; the fit
        may end up on a new figure -- confirm the plot_fit signature.
        '''
        self.scatteringObject.plot_fit()

    def redraw(self):
        '''Redraw the canvas after artists were added or removed.'''
        self.plot_canvas.draw()
class MultiplePlot(QtWidgets.QWidget):
'''Widget used to plot a sequence of scattering objects. It is composed of a plotting widget,
two buttons to move the current position forward and backwards by one, and a slider to select
any avalable fitting
'''
def __init__(self, parent=None, **kwargs):
if kwargs.get('verbose', False):
self.logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
else:
self.logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
super(MultiplePlot, self).__init__(parent)
#Setup the data for the plotting
self.plotData = kwargs.get('plotData', None)
if self.plotData is not None:
self.scatteringObject = self.plotData['Object']
self.plotType = kwargs.get('plot_type', 'SAS')
self.currPlot = 0
if self.plotType in ('SAS','WAS'):
self.plotWidget = SinglePlot(self)
else:
self.plotWidget = DoublePlot(self)
#Create the widget in which the data is going to be plotted
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.setContentsMargins(0,0,0,0)
self.layout.setSpacing(0)
self.scroll_toolBar = QtWidgets.QToolBar(self)
self.scrollBar = QtWidgets.QScrollBar(QtCore.Qt.Horizontal,self.scroll_toolBar)
self.scrollBar.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,QtWidgets.QSizePolicy.MinimumExpanding)
self.scrollBar.sliderMoved.connect(self.sliderMoving)
self.scrollBar.sliderReleased.connect(self.sliderChanged)
self.text_toolBar = QtWidgets.QToolBar(self)
self.text_toolBar_layout = QtWidgets.QHBoxLayout(self.text_toolBar)
spacerL = QtWidgets.QWidget()
spacerR = QtWidgets.QWidget()
spacerL.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,QtWidgets.QSizePolicy.MinimumExpanding)
spacerR.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,QtWidgets.QSizePolicy.MinimumExpanding)
self.prev_button = QtWidgets.QPushButton('<<', self.text_toolBar)
self.prev_button.clicked.connect(self.prevPlot)
self.next_button = QtWidgets.QPushButton('>>', self.text_toolBar)
self.next_button.clicked.connect(self.nextPlot)
self.lineEdit = QtWidgets.QLabel(self.text_toolBar)
self.lineEdit.setText('LineEditText')
self.scroll_toolBar.addWidget(self.scrollBar)
self.text_toolBar.addWidget(spacerL)
self.text_toolBar.addWidget(self.prev_button)
self.text_toolBar.addWidget(self.lineEdit)
self.text_toolBar.addWidget(self.next_button)
self.text_toolBar.addWidget(spacerR)
self.text_toolBar_layout.setAlignment(self.lineEdit, QtCore.Qt.AlignHCenter)
self.layout.addWidget(self.scroll_toolBar)
self.layout.addWidget(self.text_toolBar)
self.layout.addWidget(self.plotWidget)
self.numb_curves = 0
if self.scatteringObject is not None:
self.InitializeValues()
def InitializeValues(self):
'''InitializeValues sets all the variables needed to move between the available
data sets and visialize them
'''
#self.numb_curves = self.scatteringObject.size
if self.plotType == 'SAS':
self.selectedPlots = self.plotData['SAS']
self.numbCurves = len(self.selectedPlots)
elif self.plotType == 'WAS':
self.selectedPlots = self.plotData['WAS']
self.numbCurves = len(self.selectedPlots)
else:
self.selectedPlots = self.plotData['SAS']
self.numbCurves = len(self.selectedPlots)
self.typeObject = self.scatteringObject.avlbCurves
self.lineEdit.setText('1/{}'.format(self.numbCurves+1))
self.scrollBar.setMinimum(1)
self.scrollBar.setMaximum(self.numbCurves)
self.scrollBar.setValue(1)
if not isinstance(self.scatteringObject,SWAS_Sequence):
self.text_toolBar.hide()
self.scroll_toolBar.hide()
if self.fitType is None:
self.plot_data()
else:
self.plot_fit()
def set_data(self, scatteringObj, selectedPlots, plotType):
'''set_data is a quick setter function to set the scattering object,
the data to plot and the type of plot and fit
'''
self.scatteringObject = scatteringObj
self.selectedPlots = selectedPlots
self.plotType = plotType
def plot_data(self, **kwargs):
'''plot_data used the plotting functions of the object to plot
the data on the available axis after clearing them
'''
#print self.scatteringObject[self.currPlot].SAS.q
self.plotWidget.cla()
if self.plotType == 'SAS':
if isinstance(self.scatteringObject,SmallAngleScattering):
self.scatteringObject.plot_data(ax = self.plotWidget.plot_canvas.axes, **kwargs)
if isinstance(self.scatteringObject,SWAS):
self.scatteringObject.SAS.plot_data(ax = self.plotWidget.plot_canvas.axes, **kwargs)
if isinstance(self.scatteringObject,SWAS_Sequence):
self.scatteringObject[self.selectedPlots[self.currPlot]].SAS.plot_data(ax = self.plotWidget.plot_canvas.axes, **kwargs)
else:
self.logging.error('Cannot plot Small angles for the selected scattering data')
if self.plotType == 'WAS':
if isinstance(self.scatteringObject,WideAngleScattering):
self.scatteringObject.plot_data(ax = self.plotWidget.plot_canvas.axes, **kwargs)
if isinstance(self.scatteringObject,SWAS):
self.scatteringObject.WAS.plot_data(ax = self.plotWidget.plot_canvas.axes, **kwargs)
if isinstance(self.scatteringObject,SWAS_Sequence):
self.scatteringObject[self.selectedPlots[self.currPlot]].WAS.plot_data(ax = self.plotWidget.plot_canvas.axes, **kwargs)
else:
self.logging('Cannot plot wide angles for the selected scattering data')
if self.plotType == 'SWAS_Sequence':
#print 'plotting ', self.currPlot, 'which is object ',self.selectedPlots[self.currPlot]
#print 'of ',self.scatteringObject, ' ', self.scatteringObject[self.selectedPlots[self.currPlot]].SAS.q
if isinstance(self.scatteringObject,SWAS_Sequence):
self.scatteringObject[self.selectedPlots[self.currPlot]].plot_data(axs = [self.plotWidget.plot_canvas.axes1, self.plotWidget.plot_canvas.axes2],\
fig = self.plotWidget.plot_canvas.fig,**kwargs)
self.plotWidget.redraw()
def nextPlot(self):
'''Sets the current plot to one after the current one. If the last plot is
currently being shown it does nothing.
'''
if (self.currPlot+1)<self.numbCurves:
self.currPlot += 1
self.scrollBar.setValue(self.currPlot+1)
self.lineEdit.setText('{}/{}'.format(self.currPlot+1, self.numbCurves))
self.plot_data()
def prevPlot(self):
'''Sets the current plot to one before the current one. If the first plot is
currently being shown it does nothing.
'''
if (self.currPlot-1) >= 0:
self.currPlot -= 1
self.scrollBar.setValue(self.currPlot+1)
self.lineEdit.setText('{}/{}'.format(self.currPlot+1, self.numbCurves))
self.plot_data()
def sliderMoving(self):
'''Updates in real time the number being shown on the text as the slider is moved
'''
self.lineEdit.setText('{}/{}'.format(self.scrollBar.value(), self.numbCurves))
def sliderChanged(self):
'''Updates the current plot based on the selection done with the slider
'''
self.currPlot = self.scrollBar.value()-1
self.plot_data()
def ax_x_lim(self):
'''Returns the x limits of the current axes/axis
'''
if isinstance(self.plotWidget, DoublePlot):
return [self.plotWidget.plot_canvas.axes1.get_xlim(),self.plotWidget.plot_canvas.axes1.get_xlim()]
else:
return self.plotWidget.plot_canvas.axes.get_xlim()
def axvline(self,x):
if isinstance(self.plotWidget, SinglePlot):
self.plotWidget.axvline(x)
def remove_line(self, line):
if isinstance(line,lines.Line2D):
self.plotWidget.remove_line(line)
else:
self.logging.error('{} is not a matplotlib 2D line'.format(line))
class FitPlot(QtWidgets.QWidget):
def __init__(self, parent=None, **kwargs):
super(FitPlot, self).__init__(parent)
self.scatteringObject = kwargs.get('plotData', None)
self.fitType = kwargs.get('fitType',None)
self.plot_layout = QtWidgets.QVBoxLayout(self)
self.plot_canvas = DoubleCanvasPlot(self, figsize = (5,4))
self.navi_toolbar = NavigationToolbar(self.plot_canvas, self)
self.plot_layout.addWidget(self.plot_canvas) # the matplotlib canvas
self.plot_layout.addWidget(self.navi_toolbar)
self.setLayout(self.plot_layout)
if self.scatteringObject is not None and self.fitType is not None:
self.plot_fit()
elif self.scatteringObject is not None:
self.plot_data()
def cla(self):
self.plot_canvas.axes1.cla()
self.plot_canvas.axes2.cla()
def remove_line(self, line):
if isinstance(line, lines.Line2D):
if line in self.plot_canvas.axes1.lines:
self.plot_canvas.axes1.lines.remove(line)
elif line in self.plot_canvas.axes2.lines:
self.plot_canvas.axes2.lines.remove(line)
else:
print line, ' is not in either axes'
def plot_data(self):
self.scatteringObject.plot_fit(self.fitType, axs = [self.plot_canvas.axes1,self.plot_canvas.axes2])
def plot_fit(self):
self.scatteringObject.plot_fit(self.fitType, axs = [self.plot_canvas.axes1,self.plot_canvas.axes2])
def redraw(self):
self.plot_canvas.draw()
class MultipleFitPlot(MultiplePlot):
def __init__(self, parent=None, **kwargs):
super(MultipleFitPlot, self).__init__(parent, **kwargs)
self.fitType = kwargs.get('fitType')
self.plotWidget = DoublePlot(self)
def set_data(self, scatteringObj, selectedPlots, plotType, fitType):
'''set_data is a quick setter function to set the scattering object,
the data to plot, the type of plot and fit
'''
self.scatteringObject = scatteringObj
self.selectedPlots = selectedPlots
self.plotType = plotType
self.fitType = fitType
def plot_data(self, **kwargs):
'''plot_data used the plotting functions of the object to plot
the data on the available axis after clearing them
'''
#print self.scatteringObject[self.currPlot].SAS.q
self.plotWidget.cla()
if self.plotType == 'SAS':
if isinstance(self.scatteringObject,SmallAngleScattering):
self.scatteringObject.plot_fit(self.fitType, ax = [self.plotWidget.plot_canvas.axes1, self.plotWidget.plot_canvas.axes2], **kwargs)
if isinstance(self.scatteringObject,SWAS):
self.scatteringObject.SAS.plot_fit(self.fitType, ax = [self.plotWidget.plot_canvas.axes1, self.plotWidget.plot_canvas.axes2], **kwargs)
if isinstance(self.scatteringObject,SWAS_Sequence):
self.scatteringObject[self.selectedPlots[self.currPlot]].SAS.plot_fit(self.fitType, ax = [self.plotWidget.plot_canvas.axes1, self.plotWidget.plot_canvas.axes2],\
**kwargs)
else:
self.logging.error('Cannot plot Small angles for the selected scattering data')
if self.plotType == 'WAS':
if isinstance(self.scatteringObject,WideAngleScattering):
self.scatteringObject.plot_fit(self.fitType, ax = self.plotWidget.plot_canvas.axes, **kwargs)
if isinstance(self.scatteringObject,SWAS):
self.scatteringObject.WAS.plot_fit(self.fitType, ax = [self.plotWidget.plot_canvas.axes1, self.plotWidget.plot_canvas.axes2], **kwargs)
if isinstance(self.scatteringObject,SWAS_Sequence):
self.scatteringObject[self.selectedPlots[self.currPlot]].WAS.plot_fit(self.fitType, ax = [self.plotWidget.plot_canvas.axes1, self.plotWidget.plot_canvas.axes2], **kwargs)
else:
self.logging('Cannot plot wide angles for the selected scattering data')
if self.plotType == 'SWAS_Sequence':
#print 'plotting ', self.currPlot, 'which is object ',self.selectedPlots[self.currPlot]
#print 'of ',self.scatteringObject, ' ', self.scatteringObject[self.selectedPlots[self.currPlot]].SAS.q
if isinstance(self.scatteringObject,SWAS_Sequence):
self.scatteringObject[self.selectedPlots[self.currPlot]].plot_fit(self.fitType, axs = [self.plotWidget.plot_canvas.axes1, self.plotWidget.plot_canvas.axes2],\
fig = self.plotWidget.plot_canvas.fig,**kwargs)
self.plotWidget.redraw()
| [
"Castro@Nicolos-MacBook-Pro-2.local"
] | Castro@Nicolos-MacBook-Pro-2.local |
13ba32c6e2a103795a5cafba7f437334176ac67e | d5fbb40c8fa95970a6b1dd10920071a3330c6de8 | /src_d21c/in_theta.py | 428938d97f7d9d9e57abdd3f26b45e3ee98844aa | [] | no_license | Pooleyo/theta.py | 622000e04a7834a7b12d371337992f6063c3f332 | 7bdf96f7494db7fda8dbe8d1e8bb536a5b39e39d | refs/heads/master | 2021-06-18T06:03:47.176742 | 2019-09-18T16:02:02 | 2019-09-18T16:02:02 | 137,497,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | test_mode = True
image_files = ['3x3_pixel_value_1.tif'] # "PSL_plate4_s10257_BBXRD.tif"#"3x3_white_test_image.tif"##"Nb_test_image.png"
# #"PSL_plate4_s10257_BBXRD.tif"#"Nb_test_image.png"#
source_position = [[50.0, 0.0, 49.8]] # In mm
normal = [[-25.6, 0.0, 12.7078]] # [-10.0, 0.0, 10.0] # The normal to the plane of the image plate with units mm.
sample_normal = [[0.0, 0.0, 1.0]] # This is used to correct for attenuation in the diffracting sample.
offset = [[0.0, 12.0, 0.0]] # X offset (mm), Y offset (mm), rotation (degrees); note that rotation is not actually used
# in this code, it is included simply to indicate which sonOfHoward parameters are being reference here.
x_scale = [56] # In mm
y_scale = [44] # In mm
view_x = [[0.01, 1.0, 0.02]] # [-0.71, 0.0, -0.71] # "normalised"
view_y = [[0.44, -0.02, 0.90]] # [0.0, 1.0, 0.0] # "normalised"
wavelength = 1.378 # In Angstroms
a_lattice = 3.3 # In Angstroms
filter_thickness = [[10.0, 6.0]]
filter_attenuation_length = [[34.1, 109.7]] # The attenuation length(s) of filter(s) used, in microns. Enter a new list
# element for each filter; the order doesn't matter. Zn, at 9 keV, has attenuation length of 34.1 microns. Al, at 9 keV,
# has attenuation length of 109.7 microns.
phi_0_definer = [0.0, 0.0, 1.0]
phi_limit = [-180.0, 180.0]
gsqr_limit = [0.0, 18.0]
theta_phi_n_pixels_width = 1
theta_phi_n_pixels_height = 1
num_width_subpixels = 1
num_height_subpixels = 1
plot = True
debug = False
name_plot_integrated_intensity = 'integrated_intensity_vs_gsqr.png'
| [
"ajp560@york.ac.uk"
] | ajp560@york.ac.uk |
c625c91f36b9023bdda7ad7f8346b9bde769ae1b | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC300~ABC399/ABC300/e.py | 9ba33d532fb2e724f862b0ad868328126a7e1249 | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from functools import lru_cache
def modinv(a: int, m: int) -> int:
'''
モジュラ逆元
ax mod m =1の解x=a^(-1)を返す
Parameters
----------
a:int
m:int
'''
x, y, u, v = 1, 0, 0, 1
M = m
while m > 0:
k = a//m
x -= k*u
y -= k*v
x, u = u, x
y, v = v, y
a, m = m, a % m
assert a == 1, "a and m aren't relatively prime numbers"
if x < 0:
x += M
return x
N = int(input())
MOD = 998244353
P = modinv(5, MOD)
@lru_cache(maxsize=None)
def f(n):
if n >= N:
return 1 if n == N else 0
res = 0
for i in range(2, 7):
res += f(i*n)
return res*P % MOD
ans = f(1)
print(ans)
| [
"ymdysk911@gmail.com"
] | ymdysk911@gmail.com |
6938f0bc75372893e1b90af44297d7efdbdabe3c | 2d2ef049d450ef9ac6459bcdd1ea25fccc0305d5 | /loadTimeEstimator.py | b1f03e7172b7ba44449aeb4afa1aa99899599ebb | [] | no_license | iotmember/code-fights | fc119f53cc42f9fea8a40f43335d93d076c92e9d | e7f1fdb9d5068bd2ed67d9df07541f306097bd19 | refs/heads/master | 2021-05-31T06:03:41.497371 | 2016-04-08T05:40:39 | 2016-04-08T05:40:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | import sys
def loadTimeEstimator(sizes, uploadingStart, V):
finish_time = [x for x in uploadingStart]
t = [0 for x in uploadingStart]
c = len(sizes)
time_ = 0
curr_time = uploadingStart[0]
while (c > 0):
index_of_uploading_start = [i for i, x in enumerate(uploadingStart) if x == curr_time + time_ and sizes[i] != 0 ]
if len(index_of_uploading_start):
speed = V/float(len(index_of_uploading_start))
else:
speed = V
#print index_of_uploading_start
for i in index_of_uploading_start:
if sizes[i] > 0:
sizes[i] = sizes[i] - speed
finish_time[i] = finish_time[i] + 1
t[i] = t[i] + 1
uploadingStart[i] = uploadingStart[i] + 1
time_ += 1
c = len([x for x in sizes if x > 0])
return finish_time
sizes = [21, 10]
uploadingStart = [100, 105]
V = 2
#print loadTimeEstimator(sizes, uploadingStart, V)
print loadTimeEstimator([20, 10], [1, 1], 1)
#print loadTimeEstimator([1, 1, 1], [10, 20, 30], 3)
| [
"nasa.freaks@gmail.com"
] | nasa.freaks@gmail.com |
ff64debff15d72ec3bafdf254c48b07687cfa1bc | fa9d297de5b007e249511191ad9ce99ebf07d640 | /Nodoterreno.py | 20f1479c82ccc1144086393825b8ba26671f37e8 | [] | no_license | LuisBarrera23/IPC2_Proyecto1_202010223 | 0eb9e4e86151a8fae6ea4730b8e666228901e31b | 740dbb1cddf9a80c079503c732eb2998197d7382 | refs/heads/main | 2023-08-09T22:32:14.116505 | 2021-08-30T03:46:56 | 2021-08-30T03:46:56 | 394,154,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | class nodoterreno:
def __init__(self,terreno=None,siguiente=None):
self.terreno=terreno
self.siguiente=siguiente | [
"luisbarrera5662@gmail.com"
] | luisbarrera5662@gmail.com |
e3c70a06800823605b23e90caeeb6c3cb91014cd | ca7d9df0890b0eed1b153737bc67afcd5494d11e | /iocpuller.py | 9461edc31b6e0e7e00f513b673acc24a4ff58bd6 | [] | no_license | KMCGamer/iocpuller | b219bdaf5ca412c9b910a3d46d911d079bb791f6 | 0c2ded1bb5f6611d932a5c62c7968f9fe949bfe8 | refs/heads/master | 2021-03-27T10:36:01.346459 | 2018-02-05T21:28:31 | 2018-02-05T21:28:31 | 120,359,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,791 | py | #!/usr/bin/python -u
# -*- coding: utf-8 -*-
import bisect
import logging
import re
import requests
import argparse
class color:
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# -----------------------------------------------------------------------------
# Locations and Constants
VERSION = "2.1.1"
INTEL_HEADER = """#fields\tindicator\tindicator_type\tmeta.source\tmeta.do_notice
# EXAMPLES:
#66.32.119.38\tIntel::ADDR\tTest Address\tT
#www.honeynet.org\tIntel::DOMAIN\tTest Domain\tT
#4285358dd748ef74cb8161108e11cb73\tIntel::FILE_HASH\tTest MD5\tT
"""
USAGE_DESCRIPTION= \
"""
{1}NAME{0}
iocpuller - pulls ioc data from RT and places it into an intel file.
{1}DESCRIPTION{0}
{2}{1}NOTE: THIS SCRIPT MUST BE RUN AS ROOT.{0}
Pulls ioc data from RT and places it into an intel file. The fields that
are pulled are:
- ioc.domain
- ioc.attackerip
- ioc.filehash
The domains are run through a top website list and a whitelist. If there
are any matches, they are not added to the intel file. You may also edit a
whitelist to remove indicators from the intel file.
{1}FUNCTIONS{0}
{1}pull{0} {2}INTEL_FILE{0} {2}WEBSITES_FILE{0}
Pull ioc data from RT while removing any top websites included in the
top websites file and stores it in the intel file.
{1}whitelist{0}
Manage the whitelist file. Creates a new whitelist if there isnt one.
{1}OPTIONS{0}
{1}-h, --help{0}
Display the manual page.
{1}-v, --version{0}
Display the current version.
""".format(color.END, color.BOLD, color.UNDERLINE, VERSION)
WHITELIST_LOCATION = "/usr/local/bin/iocwhitelist.txt"
# intel.dat options
INDICATOR_TYPE_DOMAIN = "DOMAIN"
INDICATOR_TYPE_FILE_HASH = "FILE_HASH"
INDICATOR_TYPE_IP = "ADDR"
META_DO_NOTICE = "T"
META_SOURCE = "RT_ID: "
# RT options
USER = "user"
PASSWD = "pass"
MIN_TICKET_NUM = "0"
# -----------------------------------------------------------------------------
# Functions
# Parses arguments from the command line. Takes in intel file and top website
# file locations so it can run mostly anywhere.
def parseArguments():
parser = argparse.ArgumentParser(description=USAGE_DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter, add_help=False)
parser.add_argument("-v", "--version", action='version', version='%(prog)s ' + VERSION)
parser.add_argument("-h", "--help", action='version', version=USAGE_DESCRIPTION) # THIS IS A HACK. NO TOUCHIE THANK YOU.
subparsers = parser.add_subparsers(dest='cmd')
# pull command
parser_pull = subparsers.add_parser('pull')
parser_pull.add_argument('intel', metavar="<intel_file>")
parser_pull.add_argument('top_website', metavar="<top_websites_file>")
# whitelist command
parser_whitelist = subparsers.add_parser('whitelist')
args = parser.parse_args()
return args
# Makes a POST to RT with a field and puts the result in an array of strings
# that have their ticket and values
# field (string) = "CF.{ioc.domain}", "CF.{ioc.attackerip}", "CF.{ioc.filehash}"
def pullListByField(field):
# POST to get a list of all things based on a field. ex: CF.{ioc.domain}
query = "'" + field + "'IS NOT NULL AND id > " + MIN_TICKET_NUM + " AND Status != 'rejected' AND Status != 'abandoned'"
url = "https://12.34.56.78/REST/1.0/search/ticket?query=" + query + "&format=s&fields=" + field
postResult = requests.post(url, {'user': USER, 'pass': PASSWD}, verify=False)
postResultArray = postResult.text.splitlines()
# remove first three lines and last two (response msg and empty lines)
del postResultArray[0:3]
del postResultArray[-1]
del postResultArray[-1]
return postResultArray
# Parses the values of a post result array. Makes an array of arrays that contain
# tickets and their values.
# Takes in a post result (array) created by pullListByField()
# return example: [[4044, "www.google.com"], [4045, "www.asdf.com"]]
def parseValues(postResult):
# split the values from their ticket ids
idValueArray = []
for line in postResult:
id = line.split("\t")[0]
values = line.split("\t")[1]
# split space delimited values
if values.find(" ") != -1:
valueArray = values.split(" ")
for value in valueArray:
idValueArray.append([id, value])
# split comma delimited values
elif values.find(",") != -1:
valueArray = values.split(",")
for value in valueArray:
idValueArray.append([id, value])
# add any other exception (single values)
else:
idValueArray.append([id, values])
return idValueArray
# Removes duplicate values from a list while maintaining their id numbers
def removeDuplicates(idValueArray):
uniqueValues = []
uniqueIDValueArray = []
for segment in idValueArray:
value = segment[1]
# check if the value is unique (not seen before)
if value not in uniqueValues:
uniqueValues.append(value)
uniqueIDValueArray.append(segment)
return uniqueIDValueArray
# checks if a value is in a whitelist
def isInWhitelist(value):
for indicator in whitelist:
if indicator == value:
return True
return False
def clearTerminal():
print "\033c"
# returns an array of unique domains and their ids
def getDomains():
# make the set unique, filter out any blank addresses, and websites in the list
def filterDomains(domainArray):
# checks the domain against a list using binary search
def isInTopWebsiteList(domain):
i = bisect.bisect_left(topWebsites, domain)
if i != len(topWebsites) and topWebsites[i] == domain:
return True
else:
return False
# filter out any domain that has these characters:
# '<', '>', '[', ']', '@', '/', '\', '=', '?'
def containsIllegalChar(domain):
if re.search('[\<\>\[\]\@\/\\\=\?]', domain) == None:
return False
else:
return True
# filters out IPs from the domain list
def isIP(domain):
if re.search('[a-zA-Z]', domain) == None:
return True
else:
return False
domainArray = removeDuplicates(domainArray)
domainArray = filter(lambda x: not containsIllegalChar(x[1]), domainArray)
domainArray = filter(lambda x: not isIP(x[1]), domainArray)
domainArray = filter(lambda x: len(x[1]) > 3, domainArray)
domainArray = filter(lambda x: not isInTopWebsiteList(x[1]), domainArray)
domainArray = filter(lambda x: not isInWhitelist(x[1]), domainArray)
return domainArray
# get all domains with ticket ids
postResultArray = pullListByField("CF.{ioc.domain}")
# get all domains into a single array
domainArray = parseValues(postResultArray)
# filter domains and return the array
return filterDomains(domainArray)
# returns an array of unique file hashes and their ids
def getFileHashes():
# get all hashes with ticket ids
postResultArray = pullListByField("CF.{ioc.filehash}")
# get all hashes into a single array
hashArray = parseValues(postResultArray)
# filter out any values that arent 32 length
hashArray = removeDuplicates(hashArray)
hashArray = filter(lambda x: len(x[1]) == 32, hashArray)
hashArray = filter(lambda x: not isInWhitelist(x[1]), hashArray)
return hashArray
# returns an array of unique IPs and their ids
def getIPS():
# get all ips with ticket ids
postResultArray = pullListByField("CF.{ioc.attackerip}")
# get all IPs into a single array
IPArray = parseValues(postResultArray)
IPArray = filter(lambda x: not isInWhitelist(x[1]), IPArray)
# filter out any duplicate IPs
return removeDuplicates(IPArray)
# creates a string compatible to append to the indel.dat file
def buildIntelString(value, type, source, notice):
return value + "\t" + "Intel::" + type + "\t" + source + "\t" + notice
# MAIN IOCPULLER FUNCTION.
# calls all the other functions to pull ioc values from RT.
def main(intelLocation, topWebsitesLocation):
# disable some warnings
logging.captureWarnings(True)
# open intel.dat and top websites file
try:
intelFile = open(intelLocation, "w")
intelFile.write(INTEL_HEADER)
except Exception as e:
print "There was an exception opening and writing to the intel file."
print "Exception: {}\nExiting...".format(e)
quit()
try:
with open(topWebsitesLocation, 'r') as websiteFile:
# get top 10,000 websites and sort alphabetically
global topWebsites
topWebsites = (websiteFile.read()).splitlines()[:10000]
topWebsites.sort()
print "Read top websites file: {}".format(topWebsitesLocation)
except Exception as e:
print "There was an exception reading the top websites file."
print "Exception: {}\nExiting...".format(e)
quit()
# open whitelist file
global whitelist
try:
whitelistFile = open(WHITELIST_LOCATION, "r")
whitelist = map(lambda x: x.rstrip('\r\n'), whitelistFile.readlines())
whitelistFile.close()
except Exception as e:
print "Warning: {}".format(e)
print "Please create a whitelist using: 'iocpuller.py -w'\nExiting..."
quit()
# get all unique ioc.domain, ioc.filehash, and ioc.attackerip
domains = getDomains()
print "Successfully got ioc.domain list."
IPS = getIPS()
print "Successfully got ioc.attackerip list."
fileHashes = getFileHashes()
print "Successfully got ioc.filehash list."
# write to intel file
# first element in the array is the ticket, the second is the value
for domain in domains:
intelFile.write(buildIntelString(domain[1], INDICATOR_TYPE_DOMAIN, \
META_SOURCE + domain[0], META_DO_NOTICE) + "\n")
for IP in IPS:
intelFile.write(buildIntelString(IP[1], INDICATOR_TYPE_IP, \
META_SOURCE + IP[0], META_DO_NOTICE) + "\n")
for filehash in fileHashes:
intelFile.write(buildIntelString(filehash[1], INDICATOR_TYPE_FILE_HASH, \
META_SOURCE + filehash[0], META_DO_NOTICE) + "\n")
print "Created intel file at location: {}".format(intelLocation)
# close files
intelFile.close()
# Provides functionality to update a whitelist for the intel file
def manageWhitelist():
def printWhitelist():
print "Total whitelisted indicators: [{}]".format(len(whitelist))
for idx, line in enumerate(whitelist):
print "{}) {}".format(idx,line)
print
# clear the terminal
clearTerminal()
# Menu for manipulating the whitelist file
menu = {}
menu['1'] = "Add indicator to whitelist."
menu['2'] = "Delete indicator from whitelist."
menu['3'] = "Edit an indicator."
menu['4'] = "Print whitelist."
menu['5'] = "Save whitelist to file."
menu['6'] = "Clear temporary whitelist."
menu['7'] = "Exit."
# try opening the whitelist file
try:
whitelistFile = open(WHITELIST_LOCATION, "r")
whitelist = map(lambda x: x.rstrip('\r\n'), whitelistFile.readlines())
whitelistFile.close()
print "Successfully opened whitelist file at: {}\n".format(WHITELIST_LOCATION)
# prompt to create the whitelist file if it doesnt exist
except Exception as e:
whitelistFile = open(WHITELIST_LOCATION, "w")
whitelistFile.close()
whitelist = []
print "Created new whitelist file at: {}\n".format(WHITELIST_LOCATION)
# Whitelist manager loop.
# Instead of updating the file after each change, a temporary whitelist is
# stored in an array and manipulated. Only saved changes will be pushed to
# the whitelist file.
while True:
# Get the options
options = menu.keys()
options.sort()
whitelist.sort() # sort the whitelist for better lookups
# print menu
for entry in options:
print entry + ") " + menu[entry]
print("-------------------------------------")
# get input
selection = raw_input("Selection: ")
# add indicator to whitelist
if selection == '1':
clearTerminal()
printWhitelist()
enteredExit = False
while not enteredExit:
print "Enter an empty string to return to menu."
indicator = raw_input("Specify an address, ip, or filehash: ")
if indicator != "": # if its not empty
if indicator not in whitelist: # if the indiciator isnt already in the whitelist
whitelist.append(indicator)
clearTerminal()
printWhitelist()
else:
clearTerminal()
printWhitelist()
print "Indicator already in whitelist."
else:
enteredExit = True
clearTerminal()
# delete indicator from whitelist
elif selection == '2':
clearTerminal()
enteredExit = False
while not enteredExit:
# print the current whitelist
printWhitelist()
print "Enter an empty string to return to menu."
# checks if the index specified is valid
validIndex = False
while not validIndex:
index = raw_input("Specify the index of the indicator you want to delete: ")
if index != "":
try:
del(whitelist[int(index)]) # delete the indicator if its valid
validIndex = True
except Exception as e:
print "Invalid index." # redo the input
else:
enteredExit = True
validIndex = True
clearTerminal()
# edit an indicator
elif selection == '3':
clearTerminal()
enteredExit = False
while not enteredExit:
# print the current whitelist
printWhitelist()
print "Enter an empty string to return to menu."
# checks if the index specified is valid
validIndex = False
while not validIndex:
index = raw_input("Specify the index of the indicator you want to modify: ")
if index != "":
try:
whitelist[int(index)] # try and access the whitelist index
newName = raw_input("Specify the new indicator name: ")
whitelist[int(index)] = newName
validIndex = True
except Exception as e:
print "Invalid index." # redo the input
else:
enteredExit = True
validIndex = True
clearTerminal()
# print whitelist
elif selection == '4':
clearTerminal()
printWhitelist()
# save whitelist
elif selection == '5':
try:
with open(WHITELIST_LOCATION, 'w') as whitelistFile:
for line in whitelist:
whitelistFile.write(line + '\n')
clearTerminal()
print "Saved whitelist to: {}.\n".format(WHITELIST_LOCATION)
except Exception as e:
clearTerminal()
print "Warning: {}".format(e)
print "Please run script as root!\nExiting..."
quit()
# clear whitelist
elif selection == '6':
whitelist = []
clearTerminal()
print "Cleared the whitelist.\n"
# exit the menu without saving
elif selection == '7':
print "Exiting..."
break
else:
clearTerminal()
print "Invalid option. Please select again...\n"
# -----------------------------------------------------------------------------
# Start script
if __name__ == "__main__":
# parse arguments
args = parseArguments()
# update the whitelist if the option is selected
if args.cmd == "whitelist":
manageWhitelist()
# pull the ioc's if the option is selected
elif args.cmd == "pull":
main(args.intel, args.top_website)
# this shouldn't ever be called but... what if?
else:
print "Please consult the usage page: './iocpuller.py -h'" | [
"KMCGamer@live.com"
] | KMCGamer@live.com |
9d086a9a5e0386554d865c16beaba00a2871db2a | b2eddd579b39c68884372f71808453d6f50668ae | /step_1_capture_tweets.py | 1c627d8604c2638e383b809d5c952e809c27ef47 | [] | no_license | PhilipGuo1992/csci_5408_assign_2 | 32214185b43ca9086c48ae0ecfb5d291b1afc23b | 39eb825a2768288746bc678892146fe0a067e384 | refs/heads/master | 2020-03-19T12:23:03.344155 | 2018-06-07T21:16:03 | 2018-06-07T21:16:03 | 136,513,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,594 | py | import tweepy
import time
import json
import csv
consumer_key = "z56cu40Jq1XxntXeGKLfhNZnk"
consumer_secret = "NWsAOtbQ4lVGPq7xooVbE21XEeMnDuFBtdTfyZzc85Czh4wKnm"
access_key = "1003752992171024386-lsvEB53AROSLhKGJgEchdgajkBJTIC"
access_secret = "NV7vVHJX3FrGJFm1fx7hivxKBLOfVom34gYAJeaUEklX1"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
# def get_profile(zhenbang):
# api = tweepy.API(auth)
# try:
# user_profile = api.get_user(zhenbang)
# except tweepy.error.TweepError as e:
# user_profile = json.loads(e.response.text)
#
# return user_profile
#
# def get_trends(location_id):
# api = tweepy.API(auth)
# try:
# trends = api.trends_place(location_id)
# except tweepy.error.TweepError as e:
# trends = json.loads(e.response.txt)
#
# return trends
# code from lab.
def get_tweets(query):
api= tweepy.API(auth)
try:
tweets = api.search(query)
except tweepy.error.TweepError as e:
tweets = [json.loads(e.response.text)]
return tweets
queries = ["eminem", "love OR hare", "revival", "slim shady", "rap god", "trump", "donald", "concert"]
with open('tweets.csv', 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(['id', 'user', 'created_at', 'text'])
for query in queries:
t = get_tweets(query)
for tweet in t:
writer.writerow([tweet.id_str, tweet.user.screen_name, tweet.created_at, tweet.text.encode('unicode-escape')])
| [
"philipguo1992@gmail.com"
] | philipguo1992@gmail.com |
44efa17d2f52a95bd7265e1773831210ac6db66a | a06c9d95093f8c33aefb13d5a46c1466c43cc1e8 | /Aulas/aula 3/aula3.py | fc75a2811210d51ced11feea1e4e87795a300c9f | [] | no_license | danrleydaniel/pygame | 3b7062a24ce1659f1c23bff70af0a372fba0f5c7 | 9de33102920d021a5f28b79a8fd04ba89e9c07f2 | refs/heads/main | 2023-08-14T16:27:00.766939 | 2021-09-30T17:08:56 | 2021-09-30T17:08:56 | 307,095,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import pygame
def main():
pygame.init()
tela = pygame.display.set_mode([300, 300])
pygame.display.set_caption("Iniciando com Pygame")
relogio = pygame.time.Clock()
cor_branca = (255,255,255)
sup = pygame.Surface((200, 200))
sair = False
while sair != True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sair = True
relogio.tick(27)
tela.fill(cor_branca)
tela.blit(sup, [10,10])
pygame.display.update()
pygame.quit()
main()
| [
"noreply@github.com"
] | danrleydaniel.noreply@github.com |
a24704b833d23a859af9ec1629f1226377f8c7ea | 5dfbfa153f22b3f58f8138f62edaeef30bad46d3 | /bill_ws/build/bill_description/catkin_generated/pkg.develspace.context.pc.py | d846aac1c29db5d24fa24af24e101a5ae58bdccd | [] | no_license | adubredu/rascapp_robot | f09e67626bd5a617a569c9a049504285cecdee98 | 29ace46657dd3a0a6736e086ff09daa29e9cf10f | refs/heads/master | 2022-01-19T07:52:58.511741 | 2019-04-01T19:22:48 | 2019-04-01T19:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "bill_description"
PROJECT_SPACE_DIR = "/home/bill/bill_ros/bill_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"alphonsusbq436@gmail.com"
] | alphonsusbq436@gmail.com |
7c4ca5b5dfae96a3696b405eff6c615b26b86332 | 4c873560c66ce3b84268ad2abcd1ffcada32e458 | /examples/scripts/csc/gwnden_clr.py | d382225550d6ce93e6a690716a2697b9e384579a | [
"BSD-3-Clause"
] | permissive | wangjinjia1/sporco | d21bf6174365acce614248fcd2f24b72d5a5b07f | c6363b206fba6f440dd18de7a17dadeb47940911 | refs/heads/master | 2023-04-02T01:10:02.905490 | 2021-03-29T14:20:57 | 2021-03-29T14:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,966 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Gaussian White Noise Restoration via CSC
========================================
This example demonstrates the removal of Gaussian white noise from a colour image using convolutional sparse coding :cite:`wohlberg-2016-convolutional`,
$$\mathrm{argmin}_\mathbf{x} \; (1/2) \sum_c \left\| \sum_m \mathbf{d}_{m} * \mathbf{x}_{c,m} -\mathbf{s}_c \right\|_2^2 + \lambda \sum_m \| \mathbf{x}_m \|_1 + \mu \| \{ \mathbf{x}_{c,m} \} \|_{2,1}$$
where $\mathbf{d}_m$ is the $m^{\text{th}}$ dictionary filter, $\mathbf{x}_{c,m}$ is the coefficient map corresponding to the $c^{\text{th}}$ colour band and $m^{\text{th}}$ dictionary filter, and $\mathbf{s}_c$ is colour band $c$ of the input image.
"""
from __future__ import print_function
from builtins import input
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco import util
from sporco import signal
from sporco import fft
from sporco import metric
from sporco import plot
from sporco.cupy import (cupy_enabled, np2cp, cp2np, select_device_by_load,
gpu_info)
from sporco.cupy.admm import cbpdn
"""
Boundary artifacts are handled by performing a symmetric extension on the image to be denoised and then cropping the result to the original image support. This approach is simpler than the boundary handling strategies that involve the insertion of a spatial mask into the data fidelity term, and for many problems gives results of comparable quality. The functions defined here implement symmetric extension and cropping of images.
"""
def pad(x, n=8):
    """Pad the two spatial axes of an image with an ``n``-pixel symmetric border.

    Parameters
    ----------
    x : ndarray
        Image array; the first two axes are treated as spatial dimensions.
        Any trailing axes (e.g. colour channels) are left unpadded.
    n : int, optional
        Border width in pixels (default 8).

    Returns
    -------
    ndarray
        Padded array of shape ``(H + 2n, W + 2n, ...)``.
    """
    if x.ndim == 2:
        return np.pad(x, n, mode='symmetric')
    # Generalised from the original 3-d-only branch: pad only the two leading
    # (spatial) axes and leave every trailing axis untouched, so 4-d inputs
    # (e.g. image stacks) work as well.  Behaviour for 2-d/3-d is unchanged.
    pad_width = ((n, n), (n, n)) + ((0, 0),) * (x.ndim - 2)
    return np.pad(x, pad_width, mode='symmetric')
def crop(x, n=8):
    """Remove an ``n``-pixel border from the two spatial axes of *x*.

    Inverse of :func:`pad`.  Fixes the n == 0 edge case: the original
    ``x[0:-0, 0:-0]`` produced an *empty* array instead of a no-op.
    """
    if n == 0:
        return x
    return x[n:-n, n:-n]
"""
Load a reference image and corrupt it with Gaussian white noise with $\sigma = 0.1$. (The call to ``numpy.random.seed`` ensures that the pseudo-random noise is reproducible.)
"""
img = util.ExampleImages().image('monarch.png', zoom=0.5, scaled=True,
idxexp=np.s_[:, 160:672])
np.random.seed(12345)
imgn = img + np.random.normal(0.0, 0.1, img.shape).astype(np.float32)
"""
Highpass filter test image.
"""
npd = 16
fltlmbd = 5.0
imgnl, imgnh = signal.tikhonov_filter(imgn, fltlmbd, npd)
"""
Load dictionary.
"""
D = util.convdicts()['G:8x8x128']
"""
Set solver options. See Section 8 of :cite:`wohlberg-2017-convolutional2` for details of construction of $\ell_1$ weighting matrix $W$.
"""
imgnpl, imgnph = signal.tikhonov_filter(pad(imgn), fltlmbd, npd)
W = fft.irfftn(np.conj(fft.rfftn(D[..., np.newaxis, :], imgnph.shape[0:2],
(0, 1))) * fft.rfftn(imgnph[..., np.newaxis], None, (0, 1)),
imgnph.shape[0:2], (0, 1))
W = 1.0/(np.maximum(np.abs(W), 1e-8))
lmbda = 1.5e-2
mu = 2.7e-1
opt = cbpdn.ConvBPDNJoint.Options({'Verbose': True, 'MaxMainIter': 250,
'HighMemSolve': True, 'RelStopTol': 3e-3, 'AuxVarObj': False,
'L1Weight': cp2np(W), 'AutoRho': {'Enabled': False},
'rho': 1e3*lmbda})
"""
Initialise a ``sporco.cupy`` version of a :class:`.admm.cbpdn.ConvBPDNJoint` object and call the ``solve`` method.
"""
if not cupy_enabled():
print('CuPy/GPU device not available: running without GPU acceleration\n')
else:
id = select_device_by_load()
info = gpu_info()
if info:
print('Running on GPU %d (%s)\n' % (id, info[id].name))
b = cbpdn.ConvBPDNJoint(np2cp(D), np2cp(pad(imgnh)), lmbda, mu, opt, dimK=0)
X = cp2np(b.solve())
"""
The denoised estimate of the image is just the reconstruction from the coefficient maps.
"""
imgdp = cp2np(b.reconstruct().squeeze())
imgd = np.clip(crop(imgdp) + imgnl, 0, 1)
"""
Display solve time and denoising performance.
"""
print("ConvBPDNJoint solve time: %5.2f s" % b.timer.elapsed('solve'))
print("Noisy image PSNR: %5.2f dB" % metric.psnr(img, imgn))
print("Denoised image PSNR: %5.2f dB" % metric.psnr(img, imgd))
"""
Display the reference, noisy, and denoised images.
"""
fig = plot.figure(figsize=(21, 7))
plot.subplot(1, 3, 1)
plot.imview(img, title='Reference', fig=fig)
plot.subplot(1, 3, 2)
plot.imview(imgn, title='Noisy', fig=fig)
plot.subplot(1, 3, 3)
plot.imview(imgd, title='CSC Result', fig=fig)
fig.show()
"""
Plot functional evolution during ADMM iterations.
"""
its = b.getitstat()
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional')
"""
Plot evolution of ADMM residuals and ADMM penalty parameter.
"""
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'])
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter')
# Wait for enter on keyboard
input()
| [
"brendt@ieee.org"
] | brendt@ieee.org |
02cf011d3d9b1895c848c8f25e71c77dc301fdcf | 5182897b2f107f4fd919af59c6762d66c9be5f1d | /.history/src/Simulador_20200708161311.py | cbf32fa7cf0f64a93631d4928bd7f10dc6a1caf5 | [
"MIT"
] | permissive | eduardodut/Trabalho_final_estatistica_cd | 422b7e702f96291f522bcc68d2e961d80d328c14 | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | refs/heads/master | 2022-11-23T03:14:05.493054 | 2020-07-16T23:49:26 | 2020-07-16T23:49:26 | 277,867,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,983 | py | import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
class Simulador():
    """Cellular epidemic simulator on a spherical (wrap-around) n x n grid.

    Every cell holds one Individuo.  Per update: contagious individuals try
    to infect their neighbours, type-2 infected may die, infected of either
    type may be cured, survivors move one random step, and a snapshot of the
    status matrix plus aggregate counts is recorded.

    NOTE(review): the original file had all leading indentation stripped; the
    structure below was reconstructed from the control-flow markers and
    comments and should be confirmed against the repository history.
    """

    def __init__(
            self,
            tamanho_matriz,            # number of rows/columns of the spherical grid
            percentual_inicial_tipo1,  # initial fraction of the population infected as type 1
            percentual_inicial_tipo2,  # initial fraction of the population infected as type 2
            chance_infeccao,           # chance a type-2 individual infects a healthy neighbour
            chance_infeccao_tipo2,     # chance a new infection becomes contagious (type 2)
            chance_morte,              # chance a type-2 individual dies at the end of an update
            atualizacoes_cura):        # updates needed to cure a type-1/2 individual
        self.num_atualizacoes = 0
        self.individuos_infectados_tipo_2 = []
        self.individuos_infectados_tipo_1 = []
        self.individuos_curados = []
        self.individuos_mortos = []
        self.lista_matrizes_posicionamento = []
        self.matriz_status = np.zeros([tamanho_matriz, tamanho_matriz], dtype=int)
        self.fabrica_individuo = Fabrica_individuo(
            chance_infeccao,
            chance_infeccao_tipo2,
            chance_morte,
            atualizacoes_cura)
        # NOTE(review): every cell initially receives the SAME Individuo
        # instance (shared reference), exactly as in the original code.
        self.matriz_individuos = pd.DataFrame(columns=range(tamanho_matriz), index=range(tamanho_matriz))
        self.matriz_individuos.loc[:] = self.fabrica_individuo.criar_individuo(Individuo.SADIO, (0, 0))
        self.matriz_status[:] = Individuo.SADIO
        # Object responsible for validating movement on the n x n grid.
        self.matriz_esferica = Matriz_esferica(tamanho_matriz)
        self.populacao_inicial = int(tamanho_matriz**2)
        self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
        self.num_inicial_tipo1 = int(self.populacao_inicial * percentual_inicial_tipo1)
        self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
        self.popular(tamanho_matriz)
        # Renamed from `dict` (shadowed the builtin).  NOTE(review): this
        # initial-count mapping was built but never stored by the original
        # code -- presumably intended as the dataframe's first row.
        contagem_inicial = {
            'num_sadios': self.num_inicial_sadios,
            'num_infect_t1': self.num_inicial_tipo1,
            'num_infect_t2': self.num_inicial_tipo2,
            'num_curados': 0,
            'num_mortos': 0}
        # Dataframe that will hold the results of every update.
        self.dataframe = pd.DataFrame(index=[0])
        self.salvar_posicionamento()

    def salvar_posicionamento(self):
        """Record the current status matrix.

        NOTE(review): the matrix is appended by reference (no copy), so every
        stored "snapshot" aliases the live matrix -- confirm whether a
        ``.copy()`` was intended before relying on the history.
        """
        self.lista_matrizes_posicionamento.append(self.matriz_status)

    def verificar_infeccao(self, lista_infectantes):
        """Let each contagious cell in *lista_infectantes* try to infect its
        healthy neighbours.

        Returns a pair ``(new_type1_indices, new_type2_indices)``; the status
        matrix and the Individuo objects are updated in place.
        """
        lista_novos_infectados_tipo1 = []
        lista_novos_infectados_tipo2 = []
        # Each infector attempts to infect its neighbours.
        for indice in lista_infectantes:
            # Fetch the current infector's neighbours.
            lista_vizinhos = self.matriz_esferica.get_vizinhos(indice[0], indice[1])
            # For each healthy neighbour, draw a random number to decide
            # whether it gets infected.
            for vizinho in lista_vizinhos:
                x = vizinho[0]
                y = vizinho[1]
                # Healthy check.
                if self.matriz_status[x, y] == Individuo.SADIO:
                    # Determine the new status.
                    novo_status = self.matriz_individuos.loc[indice[0], indice[1]].infectar()
                    # Newly infected as type 1.
                    if novo_status == Individuo.INFECTADO_TIPO_1:
                        lista_novos_infectados_tipo1.append((x, y))
                        self.matriz_individuos.loc[x, y].status = Individuo.INFECTADO_TIPO_1
                        self.matriz_status[x, y] = Individuo.INFECTADO_TIPO_1
                    # Newly infected as type 2.
                    if novo_status == Individuo.INFECTADO_TIPO_2:
                        lista_novos_infectados_tipo2.append((x, y))
                        self.matriz_individuos.loc[x, y].status = Individuo.INFECTADO_TIPO_2
                        self.matriz_status[x, y] = Individuo.INFECTADO_TIPO_2
        return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2

    def verificar_morte(self, lista_infectantes_tipo2):
        """Run the death check for every type-2 individual; return the indices
        of those that died (their status-matrix cells are updated)."""
        lista_mortos = []
        for indice in lista_infectantes_tipo2:
            novo_status = self.matriz_individuos.loc[indice[0], indice[1]].checagem_morte()
            if novo_status == Individuo.MORTO:
                lista_mortos.append(indice)
                self.matriz_status[indice[0], indice[1]] = Individuo.MORTO
        return lista_mortos

    def verificar_cura(self, lista_infectantes):
        """Run the cure check for every infected individual; return the indices
        of those that were cured (their status-matrix cells are updated)."""
        lista_curados = []
        for indice in lista_infectantes:
            novo_status = self.matriz_individuos.loc[indice[0], indice[1]].checagem_cura()
            if novo_status == Individuo.CURADO:
                lista_curados.append(indice)
                self.matriz_status[indice[0], indice[1]] = Individuo.CURADO
        return lista_curados

    def iterar(self):
        """Advance the simulation one step: infect, kill, cure, move, record."""
        # New infections produced by the current contagious individuals.
        lista_novos_infectados_tipo1_1, lista_novos_infectados_tipo2_1 = self.verificar_infeccao(self.individuos_infectados_tipo_1)
        lista_novos_infectados_tipo1_2, lista_novos_infectados_tipo2_2 = self.verificar_infeccao(self.individuos_infectados_tipo_2)
        # Death check for type-2 individuals.
        lista_mortos = self.verificar_morte(self.individuos_infectados_tipo_2)
        # Remove the dead from the type-2 infected list.
        self.individuos_infectados_tipo_2 = [i for i in self.individuos_infectados_tipo_2 if i not in lista_mortos]
        # Add the new dead to the global dead list.
        self.individuos_mortos = self.individuos_mortos + lista_mortos
        # Cure check.
        lista_curados_tipo1 = self.verificar_cura(self.individuos_infectados_tipo_1)
        lista_curados_tipo2 = self.verificar_cura(self.individuos_infectados_tipo_2)
        # Remove the cured from the type-1 and type-2 infected lists.
        self.individuos_infectados_tipo_2 = [i for i in self.individuos_infectados_tipo_2 if i not in lista_curados_tipo2]
        self.individuos_infectados_tipo_1 = [i for i in self.individuos_infectados_tipo_1 if i not in lista_curados_tipo1]
        # Add the newly cured to the global cured list.
        self.individuos_curados = self.individuos_curados + lista_curados_tipo1 + lista_curados_tipo2
        # Move the (still) contagious individuals.
        for i in range(len(self.individuos_infectados_tipo_1)):
            self.individuos_infectados_tipo_1[i] = self.mover_infectante(self.individuos_infectados_tipo_1[i])
        for i in range(len(self.individuos_infectados_tipo_2)):
            self.individuos_infectados_tipo_2[i] = self.mover_infectante(self.individuos_infectados_tipo_2[i])
        # Append the newly infected to the respective lists.
        self.individuos_infectados_tipo_2 = self.individuos_infectados_tipo_2 + lista_novos_infectados_tipo2_1 + lista_novos_infectados_tipo2_2
        self.individuos_infectados_tipo_1 = self.individuos_infectados_tipo_1 + lista_novos_infectados_tipo1_1 + lista_novos_infectados_tipo1_2
        # Record this update's aggregate counts in the dataframe.
        num_mortos = len(self.individuos_mortos)
        num_curados = len(self.individuos_curados)
        num_tipo_1 = len(self.individuos_infectados_tipo_1)
        num_tipo_2 = len(self.individuos_infectados_tipo_2)
        contagem = {
            'num_sadios': self.populacao_inicial - num_mortos - num_curados - num_tipo_1 - num_tipo_2,
            'num_infect_t1': num_tipo_1,
            'num_infect_t2': num_tipo_2,
            'num_curados': num_curados,
            'num_mortos': num_mortos}
        self.dataframe = self.dataframe.append(contagem, ignore_index=True)
        # Save the new status-matrix snapshot.
        self.salvar_posicionamento()
        # One more grid update completed.
        self.num_atualizacoes += 1

    def popular(self, tamanho_matriz):
        """Seed the grid with the initial type-1 and type-2 infected."""
        # All possible (x, y) index combinations of the data matrix.
        # NOTE(review): permutations(..., 2) never yields a diagonal (i, i)
        # pair, so diagonal cells can never receive an initial infection.
        permutacoes = permutations(list(range(tamanho_matriz)), 2)
        # Convert to a list of (x, y) tuples.
        lista_indices = list(permutacoes)
        # Shuffle the indices.
        random.shuffle(lista_indices)
        # Create the first type-1 individual:
        indice = lista_indices.pop()
        ind_x = indice[0]
        ind_y = indice[1]
        self.matriz_individuos.loc[ind_x, ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1, (ind_x, ind_y))
        self.individuos_infectados_tipo_1.append((ind_x, ind_y))
        self.matriz_status[ind_x, ind_y] = Individuo.INFECTADO_TIPO_1
        # Create the remaining type-1 individuals.
        for i in range(1, self.num_inicial_tipo1):
            indice = lista_indices.pop()
            ind_x = indice[0]
            ind_y = indice[1]
            self.matriz_individuos.loc[ind_x, ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1, (ind_x, ind_y))
            self.individuos_infectados_tipo_1.append((ind_x, ind_y))
            self.matriz_status[ind_x, ind_y] = Individuo.INFECTADO_TIPO_1
        # Create the type-2 individuals.
        for indice in range(self.num_inicial_tipo2):
            indice = lista_indices.pop()
            ind_x = indice[0]
            ind_y = indice[1]
            self.matriz_individuos.loc[ind_x, ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_2, (ind_x, ind_y))
            self.individuos_infectados_tipo_2.append((ind_x, ind_y))
            self.matriz_status[ind_x, ind_y] = Individuo.INFECTADO_TIPO_2

    def mover_infectante(self, indice):
        """Move the individual at *indice* one random step (up/down/left/right),
        wrapping at the edges, and return its new (x, y) position."""
        pos_x, pos_y = indice[0], indice[1]
        rng_posicao = random.random()
        if rng_posicao <= 0.25:
            # Move up.
            pos_x -= 1
        elif rng_posicao <= 0.5:
            # Move down.
            pos_x += 1
        elif rng_posicao <= 0.75:
            # Move left.
            pos_y -= 1
        else:
            # Move right.
            pos_y += 1
        novo_x, novo_y = self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
        # NOTE(review): pos_x/pos_y were already advanced above, so the swaps
        # below exchange the *candidate* cell with the *validated* cell; when
        # no wrap occurs these are the same cell and the swap is a no-op, i.e.
        # the grid contents never actually move.  It looks like the original
        # position should have been kept for the swap -- confirm intent.
        # Swap the two cells in the individuals dataframe.
        aux = self.matriz_individuos.loc[novo_x, novo_y]
        self.matriz_individuos.loc[novo_x, novo_y] = self.matriz_individuos.loc[pos_x, pos_y]
        self.matriz_individuos.loc[pos_x, pos_y] = aux
        # Swap the two cells in the status matrix.
        aux = self.matriz_status[novo_x, novo_y]
        self.matriz_status[novo_x, novo_y] = self.matriz_status[pos_x, pos_y]
        self.matriz_status[pos_x, pos_y] = aux
        return (novo_x, novo_y)
# --- Demo driver: build a 10x10 simulation and animate the first updates. ---
# NOTE(review): leading indentation was lost in this record; the two lines
# after the `for` header below are its intended body.
# Per-update probabilities and cure duration.
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.2
atualizacoes_cura = 10
# Initial infected fractions (both zero here, so no seeding occurs).
percentual_inicial_tipo1 = 0.
percentual_inicial_tipo2 = 0.
sim = Simulador(
10,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.individuos_infectados_tipo_2)
#print(sim.individuos_infectados_tipo_1)
# Colour map: healthy, type-1, type-2, dead.
cmap = ListedColormap(['w', 'y', 'yellow', 'red'])
# NOTE(review): index [1] is re-displayed every iteration; because snapshots
# alias the live matrix this still shows the current state -- confirm intent.
for i in range(10):
plt.matshow(sim.lista_matrizes_posicionamento[1], cmap = cmap)
sim.iterar()
plt.show()
# plt.matshow(sim.lista_matrizes_posicionamento[1], cmap = cmap)
# sim.iterar()
# plt.matshow(sim.lista_matrizes_posicionamento[2], cmap = cmap)
# sim.iterar()
# plt.matshow(sim.lista_matrizes_posicionamento[3], cmap = cmap)
| [
"eduardo_dut@edu.unifor.br"
] | eduardo_dut@edu.unifor.br |
194d85d0f80ef569c614a64fab89b028bebb7e25 | d4239e239bab52585a2e4409353a6312f5f3f351 | /VBACodesEditor.py | 1478a84a4bcbfdf44a420a36c1422c589dfda133 | [] | no_license | Liu373/Python_VBACodesEditor | 03d10d8f509766ce43b22a60d3559cb8c2698689 | 7bc5fed35f1b1f8cba4c86fff257e3c18930ae04 | refs/heads/main | 2023-08-11T12:12:28.882375 | 2021-09-21T14:13:45 | 2021-09-21T14:13:45 | 401,190,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,377 | py | import comtypes.client
import win32con
import win32com.client
import commctrl
import time
import threading
import win32gui
import os
import tkinter as tk
from tkinter import ttk
import glob
import logging
import ctypes
user32 = comtypes.windll.user32
flag = False
dir_path = os.path.dirname(os.path.realpath('__file__'))
class ProjectConstants:
    """Shared constants for the VBA-project automation."""
    # Password used to lock/unlock the VBAProject in every workbook.
    password = '063'
    # Overall timeout for window-polling loops, in seconds.
    timeout_second = 100
    # Pause between retries while waiting for a dialog to appear.
    fail_sleep_duration_second = 0.1
class WaitException(Exception):
    """Raised when an expected dialog or control is not yet available;
    callers inside :func:`wait_loop` treat it as "retry after a pause"."""
    pass
def raw_str(string):
    """Encode *string* as UTF-8 and wrap it in a C ``char*`` suitable for the
    ANSI Win32 APIs (FindWindowA / SendMessageA) used throughout this module."""
    return comtypes.c_char_p(bytes(string, 'utf-8'))
def sleep():
    """Sleep for the standard retry interval between dialog polls."""
    time.sleep(ProjectConstants.fail_sleep_duration_second)
def unlock_vba_project(application):
    """Type the project password into the 'VBAProject Password' dialog and
    click OK.

    Returns True on success.  Raises :class:`WaitException` (retried by
    :func:`wait_loop`) while the dialog or its controls are not yet available.
    """
    id_password = 0x155e
    id_ok = 1
    password_window = user32.FindWindowA(None, raw_str("VBAProject Password"))
    if password_window == 0:
        raise WaitException("Fail to Find Password Window")
    print("Found Password Window")
    # NOTE(review): TCM_SETCURFOCUS is a tab-control message; its effect on a
    # plain dialog is unclear -- kept from the original, confirm necessity.
    user32.SendMessageA(password_window, commctrl.TCM_SETCURFOCUS, 1, 0)
    # Fixed: the original read the misspelled name `passowrd_window` here,
    # which raised NameError at runtime.
    text_box = user32.GetDlgItem(password_window, id_password)
    ok_button = user32.GetDlgItem(password_window, id_ok)
    if text_box == 0 and ok_button == 0:
        raise WaitException("Fail to Find Textbox and OK Button in Password Window")
    user32.SetFocus(text_box)
    # Fixed: WM_* message constants live in win32con, not win32com.
    user32.SendMessageA(text_box, win32con.WM_SETTEXT, None, raw_str(ProjectConstants.password))
    # Fixed: original assigned `Length` but compared `length` (NameError);
    # wParam/lParam 0 added to satisfy the Win32 SendMessage contract.
    length = user32.SendMessageA(text_box, win32con.WM_GETTEXTLENGTH, 0, 0)
    if length != len(ProjectConstants.password):
        raise WaitException("Fail to Verify Password Length")
    user32.SetFocus(ok_button)
    user32.SendMessageA(ok_button, win32con.BM_CLICK, 0, 0)
    return True
def close_vba_project_window(application):
    """Dismiss the 'VBAProject - Project Properties' dialog by clicking OK.

    Returns True on success so :func:`wait_loop` (which breaks on a truthy
    result) stops polling; raises :class:`WaitException` while the dialog or
    its OK button is not yet available.
    """
    id_ok = 1
    password_window = user32.FindWindowA(None, raw_str("VBAProject - Project Properties"))
    if password_window == 0:
        raise WaitException("Fail to Find Project Properties Window to Close")
    print("Found Project Properties Window to Close")
    user32.SendMessageA(password_window, commctrl.TCM_SETCURFOCUS, 1, 0)
    ok_button = user32.GetDlgItem(password_window, id_ok)
    if ok_button == 0:
        # Fixed: the original raised the misspelled name `WaitExceptiion`.
        raise WaitException("Fail to find ok button in project properties window")
    user32.SetFocus(ok_button)
    # Fixed: `win32con.BM_ClICK` (typo) -> BM_CLICK; lParam 0 added to match
    # the Win32 SendMessage contract and the other call sites.
    user32.SendMessageA(ok_button, win32con.BM_CLICK, 0, 0)
    # Fixed: without a truthy return, wait_loop would keep clicking until its
    # timeout expired.
    return True
def lock_vba_project(application):
id_ok = 1
id_tabcontrol = 0x3020
id_subdialog = 0x8002
id_checkbox_lock = 0x1557
id_textbox_pass1 = 0x1555
id_textbox_pass2 = 0x1556
password_window = user32.FindWindowA(None, raw_str("VBA Project - Project Properties"))
if password_window == 0:
raise WaitException("Fail to find project properties window")
print("Found project properties window")
tabcontrol = user32.GetDlgItem(password_window, id_tabcontrol)
user32.SendMessageA(tabcontrol, commctrl.TCM_SETCURFOCUS, 1, 0)
if user32.SendMessageA(tabcontrol, commctrl.TCM_GETCURFOCUS) != 1:
raise WaitException("Fail to change tab control")
subdialog = user32.FindWindowExA(password_window, 0, id_subdialog, None)
if subdialog == 0:
raise WaitException("Fail to find subdialog")
checkbox_lock = user32.GetDlgItem(subdialog, id_checkbox_lock)
if checkbox_lock == 0:
raise WaitException("Fail to find checkbox")
user32.SetFocus(checkbox_lock)
user32.SendMessageA(checkbox_lock, win32con.BM_SETCHECK, win32con.BST_CHECKED, 0)
checkbox_state = user32.SendMessageA(checkbox_lock, win32con.BM_GETCHECK)
if checkbox_state != win32con.BST_CHECKED:
raise WaitException("Fail to activate checkbox")
textbox_pass1 = user32.GetDlgItem(subdialog, id_textbox_pass1)
if textbox_pass1 == 0:
raise WaitException("Fail to find password box 1")
user32.SetFocus(textbox_pass1)
user32.SendMessageA(textbox_pass1, win32con.WM_SETTEXT, None, raw_str(ProjectConstants.password))
length = user32SendMessage(textbox_pass1, win32con.WM_GETTEXTLENGTH)
if length != len("063"):
raise WaitException("Fail to complete password box 1")
textbox_pass2 = user32.GetDlgItem(subdialog, id_textbox_pass2)
user32.SetFocus(textbox_pass2)
if textbox_pass2 == 0:
raise WaitException("Fail to find password box 2")
user32.SetFocus(textbox_pass2)
user32.SendMessageA(textbox_pass2, win32con.WM_SETTEXT, None, raw_str(ProjectConstants.password))
length = user32SendMessage(textbox_pass2, win32con.WM_GETTEXTLENGTH)
if length != len("063"):
raise WaitException("Fail to complete password box 2")
ok_button = user32.GetDlgItem(password_window, id_ok)
if ok_button == 0:
raise WaitException("Fail to find OK button")
user32.SetFocus(ok_button)
user32.SendMessageA(ok_button, win32con.BM_CLICK, 0)
return True
def extract_lookup(col_index, row_range, ws):
    """Read ``ws.Range("<col_index><row>").Value`` for every row in *row_range*.

    Parameters
    ----------
    col_index : str
        Column letter(s), e.g. ``'AI'``.
    row_range : iterable of int
        Row numbers to read.
    ws : worksheet COM object
        Any object exposing ``Range(ref).Value``.

    Returns
    -------
    list
        Cell values in row order.
    """
    # Flattened from the original triple-nested comprehension; behaviour is
    # identical (cell references are built as str(col) + str(row)).
    return [ws.Range(str(col_index) + str(row)).Value for row in row_range]
def wait_loop(timeout_sec, application, func):
    """Poll ``func(application)`` until it returns truthy or *timeout_sec*
    seconds elapse.

    ``func`` signals "not ready yet" by raising :class:`WaitException`, which
    is logged and retried after a short pause.
    """
    timeout = time.time() + timeout_sec
    while time.time() < timeout:
        try:
            done_run = func(application)
            if done_run:
                break
        except WaitException as e:
            print(str(e))
            # NOTE(review): indentation was lost in the source; sleep() is
            # placed on the retry (exception) path, matching its
            # "fail_sleep_duration" naming -- confirm against history.
            sleep()
def change_property_data(wb_, new_p_version_):
    """Write the new version string into cell B32 of the 'Property Data'
    worksheet of workbook *wb_*."""
    property_ws = wb_.Worksheets("Property Data")
    cell = property_ws.Range("B32")
    cell.Value = new_p_version_
def change_reference_tables(wb_):
    """Overwrite selected lookup values in the 'Reference Tables' worksheet.

    Reads the key column (AI) and value column (AJ) for rows 3..24, replaces
    the values of the keys listed in ``to_replace``, and writes the full value
    column back.
    """
    ref_key_col = 'AI'
    ref_val_col = 'AJ'
    ref_start_row = 3
    ref_end_row = 24
    # Fixed: the original literal was corrupted ("{'Undoubted: 8.0" -- missing
    # closing quote and comma); reconstructed as a key/value pair.
    to_replace = {'Undoubted': 8.0,
                  'Unrated > 5 years': 3.0,
                  'Large pool': 6.0,
                  'Small pool': 3.0}
    reference_ws = wb_.Worksheets("Reference Tables")
    row_range = range(ref_start_row, ref_end_row + 1)
    lookup_keys = extract_lookup(ref_key_col, row_range, reference_ws)
    lookup_values = extract_lookup(ref_val_col, row_range, reference_ws)
    original_values = dict(zip(lookup_keys, lookup_values))
    new_values = original_values.copy()
    for k, v in to_replace.items():
        new_values[k] = v
    # Write the (possibly updated) value for every row back to column AJ.
    for i, k in zip(row_range, lookup_keys):
        reference_ws.Range(str(ref_val_col) + str(i)).Value = new_values[k]
def change_debt_Formula(wb_):
formula_ws = wb_.Worksheets("Debt")
for i in range(2, 6):
cell1 = formula_ws.Range("O{0}".format(i))
cell2 = formula_ws.Range("P{0}".format(i))
Formula1 = "=IFERROR(-PMT(XXXXXXXX)".format(i)
Formula2 = same as above
cell1.Value = Formula1
cell2.Value = Formula2
time.sleep(3)
def change_vba_prologue(app_, timeout_second_):
    """Open the VBA editor and unlock the project: show the code window, then
    wait for the password prompt and the project-properties dialog."""
    app_.CommandBars.ExecuteMso("ViewCode")
    wait_loop(timeout_second_, app_, unlock_vba_project)
    wait_loop(timeout_second_, app_, close_vba_project_window)
def change_vba(wb_, old_c_version_, new_c_version_):
    """Replace every occurrence of *old_c_version_* with *new_c_version_* in
    the 'Complete' VBA component of workbook *wb_*.

    NOTE: only the first occurrence per line is replaced, because the next
    search resumes on the following line (Find restarts at startrow + 1).
    """
    match = old_c_version_
    replacement = new_c_version_
    code_base = wb_.VBAProject.VBComponents("Complete").CodeModule
    startrow = 0
    while True:
        # CodeModule.Find returns (found, startline, startcol, endline,
        # endcol) with 1-based positions; -1 means "search to the end".
        success, startrow, startcol, endrow, endcol = code_base.Find(match, startrow + 1, 1, -1, -1)
        if not success:
            break
        old_line = code_base.Lines(startrow, 1)
        new_line = old_line[:startcol - 1] + replacement + old_line[endcol - 1:]
        code_base.ReplaceLine(startrow, new_line)
def change_vba_formula(wb_, old_intersect1, old_intersect2, old_intersect3):
match1 = old_intersect1
match2 = old_intersect2
match3 = old_intersect3
code_base = wb_.VBAProject.VBAComponents("Sheet03").CodeModule
startrow = 0
while True:
success, startrow, startcol, endrow, endcol = code_base.Find(match1, startrow +1, 1, -1, -1)
if not success:
break
new_line = "'" + match1
code_base.ReplaceLine(startrow, new_line)
startrow = 0
while True:
success, startrow, startcol, endrow, endcol = code_base.Find(match2, startrow +1, 1, -1, -1)
if not success:
break
new_line = "'" + match2
code_base.ReplaceLine(startrow, new_line)
new_line = "'" + match3
code_base.ReplaceLine(startrow, new_line)
time.sleep(2)
def change_back_vba_formula(wb_, new_intersect1, new_intersect2, old_intersect1, old_intersect2, old_intersect3):
match1 = new_intersect1
match2 = new_intersect2
code_base = wb_.VBProject.VBComponents("Sheet03").CodeModule
startrow = 0
while True:
success, startrow, startcol, endrow, endcol = code_base.Find(match1, startrow +1, 1, -1, -1)
if not success:
break
new_line = old_intersect1
code_base.ReplaceLine(startrow, new_line)
startrow = 0
while True:
success, startrow, startcol, endrow, endcol = code_base.Find(match2, startrow +1, 1, -1, -1)
if not success:
break
new_line = old_intersect2
code_base.ReplaceLine(startrow, new_line)
new_line = old_intersect3
code_base.ReplaceLine(startrow, new_line)
time.sleep(2)
def change_vba_epilogue(app_, timeout_second_):
    """Re-lock the VBA project: open the project-properties dialog via its
    command-bar control id and wait for the lock routine to complete."""
    id_project_properties = 2578
    app_.VBE.CommandBars.FindControl(Id=id_project_properties).Execute()
    wait_loop(timeout_second_, app_, lock_vba_project)
def terminate():
global flag
while (1):
hwnd = win32gui.FindWindow(None, 'VBAProject Password')
if hwnd != 0:
print("\n")
print("\n")
print("Found Password Window")
id_password = 0x155e
id_ok = 1
text_box = user32.GetDlgItem(hwnd, id_password)
ok_button = user32.GetDlgItem(hwnd, id_ok)
if text_box == 0 and ok_button == 0:
raise WaitException("Fail to find textbox and okbutton in password window")
user32.SetFocus(text_box)
user32.SendMessageA(text_box, win32con.WM_SETTEXT, None, raw_str(ProjectConstants.password))
user32.SetFocus(ok_button)
user32.SendMessageA(ok_button, win32con.BM_CLICK, 0, 0)
break
if flag == True:
break
class run_main:
def __init__(self, entry, root):
self.entry = entry
self.root = root
self.sf = ' '
self.of = ' '
def main(self):
my_progress['value'] = 10
self.root.update_idletasks()
app = win32com.client.DispatchEx('Excel.Application')
app.Visible = False
Path = self.entry['Folder_Path'].get()
logfile = dir_path + '\\Excel_Editor_Automation.log'
new_p_version = 'Version: 1.3'
old_c_version = 'Completed-V2'
new_c_version = 'Completed-V3'
new_e_version = '1.3'
match1 = 'If Not Application.Intersect(ActiveCell, Range("B2:X45")) Is Nothing Then'
match2 = 'Sheet3.Range("A4").ClearContents'
match3 = 'End If'
new_match1 = "'" + match1
new_match2 = "'" + match2
LOG_FORMAT = "%(levelname)s:%(asctime)s:%(message)s"
try:
logging.basicConfig(filename = logfile, level = logging.DEBUG, format = LOG_FORMAT, filemode = 'w')
logger = logging.getLogger()
except Exception as e:
ctypes.windll.user32.MessageBox(0, 'Issue for creating log file', 'Warning', 1)
logger.warning('Issue for creating log file')
logger.warning(e)
quit(self.root)
my_progreww['value'] = 20
self.root.upadte_idletasks()
i = 0
logger.info("Pre-Varibles are set up and will start the 'for' loop")
for f in os.listdir(Path):
if f.endswith(".xlsm"):
inp = Path + '\\' + f
outp = Path + '\\Output' + "(Converted_NewVersion_{0})_",format(new_e_version) + f
xlsmCounter = len(glob.glob1(Path, "*.xlsm"))
increment = (90-20)/xlsmCounter
logger.info("Forloop-Variables are set up")
my_progress['value'] = my_progress['value'] + increment
self.root.update_idletasks()
try:
wb = app.Workbooks.Open(inp)
time.sleep(5)
except Exception as e:
ctypes.windll.user32.MessageBoxW(0, "{0} is not found or opened".format(f), "warning", 1)
logger.warning("{0} is not found or opened".format(f))
logger.warning(e)
wb.Close(False)
app.Quit()
continue
try:
wb.Unprotect(ProjectConstants.password)
time.sleep(1)
logger.info("{0} has been unprotected".format(f))
except Exception as e:
ctypes.windll.user32.MessageBoxW(0, "{0} can not be unprotected".format(f), "warning", 1)
logger.warning("{0} can not be unprotected".format(f))
logger.warning(e)
wb.Close(False)
app.Quit()
continue
try:
change_property_data(wb, new_p_version)
time.sleep(1)
logger.info("{0}'s Property Data Tab has been updated".format(f))
except Exception as e:
ctypes.windll.user32.MessageBoxW(0, "{0}'s Property Data Tab can not be updated".format(f), "warning", 1)
logger.warning("{0}'s Property Data Tab can not be updated".format(f))
logger.warning(e)
wb.Close(False)
app.Quit()
continue
try:
change_reference_tables(wb)
time.sleep(1)
logger.info("{0}'s Reference Data Tab has been updated".format(f))
except Exception as e:
ctypes.windll.user32.MessageBoxW(0, "{0}'s Reference Data Tab can not be updated".format(f), "warning", 1)
logger.warning("{0}'s Reference Data Tab can not be updated".format(f))
logger.warning(e)
wb.Close(False)
app.Quit()
continue
t = threading.Thread(target = terminate)
t.start()
try:
app.CommandBars.ExecuteMso("ViewCode")
except:
t.join()
None
if t.is_alive():
t.join()
time.sleep(1)
try:
change_vba(wb, old_c_version, new_c_version)
time.sleep(1)
logger.info("{0}'s VBA has been updated".format(f))
except Exception as e:
ctypes.windll.user32.MessageBoxW(0, "{0}'s VBA can not be updated".format(f), "warning", 1)
logger.warning("{0}'s VBA can not be updated".format(f))
logger.warning(e)
wb.Close(False)
app.Quit()
continue
try:
change_vba_formula(wb, match1, match2, match3)
time.sleep(1)
logger.info("{0}'s VBA Debt Formula has been Commented".format(f))
except Exception as e:
ctypes.windll.user32.MessageBoxW(0, "{0}'s VBA Debt Formula can not be commented".format(f), "warning", 1)
logger.warning("{0}'s VBA Debt Formula can not be commented".format(f))
logger.warning(e)
wb.Close(False)
app.Quit()
continue
try:
change_debt_formula(wb)
time.sleep(1)
logger.info("{0}'s Debt Tab Formula has been updated".format(f))
except Exception as e:
ctypes.windll.user32.MessageBoxW(0, "{0}'s Debt Tab Formula can not be updated".format(f), "warning", 1)
logger.warning("{0}'s Debt Tab Formula can not be updated".format(f))
logger.warning(e)
wb.Close(False)
app.Quit()
continue
try:
change_back_vba_formula(wb, new_match1, new_match2, match1, match2, match3)
time.sleep(1)
logger.info("{0}'s VBA Debt Formula has been changed back".format(f))
except Exception as e:
ctypes.windll.user32.MessageBoxW(0, "{0}'s VBA Debt Formula can not be changed back".format(f), "warning", 1)
logger.warning("{0}'s VBA Debt Formula can not be changed back".format(f))
logger.warning(e)
wb.Close(False)
app.Quit()
continue
wb.Protect(ProjectConstants.password)
time.sleep(1)
logger.info("{0} has been re-protected".format(f))
app.DisplayAlerts = False
try:
wb.SaveAs(Filename = outp)
time.sleep(1)
logger.info("{0} has been saved".format(f))
except Exception as e:
ctypes.windll.user32.MessageBoxW(0, "{0} can not be saved as".format(f), "warning", 1)
logger.warning("{0} can not be saved as".format(f))
logger.warning(e)
wb.Close(False)
app.Quit()
continue
app.DisplayAlert = True
wb.Close()
app.Quit()
i += 1
logger.info("{0}'s updates are done".format(f))
if i == 0:
ctypes.windll.user32.MessageBoxW(0, "No file is updated or No xlsm file has been found in the folder", "info", 1)
logger.warning("No file starting with 'EMV' has been found in the folder. Check your Path again")
quit(self.root)
my_progress['value'] = 100
self.root.update_idletasks()
original_files = len(glob.glob1(path, "*.xlsm"))
logger.info("The Whole Process Completed")
print("\n")
print("\n")
print("-"*50)
print("\n")
print("The work is done")
print("\n")
print("-"*50)
self.sf = i
self.0f = original_files
time.sleep(2)
def message(self):
self.main()
ctypes.windll.user32.MessageBoxW(0, "Process completed! You've successfully converted {0} out of {1} files".format(self.sf, self.of), "info", 1)
def makeform(root):
    """Build the input form on *root*.

    Returns a dict mapping field name -> tkinter Entry; currently the only
    field is 'Folder_Path'.
    """
    entries = {}
    lab = tk.Label(root, width=18, text='Folder Path:', font=(None, 10, 'bold'), anchor='w', fg='White', bg='black')
    lab.place(x=30, y=30)
    lab = tk.Label(root, width=18, text='Progress Bar:', font=(None, 10, 'bold'), anchor='w', fg='White', bg='black')
    lab.place(x=30, y=130)
    folder_path_text = tk.StringVar()
    folder_path_entry = tk.Entry(root, textvariable=folder_path_text)
    folder_path_entry.place(x=150, y=30, width=700, height=25)
    entries['Folder_Path'] = folder_path_entry
    return entries
def quit(root):
    """Close the application by destroying the Tk root window.

    NOTE: the name shadows the builtin ``quit``; kept because both the Quit
    button and run_main call it as ``quit(root)``.
    """
    # Fixed: the original called the misspelled `root.destory()`, which raised
    # AttributeError at runtime.
    root.destroy()
root = tk.Tk()
w = 900
h = 180
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
root.geometry("%dx%d+%d+%d" % (w, h, x, y))
root.configure(background='black')
root.attributes('-alpha', 0.90)
file_location = makeform(root)
my_progress = ttk.Progressbar(root, orient=tk.HORIZONTAL, length=300, mode="determinate")
my_progress.place(x=150, y=130)
Quick = tk.Button(root, text="Quit", command=lambda root=root: quit(root), height=1, width=10, bg='White', fg='Black', font=(None, 10, 'bold'))
Quick.place(x=260, y=80)
Submit = tk.Button(root, text="Run", command=lambda e=file_location, root=root: [run_main(e, root).message()], height=1, width=10, bg="White", fg="Black", fond=(None, 10, 'bold'))
Submit.place(x=150, y=80)
root.mainloop()
| [
"noreply@github.com"
] | Liu373.noreply@github.com |
1eb48a906c41d240228e260d96f74a91e308d423 | 2afb1095de2b03b05c8b96f98f38ddeca889fbff | /web_scrapping/try_steam_parse.py | f76504a9eb079147e18d2455c67b424ba847329a | [] | no_license | draganmoo/trypython | 187316f8823296b12e1df60ef92c54b7a04aa3e7 | 90cb0fc8626e333c6ea430e32aa21af7d189d975 | refs/heads/master | 2023-09-03T16:24:33.548172 | 2021-11-04T21:21:12 | 2021-11-04T21:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,267 | py | import glob
import pandas as pd
from bs4 import BeautifulSoup

# Parse the saved Steam search-result pages and export one CSV with game
# name, release date, original price and (if discounted) current price.
#
# Rows are accumulated in plain lists and turned into DataFrames once at
# the end: DataFrame.append() was deprecated in pandas 1.4 and removed in
# 2.0, and appending row-by-row is quadratic anyway.
name_rows = []
original_price_rows = []
changed_price_rows = []

for one_file in glob.glob("steam_html_file/*.html"):
    # Use a context manager so the file handle is always closed.
    with open(one_file, "r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    items = soup.find("div", id="search_result_container")
    for item in items.find_all("a"):
        try:
            # A price change adds extra child tags under the shared price
            # element, which alters the overall markup layout.
            price_change = item.find("div", class_="col search_price discounted responsive_secondrow")
            if not price_change:
                # Price of a game whose price never changed.
                original_price = item.find("div", class_="col search_price_discount_combined responsive_secondrow").get_text().strip()
            else:
                # A discounted game still contributes an (empty) entry so
                # the three row lists stay index-aligned for the joins below.
                original_price = ""
            original_price_rows.append({
                "3.original price": original_price
            })
            if price_change:
                # Current price of a game whose price changed.
                changed_price = price_change.get_text().strip()
            else:
                changed_price = ""
            changed_price_rows.append({
                "4.changed price": changed_price
            })
            # Price extraction done; now the name and the release date.
            name = item.find("div", class_="col search_name ellipsis").find("span").get_text().strip()
            release_time = item.find("div", class_="col search_released responsive_secondrow").get_text().strip()
            name_rows.append({
                "1.name": name,
                "2.release_time": release_time,
            })
        except Exception:
            # Skip entries with unexpected markup.  (Narrowed from a bare
            # ``except:``, which also swallowed KeyboardInterrupt/SystemExit.)
            pass

df = pd.DataFrame(name_rows)
df1 = pd.DataFrame(original_price_rows)
df2 = pd.DataFrame(changed_price_rows)
# Merge the two price columns, then attach them to the name/date table
# (row indexes are aligned because the lists were filled in lockstep).
df2 = df1.join(df2)
df = df.join(df2)
print(df)
df.to_csv("steam_html_file/steam_fps_game.csv", encoding="utf-8-sig")
| [
"13701304462@163.com"
] | 13701304462@163.com |
ab5123e5c40629e5280e10236c6abe7c03e7a859 | 07e22b0a383fb0fc198ccd76d51e8b1481aa4d8e | /account/migrations/0001_initial.py | b54fe0b4de9605143ffb4a10cf3027c9b6307492 | [] | no_license | Shivangi438/demo3 | fb43bbf4b32bb1bad1c01841289b6d76f503e8dc | fa73ee485b2175f8abbd7dbc31149157e4fa52a1 | refs/heads/master | 2022-12-08T22:34:46.621994 | 2020-09-13T08:19:46 | 2020-09-13T08:19:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | # Generated by Django 3.1 on 2020-09-06 07:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``account`` app: a minimal ``User`` model and a
    per-teacher weekly timetable row with seven lecture slots.

    NOTE(review): if this migration has already been applied anywhere, do
    not edit it — make schema changes in a new migration instead.
    """

    initial = True

    # No dependencies: this is the app's first migration.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                # NOTE(review): primary_key='True' is the *string* 'True',
                # not the bool True.  Django only checks truthiness, so the
                # field still acts as the primary key, but the bool is
                # presumably what was intended.
                ('username', models.CharField(max_length=50, primary_key='True', serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='teacher_timetable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One row per (teacher, day); the seven lecture slots are
                # stored as flat CharFields.
                ('day', models.CharField(max_length=50)),
                ('First_lech', models.CharField(max_length=50)),
                ('sec_lech', models.CharField(max_length=50)),
                ('third_lech', models.CharField(max_length=50)),
                ('fourth_lech', models.CharField(max_length=50)),
                ('fifth_lech', models.CharField(max_length=50)),
                ('sixth_lech', models.CharField(max_length=50)),
                ('sev_lech', models.CharField(max_length=50)),
                # Links each timetable row to its teacher (account.User).
                ('name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.user')),
            ],
        ),
    ]
| [
"shpy3296@gmail.com"
] | shpy3296@gmail.com |
472327499a42ace71b4610362be969074821b053 | 8d9c8fd1f8bbdee01bf0bb685feee7f5980484a8 | /RNN_generacion_nombres.py | 3a4fe0bf8dbf2b00a8324320f8970842e4fe12d7 | [] | no_license | avenegascaleron/ejemploRNN | e2784afa2539c959a9799916ecd32bf91fb5dd87 | 8c8e6823bf6adac7b32d892e88ce13203a7e11c5 | refs/heads/main | 2023-01-28T08:59:29.230064 | 2020-12-01T12:53:20 | 2020-12-01T12:53:20 | 317,540,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,988 | py | import numpy as np
np.random.seed(5)
from keras.layers import Input, Dense, SimpleRNN
from keras.models import Model
from keras.optimizers import SGD
from keras.utils import to_categorical
from keras import backend as K
# 1. DATA SET LOADING
# ===========================================================
# NOTE(review): this handle is never closed; acceptable for a one-shot script.
nombres = open('apellidosVascos.txt','r', encoding='utf8').read()
nombres = nombres.lower()

# Build the alphabet (list of unique characters in the corpus).
# set() ordering is nondeterministic, but the index maps below sort first.
alfabeto = list(set(nombres))
tam_datos, tam_alfabeto = len(nombres), len(alfabeto)
print("En total hay %d caracteres, y el diccionario tiene un tamaño de %d caracteres." % (tam_datos, tam_alfabeto))
print(*alfabeto, sep = ", ")

# Character-to-index and index-to-character lookup tables.
car_a_ind = { car:ind for ind,car in enumerate(sorted(alfabeto))}
ind_a_car = { ind:car for ind,car in enumerate(sorted(alfabeto))}
print(car_a_ind)
print(ind_a_car)

# 2. MODEL
# ===========================================================
# Single-cell recurrent model: a SimpleRNN (returning its hidden state)
# followed by a softmax over the alphabet; fed one timestep at a time.
n_a = 25    # number of units in the hidden layer
entrada = Input(shape=(None,tam_alfabeto))
a0 = Input(shape=(n_a,))

celda_recurrente = SimpleRNN(n_a, activation='tanh', return_state = True)
capa_salida = Dense(tam_alfabeto, activation='softmax')

salida = []
hs, _ = celda_recurrente(entrada, initial_state=a0)
salida.append(capa_salida(hs))
modelo = Model([entrada,a0],salida)
#modelo.summary()

opt = SGD(lr=0.0005)
modelo.compile(optimizer=opt, loss='categorical_crossentropy')

# 3. TRAINING EXAMPLES
# ===========================================================
# Build the list of training examples (one surname per line) and shuffle it.
with open("apellidosVascos.txt", encoding='utf8') as f:
    ejemplos = f.readlines()
ejemplos = [x.lower().strip() for x in ejemplos]
np.random.shuffle(ejemplos)

# Training examples are produced lazily by the generator below.
def train_generator():
    """Endlessly yield one-hot encoded training pairs for the RNN.

    Each item is ``([x, a0], y)`` where ``x`` is the one-hot input sequence
    (its first timestep is all zeros, standing for the "empty" start
    symbol), ``a0`` is a zero initial hidden state, and ``y`` is the
    one-hot target sequence (the input shifted one step, terminated by
    the newline character).  Relies on the module-level ``ejemplos``,
    ``car_a_ind``, ``tam_alfabeto`` and ``n_a``.
    """
    while True:
        # Pick one training name at random.
        sample = ejemplos[np.random.randint(0, len(ejemplos))]
        # Numeric input sequence; the leading None marks the start symbol.
        input_ids = [None] + [car_a_ind[ch] for ch in sample]
        # Targets: inputs shifted one step, ending with the newline index.
        target_ids = input_ids[1:] + [car_a_ind['\n']]
        seq_len = len(input_ids)
        # One-hot encode; row 0 of x stays all-zero (the None placeholder).
        x = np.zeros((seq_len, 1, tam_alfabeto))
        x[1:, :, :] = to_categorical(input_ids[1:], tam_alfabeto).reshape(seq_len - 1, 1, tam_alfabeto)
        y = to_categorical(target_ids, tam_alfabeto).reshape(seq_len, tam_alfabeto)
        # Zero initial activation.
        a = np.zeros((seq_len, n_a))
        yield [x, a], y
# 4. TRAINING
# ===========================================================
BATCH_SIZE = 80		# number of training examples used per iteration
NITS = 0            # number of iterations
# NOTE(review): NITS is 0, so this loop body never runs and the model stays
# untrained; presumably a leftover from experimentation — confirm.
# NOTE(review): Model.fit_generator is deprecated in modern Keras in favor
# of Model.fit — confirm against the pinned Keras version.
for j in range(NITS):
    historia = modelo.fit_generator(train_generator(), steps_per_epoch=BATCH_SIZE, epochs=1, verbose=0)

    # Print training progress every 1000 iterations.
    if j%1000 == 0:
        print('\nIteración: %d, Error: %f' % (j, historia.history['loss'][0]) + '\n')

# 5. NAME GENERATION USING THE TRAINED MODEL
# ===========================================================
def generar_nombre(modelo,car_a_num,tam_alfabeto,n_a):
    """Sample one name from the trained model and print it.

    Characters are drawn one at a time from the softmax distribution until
    a newline is produced or 20 characters are reached.  NOTE(review): the
    ``modelo`` and ``car_a_num`` parameters are never used — generation
    goes through the module-level ``celda_recurrente``, ``capa_salida``
    and ``ind_a_car`` instead; confirm whether that is intentional.
    """
    # Initialize x (input) and a (hidden state) with zeros.
    x = np.zeros((1,1,tam_alfabeto,))
    a = np.zeros((1, n_a))

    # Generated name so far, and the end-of-line character.
    nombre_generado = ''
    fin_linea = '\n'
    car = -1

    # Iterate the model, generating predictions until "fin_linea" is hit
    # or the name reaches the cap.  (NOTE(review): cap is 20 characters,
    # not 50 as an earlier comment claimed.)
    contador = 0
    while (car != fin_linea and contador != 20):
          # Run one step of the RNN cell.
          a, _ = celda_recurrente(K.constant(x), initial_state=K.constant(a))
          y = capa_salida(a)
          prediccion = K.eval(y)

          # Sample an index from the predicted distribution (the most
          # probable element has the best chance of being picked).
          ix = np.random.choice(list(range(tam_alfabeto)),p=prediccion.ravel())

          # Convert the chosen index to a character and append it.
          car = ind_a_car[ix]
          nombre_generado += car

          # Feed the sample back in: x_(t+1) = y_t, and carry the state.
          x = to_categorical(ix,tam_alfabeto).reshape(1,1,tam_alfabeto)
          a = K.eval(a)

          # Update the counter and continue.
          contador += 1

    # Append an end-of-line if the loop stopped at the length cap.
    if (contador == 20):
        nombre_generado += '\n'

    print(nombre_generado)
# Generate 100 example names from the (already trained) model.
for i in range(100):
    generar_nombre(modelo,car_a_ind,tam_alfabeto,n_a)
"noreply@github.com"
] | avenegascaleron.noreply@github.com |
44b8b15712428540ec8bb8881ed03e41fb5bbabc | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit2167.py | 0d743940a6f806b63db35149af9e8542d6728277 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,035 | py | # qubit number=4
# total number=37
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Return the bitwise XOR of two equal-length bit strings.

    The output is reversed relative to the inputs (index 0 of the inputs
    becomes the last character of the result), matching the bit-ordering
    convention used by the callers in this file.
    """
    xored = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(xored))
def bitwise_dot(s: str, t: str) -> str:
    """Return the mod-2 inner product of two bit strings, as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase-free oracle circuit O_f over n control qubits and
    one target qubit.

    For every n-bit string ``rep`` with ``f(rep) == "1"``, the target is
    flipped exactly when the controls encode ``rep``: the controls whose
    bit is '0' are conjugated with X gates around a multi-controlled
    Toffoli ('noancilla' mode).
    """
    ctrl_reg = QuantumRegister(n, "ofc")
    tgt_reg = QuantumRegister(1, "oft")
    circuit = QuantumCircuit(ctrl_reg, tgt_reg, name="Of")

    def conjugate_zero_controls(bits):
        # X on every control whose corresponding bit is '0'.
        for pos, bit in enumerate(bits):
            if bit == "0":
                circuit.x(ctrl_reg[pos])

    for idx in range(2 ** n):
        bits = np.binary_repr(idx, n)
        if f(bits) == "1":
            conjugate_zero_controls(bits)
            circuit.mct(ctrl_reg, tgt_reg[0], None, mode='noancilla')
            conjugate_zero_controls(bits)

    return circuit
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the n-qubit benchmark circuit: a Deutsch–Jozsa-style layout
    (Hadamards, the oracle for ``f``, Hadamards again) interleaved with
    extra auto-generated single- and two-qubit gates, then measure all
    qubits.

    The ``# number=NN`` tags appear to be identifiers emitted by the tool
    that generated/mutated this benchmark; they are deliberately kept.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=16
    prog.cz(input_qubit[0],input_qubit[3]) # number=17
    prog.rx(-0.5686282702997527,input_qubit[3]) # number=32
    prog.h(input_qubit[3]) # number=18
    prog.h(input_qubit[3]) # number=26
    prog.cz(input_qubit[0],input_qubit[3]) # number=27
    prog.h(input_qubit[3]) # number=28
    prog.x(input_qubit[3]) # number=21
    prog.rx(0.4241150082346221,input_qubit[2]) # number=33
    prog.h(input_qubit[3]) # number=34
    prog.cz(input_qubit[0],input_qubit[3]) # number=35
    prog.h(input_qubit[3]) # number=36
    prog.cx(input_qubit[0],input_qubit[3]) # number=12
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5

    # Apply the oracle on the first n-1 qubits plus the last (target) qubit.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=23
    prog.cz(input_qubit[1],input_qubit[2]) # number=24
    prog.h(input_qubit[2]) # number=25
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.cx(input_qubit[2],input_qubit[0]) # number=29
    prog.z(input_qubit[2]) # number=30
    prog.cx(input_qubit[2],input_qubit[0]) # number=31
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[0]) # number=14
    prog.y(input_qubit[0]) # number=15
    # circuit end

    # Measure every qubit into its classical register bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Oracle function: f(rep) = (a . rep) XOR b with a = "111", b = "0".
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Simulate the circuit on the QASM simulator.
    backend = BasicAer.get_backend('qasm_simulator')

    sample_shot =8000

    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a fake device to measure the compiled circuit size.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Dump counts, compiled length, and the compiled circuit to a CSV.
    writefile = open("../data/startQiskit2167.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
791cb6aa33976e681d1eda201b25780a2d473025 | 4a4ae0208909f3297021977bcb7733d71d8d1321 | /main.py | cb9a2c85d4c497787c93a7502e37f8f948091e48 | [
"MIT"
] | permissive | weishan-Lin/rl-tf2 | 1f52c55df1974ac093969fa0f21c0f955852e6f8 | 7474df2c6ba9980dccf8946fa95942fe2382cd65 | refs/heads/main | 2023-08-05T17:05:22.205615 | 2021-10-04T00:08:55 | 2021-10-04T00:08:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | import tensorflow as tf
import gym
from rl_tf2.agents.ddpg.actor_network import Actor
from rl_tf2.agents.ddpg.critic_network import Critic
from rl_tf2.agents.ddpg.ddpg_agent import DDPG
import yaml
# Load the experiment configuration (env name, agent hyper-parameters).
with open('config.yml', 'r') as f:
    config = yaml.load(f, Loader=yaml.FullLoader)

env = gym.make(config['env'])

# Only the DDPG agent is wired up; any other config['agent'] value is a no-op.
if config['agent'] == 'DDPG':
    # Online actor/critic networks plus their target copies.
    critic = Critic(hidden_size=config['critic']['hidden_size'])
    actor = Actor(env.action_space.shape[0],
                  hidden_size=config['actor']['hidden_size'],
                  action_lb=env.action_space.low,
                  action_ub=env.action_space.high)
    target_critic = Critic(hidden_size=config['critic']['hidden_size'])
    target_actor = Actor(env.action_space.shape[0],
                         hidden_size=config['actor']['hidden_size'],
                         action_lb=env.action_space.low,
                         action_ub=env.action_space.high)

    # Making the weights equal (targets start as exact copies).
    target_actor.set_weights(actor.get_weights())
    target_critic.set_weights(critic.get_weights())

    # All remaining hyper-parameters come straight from config.yml.
    agent = DDPG(
        env,
        actor,
        critic,
        target_actor,
        target_critic,
        env_name=config['env'],
        replay_size=config['replay_size'],
        batch_size=config['batch_size'],
        epochs=config['epochs'],
        noise_std=config['noise_std'],
        noise_type=config['noise_type'],
        actor_lr=config['actor_lr'],
        critic_lr=config['critic_lr'],
        target_network_update_rate=config['target_network_update_rate'],
        discount=config['discount'],
        max_steps_per_epoch=config['max_steps_per_epoch'],
        log_weights=config['log_weights'])

    agent.train(test_after_epoch=config['test_after_epoch'],
                render=config['render'],
                print_step_info=config['print_step_info'])
| [
"xjygr08@gmail.com"
] | xjygr08@gmail.com |
38c048053b17d136f49564e67551e3075fbf6610 | f866b2f3450fe6ebdf17fb853bcd27710548b928 | /eleanor/postcard.py | 6727126d4ad4a849a17947d41f7ccad2bf96a427 | [
"MIT"
] | permissive | viyangshah/eleanor | 5efdb5e33506d0105a2617eee9e4c414df645f2b | 7b3393be544854d9a5272f6ce8a4695fdf3a3609 | refs/heads/master | 2020-06-07T08:06:15.549143 | 2019-06-20T17:05:34 | 2019-06-20T17:05:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,079 | py | import os, sys
from astropy.io import fits
import matplotlib.pyplot as plt
from astropy.wcs import WCS
import numpy as np
import warnings
import pandas as pd
import copy
from .mast import crossmatch_by_position
from urllib.request import urlopen
__all__ = ['Postcard']
class Postcard(object):
    """TESS FFI data for one postcard across one sector.

    A postcard is a rectangular subsection cut out from the FFIs.
    It's like a TPF, but bigger.

    The Postcard object contains a stack of these cutouts from all available
    FFIs during a given sector of TESS observations.

    Parameters
    ----------
    filename : str
        Filename of the downloaded postcard.
    ELEANORURL : str
        Base URL to download the postcard from when `location` is not given.
    location : str, optional
        Filepath to `filename`.

    Attributes
    ----------
    dimensions : tuple
        (`x`, `y`, `time`) dimensions of postcard.
    flux, flux_err : numpy.ndarray
        Arrays of shape `postcard.dimensions` containing flux or error on flux
        for each pixel.
    time : numpy.ndarray
        Mid-exposure time of each cadence.
    header : dict
        Stored header information for postcard file.
    center_radec : tuple
        RA & Dec coordinates of the postcard's central pixel.
    center_xy : tuple
        (`x`, `y`) coordinates corresponding to the location of
        the postcard's central pixel on the FFI.
    origin_xy : tuple
        (`x`, `y`) coordinates corresponding to the location of
        the postcard's (0,0) pixel on the FFI.
    """

    def __init__(self, filename, ELEANORURL, location=None):
        if location is not None:
            # Local postcard: open the file directly.
            self.filename = '{}{}'.format(location, filename)
            self.local_path = copy.copy(self.filename)
            self.hdu = fits.open(self.local_path)
        else:
            # Remote postcard: cache it under ~/.eleanor/postcards, falling
            # back to the current working directory if that can't be made.
            self.post_dir = os.path.join(os.path.expanduser('~'), '.eleanor/postcards')
            if not os.path.isdir(self.post_dir):
                try:
                    os.mkdir(self.post_dir)
                except OSError:
                    # BUG FIX: warn with the directory that actually failed.
                    # The original assigned '.' first and then formatted the
                    # message, so it always reported "unable to create .".
                    warnings.warn('Warning: unable to create {}. '
                                  'Downloading postcard to the current '
                                  'working directory instead.'.format(self.post_dir))
                    self.post_dir = '.'

            self.filename = '{}{}'.format(ELEANORURL, filename)
            self.local_path = '{}/{}'.format(self.post_dir, filename)

            if not os.path.isfile(self.local_path):
                print("Downloading {}".format(self.filename))
                # Shell out to curl; both path components are built locally,
                # not from untrusted input.
                os.system('cd {} && curl -O -L {}'.format(self.post_dir, self.filename))
            self.hdu = fits.open(self.local_path)

    def __repr__(self):
        return "eleanor postcard ({})".format(self.filename)

    def plot(self, frame=0, ax=None, scale='linear', **kwargs):
        """Plots a single frame of a postcard.

        Parameters
        ----------
        frame : int, optional
            Index of frame. Default 0.

        ax : matplotlib.axes.Axes, optional
            Axes on which to plot. Creates a new object by default.

        scale : str
            Scaling for colorbar; acceptable inputs are 'linear' or 'log'.
            Default 'linear'.

        **kwargs : passed to matplotlib.pyplot.imshow

        Returns
        -------
        ax : matplotlib.axes.Axes
        """
        if ax is None:
            _, ax = plt.subplots(figsize=(8, 7))

        # BUG FIX: this comparison was ``scale is 'log'`` — an identity
        # test that silently fails for equal but non-interned strings.
        if scale == 'log':
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                dat = np.log10(self.flux[:, :, frame])
                dat[~np.isfinite(dat)] = np.nan
        else:
            dat = self.flux[:, :, frame]

        if ('vmin' not in kwargs) and ('vmax' not in kwargs):
            # Default the color range to the 1st-99th flux percentiles.
            kwargs['vmin'] = np.nanpercentile(dat, 1)
            kwargs['vmax'] = np.nanpercentile(dat, 99)

        im = ax.imshow(dat, **kwargs)
        ax.set_xlabel('Row')
        ax.set_ylabel('Column')

        cbar = plt.colorbar(im, ax=ax)
        if scale == 'log':
            cbar.set_label('log$_{10}$ Flux')
        else:
            cbar.set_label('Flux')

        # Reset the x/y ticks to the position in the ACTUAL FFI.
        xticks = ax.get_xticks() + self.center_xy[0]
        yticks = ax.get_yticks() + self.center_xy[1]
        ax.set_xticklabels(xticks)
        ax.set_yticklabels(yticks)
        return ax

    def find_sources(self):
        """Finds the cataloged sources in the postcard and returns a table.

        Returns
        -------
        result : pandas.DataFrame
            All the sources in a postcard with TIC IDs or Gaia IDs.
        """
        result = crossmatch_by_position(self.center_radec, 0.5, 'Mast.Tic.Crossmatch').to_pandas()
        result = result[['MatchID', 'MatchRA', 'MatchDEC', 'pmRA', 'pmDEC', 'Tmag']]
        result.columns = ['TessID', 'RA', 'Dec', 'pmRA', 'pmDEC', 'Tmag']
        return result

    @property
    def header(self):
        # Header of the first (image) extension.
        return self.hdu[1].header

    @property
    def center_radec(self):
        # (RA, Dec) of the postcard's central pixel.
        return (self.header['CEN_RA'], self.header['CEN_DEC'])

    @property
    def center_xy(self):
        # (x, y) FFI position of the postcard's central pixel.
        return (self.header['CEN_X'], self.header['CEN_Y'])

    @property
    def origin_xy(self):
        # (x, y) FFI position of the postcard's (0, 0) pixel.
        return (self.header['POSTPIX1'], self.header['POSTPIX2'])

    @property
    def flux(self):
        return self.hdu[2].data

    @property
    def dimensions(self):
        return self.flux.shape

    @property
    def flux_err(self):
        return self.hdu[3].data

    @property
    def time(self):
        # Mid-exposure time of each cadence.
        return (self.hdu[1].data['TSTOP'] + self.hdu[1].data['TSTART'])/2

    @property
    def wcs(self):
        return WCS(self.header)

    @property
    def quality(self):
        return self.hdu[1].data['QUALITY']

    @property
    def bkg(self):
        return self.hdu[1].data['BKG']

    @property
    def barycorr(self):
        return self.hdu[1].data['BARYCORR']

    @property
    def ffiindex(self):
        return self.hdu[1].data['FFIINDEX']
class Postcard_tesscut(object):
    """TESS FFI cutout data (via TessCut) for one target across one sector.

    Wraps an astropy HDUList returned by the TessCut service and exposes
    the same interface as `Postcard`.

    Parameters
    ----------
    cutout : astropy.io.fits.HDUList
        The TessCut target-pixel cutout.
    location : str, optional
        Unused; kept for interface compatibility with `Postcard`.

    Attributes
    ----------
    dimensions : tuple
        Shape of the flux cube.
    flux, flux_err : numpy.ndarray
        Flux (and error on flux) for each pixel.
    time : numpy.ndarray
        Time of each cadence.
    header : dict
        Stored header information for the cutout.
    center_radec : tuple
        RA & Dec coordinates of the cutout's central pixel.
    center_xy : tuple
        (`x`, `y`) coordinates corresponding to the location of
        the cutout's central pixel on the FFI.
    origin_xy : tuple
        (`x`, `y`) coordinates corresponding to the location of
        the cutout's (0,0) pixel on the FFI.
    """

    def __init__(self, cutout, location=None):
        self.hdu = cutout

    def plot(self, frame=0, ax=None, scale='linear', **kwargs):
        """Plots a single frame of a postcard.

        Parameters
        ----------
        frame : int, optional
            Index of frame. Default 0.

        ax : matplotlib.axes.Axes, optional
            Axes on which to plot. Creates a new object by default.

        scale : str
            Scaling for colorbar; acceptable inputs are 'linear' or 'log'.
            Default 'linear'.

        **kwargs : passed to matplotlib.pyplot.imshow

        Returns
        -------
        ax : matplotlib.axes.Axes
        """
        if ax is None:
            _, ax = plt.subplots(figsize=(8, 7))

        # BUG FIX: this comparison was ``scale is 'log'`` — an identity
        # test that silently fails for equal but non-interned strings.
        if scale == 'log':
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                dat = np.log10(self.flux[:, :, frame])
                dat[~np.isfinite(dat)] = np.nan
        else:
            dat = self.flux[:, :, frame]

        if ('vmin' not in kwargs) and ('vmax' not in kwargs):
            # Default the color range to the 1st-99th flux percentiles.
            kwargs['vmin'] = np.nanpercentile(dat, 1)
            kwargs['vmax'] = np.nanpercentile(dat, 99)

        im = ax.imshow(dat, **kwargs)
        ax.set_xlabel('Row')
        ax.set_ylabel('Column')

        cbar = plt.colorbar(im, ax=ax)
        if scale == 'log':
            cbar.set_label('log$_{10}$ Flux')
        else:
            cbar.set_label('Flux')

        # Reset the x/y ticks to the position in the ACTUAL FFI.
        xticks = ax.get_xticks() + self.center_xy[0]
        yticks = ax.get_yticks() + self.center_xy[1]
        ax.set_xticklabels(xticks)
        ax.set_yticklabels(yticks)
        return ax

    def find_sources(self):
        """Finds the cataloged sources in the postcard and returns a table.

        Returns
        -------
        result : pandas.DataFrame
            All the sources in a postcard with TIC IDs or Gaia IDs.
        """
        result = crossmatch_by_position(self.center_radec, 0.5, 'Mast.Tic.Crossmatch').to_pandas()
        result = result[['MatchID', 'MatchRA', 'MatchDEC', 'pmRA', 'pmDEC', 'Tmag']]
        result.columns = ['TessID', 'RA', 'Dec', 'pmRA', 'pmDEC', 'Tmag']
        return result

    @property
    def header(self):
        return self.hdu[1].header

    @property
    def center_radec(self):
        return (self.header['RA_OBJ'], self.header['DEC_OBJ'])

    @property
    def center_xy(self):
        # NOTE(review): both coordinates come from '1CRV4P'; the y value
        # presumably should use '2CRV4P'.  Preserved as-is — confirm.
        return (self.header['1CRV4P']+16, self.header['1CRV4P']+16)

    @property
    def origin_xy(self):
        # NOTE(review): same '1CRV4P'-for-both-axes pattern as center_xy.
        return (self.header['1CRV4P'], self.header['1CRV4P'])

    @property
    def flux(self):
        return self.hdu[1].data['FLUX']

    @property
    def dimensions(self):
        return self.flux.shape

    @property
    def flux_err(self):
        return self.hdu[1].data['FLUX_ERR']

    @property
    def time(self):
        return self.hdu[1].data['TIME']

    @property
    def wcs(self):
        return WCS(self.header)

    @property
    def quality(self):
        # Quality flags are not in the cutout itself; fetch the per-sector
        # list from the eleanor metadata server.
        sector = self.header['SECTOR']
        array_obj = urlopen('https://archipelago.uchicago.edu/tess_postcards/metadata/s{0:04d}/quality_s{0:04d}.txt'.format(sector))
        A = [int(x) for x in array_obj.read().decode('utf-8').split()]
        return A

    @property
    def bkg(self):
        # Median background level per cadence.
        return np.nanmedian(self.hdu[1].data['FLUX_BKG'], axis=(1,2))

    @property
    def barycorr(self):
        return self.hdu[1].data['TIMECORR']

    @property
    def ffiindex(self):
        # FFI cadence numbers, fetched from the eleanor metadata server.
        sector = self.header['SECTOR']
        array_obj = urlopen('https://archipelago.uchicago.edu/tess_postcards/metadata/s{0:04d}/cadences_s{0:04d}.txt'.format(sector))
        A = [int(x) for x in array_obj.read().decode('utf-8').split()]
        return A
"bmontet@uchicago.edu"
] | bmontet@uchicago.edu |
c6b5593b63f105914856900aebbc5be8af1a513d | 7e90a1f8280618b97729d0b49b80c6814d0466e2 | /workspace_pc/catkin_ws/cartographer_ws/build_isolated/jackal_navigation/catkin_generated/generate_cached_setup.py | 4daf5fb9eb5f1b0d60987d0c7e079fc39de7971f | [] | no_license | IreneYIN7/Map-Tracer | 91909f4649a8b65afed56ae3803f0c0602dd89ff | cbbe9acf067757116ec74c3aebdd672fd3df62ed | refs/heads/master | 2022-04-02T09:53:15.650365 | 2019-12-19T07:31:31 | 2019-12-19T07:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/gse5/catkin_ws/cartographer_ws/install_isolated;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/gse5/catkin_ws/cartographer_ws/devel_isolated/jackal_navigation/env.sh')
output_filename = '/home/gse5/catkin_ws/cartographer_ws/build_isolated/jackal_navigation/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"sh9339@outlook.com"
] | sh9339@outlook.com |
68a5dfb492a916a1c01d489302bca9f55e915fb8 | 11b13e5c0a5286a6877ecb4f24c5c8b4d69e26d2 | /GMM/fit.py | b7ba45670ecd81c904c15dac50d67c4e52bf6683 | [] | no_license | ruozhengu/machine-learning-model-by-scratch | bcf015b6b7fd768304749858c4006864257b747c | bd4b1da212c5a1626ce89d4cdafc0fe6a6cf1f05 | refs/heads/master | 2022-11-14T03:06:17.768123 | 2020-07-14T07:15:03 | 2020-07-14T07:15:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | from GMM import EM
import numpy as np
from keras.datasets import mnist
from sklearn.decomposition import PCA
def fit_and_eval():
k = 5
# load data
n_train = 60000
n_test = 10000
x_eval = np.zeros((n_test, 5))
y_heta = np.zeros((n_test, 1))
size = 28 # size of image is 28 pixels
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# flat the image
x_train = x_train.reshape((n_train, size ** 2)) / 255
x_test = x_test.reshape((n_test, size ** 2)) / 255
# divide into 10 classes
classes = [[] for _ in range(10)]
for _x, _y in zip(x_train, y_train):
classes[_y].append(_x)
# PCA to reduce dimensions
pca_clf = PCA(n_components=50)
pca_clf.fit(x_train)
classes_after_pca = [pca_clf.transform(classes[i]) for i in range(10)]
test_after_pca = pca_clf.transform(x_test)
# training for each class
train_result = [EM(classes_after_pca[i], k) for i in range(10)]
# evaluation on test data
for i in range(10):
means, Sk, pi_k, loss = train_result[i]
for _k in range(k):
mean_diff = test_after_pca - means[_k]
expo = np.exp(-0.5 * np.einsum('ij,ij->i', np.divide(mean_diff, Sk[_k]), mean_diff))
x_eval[:, _k] = (1./np.sqrt(np.prod(Sk[_k]))) * pi_k[_k] * expo
y_heta = np.c_[y_heta, np.sum(x_eval, axis=1)]
y_heta = y_heta[:, 1:]
# pick the final prediction (which distribution) by selecting the largest prob
pred = [list(y_heta[_i, :]).index(max(list(y_heta[_i, :]))) for _i in range(n_test)]
error = [int(pred[i] != y_test[i]) for i in range(len(pred))]
print("error rate is: %f" % float(sum(error) / len(pred)))
if __name__ == '__main__':
fit_and_eval()
| [
"gu.gabriel@hotmail.com"
] | gu.gabriel@hotmail.com |
88bd4a7be1bb90e388797cbac655dfadd940bb34 | 00635315cacca50f08685e25e5f3e4bcbd1a287b | /Django_citas/settings.py | 79adbac223b724818dc1584b145dadb433951f3c | [] | no_license | DiegoArredo/Django-Citas | ceda2f6f93390f199dd2d4d6b7cc687a82bac07f | d205488dd363526ce06e6a63a881b05329af6937 | refs/heads/master | 2023-08-03T21:05:23.436404 | 2021-09-13T01:51:45 | 2021-09-13T01:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,297 | py | """
Django settings for Django_citas project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-at^m__5^b1#tm(126#(t%5alt%e_3l%m46y6)zdgyjl!gt6+dm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"account",
"core",
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Django_citas.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Django_citas.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Santiago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"diego.arredo18@gmail.com"
] | diego.arredo18@gmail.com |
96a2513a19ec5ef5b4cef589ef45c1624ee248cb | 117f066c80f3863ebef74463292bca6444f9758a | /data_pulling/crypto/do.py | 33e917485159521a9e506bfcba5606efbf76ad82 | [] | no_license | cottrell/notebooks | c6de3842cbaeb71457d270cbe6fabc8695a6ee1b | 9eaf3d0500067fccb294d064ab78d7aaa03e8b4d | refs/heads/master | 2023-08-09T22:41:01.996938 | 2023-08-04T22:41:51 | 2023-08-04T22:41:51 | 26,830,272 | 3 | 1 | null | 2023-03-04T03:58:03 | 2014-11-18T21:14:23 | Python | UTF-8 | Python | false | false | 1,908 | py | import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import os
import glob
import inspect
def get_pandas_read_csv_defaults():
    """Return a dict mapping each ``pd.read_csv`` parameter name to its
    default value (parameters without a default are omitted).

    Uses ``inspect.signature`` so keyword-only parameters are covered too.
    The original used ``inspect.getfullargspec(...).defaults``, which only
    spans positional-or-keyword parameters and is ``None`` when there are
    no such defaults — on newer pandas versions (where ``read_csv`` takes
    keyword-only arguments) that crashed with ``len(None)``.
    """
    sig = inspect.signature(pd.read_csv)
    return {
        name: param.default
        for name, param in sig.parameters.items()
        if param.default is not inspect.Parameter.empty
    }
_mydir = os.path.dirname(os.path.realpath('__file__'))
def load_raw():
    """Parse the Top100Cryptos CSV files into a partitioned parquet dataset.

    Reads each coin's CSV (ordered by the '100 List.csv' metadata file),
    writes it under parsed/name=<coin>/mtime=<mtime>/data.parquet if not
    already present, and returns the full dataset as a pyarrow Table.
    """
    # note manually removed some bad row
    kwargs = get_pandas_read_csv_defaults()
    kwargs['thousands'] = ','  # always do this
    kwargs['parse_dates'] = ['Date']
    kwargs['na_values'] = ['-']
    kwargs['dtype'] = 'str'  # read everything as str; overrides parse_dates? TODO confirm
    # NOTE(review): this dtype mapping is defined but never used — kwargs['dtype']
    # above stays 'str'. Presumably it was meant to be kwargs['dtype'] = dtype.
    dtype = {
        'Close': 'float',
        'High': 'float',
        'Low': 'float',
        'Market Cap': 'float',
        'Open': 'float',
        'Volume': 'float'
    }
    meta = pd.read_csv(os.path.join(_mydir, 'Top100Cryptos/data/100 List.csv'))
    names = meta.Name.tolist()
    files = [os.path.join(_mydir, 'Top100Cryptos/data/{}.csv'.format(x)) for x in names]
    # files = glob.glob(os.path.join(_mydir, 'Top100Cryptos/data/*.csv'))
    dfs = list()  # NOTE(review): never appended to — dead variable
    datadir = os.path.join(_mydir, 'parsed')
    if not os.path.exists(datadir):
        os.makedirs(datadir)
    for i, (name, f) in enumerate(zip(names, files)):
        # mtime is baked into the partition path so a refreshed CSV gets re-parsed
        mtime = os.path.getmtime(f)
        dirname = os.path.join(datadir, 'name={}/mtime={}'.format(name, mtime))
        filename = os.path.join(dirname, 'data.parquet')
        if not os.path.exists(filename):
            df = pd.read_csv(f, **kwargs)
            df = pa.Table.from_pandas(df)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            print('writing {}'.format(filename))
            pq.write_table(df, filename)
            pq.read_table('./parsed')  # test: re-read to confirm the partition is valid
        else:
            print('{} exists'.format(filename))
    return pq.read_table('./parsed')  # test
# id big ups big downs
# Build (or reuse) the parquet dataset and load it.
df = load_raw()
# NOTE(review): load_raw returns a pyarrow Table, which has no sort_values
# method — this line looks like it expects a pandas DataFrame
# (df.to_pandas() first?). Confirm before relying on this script tail.
df = df.sort_values('Date')
| [
"cottrell@users.noreply.github.com"
] | cottrell@users.noreply.github.com |
d038c06c6c4f20653a17f5fb33b4d16d637fb9be | 66acbd1f601e00f311c53a9ce0659e5b56c87fef | /pre_analysis/observable_analysis/topc4mcintervalanalyser.py | 508e973104b53d9e4245701856db48d1f55c9b6c | [
"MIT"
] | permissive | hmvege/LatticeAnalyser | fad3d832190f4903642a588ed018f6cca3858193 | 6c3e69ab7af893f23934d1c3ce8355ac7514c0fe | refs/heads/master | 2021-05-25T11:46:30.278709 | 2019-04-11T14:14:23 | 2019-04-11T14:14:23 | 127,303,453 | 0 | 1 | null | 2018-10-12T21:09:58 | 2018-03-29T14:29:14 | Python | UTF-8 | Python | false | false | 601 | py | from pre_analysis.core.flowanalyser import FlowAnalyser
class Topc4MCIntervalAnalyser(FlowAnalyser):
    """Analyser for the quartic topological charge <Q^4>.

    Thin specialization of FlowAnalyser: after the base class loads the
    flowed observable into self.y, the data is raised to the 4th power.
    """
    # Plot/label metadata consumed by the FlowAnalyser machinery.
    observable_name = r"$\langle Q^4 \rangle$"
    observable_name_compact = "topc4MC"
    x_label = r"$\sqrt{8t_{f}}$ [fm]"
    y_label = r"$\langle Q^4 \rangle$"

    def __init__(self, *args, **kwargs):
        """Load data via the superclass, then take Q -> Q^4 in place."""
        super(Topc4MCIntervalAnalyser, self).__init__(*args, **kwargs)
        self.y **= 4
def main():
    """Entry point guard: this module is a library, not a standalone script."""
    raise SystemExit("Module Topc4MCIntervalAnalyser not intended for standalone usage.")
if __name__ == '__main__':
main() | [
"hmvege@ulrik.uio.no"
] | hmvege@ulrik.uio.no |
5556284697c83072c5f8adcd31eebd42bde99e85 | 9acff625f8a82c510b23213e4eed9b5c94d84580 | /analyze.py | f25e88e4b51899131cd7e4beff16375ee7ff65f8 | [] | no_license | jennalau/lyft-data-challenge | 69b970b4214e63abd9f34cb3c31cbf0e571b9c08 | d3c6cac9807de0aaa87d31f87e237a26bb487672 | refs/heads/master | 2020-07-08T12:48:12.376898 | 2019-09-16T17:32:58 | 2019-09-16T17:32:58 | 203,675,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | from utils import *
import pandas as pd
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
def calcTotalDrivingMin(data):
    """Aggregate driving minutes per hour-of-day across the whole dataset.

    Args:
        data: iterable of days, each day an array-like of per-driver rows of
            24 hourly minute counts (dataset shape (122, 46, 24)).

    Returns:
        numpy.ndarray of shape (24, 3), integer dtype:
            column 0 — total minutes driven in that hour,
            column 1 — number of driver-days with any driving in that hour,
            column 2 — mean minutes (integer-truncated) among those drivers.
    """
    total = [[0, 0, 0] for _ in range(24)]
    for day in data:
        for hrs in day:  # one driver's 24 hourly minute counts
            # enumerate keeps the true hour index; the previous
            # hrs.index(hr) lookup mis-attributed duplicate minute
            # counts to the first hour holding the same value.
            for hour, minutes in enumerate(hrs):
                if minutes > 0:
                    total[hour][0] += minutes
                    total[hour][1] += 1
    total = np.asarray(total)
    # calculate mean minutes worked per hour
    for t in total:
        if t[1] > 0:  # guard: hours where nobody drove keep a mean of 0
            t[2] = t[0] / t[1]
    return total
def calcResidual(data, driver_hash, avg_time):
    """Accumulate each driver's per-hour residual vs. the fleet average,
    plus their total minutes worked.

    Args:
        data: iterable of days; each day indexable by driver, yielding a
            length-24 array of minutes driven per hour.
        driver_hash: mapping of driver index -> driver id (only its length
            is used here, to size the output arrays).
        avg_time: length-24 array of fleet-average minutes per hour.

    Returns:
        (residuals, driver_working_min):
            residuals — float array (num_drivers, 24), summed
                (driver_minutes - avg_time) over all days;
            driver_working_min — float array (num_drivers,), total minutes.
    """
    num_drivers = len(driver_hash)
    # numpy accumulators: the previous list-based version did
    # `list += ndarray`, which *extends* the list instead of adding
    # element-wise, producing ragged results.
    residuals = np.zeros((num_drivers, 24))
    driver_working_min = np.zeros(num_drivers)
    for day in data:
        for idx in range(len(day)):
            driver_time = day[idx]
            # bug fix: the residual and the minute total must use the
            # driver's *time vector*, not the loop index.
            residuals[idx] += driver_time - avg_time
            driver_working_min[idx] += np.sum(driver_time)
    return residuals, driver_working_min
def driverEfficiency(residuals, driver_working_min, driver_hash):
    """Score every driver: summed residual divided by minutes worked.

    Drivers who were onboarded but never worked keep a score of 0.
    Returns a dict mapping driver id -> efficiency score.
    """
    driver_rankings = dict()
    for idx, driver_residual in enumerate(residuals):
        minutes = driver_working_min[idx]
        # note: we ignored the drivers who were onboarded, but did not work at all
        score = np.sum(driver_residual) / minutes if minutes > 0 else 0
        driver_rankings[getDriverID(driver_hash, idx)] = score
    return driver_rankings
def analyze(data, driver_hash):
    """Run the full pipeline: aggregate driving time, score drivers, plot."""
    # calculate total # of driving minutes for all drivers across the entire
    # dataset timespan for each 1-hour interval & the # of drivers working
    # in each interval
    totalTime = calcTotalDrivingMin(data)  # shape (24, 3)
    # calculate residuals for each driver against the per-hour mean
    # (last column of totalTime), squeezed to shape (24,)
    avg_times = totalTime[:, -1:]
    residuals, driver_working_min = calcResidual(data, driver_hash, np.squeeze(avg_times, axis=-1))
    # NOTE(review): computed but never returned or used — confirm intent
    driver_rankings = driverEfficiency(residuals, driver_working_min, driver_hash)
    # generate visuals (each saves a .png to the working directory)
    visRidePopularity(totalTime[:, 0:1])
    compPrimeTime(residuals, driver_working_min, totalTime)
def visRidePopularity(mins):
    """Scatter-plot minutes driven per hour-of-day; saves ride_popularity.png.

    mins: (24, 1) column vector — despite the axis label, this is the
    *total* minutes column when called from analyze(); TODO confirm.
    """
    time_intervals = [x for x in range(24)]
    print(time_intervals)  # debug output
    mins = np.squeeze(mins, axis=-1)  # (24, 1) -> (24,)
    d = {'Time': time_intervals, 'Average # of Minutes Driven': mins}
    data = pd.DataFrame(data=d)
    ax = sns.scatterplot(x=data['Time'], y=data['Average # of Minutes Driven'], data=data)
    fig = ax.get_figure()
    fig.savefig('ride_popularity.png')
def compPrimeTime(residuals, driver_working_min, totalTime):
    """Plot per-hour mean residual against per-hour volume; saves prime_time.png.

    Returns a list of (mean residual, volume) pairs, one per hour.
    Note: driver_working_min is accepted but unused here.
    """
    print('residuals: ', residuals.shape)  # debug; expected (46, 24)
    # mean residual across drivers, one value per hour-of-day
    resulting_diff = []
    for i in range(24):
        diff = residuals[:, i:i + 1]
        resulting_diff.append(np.mean(diff))
    print(resulting_diff)
    # NOTE(review): labelled 'Prime Time' below, but column 0 of totalTime
    # is the *total minutes driven* per hour — confirm the intended metric.
    means = totalTime[:, 0:1]
    means = np.squeeze(means, axis=-1)
    resulting_diff = np.asarray(resulting_diff)
    print("means", means.shape)
    print('resulting_diff: ', resulting_diff.shape)
    # resulting_diff = np.squeeze(resulting_diff, axis=-1)
    d = {'Prime Time': means, 'Residual': resulting_diff}
    data = pd.DataFrame(data=d)
    ax = sns.scatterplot(x=data['Prime Time'], y=data['Residual'], data=data)
    fig = ax.get_figure()
    fig.savefig('prime_time.png')
    all_scores = list(zip(resulting_diff, means))
    return all_scores
| [
"jennanla@usc.edu"
] | jennanla@usc.edu |
1a321791650a05c8b77ab5c95f563d2f6b201961 | 0dcbcf0ed0e19a7406e85f1f5f4f957ab2a6975b | /plasTeX/Packages/listings.py | 2f163dd70fae8b9761fbc3de9bcdb434e5da17b3 | [
"MIT"
] | permissive | rsbowman/plastex-mobi | aa3762ffb9e48f8c2120a79c7ed4c2e2717e3308 | 8ea054c63e9deb9dd302d950e636256f9e5e75e9 | refs/heads/master | 2021-01-19T18:32:12.720364 | 2014-06-30T12:07:45 | 2014-06-30T12:07:45 | 21,349,475 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,849 | py | #!/usr/bin/env python
import sys, re, codecs
from plasTeX import Base
try: import pygments
except: pygments = None
class listingsname(Base.Command):
    """\\listingsname macro: the caption word used for listings."""
    unicode = 'Listing'
# Options passed to \usepackage{listings}; consulted later by _format.
PackageOptions = {}

def ProcessOptions(options, document):
    """Package hook: register the 'listings' counter and record the options."""
    document.context.newcounter('listings',
                                resetby='chapter',
                                format='${thechapter}.${listings}')
    PackageOptions.update(options)
class lstset(Base.Command):
    """\\lstset macro: stores global listings options.

    Only the 'language' key is acted on here — it becomes the default
    Pygments lexer language for subsequent listings.
    """
    args = 'arguments:dict'

    def invoke(self, tex):
        Base.Command.invoke(self, tex)
        if 'language' in self.attributes['arguments']:
            self.ownerDocument.context.current_language = \
                self.attributes['arguments']['language']
class lstlisting(Base.verbatim):
    """\\begin{lstlisting} environment: a verbatim block run through _format."""
    args = '[ arguments:dict ]'
    counter = 'listings'

    def invoke(self, tex):
        if self.macroMode == Base.Environment.MODE_END:
            return
        # Capture the verbatim body (skipping the first token), normalize
        # line endings, and hand the list of lines to the shared formatter.
        s = ''.join(Base.verbatim.invoke(self, tex)[1:]).replace('\r','').split('\n')
        _format(self, s)
class lstinline(Base.verb):
    """\\lstinline macro: inline code snippet, formatted via _format."""
    args = '[ arguments:dict ]'

    def invoke(self, tex):
        # Tokens [2:-1] strip the delimiter characters around the snippet.
        _format(self, ''.join(Base.verb.invoke(self, tex)[2:-1]))
class lstinputlisting(Base.Command):
    """\\lstinputlisting macro: include and format an external source file."""
    args = '[ arguments:dict ] file:str'
    counter = 'listings'

    def invoke(self, tex):
        Base.Command.invoke(self, tex)
        if 'file' not in self.attributes or not self.attributes['file']:
            raise ValueError('Malformed \\lstinputlisting macro.')
        # Open with the document's configured input encoding, replacing
        # undecodable bytes, and stream the file object to the formatter.
        _format(self, codecs.open(self.attributes['file'], 'r',
                self.config['files']['input-encoding'], 'replace'))
def _format(self, file):
    """Shared listing formatter (Python 2 code).

    Reads lines from *file* (any iterable of strings), honouring the
    'firstline'/'lastline' LaTeX arguments, stores the raw text on
    self.plain_listing and — when Pygments is available — a highlighted
    XHTML version on self.xhtml_listing.
    """
    if self.attributes['arguments'] is None:
        self.attributes['arguments'] = {}
    # Line numbering is enabled by a 'numbers' key either on this macro
    # or globally on the package options.
    linenos = False
    if 'numbers' in self.attributes['arguments'] or 'numbers' in PackageOptions:
        linenos = 'inline'
    # If this listing includes a label, inform plasTeX.
    if 'label' in self.attributes['arguments']:
        if hasattr(self.attributes['arguments']['label'], 'textContent'):
            self.ownerDocument.context.label(
                self.attributes['arguments']['label'].textContent)
        else:
            self.ownerDocument.context.label(
                self.attributes['arguments']['label'])
    # Check the textual LaTeX arguments and convert them to Python
    # attributes.
    if 'firstline' in self.attributes['arguments']:
        first_line_number = int(self.attributes['arguments']['firstline'])
    else:
        first_line_number = 0
    if 'lastline' in self.attributes['arguments']:
        last_line_number = int(self.attributes['arguments']['lastline'])
    else:
        last_line_number = sys.maxint  # py2: unbounded upper line limit
    # Read the file, all the while respecting the "firstline" and
    # "lastline" arguments given in the document.
    self.plain_listing = ''
    for current_line_number, line in enumerate(file):
        current_line_number += 1  # 1-based line numbering
        if (current_line_number >= first_line_number) and \
           (current_line_number <= last_line_number):
            # Remove single-line "listings" comments. Only
            # comments started by "/*@" and ended by "@*/" are
            # supported.
            line = re.sub('/\*@[^@]*@\*/', '', line)
            # Add the just-read line to the listing.
            self.plain_listing += '\n' + line
    # Create a syntax highlighted XHTML version of the file using Pygments
    if pygments is not None:
        from pygments import lexers, formatters
        try:
            lexer = lexers.get_lexer_by_name(self.ownerDocument.context.current_language.lower())
        except Exception, msg:
            # Unknown/unset language: fall back to a plain-text lexer.
            lexer = lexers.TextLexer()
        self.xhtml_listing = pygments.highlight(self.plain_listing, lexer, formatters.HtmlFormatter(linenos=linenos))
| [
"r.sean.bowman@gmail.com"
] | r.sean.bowman@gmail.com |
1253730c187a79635a06e3f974cfc757d73a20e1 | 909323b8a1baca96711a0b5285b5af233dda70ae | /01 PY0101EN Python Basics for Data Science/Module 2 - Python Data Structures/Dictionaries.py | d10c9175548d0ef9e261c71bd20ee73ffbb837f1 | [] | no_license | ekmanch/IBM-Python-Data-Science | 629084c104294c0dcd67a592f514c5b160859fcb | 813a8bde137b90b4dfd74387b263987c06e08ba7 | refs/heads/main | 2023-03-28T06:08:34.161225 | 2021-03-20T18:37:32 | 2021-03-20T18:37:32 | 330,181,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,130 | py | ######################################
# Dictionaries Lab 1
######################################
# You will need the dictionary D:
# Tutorial script: exercises on Python dictionaries. Each section prints
# its answer; "Answer prediction" comments state the expected output.
from typing import Dict  # NOTE(review): unused import — kept as-is
D={'a':0,'b':1,'c':2}
# Find the value for the key 'a'
print(D['a'])
# Find the keys of the dictionary D
print(D.keys())
######################################
# Dictionaries Quiz
######################################
# Question 1
# Consider the following dictionary:
D={'a':0,'b':1,'c':2}
# What is the result of the following: D.values()
# Answer prediction:
# answer will be a view of the values in D, i.e. 0, 1 and 2
print(D.values())
# Question 2
# Consider the following dictionary:
D={'a':0,'b':1,'c':2}
# What is the output of the following D['b'] :
# Answer prediction:
# answer will be the value of key 'b', i.e. 1
print(D['b'])
######################################
# Dictionaries Lab 2
######################################
# Question 1
# You will need this dictionary for the next two questions:
soundtrack_dic = {"The Bodyguard":"1992", "Saturday Night Fever":"1977"}
soundtrack_dic  # no-op in a script (only echoes in a notebook/REPL)
# a) In the dictionary soundtrack_dic what are the keys ?
# Answer prediction
# "The Bodyguard" and "Saturday Night Fever"
print(soundtrack_dic.keys())
# b) In the dictionary soundtrack_dic what are the values ?
# Answer prediction
# "1992" and "1977"
print(soundtrack_dic.values())
# Question 2
# You will need this dictionary for the following questions:
# The Albums Back in Black, The Bodyguard and Thriller have the following
# music recording sales in millions 50, 50 and 65 respectively:
# a) Create a dictionary album_sales_dict where the keys are
# the album name and the sales in millions are the values.
album_sales_dict = {"Back in Black":50, "The Bodyguard":50, "Thriller":65}
print(album_sales_dict)
# b) Use the dictionary to find the total sales of Thriller:
print(album_sales_dict["Thriller"])
# c) Find the names of the albums from the dictionary using the method keys():
print(album_sales_dict.keys())
# d) Find the values of the recording sales from the dictionary
# using the method values:
print(album_sales_dict.values())
"christian.ekman89@gmail.com"
] | christian.ekman89@gmail.com |
8ef604e567085a98f8973285131c2bdfcfa87320 | 45db4a55c6bd5137b17bf8dfa54ed94f361c3bf6 | /ResonantCircuits/parallelResonantCircuit.py | 913cbbe0b0728612811e4e777255d1dfd7520aae | [] | no_license | CatT-DancingDev/PythonProjects | 1be3e8f0b0528be1ccbe8aeadb76ac8a5f9961ae | 7b59d9b1843eaddb9254f980f178d6e8ba551106 | refs/heads/main | 2023-04-15T08:06:25.240981 | 2021-04-25T04:13:15 | 2021-04-25T04:13:15 | 361,327,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,878 | py | ###################################################################################################
#
# Program: Resonant Circuit Design
# Module: parallelResonantCircuit.py
# Author: Catherine Trujillo
# Course: CSC 217-470
# Date: 7/07/2020
#
###################################################################################################
#
#
# Description: This module defines/implements the subclass ParallelResonantCircuit, which extends
# the class Resonant Circuit by adding a method to design the parallel circuit for
# the given RFR values. It also overrides the superclasses display method to include
# a print out of the design values:
# - self._R = Resistance
# - self._C = Capacitance
# - self._L = Inductance
#
############################## SUBCLASS METHODS LIST ##############################################
#
# __init__(self)
# designCircuit(self)
# display(self)
#
############################## LIBRARIES AND MODULES ##############################################
from resonantCircuit import ResonantCircuit
############################## SUBCLASS DEFINITION ################################################
class ParallelResonantCircuit(ResonantCircuit):
############################## METHODS ############################################################
#
# Method: __init__(self)
#
# Parameters: self
# Return Value: ParallelResonantCircuit object
#
# Purpose: SuperClass Constructor initializes fields for:
# _rf = Resonant Frequency in rad/s
# _b = Bandwidth in rad/s
# _k = Gain at RF
#
# SubClass Constructor initiliazes fields for:
# _R = Resistance
# _C = Capacitance
# _L = Inductance
#
####################################################################################################
def __init__(self):
super().__init__()
self._R = 0
self._C = 0
self._L = 0
####################################################################################################
#
# Method: designCircuit(self)
#
# Parameters: self
# Return Value: None
#
# Purpose: Retrieve data from superclass fields for use in design calculations. Set subclass
# instance fields using design equations provided in textbook
#
####################################################################################################
def designCircuit(self):
# Retrive data from superclass fields for use in design calculations
rf = super().getRF()
b = super().getB()
k = super().getK()
# Set subclass instance fields using design equations provided in textbook
self._R = k
self._C = 1 / (b * self._R)
self._L = 1 / ((rf ** 2) * self._C)
####################################################################################################
#
# Method: display(self)
#
# Parameters: self
# Return Value: None
#
# Purpose: This method extends the superclass display method to include a printout of the
# Parallel Resonant Circuit Design Values
#
####################################################################################################
def display(self):
# Include superclass display method
super().display()
# Add parallel Circuit Design
print("PARALLEL CIRCUIT DESIGN")
print("R = {}".format(self._R))
print("C = {}".format(self._C))
print("L = {} \n".format(self._L))
################################## END SUBCLASS ###################################################
| [
"noreply@github.com"
] | CatT-DancingDev.noreply@github.com |
d4796b17e0004688ffac671106272d188dbca7d6 | 1cbc8d9b99e5ac8397a857f4c20adb5c7662171f | /helloworldapp/helloworldapp/__init__.py | 84707a415502114026b8c35a4e51bdbdfc478844 | [] | no_license | six0h/helloworldapp | 255cc08a41317c040c8ad150a8f0246309055a45 | 311aa2c5df5aef763d068e879541b8f77ff1a062 | refs/heads/master | 2021-01-19T22:34:29.180128 | 2017-04-20T15:03:04 | 2017-04-20T15:03:04 | 88,826,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | # -*- coding: utf-8 -*-
# Package metadata for helloworldapp (read by setup tooling/docs).
__author__ = """Cody Halovich"""
__email__ = 'me@codyhalovich.com'
__version__ = '0.1.0'
| [
"cody@savtechnology.com"
] | cody@savtechnology.com |
b84e755e93027b2789dea29567569defeb59a598 | 40f6176d172025c7061d3c1fe027628c06c86f39 | /setup.py | 015a81e636e7c57885954242016210af5a43c5c7 | [] | no_license | rsriram315/eds_covid-19 | 8adca890fcf93552b9761036d6b268213afd10bc | 528695a430ff13c9dcc6e969ebf1f7988e26c434 | refs/heads/master | 2022-12-07T16:38:21.071965 | 2020-09-01T20:25:40 | 2020-09-01T20:25:40 | 291,678,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from setuptools import find_packages, setup
# Minimal setuptools configuration: packages all importable code under 'src'.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='Applied Data science on COVID-19 Data',
    author='Sriram Ramachandran',
    license='',
)
| [
"rsriram315@gmail.com"
] | rsriram315@gmail.com |
f5442a5cd16a2677c528d2570c555dac671f2270 | 43535a0667aa1d745fdd718bb8c81b2e616633be | /SLQ/sess_4/4a_citycycle.py | d33344469ec0bc4abccf20a9fea8994831d47573 | [] | no_license | anniequasar/session-summaries | 25336afe94b5033ae009bd124e00d4adf4748a99 | acc17b640e1a737e70b310cb2c8ce21aac35c6da | refs/heads/master | 2023-08-08T03:01:43.030746 | 2023-07-19T01:41:34 | 2023-07-19T01:41:34 | 165,978,720 | 17 | 11 | null | 2022-04-26T07:43:49 | 2019-01-16T05:20:31 | Jupyter Notebook | UTF-8 | Python | false | false | 2,134 | py | # Author: Tim Cummings
# See https://data.brisbane.qld.gov.au for other data sets
# Register at https://developer.jcdecaux.com/ to get an api_key
# contract list GET https://api.jcdecaux.com/vls/v1/contracts?apiKey={api_key} HTTP/1.1
# station list GET https://api.jcdecaux.com/vls/v1/stations?contract={contract_name}&apiKey={api_key} HTTP/1.1
# station info GET https://api.jcdecaux.com/vls/v1/stations/{station_number}?contract={contract_name}&apiKey={api_key} HTTP/1.1
# Queries the JCDecaux CityCycle API for Brisbane bike stations and reports
# the station(s) with the most available bikes. Requires a valid API key.
from urllib.request import urlopen
import json
# TODO enter your api key as issued by jcdecaux
api_key = '1fbb..............................82151e'
# contract name from jcdecaux contract list
contract_name = 'Brisbane'
# url for station list with placeholders for contract name and api key
url_station_list = "https://api.jcdecaux.com/vls/v1/stations?contract={contract_name}&apiKey={api_key}"
# use str format function to replace placeholders with values from variables
url = url_station_list.format(contract_name=contract_name, api_key=api_key)
# load the json (JavaScript Object Notation) data from the url's http response
# into a python list of dicts
station_list = json.load(urlopen(url))
# initialise variables which will capture the bike stations with the most
# available bikes (ties are kept, hence a list)
stations_with_most_bikes = []
most_bikes = 0
# Loop through all bike stations finding those with the maximum number of
# available bikes (could be more than one)
for station in station_list:
    print(station)  # debug: dump every station record
    if most_bikes < station['available_bikes']:
        # This station exceeds all previous ones so replace list with just
        # this one and save the number of bikes
        most_bikes = station['available_bikes']
        stations_with_most_bikes = [station]
    elif most_bikes == station['available_bikes']:
        # This station equals previous maximum so add it to the list
        stations_with_most_bikes.append(station)
# Display results
print()
print("Most number of available bikes =", most_bikes, 'at the following station(s)')
for station in stations_with_most_bikes:
    print(station['name'], 'at', station['position'])
| [
"noreply@github.com"
] | anniequasar.noreply@github.com |
37b0f73442e6b0db42d0419136e19faef5f2f973 | d272b041f84bbd18fd65a48b42e0158ef6cceb20 | /catch/datasets/tacaribe_mammarenavirus.py | 7f62021e43d0f87c81e26077042b3721995eee6d | [
"MIT"
] | permissive | jahanshah/catch | bbffeadd4113251cc2b2ec9893e3d014608896ce | 2fedca15f921116f580de8b2ae7ac9972932e59e | refs/heads/master | 2023-02-19T13:30:13.677960 | 2021-01-26T03:41:10 | 2021-01-26T03:41:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | """Dataset with 'Tacaribe mammarenavirus' sequences.
A dataset with 5 'Tacaribe mammarenavirus' sequences. The virus is
segmented and has 2 segments. Based on their strain and/or isolate,
these sequences were able to be grouped into 4 genomes. Many genomes
may have fewer than 2 segments.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetMultiChrom
def seq_header_to_chr(header):
    """Map a FASTA header to its chromosome name: 'segment_L' or 'segment_S'.

    Raises Exception when the header carries no recognizable segment tag.
    """
    import re
    match = re.search(r'\[segment (L|S)\]', header)
    if match is None:
        raise Exception("Unknown or invalid segment in header %s" % header)
    return "segment_" + match.group(1)
def seq_header_to_genome(header):
    """Extract the genome name from a FASTA header's '[genome ...]' tag.

    Raises Exception when the header carries no genome tag.
    """
    import re
    match = re.search(r'\[genome (.+)\]', header)
    if match is None:
        raise Exception("Unknown genome in header %s" % header)
    return match.group(1)
# Build the two expected chromosome names ('segment_L', 'segment_S'),
# assemble the dataset object, and register its FASTA source.
chrs = ["segment_" + seg for seg in ['L', 'S']]
ds = GenomesDatasetMultiChrom(__name__, __file__, __spec__,
                              chrs, seq_header_to_chr,
                              seq_header_to_genome=seq_header_to_genome)
ds.add_fasta_path("data/tacaribe_mammarenavirus.fasta.gz", relative=True)
# Replace this module object in sys.modules with the dataset itself, so
# `import <this module>` yields the dataset directly.
sys.modules[__name__] = ds
| [
"hmetsky@gmail.com"
] | hmetsky@gmail.com |
a83ca6ce05b21ca251fbaf60b2b466a01099eeb9 | 8fe8294460dd743534d0c10a9288da3f3d2ae155 | /scripts/mimic3benchmarks_inhospital_mortality/predictions_collapsed/zzz_old_code/train_random_forest_with_per_tslice_features.smk | b0244edb0bbd708ce40fd4f097f44524bbe9cd1d | [
"MIT"
] | permissive | tufts-ml/time_series_prediction | 32132a6c0d17ce196ea97b55387ea27fa18ba6e3 | 831c388d8854773b7545a3a681a596f7c2d98dff | refs/heads/master | 2023-04-08T20:03:05.647403 | 2022-07-19T19:10:06 | 2022-07-19T19:10:06 | 161,347,900 | 8 | 7 | MIT | 2023-03-24T22:43:26 | 2018-12-11T14:40:58 | Jupyter Notebook | UTF-8 | Python | false | false | 2,764 | smk | '''
Train random fores on collapsed features for mimic3 inhospital mortality
Usage
-----
snakemake --cores 1 --snakefile train_random_forest.smk train_and_evaluate_classifier
'''
sys.path.append('../predictions_collapsed/')
from config_loader import (
D_CONFIG,
DATASET_STD_PATH, DATASET_SPLIT_PATH,
PROJECT_REPO_DIR, PROJECT_CONDA_ENV_YAML,
DATASET_SPLIT_FEAT_PER_TSLICE_PATH,
RESULTS_FEAT_PER_TSLICE_PATH,
DATASET_SPLIT_COLLAPSED_FEAT_PER_TSLICE_PATH,
RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH)
random_seed_list=D_CONFIG['CLF_RANDOM_SEED_LIST']
CLF_TRAIN_TEST_SPLIT_PATH = os.path.join(DATASET_SPLIT_COLLAPSED_FEAT_PER_TSLICE_PATH, 'classifier_train_test_split_dir')
RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH = os.path.join(RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH, 'random_forest')
print("Training logistic regression")
print("--------------------------")
print("Results and trained model will go to:")
print(RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH)
rule train_and_evaluate_classifier:
input:
script=os.path.join(PROJECT_REPO_DIR, 'src', 'eval_classifier.py'),
x_train_csv=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'x_train.csv'),
x_test_csv=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'x_test.csv'),
y_train_csv=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'y_train.csv'),
y_test_csv=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'y_test.csv'),
x_dict_json=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'x_dict.json'),
y_dict_json=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'y_dict.json')
params:
output_dir=RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH,
random_seed=int(random_seed_list[0])
output:
output_html=os.path.join(RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH, "report.html")
conda:
PROJECT_CONDA_ENV_YAML
shell:
'''
mkdir -p {params.output_dir} && \
python -u {input.script} \
random_forest \
--outcome_col_name {{OUTCOME_COL_NAME}} \
--output_dir {params.output_dir} \
--train_csv_files {input.x_train_csv},{input.y_train_csv} \
--test_csv_files {input.x_test_csv},{input.y_test_csv} \
--data_dict_files {input.x_dict_json},{input.y_dict_json} \
--merge_x_y False \
--validation_size 0.15 \
--key_cols_to_group_when_splitting {{SPLIT_KEY_COL_NAMES}} \
--random_seed {params.random_seed}\
--n_splits 2 \
--scoring roc_auc_score \
--threshold_scoring balanced_accuracy_score \
--class_weight balanced \
'''.replace("{{OUTCOME_COL_NAME}}", D_CONFIG["OUTCOME_COL_NAME"])\
.replace("{{SPLIT_KEY_COL_NAMES}}", D_CONFIG["SPLIT_KEY_COL_NAMES"]) | [
"prath01@alpha001.lux.tufts.edu"
] | prath01@alpha001.lux.tufts.edu |
47b0fad3467437ec0622fddde5ff65dbed7f685e | a306e621d15d6287f75c8e4f22329da810408605 | /tests/test_distance.py | 2500daf3774b7e965c5bd7d243e4f01b24e8e026 | [
"MIT"
] | permissive | moble/quaternionic | c6175a8e5ff57fbb9d2f2462bc761368f3b4fa66 | 074b626d0c63aa78479ff04ed41638931ca6693a | refs/heads/main | 2023-06-08T08:21:46.827232 | 2023-02-07T17:36:31 | 2023-02-07T17:36:38 | 286,745,519 | 73 | 7 | MIT | 2023-05-27T12:19:43 | 2020-08-11T13:00:26 | Python | UTF-8 | Python | false | false | 3,116 | py | import warnings
import numpy as np
import quaternionic
import pytest
@pytest.mark.parametrize("rotor,rotation,slow", [  # pragma: no branch
    (quaternionic.distance.rotor, quaternionic.distance.rotation, True),
    quaternionic.distance.CreateMetrics(lambda f: f, quaternionic.utilities.pyguvectorize) + (False,)
], ids=["jit metrics", "non-jit metrics"])
def test_metrics(Rs, array, rotor, rotation, slow):
    """Verify the metric axioms and quaternion-specific identities for the
    rotor/rotation distance functions (both jitted and pure-python builds).

    Rs and array are pytest fixtures supplying a batch of unit quaternions
    and the array constructor; `slow` gates the O(n^3) invariance checks.
    """
    metric_precision = 4.e-15
    Rs = array(Rs.ndarray)
    one = array(1, 0, 0, 0)  # the identity quaternion
    intrinsic_funcs = (rotor.intrinsic, rotation.intrinsic)
    chordal_funcs = (rotor.chordal, rotation.chordal)
    metric_funcs = intrinsic_funcs + chordal_funcs
    rotor_funcs = (rotor.intrinsic, rotor.chordal)
    rotation_funcs = (rotation.intrinsic, rotation.chordal)
    # All pairwise distance matrices, computed once via broadcasting.
    distance_dict = {func: func(Rs, Rs[:, np.newaxis]) for func in metric_funcs}
    # Check non-negativity
    for mat in distance_dict.values():
        assert np.all(mat >= 0.)
    # Check discernibility: d(p, q) > 0 iff p != q (rotation metrics also
    # identify q with -q, since both represent the same rotation).
    for func in metric_funcs:
        if func in chordal_funcs:
            eps = 0
        else:
            eps = 5.e-16
        if func in rotor_funcs:
            target = Rs != Rs[:, np.newaxis]
        else:
            target = np.logical_and(Rs != Rs[:, np.newaxis], Rs != - Rs[:, np.newaxis])
        assert ((distance_dict[func] > eps) == target).all()
    # Check symmetry
    for mat in distance_dict.values():
        assert np.allclose(mat, mat.T, atol=metric_precision, rtol=0)
    # Check triangle inequality
    for mat in distance_dict.values():
        assert ((mat - metric_precision)[:, np.newaxis, :] <= mat[:, :, np.newaxis] + mat).all()
    # Check distances from self or -self
    for func in metric_funcs:
        # All distances from self should be 0.0
        if func in chordal_funcs:
            eps = 0
        else:
            eps = 5.e-16
        assert (np.diag(distance_dict[func]) <= eps).all()
    # Chordal rotor distance from -self should be 2
    assert (abs(rotor.chordal(Rs, -Rs) - 2.0) < metric_precision).all()
    # Intrinsic rotor distance from -self should be 2pi
    assert (abs(rotor.intrinsic(Rs, -Rs) - 2.0 * np.pi) < metric_precision).all()
    # Rotation distances from -self should be 0
    assert (rotation.chordal(Rs, -Rs) == 0.0).all()
    assert (rotation.intrinsic(Rs, -Rs) < 5.e-16).all()
    # We expect the chordal distance to be smaller than the intrinsic distance
    # (or equal, if the distance is zero)
    assert np.logical_or(rotor.chordal(one, Rs) < rotor.intrinsic(one, Rs), Rs == one).all()
    if slow:
        # Check invariance under overall rotations:
        # d(R1, R2) = d(R3*R1, R3*R2) = d(R1*R3, R2*R3)
        for func in rotor.chordal, rotation.intrinsic:
            rotations = Rs[:, np.newaxis] * Rs
            right_distances = func(rotations, rotations[:, np.newaxis])
            assert (abs(distance_dict[func][:, :, np.newaxis] - right_distances) < metric_precision).all()
            left_distances = func(rotations[:, :, np.newaxis], rotations[:, np.newaxis])
            assert (abs(distance_dict[func] - left_distances) < metric_precision).all()
| [
"michael.oliver.boyle@gmail.com"
] | michael.oliver.boyle@gmail.com |
070f9fae7f4af5744ad3cfe5bb1332147cfc2637 | 13b22a505cbeed3f88653cd379d72ff797068935 | /jenkins_jobs/builder.py | b1b23e2cbe8696c73572efdde25edc35e4f7181d | [
"Apache-2.0"
] | permissive | jaybuff/jenkins-job-builder | af88e4dfd7fe99d17b5e87f2058c0e99cda1d4bd | e3e8f6363f515051e94dc9ebb3ec1473857e4389 | refs/heads/master | 2021-01-15T17:51:28.890445 | 2013-07-25T16:15:48 | 2013-07-25T16:15:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,875 | py | #!/usr/bin/env python
# Copyright (C) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage jobs in Jenkins server
import os
import hashlib
import yaml
import json
import xml.etree.ElementTree as XML
from xml.dom import minidom
import jenkins
import re
import pkg_resources
import logging
import copy
import itertools
from jenkins_jobs.errors import JenkinsJobsException
logger = logging.getLogger(__name__)
def deep_format(obj, paramdict):
    """Recursively apply str.format(**paramdict) to every string in obj.

    Lists and dicts are walked recursively; any other type is returned
    untouched. (Plain recursion rather than YAML round-tripping, so the
    substituted values need not be YAML-safe.)
    """
    if isinstance(obj, str):
        return obj.format(**paramdict)
    if isinstance(obj, list):
        return [deep_format(element, paramdict) for element in obj]
    if isinstance(obj, dict):
        return dict((key, deep_format(value, paramdict)) for key, value in obj.items())
    return obj
class YamlParser(object):
    def __init__(self, config=None):
        """Create a parser with an empty data store and a module registry
        built from the optional configuration object."""
        self.registry = ModuleRegistry(config)
        self.data = {}    # {item-type: {name: definition}}
        self.jobs = []
    def parse(self, fn):
        """Load a YAML definitions file into self.data, keyed by item type
        (job, job-template, project, ...) and then by item name.

        Raises JenkinsJobsException when a top-level item has more than one
        key — the usual symptom of a missing indent in the YAML.
        """
        data = yaml.load(open(fn))
        for item in data:
            cls, dfn = item.items()[0]  # py2: items() is a list
            group = self.data.get(cls, {})
            if len(item.items()) > 1:
                # Try to recover the item's name for the error message.
                n = None
                for k, v in item.items():
                    if k == "name":
                        n = v
                        break
                # Syntax error
                raise JenkinsJobsException("Syntax error, for item named "
                                           "'{0}'. Missing indent?".format(n))
            name = dfn['name']
            group[name] = dfn
            self.data[cls] = group
    def getJob(self, name):
        """Return the named job definition with defaults applied, or None."""
        job = self.data.get('job', {}).get(name, None)
        if not job:
            return job
        return self.applyDefaults(job)
    def getJobGroup(self, name):
        """Return the named job-group definition, or None (no defaults)."""
        return self.data.get('job-group', {}).get(name, None)
    def getJobTemplate(self, name):
        """Return the named job-template with defaults applied, or None."""
        job = self.data.get('job-template', {}).get(name, None)
        if not job:
            return job
        return self.applyDefaults(job)
    def applyDefaults(self, data):
        """Merge a definition over its defaults block.

        The definition's own 'defaults' key names the defaults set to use
        ('global' if unspecified); the definition's keys win on conflict.
        """
        whichdefaults = data.get('defaults', 'global')
        defaults = self.data.get('defaults', {}).get(whichdefaults, {})
        newdata = {}
        newdata.update(defaults)
        newdata.update(data)  # definition overrides defaults
        return newdata
def generateXML(self, jobs_filter=None):
changed = True
while changed:
changed = False
for module in self.registry.modules:
if hasattr(module, 'handle_data'):
if module.handle_data(self):
changed = True
for job in self.data.get('job', {}).values():
if jobs_filter and job['name'] not in jobs_filter:
continue
logger.debug("XMLifying job '{0}'".format(job['name']))
job = self.applyDefaults(job)
self.getXMLForJob(job)
for project in self.data.get('project', {}).values():
logger.debug("XMLifying project '{0}'".format(project['name']))
for jobspec in project.get('jobs', []):
if isinstance(jobspec, dict):
# Singleton dict containing dict of job-specific params
jobname, jobparams = jobspec.items()[0]
else:
jobname = jobspec
jobparams = {}
job = self.getJob(jobname)
if job:
# Just naming an existing defined job
continue
# see if it's a job group
group = self.getJobGroup(jobname)
if group:
for group_jobname in group['jobs']:
job = self.getJob(group_jobname)
if job:
continue
template = self.getJobTemplate(group_jobname)
# Allow a group to override parameters set by a project
d = {}
d.update(project)
d.update(jobparams)
d.update(group)
# Except name, since the group's name is not useful
d['name'] = project['name']
if template:
self.getXMLForTemplateJob(d, template, jobs_filter)
continue
# see if it's a template
template = self.getJobTemplate(jobname)
if template:
d = {}
d.update(project)
d.update(jobparams)
self.getXMLForTemplateJob(d, template, jobs_filter)
def getXMLForTemplateJob(self, project, template, jobs_filter=None):
dimensions = []
for (k, v) in project.items():
if type(v) == list and k not in ['jobs']:
dimensions.append(zip([k] * len(v), v))
# XXX somewhat hackish to ensure we actually have a single
# pass through the loop
if len(dimensions) == 0:
dimensions = [(("", ""),)]
checksums = set([])
for values in itertools.product(*dimensions):
params = copy.deepcopy(project)
params.update(values)
expanded = deep_format(template, params)
# Keep track of the resulting expansions to avoid
# regenerating the exact same job. Whenever a project has
# different values for a parameter and that parameter is not
# used in the template, we ended up regenerating the exact
# same job.
# To achieve that we serialize the expanded template making
# sure the dict keys are always in the same order. Then we
# record the checksum in an unordered unique set which let
# us guarantee a group of parameters will not be added a
# second time.
uniq = json.dumps(expanded, sort_keys=True)
checksum = hashlib.md5(uniq).hexdigest()
# Lookup the checksum
if checksum not in checksums:
# We also want to skip XML generation whenever the user did
# not ask for that job.
job_name = expanded.get('name')
if jobs_filter and job_name not in jobs_filter:
continue
logger.debug("Generating XML for template job {0}"
" (params {1})".format(
template['name'], params))
self.getXMLForJob(expanded)
checksums.add(checksum)
def getXMLForJob(self, data):
kind = data.get('project-type', 'freestyle')
for ep in pkg_resources.iter_entry_points(
group='jenkins_jobs.projects', name=kind):
Mod = ep.load()
mod = Mod(self.registry)
xml = mod.root_xml(data)
self.gen_xml(xml, data)
job = XmlJob(xml, data['name'])
self.jobs.append(job)
break
def gen_xml(self, xml, data):
for module in self.registry.modules:
if hasattr(module, 'gen_xml'):
module.gen_xml(self, xml, data)
class ModuleRegistry(object):
def __init__(self, config):
self.modules = []
self.modules_by_component_type = {}
self.handlers = {}
self.global_config = config
for entrypoint in pkg_resources.iter_entry_points(
group='jenkins_jobs.modules'):
Mod = entrypoint.load()
mod = Mod(self)
self.modules.append(mod)
self.modules.sort(lambda a, b: cmp(a.sequence, b.sequence))
if mod.component_type is not None:
self.modules_by_component_type[mod.component_type] = mod
def registerHandler(self, category, name, method):
cat_dict = self.handlers.get(category, {})
if not cat_dict:
self.handlers[category] = cat_dict
cat_dict[name] = method
def getHandler(self, category, name):
return self.handlers[category][name]
def dispatch(self, component_type,
parser, xml_parent,
component, template_data={}):
"""This is a method that you can call from your implementation of
Base.gen_xml or component. It allows modules to define a type
of component, and benefit from extensibility via Python
entry points and Jenkins Job Builder :ref:`Macros <macro>`.
:arg string component_type: the name of the component
(e.g., `builder`)
:arg YAMLParser parser: the global YMAL Parser
:arg Element xml_parent: the parent XML element
:arg dict template_data: values that should be interpolated into
the component definition
See :py:class:`jenkins_jobs.modules.base.Base` for how to register
components of a module.
See the Publishers module for a simple example of how to use
this method.
"""
if component_type not in self.modules_by_component_type:
raise JenkinsJobsException("Unknown component type: "
"'{0}'.".format(component_type))
component_list_type = self.modules_by_component_type[component_type] \
.component_list_type
if isinstance(component, dict):
# The component is a sigleton dictionary of name: dict(args)
name, component_data = component.items()[0]
if template_data:
# Template data contains values that should be interpolated
# into the component definition
s = yaml.dump(component_data, default_flow_style=False)
s = s.format(**template_data)
component_data = yaml.load(s)
else:
# The component is a simple string name, eg "run-tests"
name = component
component_data = {}
# Look for a component function defined in an entry point
for ep in pkg_resources.iter_entry_points(
group='jenkins_jobs.{0}'.format(component_list_type), name=name):
func = ep.load()
func(parser, xml_parent, component_data)
else:
# Otherwise, see if it's defined as a macro
component = parser.data.get(component_type, {}).get(name)
if component:
for b in component[component_list_type]:
# Pass component_data in as template data to this function
# so that if the macro is invoked with arguments,
# the arguments are interpolated into the real defn.
self.dispatch(component_type,
parser, xml_parent, b, component_data)
class XmlJob(object):
def __init__(self, xml, name):
self.xml = xml
self.name = name
def md5(self):
return hashlib.md5(self.output()).hexdigest()
# Pretty printing ideas from
# http://stackoverflow.com/questions/749796/pretty-printing-xml-in-python
pretty_text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
def output(self):
out = minidom.parseString(XML.tostring(self.xml))
out = out.toprettyxml(indent=' ')
return self.pretty_text_re.sub('>\g<1></', out)
class CacheStorage(object):
def __init__(self, jenkins_url):
cache_dir = self.get_cache_dir()
# One cache per remote Jenkins URL:
host_vary = re.sub('[^A-Za-z0-9\-\~]', '_', jenkins_url)
self.cachefilename = os.path.join(
cache_dir, 'cache-host-jobs-' + host_vary + '.yml')
try:
yfile = file(self.cachefilename, 'r')
except IOError:
self.data = {}
return
self.data = yaml.load(yfile)
logger.debug("Using cache: '{0}'".format(self.cachefilename))
yfile.close()
@staticmethod
def get_cache_dir():
home = os.path.expanduser('~')
if home == '~':
raise OSError('Could not locate home folder')
xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
os.path.join(home, '.cache')
path = os.path.join(xdg_cache_home, 'jenkins_jobs')
if not os.path.isdir(path):
os.makedirs(path)
return path
def set(self, job, md5):
self.data[job] = md5
yfile = file(self.cachefilename, 'w')
yaml.dump(self.data, yfile)
yfile.close()
def is_cached(self, job):
if job in self.data:
return True
return False
def has_changed(self, job, md5):
if job in self.data and self.data[job] == md5:
return False
return True
class Jenkins(object):
def __init__(self, url, user, password):
self.jenkins = jenkins.Jenkins(url, user, password)
def update_job(self, job_name, xml):
if self.is_job(job_name):
logger.info("Reconfiguring jenkins job {0}".format(job_name))
self.jenkins.reconfig_job(job_name, xml)
else:
logger.info("Creating jenkins job {0}".format(job_name))
self.jenkins.create_job(job_name, xml)
def is_job(self, job_name):
return self.jenkins.job_exists(job_name)
def get_job_md5(self, job_name):
xml = self.jenkins.get_job_config(job_name)
return hashlib.md5(xml).hexdigest()
def delete_job(self, job_name):
if self.is_job(job_name):
self.jenkins.delete_job(job_name)
def get_jobs(self):
return self.jenkins.get_jobs()
class Builder(object):
def __init__(self, jenkins_url, jenkins_user, jenkins_password,
config=None):
self.jenkins = Jenkins(jenkins_url, jenkins_user, jenkins_password)
self.cache = CacheStorage(jenkins_url)
self.global_config = config
def delete_job(self, name):
self.jenkins.delete_job(name)
if(self.cache.is_cached(name)):
self.cache.set(name, '')
def delete_all_jobs(self):
jobs = self.jenkins.get_jobs()
for job in jobs:
self.delete_job(job['name'])
def update_job(self, fn, names=None, output_dir=None):
if os.path.isdir(fn):
files_to_process = [os.path.join(fn, f)
for f in os.listdir(fn)
if (f.endswith('.yml') or f.endswith('.yaml'))]
else:
files_to_process = [fn]
parser = YamlParser(self.global_config)
for in_file in files_to_process:
logger.debug("Parsing YAML file {0}".format(in_file))
parser.parse(in_file)
if names:
logger.debug("Will filter out jobs not in %s" % names)
parser.generateXML(names)
parser.jobs.sort(lambda a, b: cmp(a.name, b.name))
for job in parser.jobs:
if names and job.name not in names:
continue
if output_dir:
if names:
print job.output()
continue
fn = os.path.join(output_dir, job.name)
logger.debug("Writing XML to '{0}'".format(fn))
f = open(fn, 'w')
f.write(job.output())
f.close()
continue
md5 = job.md5()
if (self.jenkins.is_job(job.name)
and not self.cache.is_cached(job.name)):
old_md5 = self.jenkins.get_job_md5(job.name)
self.cache.set(job.name, old_md5)
if self.cache.has_changed(job.name, md5):
self.jenkins.update_job(job.name, job.output())
self.cache.set(job.name, md5)
else:
logger.debug("'{0}' has not changed".format(job.name))
| [
"jenkins@review.openstack.org"
] | jenkins@review.openstack.org |
7271a590d509168d243ce7c8c3d509719a72fae1 | b9c67cbe41da7743dbe46e7054844ff68a1c0fb4 | /plot_tri.py | d466a28ba3e0957dc50dedc4c358bfcffd98f7ab | [] | no_license | SuperKam91/McAdam | a82c0a8f4366880566b6a1dd2c551b9572c44b22 | ab2f36417cb870e611a5f43ad7854fdfc2d1e439 | refs/heads/master | 2021-07-18T19:44:44.144640 | 2018-05-06T12:33:38 | 2018-05-06T12:33:38 | 132,277,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | #!/usr/bin/python26
from getdist import plots, MCSamples, loadMCSamples
import getdist
ch_path=[]
while 1:
ch_path.append(raw_input('Path to chains or q to quit: ').strip())
if ch_path[-1]=='q':
ch_path=ch_path[:-1]
break
ch=[]
for p in ch_path:
ch.append(loadMCSamples(p))
pars=ch[0].getParamNames()
if len(ch)>1:
print 'Select parameters based on first chain entry; will look for matching parameter names in subsequent entries'
print 'Parameters are: '
print pars
plotpars=[]
while 1:
p=raw_input('Parameter to plot or q to quit: ').strip()
if p=='q': break
plotpars.append(pars.parWithName(p).label)
plotpar_names=['plot'+str(i) for i in range(len(plotpars))]
do_markers=raw_input('Enter true values (y/n): ').strip()
markers={}
if 'y' in do_markers.lower():
print 'Enter true value or non-numerical value to skip'
for p in plotpars:
m=raw_input('True value for '+p+': ')
try:
markers[p]=float(m)
except:
pass
for i, samps in enumerate(ch):
parsi=samps.getParamNames()
for p_label in plotpars:
found=False
for j, pj in enumerate(parsi.names):
if pj.label==p_label:
found=True
p_name=plotpar_names[plotpars.index(p_label)]
if p_label[0]=='M' and 'M_{\odot}' in p_label:
# Add a derived parameter for mass to get around labelling issues
M=eval('samps.getParams().'+pj.name)
M1=samps.ranges.getLower(pj.name)
M2=samps.ranges.getUpper(pj.name)
p_label_new=p_label.replace('M_{\odot}', '10^{14} M_{\odot}')
samps.addDerived(M*1e-14, name=p_name, label=p_label_new)
samps.updateBaseStatistics()
# Adding the derived parameter doesn't retain the range info
if M1 is None:
M1='N'
else:
M1*=1e-14
if M2 is None:
M2='N'
else:
M2*=1e-14
samps.setRanges({p_name: [M1, M2]})
if p_label in markers.keys() and i==0:
markers[p_label]*=1e-14
elif p_label[0]=='S' and 'Jy' in p_label:
# Convert source fluxes to mJy
S=eval('samps.getParams().'+pj.name)
S1=samps.ranges.getLower(pj.name)
S2=samps.ranges.getUpper(pj.name)
samps.addDerived(S*1e3, name=p_name, label=p_label.replace('Jy', 'mJy'))
samps.updateBaseStatistics()
# Adding the derived parameter doesn't retain the range info
if S1 is None:
S1='N'
else:
S1*=1e3
if S2 is None:
S2='N'
else:
S2*=1e3
samps.setRanges({p_name: [S1, S2]})
if p_label in markers.keys() and i==0:
markers[p_label]*=1e3
else:
pj.name=p_name
if not found:
print 'Warning: '+p_label+' not found in '+ch_path[i]
samps.setParamNames(parsi)
leg=[]
for c in ch:
leg.append(c.getName())
g=plots.getSubplotPlotter(width_inch=8)
g.settings.axes_fontsize=8
g.settings.alpha_filled_add=0.4
g.triangle_plot(ch, plotpar_names,
filled_compare=True,
legend_labels=leg,
legend_loc='upper right')
for ipar, p in enumerate(plotpars):
if p in markers.keys():
ax=g.subplots[ipar,ipar]
ax.axvline(x=markers[p], color='k')
for jpar, p2 in enumerate(plotpars[ipar+1:]):
if p2 in markers.keys():
jplot=ipar+jpar+1
ax=g.subplots[jplot,ipar]
ax.plot(markers[p], markers[p2], '*k')
g.export(ch_path[0]+'_tri.png')
| [
"kj316@cam.ac.uk"
] | kj316@cam.ac.uk |
a28eb576387611a80ec9b659a172f16f38ac1fc9 | 34959caee120ba712b50c52a50975ad39a1b150c | /constants.py | c804cb10e6b961cc9bc6b42aa386b140d8198aea | [] | no_license | satirmo/Team-Awsomesauce | eb2f3e585b70089497f45cbd27cac21b5771f91c | 13e0ec212bf6d66db5ffae9fc5ec4f74c0ed9e4c | refs/heads/master | 2021-01-19T17:07:03.784179 | 2017-05-08T15:42:32 | 2017-05-08T15:42:32 | 88,304,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | # Group Members : John Atti, Mark Bonadies, Tomas Carino, Aayush Shrestha, & Amanda Steidl
# Project : Final Demo : Cozmo Driving Course
# Course : CMPS 367 Robotics
# Professor : Benjamin Fine
# Date : 05.01.2016
# Main Contributor(s) of Section : Amanda Steidl
# Current File : constants.py
class CONST:
def __init__ (self):
self.MAX_LIMIT = 55
self.MIN_LIMIT = 40
self.ROAD_WIDTH = 88.9 # millimeters 3.5 inches
self.MID_WIDTH = 12.7 # millimeters .5 inches
self.COZMO_FRONT = 31.75 # millimeters 1.25
self.COZMO_LENGTH = 88.9
class decisions:
def __init__(self):
self.TURN_LEFT = 0
self.TURN_RIGHT = 1
self.TURN_OPTIONAL_LEFT = 2
self.TURN_OPTIONAL_RIGHT = 3
self.STOP_AHEAD = 4
self.COZMO_AHEAD_STOP = 5
self.COZMO_AHEAD = 6
self.WAIT = 7
self.SPEED_UPDATE = 8
self.CORRECT_LEFT = 9
self.CORRECT_RIGHT = 10
self.CONTINUE = 11
| [
"amanda.steidl@gmail.com"
] | amanda.steidl@gmail.com |
bbba29c77ed91e9e1f9c2b9e3b0adf06c9fb0fe1 | db35888bbcacc90e923fdb10312df82f03c40c60 | /WHILELOOPchallenges03.py | 73452b60c8cd7e9c0c4d87e8844f7b04a80decb2 | [] | no_license | DiogoCondecoOphs/Y11-Python- | 45b47286439865e6201114d43e4c72c5f12d77fc | d1e45ad026fd2d9f0cd36a76db51ed126a902118 | refs/heads/master | 2023-04-13T08:41:25.820805 | 2021-04-23T14:50:10 | 2021-04-23T14:50:10 | 296,599,361 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | #WHILELOOPchallenges03
#Diogo.c
num1= int(input("please enter a number "))
total = num1
val= "y"
while val == "y":
num2 = int(input("please enter another number "))
total = total + num2
val=str(input("would you like to add another number y/n "))
print("total was",total)
| [
"noreply@github.com"
] | DiogoCondecoOphs.noreply@github.com |
0c6e5ddba7ebbe6e00461c9af0795cf0f598a220 | a2b598d8e89c1755f683d6b6fe35c3f1ef3e2cf6 | /search/[boj]1072_게임_이분탐색.py | 9b2ba1666cc1fce2d54fdd1b38de88a172296a19 | [
"MIT"
] | permissive | DongHyunByun/algorithm_practice | cbe82606eaa7f372d9c0b54679bdae863aab0099 | dcd595e6962c86f90f29e1d68f3ccc9bc673d837 | refs/heads/master | 2022-09-24T22:47:01.556157 | 2022-09-11T07:36:42 | 2022-09-11T07:36:42 | 231,518,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | while(1):
try:
X,Y=map(int,input().split())
nowPercent=int(Y*100/X)
target=nowPercent+1
#a번 더 이겼을때 퍼센트
def percent(a):
return int((Y+a)*100/(X+a))
if nowPercent>=99:
print(-1)
else:
left=0
right=9999999999
#처음으로 승률+1 이상이 나오는 부분을 찾아
while(left<right):
mid=(left+right)//2
#print(left, right, mid)
#print(percent(mid))
if target<=percent(mid):
right=mid
else:
left=mid+1
print(right)
except:
break | [
"noreply@github.com"
] | DongHyunByun.noreply@github.com |
28bfa6b818b26e8d7e4657da0f4649ecd145616e | 120673df478f641a7a0824a074b4b26eeea2846a | /duplicates.py | 95b390554f7bf9a69d11c8ab57c2255be041b061 | [] | no_license | nicholsl/PythonProjects | 413034434e8084a62326a31ef70f09765f180b4e | c2c76409ab0844d67257b980214158dbc83924de | refs/heads/master | 2020-04-26T11:24:33.694907 | 2019-01-11T04:15:11 | 2019-01-11T04:15:11 | 173,515,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | ## This program runs a test of knowledge
# First get the test questions
# Later this will be modified to use file io.
def get_questions():
# notice how the data is stored as a list of lists
return [["What color is the daytime sky on a clear day? ", "blue"],
["What is the answer to life, the universe and everything? ", "42"],
["What is a three letter word for mouse trap? ", "cat"],
["What noise does a truly advanced machine make?", "ping"]]
# This will test a single question
# it takes a single question in
# it returns True if the user typed the correct answer, otherwise False
def check_question(question_and_answer):
# extract the question and the answer from the list
# This function takes a list with two elements, a question and an answer.
question = question_and_answer[0]
answer = question_and_answer[1]
# give the question to the user
given_answer = input(question)
# compare the user's answer to the tester's answer
if answer == given_answer:
print("Correct")
return True
else:
print("Incorrect, correct was:", answer)
return False
# This will run through all the questions
def run_test(questions):
if len(questions) == 0:
print("No questions were given.")
# the return exits the function
return
index = 0
right = 0
while index < len(questions):
# Check the question
#Note that this is extracting a question and answer list from the list of lists.
if check_question(questions[index]):
right = right + 1
# go to the next question
index = index + 1
# notice the order of the computation, first multiply, then divide
print("You got", right * 100 / len(questions),\
"% right out of", len(questions))
# now let's get the questions from the get_questions function, and
# send the returned list of lists as an argument to the run_test function.
def menu():
menu_item = 0
while menu_item !=9:
print("(1) Take the test")
print("(2) View the questions and answers")
print("(9) Quit")
menu_item = int(input("Choose an option using 1, 2, or 9"))
if menu_item == 1:
run_test(get_questions())
elif menu_item == 2:
taco = get_questions()
for item in taco:
print("Question: ",item[0])
print("Answer: ",item[1])
print("You have quit the program.")
menu()
| [
"yumetaki@gmail.com"
] | yumetaki@gmail.com |
d7f797ef4b5ed6b8a1b9e335a74a534bcc982d87 | da349082f2dc259fc2469aacf1ad6d2ebe6ea8d3 | /Exam March 28/02/02. Mountain Run.py | a922e128491cc78705aea64e97385168b8863f81 | [] | no_license | lsnvski/SoftUni | 0f8e80d6d8b098028f4652693cb698cacc6c2ee3 | 984db6a2edcfd40a98987a3d32fb5370f9793d3c | refs/heads/main | 2023-01-31T13:52:29.541592 | 2020-12-13T12:55:15 | 2020-12-13T12:55:15 | 303,548,844 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import math
record = float(input())
meter_per_sec = float(input())
time_for_meter = float(input())
time = (math.floor(meter_per_sec / 50) * 30) + (meter_per_sec * time_for_meter)
if record > time:
print(f"Yes! The new record is {time:.2f} seconds.")
else:
print(f"No! He was {time - record:.2f} seconds slower.") | [
"lsnvski@abv.bg"
] | lsnvski@abv.bg |
5a82148a31bf8f2f5d8fa145f859153c97c63e7d | db9247b2cf24d9f49225b5a8b6ab4918af0abde9 | /Test-AWS-price-list-Details.py | c7b6b300c123593934f480bfd6eed20ed3946e28 | [] | no_license | anecula/AWS-CostEstimation-Python | af599538d574130d0c62a7e63547e096c32797f7 | e3e8f39f653affb58d823558693c537596b2638d | refs/heads/master | 2020-04-01T14:06:49.401710 | 2018-10-27T08:28:14 | 2018-10-27T08:28:14 | 153,280,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | import argparse
import boto3
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('--days', type=int, default=30)
args = parser.parse_args()
now = datetime.datetime.utcnow()
start = (now - datetime.timedelta(days=args.days)).strftime('%Y-%m-%d')
end = now.strftime('%Y-%m-%d')
cd = boto3.client('ce', 'us-east-1')
results = []
token = None
while True:
if token:
kwargs = {'NextPageToken': token}
else:
kwargs = {}
data = cd.get_cost_and_usage(TimePeriod={'Start': start, 'End': end}, Granularity='DAILY', Metrics=['UnblendedCost'], GroupBy=[{'Type': 'DIMENSION', 'Key': 'LINKED_ACCOUNT'}, {'Type': 'DIMENSION', 'Key': 'SERVICE'}], **kwargs)
results += data['ResultsByTime']
token = data.get('NextPageToken')
if not token:
break
print('\t'.join(['TimePeriod', 'LinkedAccount', 'Service', 'Amount', 'Unit', 'Estimated']))
for result_by_time in results:
for group in result_by_time['Groups']:
amount = group['Metrics']['UnblendedCost']['Amount']
unit = group['Metrics']['UnblendedCost']['Unit']
print(result_by_time['TimePeriod']['Start'], '\t', '\t'.join(group['Keys']), '\t', amount, '\t', unit, '\t', result_by_time['Estimated'])
| [
"srinu24.a@gmail.com"
] | srinu24.a@gmail.com |
7f0200d78f46ea457f3b02e334ed5ce822ffc726 | afc3dddd1c9c7c05436af5c8b41b3215d075905a | /Intermediate 2016/increment.py | 4591045221f591a06f4be5a2acfd6ea0e28512ec | [] | no_license | JohnathanLP/minecraftpython | 9f5b668ea8d21ee78e1341f2099c2a4ff642a602 | 76d91e95128bfabb1f03f80c5a4f7f16899a270d | refs/heads/master | 2021-01-17T13:02:54.784724 | 2016-07-15T19:06:56 | 2016-07-15T19:06:56 | 56,737,802 | 0 | 0 | null | 2016-07-15T19:06:58 | 2016-04-21T02:56:01 | Python | UTF-8 | Python | false | false | 569 | py | import os
import time
fout = open("timerecord.txt",'w')
fout.truncate()
try:
limit = input("How long do you want to count? ")
seconds = 0.00
#take it to the limit
while seconds <= limit:
os.system('clear')
print seconds
seconds += .01
time.sleep(.01)
#one more time
print "All done!"
fout.write("Total time passed: ")
s = str(seconds)
fout.write(s)
fout.close()
except KeyboardInterrupt:
fout.write("Total time passed: ")
s = str(seconds)
fout.write(s)
fout.close()
| [
"johnathanlpowell@gmail.com"
] | johnathanlpowell@gmail.com |
35214fa22f54ec813cf37fce5d8abb7862d782ca | eb397d0383138412c64788834e4073e650b26212 | /game.py | f4333c03688811d5f1c69dd1c6f78b05e20c6816 | [] | no_license | jerseymec/Hilo | e26db92ebf99c915b1ad1f498c0ce0b252230811 | 236c98ec1423b13f6c3a72d1768d892bd10d7851 | refs/heads/master | 2020-09-22T05:34:36.101803 | 2019-11-30T21:57:01 | 2019-11-30T21:57:01 | 225,068,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | import random
while True:
rand_num = random.randint(1, 193)
print(rand_num)
if rand_num < 40:
break
| [
"meldoncharles@hotmail.com"
] | meldoncharles@hotmail.com |
d5b4a608c9726f75c6b1686c1985c5c4f548ef98 | 7aff63055c63b41e39f99a87d0628383f61bee54 | /Composite_index/4. Aggregation/MPI_Mazziotta_Pareto.py | d88da10caac02c1655dfd757e34c5a763c2fb8d7 | [] | no_license | IacopoTesti/Spatial_Data_Science_Rome | 634f39fdb5246f5fa14e8db9ec9e55b4a1d19d3d | a63adb33b4b84c28f30131c7b805bb1a50979979 | refs/heads/master | 2023-03-18T08:22:07.472985 | 2021-03-16T17:30:36 | 2021-03-16T17:30:36 | 262,096,553 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | ### This block defines the aggregation for normalization with MPI (Mazziotta Pareto Index)
#### aggregation MPI is used (similar to geometric mean because the arithmetic mean is corrected by a penalty function)
# all variables needed for the formula abovementioned are defined
# mean of the z elements of the standardized matrix
z_media = df_MPI.mean(axis=1)
# standard deviation of elements z of standardized matrix
z_sigma = df_MPI.std(axis=1)
# compute cv
cv = z_sigma / z_media
# calculating index MPI using positive sign (because our phenomenon is negative)
MPI_agg = z_media + z_sigma*cv
# transform the MPI index from series to dataframe
MPI_agg = MPI_agg.to_frame(name='mpi_index')
df_MPI_index = df_MPI.merge(MPI_agg, left_index=True, right_index=True)
df_MPI_index = df_MPI_index.round(3)
print("This is the matrix with the composite index computed with MPI normalization and MPI aggregation")
# export csv
df_MPI_index.to_csv('df_mpi_index.csv')
# prints first rows
df_MPI_index.head()
| [
"noreply@github.com"
] | IacopoTesti.noreply@github.com |
cb6bf07d43b3538bd0f67d1273b2348a33926802 | 0d419daf3514e06d18107ae834ff72ae6535c04d | /pilot/pilot/controllers/error.py | 45bb750651d3348bf28be9a0cbf1359bfa44a16b | [] | no_license | grid4hpc/pilot | f7efc2a4786c8146096b614c6f81f04283bf83e8 | 183c8cf9b71acaa6f55565091f76afc9b30ffe52 | refs/heads/master | 2016-09-06T02:30:11.126632 | 2013-10-29T16:10:49 | 2013-10-29T16:10:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | import cgi
from paste.urlparser import PkgResourcesParser
from pylons import request
from pylons.controllers.util import forward
from pylons.middleware import error_document_template
from webhelpers.html.builder import literal
from pilot.lib.base import BaseController
class ErrorController(BaseController):
"""Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments middleware in your config/middleware.py file.
"""
def document(self):
"""Render the error document"""
resp = request.environ.get('pylons.original_response')
content = literal(resp.body) or cgi.escape(request.GET.get('message', ''))
page = error_document_template % \
dict(prefix=request.environ.get('SCRIPT_NAME', ''),
code=cgi.escape(request.GET.get('code', str(resp.status_int))),
message=content)
return page
def img(self, id):
"""Serve Pylons' stock images"""
return self._serve_file('/'.join(['media/img', id]))
def style(self, id):
"""Serve Pylons' stock stylesheets"""
return self._serve_file('/'.join(['media/style', id]))
def _serve_file(self, path):
"""Call Paste's FileApp (a WSGI application) to serve the file
at the specified path
"""
request.environ['PATH_INFO'] = '/%s' % path
return forward(PkgResourcesParser('pylons', 'pylons'))
| [
"shamardin@gmail.com"
] | shamardin@gmail.com |
58f3ad5187db0ba90a597d319ecd2fd4036de17e | fd74a044c0037796455ba4bd4fd44f11c3323599 | /Practice/ABC/Bcontest037_a.py | 25217aff10ee643818c607b06b0b3160e6edfb8b | [] | no_license | tegetege/tegetege_AtCoder | 5ac87e0a7a9acdd50d06227283aa7d95eebe2e2f | ba6c6472082e8255202f4f22a60953d0afe21591 | refs/heads/master | 2022-03-25T00:29:22.952078 | 2022-02-10T14:39:58 | 2022-02-10T14:39:58 | 193,516,879 | 0 | 0 | null | 2019-06-25T13:53:13 | 2019-06-24T14:02:05 | Python | UTF-8 | Python | false | false | 55 | py | A,B,C = map(int,input().split())
print(int(C/min(A,B))) | [
"m_take7_ex_d@yahoo.co.jp"
] | m_take7_ex_d@yahoo.co.jp |
9eb6bae7627c2afdc8c8b8050b3ee526b76ad8c9 | c513e3ca72ce5e8ebe62d00c37531603f1b541c1 | /part1/nn_models.py | 7a400deb0cead2dc5a2bf7c20ecb44158a101a61 | [] | no_license | 0akhilesh9/reinforcement_learning_algos | e3cdb9f9b5333e9c655506a3acb1a825591b9068 | 7750a85cc98b5c1697cb3272fc3a7a572e383d0c | refs/heads/main | 2023-03-08T10:37:29.828739 | 2021-02-25T06:15:37 | 2021-02-25T06:15:37 | 342,087,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | import torch
import torch.nn as nn
import torch.autograd as autograd
class ConvolutionalNN(nn.Module):
# Init the layer dimensions and network architecture
def __init__(self, input_dim, output_dim):
super(ConvDQN, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.fc_input_dim = self.feature_size()
self.conv = nn.Sequential(
nn.Conv2d(self.input_dim[0], 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU()
)
self.fc = nn.Sequential(
nn.Linear(self.fc_input_dim, 128),
nn.ReLU(),
nn.Linear(128, 256),
nn.ReLU(),
nn.Linear(256, self.output_dim)
)
# Forward Pass
def forward(self, state):
features = self.conv_net(state)
features = features.view(features.size(0), -1)
qvals = self.fc(features)
return qvals
def feature_size(self):
return self.conv_net(autograd.Variable(torch.zeros(1, *self.input_dim))).view(1, -1).size(1)
# Fully connected NN
class FullConnectedNN(nn.Module):
# Init the layer dimensions and network architecture
def __init__(self, input_dim, output_dim):
super(FullConnectedNN, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.fc = nn.Sequential(
nn.Linear(self.input_dim[0], 128),
nn.ReLU(),
nn.Linear(128, 256),
nn.ReLU(),
nn.Linear(256, self.output_dim)
)
# Forward Pass
def forward(self, input_state_params):
q_logits = self.fc(input_state_params)
return q_logits | [
"noreply@github.com"
] | 0akhilesh9.noreply@github.com |
487d70bc23e229a453f023037c3ecd226010880b | ea08a577ce67633a4663890df93690231e9e5736 | /N-body simulation.py | dcbd1d5d4a507ba5391a3dab45afb81109981787 | [] | no_license | tjredfern/N-body-numerical-simulation | 91f8bccadc318ed96526adc117d34ecdb650e420 | f85e4cc51730195ad70ef1bccad72b8fb6be7f13 | refs/heads/master | 2022-11-27T16:08:33.235051 | 2020-07-31T13:05:00 | 2020-07-31T13:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,686 | py | #==========================================================================================================================================
# Script written in Python to integrate the equations of motion of N particles interacting with each other gravitationally with high precision.
# The script computes the equations of motion and use scipy.integrate to integrate them.
# Then it uses matplotlib to visualize the solution
#==========================================================================================================================================
import numpy as np
import sympy as sp
# 2-D vector helper used for positions, velocities and accelerations.
class Vec2:
    """A lightweight two-component vector with the arithmetic the
    simulation needs: +, -, scalar *, dot product and Euclidean length.

    Components may be plain numbers or sympy symbols (as used when
    building the symbolic equations of motion)."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        # Handy while debugging: print(v) shows the two components.
        return f"({self.x}, {self.y})"

    def __add__(self, other):
        return Vec2(self.x + other.x, self.y + other.y)

    def __radd__(self, other):
        # Mirror of __add__ for when the left operand is not a Vec2
        # but still exposes .x/.y attributes.
        return Vec2(other.x + self.x, other.y + self.y)

    def __sub__(self, other):
        return Vec2(self.x - other.x, self.y - other.y)

    def __rsub__(self, other):
        # other - self, with other supplying .x/.y attributes.
        return Vec2(other.x - self.x, other.y - self.y)

    def __mul__(self, scalar):
        return Vec2(self.x * scalar, self.y * scalar)

    def __rmul__(self, scalar):
        return Vec2(self.x * scalar, self.y * scalar)

    def dot(self, other):
        return self.x * other.x + self.y * other.y

    def get_length(self):
        # Euclidean norm: sqrt(v . v)
        return np.sqrt(self.dot(self))
# Define a Particle class. The particles are the bodies attracting each other
class Particle():
    """One gravitating point mass.

    Builds the particle's symbolic position/velocity variables with sympy,
    accumulates its symbolic gravitational acceleration from all other
    particles, and compiles those expressions into fast numeric callables
    via sympy.lambdify for use during numerical integration.
    """
    # n = number of Particle instances created so far; it doubles as the
    # next particle's unique index.
    n = 0
    def __init__(self,initial_pos,initial_vel, mass):
        """Create a particle with the given initial state (Vec2) and mass."""
        # i = this particle's index (assigned from the class-level counter)
        self.i = Particle.n
        Particle.n += 1
        self.m = mass
        self.G = 1 # change this to 6.67408e-11 if you want real world (SI) measuring units.
        # pos, vel, acc = symbolic variables, named x_i, y_i, vx_i, vy_i
        self.pos = Vec2(sp.symbols("x_"+str(self.i)),sp.symbols("y_"+str(self.i)))
        self.vel = Vec2(sp.symbols("vx_"+str(self.i)),sp.symbols("vy_"+str(self.i)))
        self.acc = Vec2(0,0)
        # lamb_vel, lamd_acc = lambdified (compiled) numeric functions,
        # filled in by lambdify_vel() / lambdify_acc() below.
        self.lamb_vel = Vec2(None,None)
        self.lamd_acc = Vec2(None,None)
        # initial_pos, initial_vel = initial position and velocity
        self.initial_pos = initial_pos
        self.initial_vel = initial_vel
        # vf_vel, vf_acc = scratch values used in the vectorfield() function
        self.vf_vel = Vec2(0,0)
        self.vf_acc = Vec2(0,0)
        # sol_pos, sol_vel = position and velocity solution lists obtained
        # after the integration of the equations of motion
        self.sol_pos = Vec2(None,None)
        self.sol_vel = Vec2(None,None)
    # compute particle acceleration using Newton's law of universal gravitation
    def calculate_acc(self,particles):
        """Accumulate into self.acc the symbolic acceleration exerted on this
        particle by every other particle in *particles*:
        a_i += G * m_j * (r_j - r_i) / |r_j - r_i|**3.

        NOTE(review): assumes particles[j].i == j (list ordered by index) and
        that it is called exactly once per particle, since it adds onto
        self.acc rather than resetting it — confirm at the call site.
        """
        for j in range(len(particles)):
            if self.i !=j:
                self.acc += (particles[j].pos - self.pos)*particles[j].m*self.G*(1/(((self.pos.x-particles[j].pos.x)**2 + (self.pos.y-particles[j].pos.y)**2)**(3/2)))
    # lambdified symbolic functions are faster for numerical calculations.
    # I used this approach (compute first symbolic equations of motion and then compile the function with lambdify)
    # to avoid python loops in the vectorfield function which needs to be run thousands of times and that is slow.
    def lambdify_vel(self,particles):
        """Compile the (identity) velocity expressions to numeric functions.

        The *particles* argument is accepted for symmetry with
        lambdify_acc() but is not used here.
        """
        self.lamb_vel.x = sp.lambdify(self.vel.x, self.vel.x)
        self.lamb_vel.y = sp.lambdify(self.vel.y, self.vel.y)
    def lambdify_acc(self,particles):
        """Compile the symbolic acceleration into numeric functions that take
        the flat coordinate list [x_0, y_0, x_1, y_1, ...] of all particles.
        """
        var = []
        for j in range(len(particles)):
            var.append(particles[j].pos.x)
            var.append(particles[j].pos.y)
        self.lamd_acc.x = sp.lambdify([var], self.acc.x)
        self.lamd_acc.y = sp.lambdify([var], self.acc.y)
#Input here the initial conditions of the particles and their masses
################################################################################################################################
#particle list
par = []
#create the particles
par.append(Particle(initial_pos = Vec2(2,5), initial_vel = Vec2(0.5,0.5) , mass = 1.))
par.append(Particle(initial_pos = Vec2(5,2), initial_vel = Vec2(0.5,0.2) , mass = 1.))
par.append(Particle(initial_pos = Vec2(3,3), initial_vel = Vec2(0.1,0.5) , mass = 1.))
par.append(Particle(initial_pos = Vec2(0.6,2.5), initial_vel = Vec2(0.5,0.5) , mass = 1.))
# Simulation time and number of steps
t_end = 60.0
steps = 800
################################################################################################################################
n = len(par)
#create the functions to integrate
# Every particle's symbolic acceleration must exist before lambdifying,
# since each expression references all the other particles' positions.
for i in range(n):
    par[i].calculate_acc(par)
for i in range(n):
    par[i].lambdify_vel(par)
    par[i].lambdify_acc(par)
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
def vectorfield(var, t):
    '''
    Integrand for scipy's odeint.

    `var` packs the state as
    [x0, y0, ..., x(n-1), y(n-1), vx0, vy0, ..., vx(n-1), vy(n-1)]
    and the returned list f holds the derivatives in the same order:
    first the position derivatives (the velocities), then the
    accelerations evaluated from the lambdified symbolic expressions.
    It can be optimized, but it's written to be readable.
    '''
    pos = var[0:2*n]
    vel = var[2*n:4*n]
    f = []
    for i in range(0,n):
        # dx/dt and dy/dt are just the current velocity components.
        par[i].vf_vel.x = par[i].lamb_vel.x(vel[2*i])
        par[i].vf_vel.y = par[i].lamb_vel.y(vel[2*i + 1])
        f.append(par[i].vf_vel.x)
        f.append(par[i].vf_vel.y)
    for i in range(0,n):
        # The accelerations depend on every particle's position at once.
        par[i].vf_acc.x = par[i].lamd_acc.x(pos)
        par[i].vf_acc.y = par[i].lamd_acc.y(pos)
        f.append(par[i].vf_acc.x)
        f.append(par[i].vf_acc.y)
    return f
from scipy.integrate import odeint  # (duplicate of the import above; kept as-is)
# set the initial conditions in the same packing order vectorfield() expects
var = []
for i in range(len(par)):
    var.append(par[i].initial_pos.x)
    var.append(par[i].initial_pos.y)
for i in range(len(par)):
    var.append(par[i].initial_vel.x)
    var.append(par[i].initial_vel.y)
# ODE solver parameters
t = np.linspace(0,t_end,steps+1)
sol = odeint(vectorfield, var, t)
sol = np.transpose(sol)
# order the solution for clarity: rows 2i / 2i+1 hold particle i's x / y history
for i in range(n):
    par[i].sol_pos.x = sol[2*i]
    par[i].sol_pos.y = sol[2*i+1]
for i in range(n):
    par[i].sol_vel.x = sol[2*n + 2*i]
    par[i].sol_vel.y = sol[2*n + 2*i+1]
# Calculate the total Energy of the system. The energy should be constant.
# Potential Energy (pairwise, with G = m = 1 as set in Particle.__init__)
Energy = 0
for i in range(0,n):
    for j in range(i+1,n):
        Energy += (-1/(((par[i].sol_pos.x-par[j].sol_pos.x)**2 + (par[i].sol_pos.y-par[j].sol_pos.y)**2)**(1/2)))
# Kinetic Energy
for i in range(0,n):
    Energy += 0.5*(par[i].sol_vel.x*par[i].sol_vel.x + par[i].sol_vel.y*par[i].sol_vel.y)
# Visualization of the solution with matplotlib. It uses a slider to change the time
################################################################################################################################
plt.style.use('dark_background')
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(1,1,1)
plt.subplots_adjust(bottom=0.2,left=0.15)
ax.axis('equal')
ax.axis([-1, 30, -1, 30])
ax.set_title('Energy =' + str(Energy[0]))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
# One marker (circle) and one trailing trajectory line per particle.
circle = [None]*n
line = [None]*n
for i in range(n):
    circle[i] = plt.Circle((par[i].sol_pos.x[0], par[i].sol_pos.y[0]), 0.08, ec="w", lw=2.5, zorder=20)
    ax.add_patch(circle[i])
    line[i] = ax.plot(par[i].sol_pos.x[:0],par[i].sol_pos.y[:0])[0]
from matplotlib.widgets import Slider
slider_ax = plt.axes([0.1, 0.05, 0.8, 0.05])
slider = Slider(slider_ax, # the axes object containing the slider
                't', # the name of the slider parameter
                0, # minimal value of the parameter
                t_end, # maximal value of the parameter
                valinit=0, # initial value of the parameter
                color = '#5c05ff'
                )
def update(time):
    # Slider callback: snap the slider's time to the nearest solver step,
    # then redraw the markers, trails and energy readout for that instant.
    i = int(np.rint(time*steps/t_end))
    ax.set_title('Energy =' + str(Energy[i]))
    for j in range(n):
        circle[j].center = par[j].sol_pos.x[i], par[j].sol_pos.y[i]
        line[j].set_xdata(par[j].sol_pos.x[:i+1])
        line[j].set_ydata(par[j].sol_pos.y[:i+1])
slider.on_changed(update)
plt.show()
| [
"noreply@github.com"
] | tjredfern.noreply@github.com |
9ca59003a4044b3e9194cf46b6a1df42749829f2 | f638793eaf038b0f5b6fc21099ef486d5f1a9c4b | /hacker_rank/default_arg.py | be29a49192fa9b1f27bdd3d737c4f44ce641619e | [] | no_license | barbocz/UdemyCourse | 8c8a377c7d3a1c3c091cd9a98d3cd730a08de82d | eb9c9e217ca8357699967fc98ecdd5af9db295e5 | refs/heads/master | 2022-12-24T18:32:36.427401 | 2020-09-30T19:03:33 | 2020-09-30T19:03:33 | 298,615,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | class EvenStream(object):
    def __init__(self):
        # The stream starts at 0 and advances by 2: yields 0, 2, 4, ...
        self.current = 0
    def get_next(self):
        # Return the current value, then advance to the next even number.
        to_return = self.current
        self.current += 2
        return to_return
class OddStream(object):
    """Stateful stream producing the odd numbers 1, 3, 5, ..."""

    def __init__(self):
        self.current = 1

    def get_next(self):
        # Hand back the current value and step two forward.
        value = self.current
        self.current += 2
        return value
def print_from_stream(n, stream=None):
    """Print the next n values of `stream` (a fresh EvenStream by default).

    Bug fix: the original default was `stream=EvenStream()`, which is
    evaluated once at function definition, so a single shared stream kept
    advancing across calls (the classic mutable-default-argument pitfall).
    Creating the default inside the call restarts it from 0 every time.
    """
    if stream is None:
        stream = EvenStream()
    for _ in range(n):
        print(stream.get_next())
# Driver: read the query count, then for each query a stream name and length.
queries = int(input())
for _ in range(queries):
    stream_name, n = input().split()
    n = int(n)
    if stream_name == "even":
        print("even "+str(n))
        print_from_stream(n)
    else:
        print("odd "+str(n))
        print_from_stream(n, OddStream())
| [
"barbocz.attila@gmail.com"
] | barbocz.attila@gmail.com |
f865b3508c6ba6b9fd2a0f572519eae1a3eced74 | 52ed0fe71b8fefcd292ae16e4da9df8c84aedbbf | /probabilidades/env/bin/easy_install-3.8 | eca82ed48d78ef7f5cc12431e0a4f1d2c67f58c2 | [] | no_license | Viistorrr/python_platzi_route | 596b13f2602a612ca64a878bfdb8fd1a34691413 | 36f25737dc970353f333717798afc0ad29d70a6c | refs/heads/master | 2022-11-26T14:50:40.168772 | 2020-07-25T17:54:15 | 2020-07-25T17:54:15 | 281,691,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | 8 | #!/Users/macbookpro/Documents/platzi/python_platzi_route/probabilidades/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Console-script shim generated by setuptools: normalize argv[0] (strip the
# Windows "-script.pyw"/".exe" suffix), then hand off to easy_install's main.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"victormeza41@gmail.com"
] | victormeza41@gmail.com |
9678cb52350193a1095fe3ab30afa8b5e511b73e | 04850099bbe9f7548793eb8fd6376fbdb2f07670 | /ecg/from_MCP3008.py | 109a60e19d49ff1c895139308ad2aa3b6abbceeb | [] | no_license | sam-luby/ECG-Pi | f92f48312347c2b0954cd3bc9b959c5363ebd92d | c0e3d520f15e6e2acd2fb3943c00800efaf1ff5d | refs/heads/master | 2022-07-21T19:07:08.341152 | 2019-03-05T14:31:54 | 2019-03-05T14:31:54 | 122,356,393 | 2 | 1 | null | 2022-06-21T23:29:09 | 2018-02-21T15:47:15 | Python | UTF-8 | Python | false | false | 1,146 | py | import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import pandas as pd
#run pip3 install Adafruit-GPIO & pip3 install Adafruit-MCP3008 if packages not found
# retrieve data from ADC using SPI interface
def get_data_from_MCP(T, filename):
    """Sample channel 0 of an MCP3008 ADC over SPI for T seconds at ~250 Hz.

    The readings are written to `filename` as a one-column CSV and the
    number of requested samples (T * fs) is returned.

    NOTE(review): time.sleep(0.004) only approximates a 250 Hz rate; the
    loop body adds jitter, so the effective rate is slightly lower.
    """
    fs = 250  # nominal sample rate in Hz
    Nsamp = T * fs
    # Sample counts at which to report 10%, 20%, ... progress.
    milestones = [int(x * (Nsamp / 10)) for x in range(1, 11)]
    SPI_PORT = 0
    SPI_DEVICE = 0
    mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
    values = []
    percentage = 10
    print('Reading MCP3008 values, press Ctrl-C to quit...')
    i = 0
    while i < Nsamp:  # fixed: redundant `while True and ...` simplified
        values.append(mcp.read_adc(0))
        i += 1
        if i in milestones:  # percentage completed indication for user
            print("Collecting data, {}% complete.".format(percentage))
            percentage += 10
        time.sleep(0.004)  # not the best
    print(len(values))
    print("Recording complete, processing ECG data...")
    pd.DataFrame(values).to_csv(filename, index=False)
    return Nsamp
| [
"samwluby@gmail.com"
] | samwluby@gmail.com |
012d51acc6c656aee5030104ef582244d8f617f2 | 93d5f864f4e24892c3df665ffd32d3a39faf644f | /Tareas/T01/menu.py | 64fcb6287472f3cd7cb197f8a28a03bf4b83b9c4 | [] | no_license | Benjaescobar/programacion_avanzada | ecf177b6ddf0a250682c7952580b0786197ce0a8 | c0664208b13a9c9216a9361e75560ebd8cf92f45 | refs/heads/master | 2023-05-30T15:33:00.249358 | 2021-05-25T21:24:18 | 2021-05-25T21:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,656 | py |
from parametros import INICIADOR_LOOP, DIAS_COMPETENCIA
from cargar_datos import cargar_delegaciones
from campeonato import Campeonato
from delegaciones import IEEEsparta, DCCrotona
from funciones import ingresar_input, crear_archivo, seleccionar_jugador_lesionado
##### Funciones utiles #####
def ingresar_apodo():
    """Prompt until the user types an alphanumeric nickname, then return it."""
    while INICIADOR_LOOP:
        apodo = input()
        if not apodo.isalnum():
            print("Nombre invalido :(, ingrese uno con caracteres alfanumericos")
            continue
        return apodo
###############################
########## M E N U S ##########
###############################
def menu_entrenador(entrenador, torneo):
    # T R A I N E R   M E N U
    # `entrenador` is actually an instance of the delegation class
    # (IEEEsparta / DCCrotona) picked by the user.
    # Returns True to go back to the previous menu, False to quit the game.
    while INICIADOR_LOOP:
        print("[0] Fichar deportista")
        print("[1] Entrenar deportista")
        print("[2] Sanar deportista")
        print("[3] Comprar tecnologia")
        print("[4] Usar habiliidad especial")
        print("[5] Volver al menu anterior")
        print("[6] Salir del programa")
        comando = ingresar_input(["0","1","2","3","4","5","6"])
        if comando == "0":
            entrenador.fichar_deportistas(torneo)
        elif comando == "1":
            entrenador.entrenar_deportistas(torneo)
        elif comando == "2":
            # NOTE(review): this option acts on torneo.entrenador instead of
            # the `entrenador` argument like every other option — confirm
            # that is intentional.
            torneo.entrenador.sanar_lesiones(torneo)
        elif comando == "3":
            entrenador.comprar_tecnologia(torneo)
        elif comando == "4":
            entrenador.habilidad_especial(torneo)
        elif comando == "5":
            return True
        else:
            return False
##### COMPETENCIA #####
def menu_competencia(torneo):
    # Placeholder: competition sub-menu is not implemented yet.
    pass
##### MENU PRINCIPAL #####
def menu_principal(entrenador, torneo):
    # Main game loop: runs until the competition ends or the user quits.
    while INICIADOR_LOOP:
        print(torneo.dia_actual)
        print(DIAS_COMPETENCIA)
        # Once the last day is reached, show the results file and the winner.
        if torneo.dia_actual >= DIAS_COMPETENCIA:
            print("\nSE ACABO LA COMPETENCIA! -O-\n")
            with open("resultados.txt", "r") as resultados:
                for row in resultados:
                    print(row)
            # torneo.mostrar_estado()
            if torneo.medallero["IEEEsparta"] > torneo.medallero["DCCrotona"]:
                print("\nFelicitaciones a", torneo.entrenador.delegacion, "por su victoria\n")
            elif torneo.medallero["IEEEsparta"] < torneo.medallero["DCCrotona"]:
                print("\nFelicitaciones a", torneo.rival.delegacion, "por su victoria\n")
            else:
                print("NO GANO NADIEEEEEEE\n")
            return
        if torneo.dia_actual % 2 != 0:
            imprimir = "y te toca entrenar, aprovecha de hacer mejoras utiles\n"
            print("\nEstas en el dia",torneo.dia_actual,imprimir)
        print("[0] Menu entrenador")
        print("[1] Simular competencias")
        print("[2] Mostrar estado competencia")
        print("[3] Salir del programa")
        # ingresar_input validates that the entered command is one of the options
        comando = ingresar_input(["0","1","2","3"])
        if comando == "0":
            menu = menu_entrenador(entrenador,torneo)
            # menu_entrenador returns False when the user wants to quit
            if menu:
                continue
            else:
                print("Adios")
                return
        elif comando == "1":
            # Simulate the competitions for this day pair.
            torneo.dia_actual += 1
            torneo.competencias()
            torneo.dia_actual +=1
            pass
        elif comando == "2":
            # show current competition status
            torneo.mostrar_estado()
        else:
            return
##### MENU INICIADOR JUEGO #####
def menu_inicio():
    # Entry menu: start a new game or quit the program.
    while INICIADOR_LOOP:
        print("[0] Iniciar juego\n[1] Salir del programa")
        comando = ingresar_input(["0","1"])
        if comando == "0":
            crear_archivo()
            # The game starts here: ask for both trainer names.
            print("Ingrese su nombre:")
            nombre_entrenador = ingresar_apodo()
            print("Ingrese el nombre de su rival:")
            nombre_rival = ingresar_apodo()
            print("Escoja una delegacion")
            print("[0] IEEEsparta\n[1] DCCrotona")
            comando = ingresar_input(["0","1"])
            if comando == "0":
                # cargar_delegaciones returns a dict keyed by delegation name
                datos_esparta = cargar_delegaciones()["IEEEsparta"]
                datos_crotona = cargar_delegaciones()["DCCrotona"]
                entrenador = IEEEsparta(nombre_entrenador,datos_esparta[0],datos_esparta[1],datos_esparta[2],datos_esparta[3])
                rival = DCCrotona(nombre_rival,datos_crotona[0],datos_crotona[1],datos_crotona[2],datos_crotona[3])
                torneo = Campeonato(entrenador,rival)
                menu_principal(entrenador,torneo)
            elif comando == "1":
                # Same setup with the delegations swapped between players.
                datos_esparta = cargar_delegaciones()["IEEEsparta"]
                datos_crotona = cargar_delegaciones()["DCCrotona"]
                rival = IEEEsparta(nombre_rival, datos_esparta[0], datos_esparta[1], datos_esparta[2], datos_esparta[3])
                entrenador = DCCrotona(nombre_entrenador, datos_crotona[0], datos_crotona[1], datos_crotona[2], datos_crotona[3])
                torneo = Campeonato(entrenador,rival)
                menu_principal(entrenador,torneo)
        else:
            return
| [
"benja.escobar.b@gmail.com"
] | benja.escobar.b@gmail.com |
b71d0ba69122f20eb76eb0293f5134ff216d358c | 50e5d9961505b046edc6b8c210fbc17e954a4394 | /LeetCode/Dynamic Programming/Maximum Subarray.py | 9b86013a3d7768deeaaa743c7a8a4e10dfb0ff70 | [] | no_license | chai1323/Data-Structures-and-Algorithms | e1ba49080dfbc16aec7060064ed98711a602b625 | 97b9600c5b4d71bce6d8d5b1a52c99e8ff4c8d1b | refs/heads/master | 2023-05-26T13:21:09.487453 | 2021-02-11T11:32:10 | 2021-02-11T11:32:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | '''Given an integer array nums, find the contiguous subarray (containing at least one number)
which has the largest sum and return its sum.
Example:
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6. '''
class Solution:
    def maxSubArray(self, A: List[int]) -> int:
        """Kadane's algorithm: O(n) maximum contiguous subarray sum.

        Assumes A is non-empty (per the problem statement). Seeding both
        accumulators with A[0] removes the original's magic huge-negative
        sentinel and handles all-negative arrays naturally.
        """
        best = local = A[0]
        for x in A[1:]:
            # Either extend the best subarray ending here, or restart at x.
            local = max(x, local + x)
            best = max(best, local)
        return best
| [
"noreply@github.com"
] | chai1323.noreply@github.com |
b6139653ae942b7e144940a2e1f5a870f1debf20 | 701a7bfab7e6e33951c5d418731c008c30c7d0db | /products/migrations/0006_product_media.py | e13190efcd7c10b7237c18abecacf9db7482bab4 | [] | no_license | pjelelhml/bootcamp-django | 4aade18f3e67fa450bcdd3188fab1140afa1684d | f2cb0bf729f16148d1d835a9caa67df96a48cf5e | refs/heads/main | 2023-06-13T08:29:41.318027 | 2021-07-12T20:53:31 | 2021-07-12T20:53:31 | 374,385,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # Generated by Django 3.2.4 on 2021-07-09 14:33
from django.db import migrations, models
import products.storages
class Migration(migrations.Migration):
    """Add the optional `media` file field to the Product model."""
    dependencies = [
        ('products', '0005_product_image'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='media',
            # Optional file, stored under 'products/' through the project's
            # custom ProtectedStorage backend.
            field=models.FileField(blank=True, null=True, storage=products.storages.ProtectedStorage, upload_to='products/'),
        ),
    ]
| [
"paulo3385@hotmail.com"
] | paulo3385@hotmail.com |
b74e4582bc25ab70c230b807cd60cc78749dc992 | 34b2402c2058267d9d154b7d150aa61b5324d73c | /proxymodules/http_strip.py | e96a63fe9d0d87f53017ee7c584256476f27ba82 | [
"MIT"
] | permissive | ickerwx/tcpproxy | 0d56241187a3e90d21afb3937c511c88bb389706 | fdf749ef941eca3fc02ddc456dfe36f53b58e22a | refs/heads/master | 2023-08-29T21:07:14.823784 | 2023-07-15T18:20:41 | 2023-07-15T18:20:41 | 32,262,026 | 295 | 97 | MIT | 2023-08-24T18:28:40 | 2015-03-15T13:32:05 | Python | UTF-8 | Python | false | false | 857 | py | #!/usr/bin/env python3
import os.path as path
class Module:
    """tcpproxy module that strips the HTTP header block from a message."""

    def __init__(self, incoming=False, verbose=False, options=None):
        # Module name is derived from this file's name (proxymodules/name.py).
        self.name = path.splitext(path.basename(__file__))[0]
        self.description = 'Remove HTTP header from data'
        self.incoming = incoming  # incoming means module is on -im chain

    def detect_linebreak(self, data):
        """Return the blank-line separator this message uses (CRLF or LF)."""
        first_line = data.partition(b'\n')[0]
        return (b'\r\n' * 2) if first_line.endswith(b'\r') else (b'\n' * 2)

    def execute(self, data):
        """Drop everything up to and including the first blank line."""
        separator = self.detect_linebreak(data)
        if separator in data:
            data = data.partition(separator)[2]
        return data
# Guard: this proxy module is meant to be loaded by tcpproxy, not run directly.
if __name__ == '__main__':
    print('This module is not supposed to be executed alone!')
| [
"mail@renewerner.net"
] | mail@renewerner.net |
a75471f9aa157b4f6ec54a2ee0b120c12d7fd17d | a10d048b1780adc0f8da05d9d1e851a529807279 | /WorkWithFile/main.py | b586d488029a6e6be5711bac9693f2bb6f88816f | [] | no_license | jiroblea/Self-Taught-Programmer-Book | c198fb2640a89d97808d66c98d4b0d21c048e627 | 35c85b2e804e767e1ffd2bdb1c40e65cfcee1e2a | refs/heads/master | 2023-08-18T13:40:03.408870 | 2021-10-24T08:42:50 | 2021-10-24T08:42:50 | 371,001,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | import utilities as util
import csv
# Collect the user's biography, then persist it as a two-column CSV.
name, age = util.biographies()
fields = ["Names", "Age"]
rows = [name, age]
filename = input("File to be called: ")
filename = filename + ".csv"
# Bug fix: the csv module requires newline="" on the file handle; without
# it, Windows inserts a blank line between every written row.
with open(filename, "w", newline="") as csvfile:
    filewriter = csv.writer(csvfile, delimiter=",")
    filewriter.writerow(fields)  # header row
    filewriter.writerow(rows)    # single data row: [name, age]
print(f"Name: {name} \nAge: {age}")
print(filename)
print("Done")
# # importing the csv module
# import csv
# import os
# # field names
# fields = ['Name', 'Branch', 'Year', 'CGPA']
# # data rows of csv file
# rows = [['Nikhil', 'COE', '2', '9.0'],
# ['Sanchit', 'COE', '2', '9.1'],
# ['Aditya', 'IT', '2', '9.3'],
# ['Sagar', 'SE', '1', '9.5'],
# ['Prateek', 'MCE', '3', '7.8'],
# ['Sahil', 'EP', '2', '9.1']]
# # name of csv file
# filename = "university.csv"
# # writing to csv file
# with open(filename, 'w') as csvfile:
# # creating a csv writer object
# csvwriter = csv.writer(csvfile)
# # writing the fields
# csvwriter.writerow(fields)
# # writing the data rows
# csvwriter.writerows(rows)
| [
"jirooblea@gmail.com"
] | jirooblea@gmail.com |
5cab019870ce8d4033bcf29458e13fbf7f4aed13 | f85e4937fb580d082e83c606a0a58aedbdc140fe | /Pytho chan/Nigma24.py | f8b253bcd37b32ff6c6bb996f4c58779eaff9302 | [] | no_license | matteobaire/pychallenge | 81b0bd8abedbf8f549b6b78f6d2284e1c14e9aba | 9f5d3bf699ee063686978f7ba73734c5b131f036 | refs/heads/master | 2021-01-01T18:41:34.650712 | 2015-09-11T13:04:26 | 2015-09-11T13:04:26 | 42,308,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,172 | py | # Copyright for the following five classes by John Eriksson
# <http://arainyday.se/>, 2006. Originally written for the AStar
# library <http://www.pygame.org/projects/9/195/> and released into the
# public domain. Thanks a lot!
from PIL import Image
class Path:
    """Outcome of a completed search: ordered nodes plus the total move cost."""

    def __init__(self, nodes, totalCost):
        self.nodes, self.totalCost = nodes, totalCost

    def getNodes(self):
        """Return the node sequence from start to goal."""
        return self.nodes

    def getTotalMoveCost(self):
        """Return the accumulated movement cost of the path."""
        return self.totalCost
class Node:
    """Search-graph node: a location, its accumulated move cost, and a
    unique location id (lid). Two nodes compare equal iff their lids match."""

    def __init__(self, location, mCost, lid, parent=None):
        self.location = location
        self.mCost = mCost
        self.parent = parent
        self.score = 0
        self.lid = lid

    def __eq__(self, n):
        # Keeps the original's int result (1/0) rather than True/False.
        return 1 if n.lid == self.lid else 0
class AStar:
    """A* search over nodes supplied by a map handler.

    Bookkeeping lists (created fresh in findPath):
      self.o  - lids of currently-open nodes
      self.on - the open Node objects, parallel to self.o
      self.c  - lids of closed (already expanded) nodes
    """
    def __init__(self, maphandler):
        self.mh = maphandler
    def _getBestOpenNode(self):
        # Linear scan for the open node with the lowest f-score.
        bestNode = None
        for n in self.on:
            if not bestNode:
                bestNode = n
            elif n.score <= bestNode.score:
                bestNode = n
        return bestNode
    def _tracePath(self, n):
        # Walk parent links back toward the start node and build the Path.
        nodes = []
        totalCost = n.mCost
        p = n.parent
        nodes.insert(0, n)
        while True:
            if p.parent is None:
                break
            nodes.insert(0, p)
            p = p.parent
        return Path(nodes, totalCost)
    def _handleNode(self, node, end):
        # Move `node` from the open to the closed set, then examine its
        # neighbours; returns the goal node as soon as one matches `end`.
        i = self.o.index(node.lid)
        self.on.pop(i)
        self.o.pop(i)
        self.c.append(node.lid)
        nodes = self.mh.getAdjacentNodes(node, end)
        for n in nodes:
            if n.location == end:
                return n
            elif n.lid in self.c:
                continue
            elif n.lid in self.o:
                # Already open: keep whichever copy has the cheaper path cost.
                i = self.o.index(n.lid)
                on = self.on[i]
                if n.mCost < on.mCost:
                    self.on.pop(i)
                    self.o.pop(i)
                    self.on.append(n)
                    self.o.append(n.lid)
            else:
                self.on.append(n)
                self.o.append(n.lid)
        return None
    def findPath(self, fromlocation, tolocation):
        """Return a Path from fromlocation to tolocation, or None if unreachable."""
        self.o = []
        self.on = []
        self.c = []
        end = tolocation
        fnode = self.mh.getNode(fromlocation)
        self.on.append(fnode)
        self.o.append(fnode.lid)
        nextNode = fnode
        while nextNode is not None:
            finish = self._handleNode(nextNode, end)
            if finish:
                return self._tracePath(finish)
            nextNode = self._getBestOpenNode()
        return None
class SQ_Location:
    """Grid coordinate; equality (returning 1/0) compares both components."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, l):
        return 1 if (l.x == self.x and l.y == self.y) else 0
class SQ_MapHandler:
    """Square-grid adapter for AStar: `mapdata` is a row-major list of move
    costs, with -1 marking an impassable cell."""
    def __init__(self, mapdata, width, height):
        self.m = mapdata
        self.w = width
        self.h = height
    def getNode(self, location):
        # Returns None for out-of-bounds or blocked (-1) cells.
        x = location.x
        y = location.y
        if x < 0 or x >= self.w or y < 0 or y >= self.h:
            return None
        d = self.m[(y * self.w) + x]
        if d == -1:
            return None
        return Node(location, d, ((y * self.w) + x))
    def getAdjacentNodes(self, curnode, dest):
        # 4-connected neighbourhood (no diagonal moves).
        result = []
        cl = curnode.location
        dl = dest
        n = self._handleNode(cl.x + 1, cl.y, curnode, dl.x, dl.y)
        if n:
            result.append(n)
        n = self._handleNode(cl.x - 1, cl.y, curnode, dl.x, dl.y)
        if n:
            result.append(n)
        n = self._handleNode(cl.x, cl.y + 1, curnode, dl.x, dl.y)
        if n:
            result.append(n)
        n = self._handleNode(cl.x, cl.y - 1, curnode, dl.x, dl.y)
        if n:
            result.append(n)
        return result
    def _handleNode(self, x, y, fromnode, destx, desty):
        # Build a successor node; score = path cost so far + Manhattan
        # distance to the destination (the A* heuristic).
        n = self.getNode(SQ_Location(x, y))
        if n is not None:
            dx = max(x, destx) - min(x, destx)
            dy = max(y, desty) - min(y, desty)
            emCost = dx + dy
            n.mCost += fromnode.mCost
            n.score = n.mCost + emCost
            n.parent = fromnode
            return n
        return None
def main():
    """Solve the maze image with A* and recover the zip bytes hidden on the path."""
    img = Image.open("maze.png")
    maze = img.load()
    mapdata = []
    # Translate pixel data into something that AStar understands:
    # white pixels are walls (-1), everything else is walkable (cost 1).
    for elt in img.getdata():
        if elt == (255, 255, 255, 255):
            mapdata.append(-1)
        else:
            mapdata.append(1)
    # Define start and destination points.
    mapdata[639] = 5
    mapdata[410241] = 6
    astar = AStar(SQ_MapHandler(mapdata, 641, 641))
    start = SQ_Location(639, 0)
    end = SQ_Location(1, 640)
    p = astar.findPath(start, end)
    data = []
    # Extract data from "logs": cells with odd x and y along the path each
    # carry one payload byte in their first channel.
    for node in p.nodes:
        if node.location.x % 2 and node.location.y % 2:
            data.append(maze[node.location.x, node.location.y][0])
    # Bug fix: the file is opened in binary mode, so it must receive bytes;
    # the original wrote a str ("".join of chr values), which raises
    # TypeError on Python 3. The context manager also guarantees the close.
    with open("unzip-me.zip", "wb") as h:
        h.write(bytes(data))
if __name__ == "__main__":
main() | [
"mbdigital@virgilio.it"
] | mbdigital@virgilio.it |
818fb09f8f5de94bdddf44acd471f366bfd04c70 | eb463217f001a8ff63243208dc2bb7e355793548 | /src/richie/plugins/section/migrations/0002_add_template_field.py | d3c83581c472881596481d657675e6d9f2744e84 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | phuoclhb/richie | 25020254b635c41648d65a30b3f2405007bd8a39 | 328167d02f9596c8b1d428655f0de1bed7fb277d | refs/heads/master | 2020-08-13T07:14:22.006472 | 2019-10-11T15:31:02 | 2019-10-11T15:58:48 | 214,930,515 | 1 | 0 | MIT | 2019-10-14T02:27:14 | 2019-10-14T02:27:13 | null | UTF-8 | Python | false | false | 631 | py | # Generated by Django 2.1.7 on 2019-02-22 01:57
from django.db import migrations, models
from ..defaults import SECTION_TEMPLATES
class Migration(migrations.Migration):
    """Add an optional `template` choice field to the Section plugin model."""
    dependencies = [("section", "0001_initial")]
    operations = [
        migrations.AddField(
            model_name="section",
            name="template",
            field=models.CharField(
                # Choices come from the app's SECTION_TEMPLATES setting;
                # the first entry doubles as the default.
                choices=SECTION_TEMPLATES,
                default=SECTION_TEMPLATES[0][0],
                help_text="Optional template for custom look.",
                max_length=150,
                verbose_name="Template",
            ),
        ),
    ]
| [
"sveetch@gmail.com"
] | sveetch@gmail.com |
e6dc56afa4414aaf765a2ecc588488d2ab650a35 | a86552e1da790c6a24ab0009d84be270e303c1a8 | /highlighting numbers, if, for, while/Queen's move.py | 8e1bc399649ca35fcebc715fa7c444ed0a57e1b0 | [] | no_license | dimasiklrnd/python | 4c9da1c0e7fa834bcf9083c475654e2b4d6ef0eb | 1c0a84ab8242b577c92c1ec8b83ad80216fdc972 | refs/heads/master | 2020-06-29T08:22:26.845678 | 2020-01-09T19:59:23 | 2020-01-09T19:59:23 | 200,410,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | '''Шахматный ферзь может ходить на любое число клеток по горизонтали, по вертикали или по диагонали. Даны две различные клетки шахматной доски, определите, может ли ферзь попасть с первой клетки на вторую одним ходом. Для простоты можно не рассматривать случай, когда данные клетки совпадают.
Формат входных данных
Программа получает на вход четыре числа от 1 до 8 каждое, задающие номер столбца и номер строки сначала для первой клетки, потом для второй клетки.
Формат выходных данных
Программа должна вывести YES, если из первой клетки ходом ферзя можно попасть во вторую. В противном случае - NO'''
# Read the first square's column and row, then the second square's.
x1 = int(input())
y1 = int(input())
x2 = int(input())
y2 = int(input())
# A queen reaches the target in one move if it shares a row, a column,
# or a diagonal (equal horizontal and vertical distance) with it.
same_line = x1 == x2 or y1 == y2
same_diagonal = abs(x1 - x2) == abs(y1 - y2)
print('YES' if same_line or same_diagonal else 'NO')
| [
"dimasiklrnd@gmail.com"
] | dimasiklrnd@gmail.com |
2302a6695ea93d585a3bdfd4cc25f1fe488046cf | 1caeb7c3f73562176cdd33d983fc6af40e890c71 | /服务器与计算机视觉模块/View/view.py | 02932cc7fb5640708d6d8cfe863cec6d9b3ece5d | [] | no_license | Yimyl/Care | abe6c302b1a669f9f79c13f9a061bee10699acb4 | b0c72c6536b97a74284ac98e704bd9f0f9c4c57f | refs/heads/master | 2020-06-18T13:04:09.625187 | 2019-07-10T08:06:04 | 2019-07-10T08:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,629 | py | from flask_cors import CORS
from flask_login import LoginManager, login_user, login_required
import json
import datetime
import os
from flask_sqlalchemy import SQLAlchemy
import pymysql
import socket
import cv2
import threading
import struct
import numpy
import _thread
from Util.JudgeInteract import faceRegniZation
from flask import Flask, render_template, Response, request
from Util import send, receive
from Vision.Face import CollectFaces
from model import Sys_user, Volunteer_info, Employee_info, Oldperson_info, Event_info
pymysql.install_as_MySQLdb()  # let SQLAlchemy's MySQLdb driver use pymysql
app = Flask(__name__)
CORS(app, supports_credentials=True)
login_manager = LoginManager()
login_manager.init_app(app)
app.config['JSON_AS_ASCII'] = False
# SECURITY: database credentials are hard-coded in the source; move them to
# environment variables or a config file before deploying.
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql://root:123456@localhost/first_flask"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# A random secret key per process: existing sessions are invalidated on restart.
app.config['SECRET_KEY'] = os.urandom(24)
app.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=7)
db = SQLAlchemy(app)
# Video-capture globals: status flag, frame size, frame rate, codec, writer.
status = 0
sz = (640, 480)
fps = 5
fourcc = cv2.VideoWriter_fourcc(*'XVID')
vout = cv2.VideoWriter()
# Counter used by constructPath() to number recorded clips.
count = 0
def constructPath():
    # Bump the module-level clip counter and return the next numbered
    # output-video path (hard-coded to a Windows desktop directory).
    global count
    count += 1
    return 'C:/Users/Administrator/Desktop/Cares/Vision/output' + str(count) + '.avi'
@app.route('/login', methods=['POST', 'GET'])
def login():
    # Validate an id/password pair against Sys_user; responds with
    # {"valid": "done"} on success and {"valid": "error"} otherwise.
    msg = None
    if request.method == 'POST' or request.method == 'GET':
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        user = Sys_user.query.filter(Sys_user.identify == data['id']).first()
        if user and user.check_passwd(data['password']) :
            #login_user(user)
            msg = {"valid": "done"}
        else:
            msg = {"valid": "error"}
    return json.dumps(msg)
@app.route('/register', methods=['POST', 'GET'])
def register():
    # Create a new Sys_user unless the requested id is already taken.
    msg = None
    if request.method == 'POST' or request.method == 'GET':
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        user = Sys_user(data['id'], data['password'], data['real_name'], data['gender'], data['telephone'])
        if Sys_user.query.filter(Sys_user.identify == data['id']).first():
            msg = {"valid":"error"}
        else:
            user.add()
            msg = {"valid":"done"}
    return json.dumps(msg)
@login_required
@app.route('/modify', methods=['POST', 'GET'])
def modify():
    # Change a user's stored password.
    # NOTE(review): the update only runs when check_passwd(...) is False,
    # i.e. when the submitted password differs from the stored one —
    # confirm this inverted-looking condition is intentional.
    msg = None
    if request.method == 'POST' or request.method == 'GET':
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        #data = json.loads(data)
        user = Sys_user.query.filter(Sys_user.identify == data['name']).first()
        if user and user.check_passwd(data['password']) == False:
            user.mod(data['name'], data['password'])
            msg = {"valid": "done"}
        else:
            msg = {"valid": "error"}
    return json.dumps(msg)
@login_required
@app.route('/oldperson_required', methods=['POST', 'GET'])
def oldperson_required():
    # Dump every oldperson_info row as a JSON list for the frontend table.
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.oldperson_info")
        db.session.commit()
        data = list(data)
        payload = []
        content = {}
        for result in data:
            # Columns by position: id, name, gender, tel, pic_src,
            # checkin_date, checkout_date, then first-guardian fields.
            content = {'id': result[0], 'name': result[1].encode("unicode_escape").decode("unicode_escape"), 'gender': result[2], 'tel': result[3], 'pic_src': result[4],
                       'checkin_date': result[5].strftime("%Y-%m-%d"),'checkout_date': None, 'first_guardian_name': result[7],
                       'first_guardian_relation': result[8],'first_guardian_tel':result[9]}
            payload.append(content)
            content = {}
        return json.dumps(payload)
@login_required
@app.route('/oldperson', methods=['POST', 'GET'])
def oldperson():
    # Replace the whole oldperson_info table with the posted JSON list.
    if request.method == 'POST' or request.method == 'GET':
        db.session.execute("DELETE FROM first_flask.oldperson_info")
        db.session.commit()
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        data = json.loads(data)
        for item in data:
            user = Oldperson_info(int(item['id']), item['name'],
                                item['gender'], int(item['tel']), None, datetime.datetime.now(),None,
                                item['first_guardian_name'],None, int(item['first_guardian_tel']))
            user.add()
        msg = {"valid":"done"}
        return json.dumps(msg)
@login_required
@app.route('/volunteer_required', methods=['POST', 'GET'])
def volunteer_required():
    # Same as /oldperson_required, but for the volunteer_info table.
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.volunteer_info")
        db.session.commit()
        data = list(data)
        payload = []
        content = {}
        for result in data:
            content = {'id': result[0], 'name': result[1].encode("unicode_escape").decode("unicode_escape"), 'gender': result[2], 'tel': result[3], 'pic_src': result[4],
                       'checkin_date': result[5].strftime("%Y-%m-%d"),'checkout_date': None}
            payload.append(content)
            content = {}
        return json.dumps(payload)
@login_required
@app.route('/volunteer', methods=['POST', 'GET'])
def volunteer():
    # Replace the whole volunteer_info table with the posted JSON list.
    if request.method == 'POST' or request.method == 'GET':
        db.session.execute("DELETE FROM first_flask.volunteer_info")
        db.session.commit()
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        data = json.loads(data)
        for item in data:
            user = Volunteer_info(int(item['id']), item['name'],
                                item['gender'], int(item['tel']), None,datetime.datetime.now(),
                                None)
            user.add()
        msg = {"valid":"done"}
        return json.dumps(msg)
@login_required
@app.route('/employee_required', methods=['POST', 'GET'])
def employee_required():
    # Same as the other *_required routes, but for employee_info
    # (hire_date / resign_date instead of checkin / checkout).
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.employee_info")
        db.session.commit()
        data = list(data)
        payload = []
        content = {}
        for result in data:
            content = {'id': result[0], 'name': result[1].encode("unicode_escape").decode("unicode_escape"), 'gender': result[2], 'tel': result[3], 'pic_src': result[4],
                       'hire_date': result[5].strftime("%Y-%m-%d"),'resign_date':None}
            payload.append(content)
            content = {}
        return json.dumps(payload)
@login_required
@app.route('/employee', methods=['POST', 'GET'])
def employee():
    # Replace the whole employee_info table with the posted JSON list.
    if request.method == 'POST' or request.method == 'GET':
        db.session.execute("DELETE FROM first_flask.employee_info")
        db.session.commit()
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        data = json.loads(data)
        for item in data:
            # Bug fix: this route used to build Oldperson_info rows
            # (copy-paste from /oldperson); employees must be stored as
            # Employee_info. Assumes Employee_info takes (id, name, gender,
            # tel, pic_src, hire_date, resign_date) like the parallel
            # Volunteer_info constructor — confirm against model.py.
            user = Employee_info(int(item['id']), item['name'],
                                 item['gender'], int(item['tel']), None, datetime.datetime.now(),
                                 None)
            user.add()
        msg = {"valid": "done"}
    return json.dumps(msg)
@login_required
@app.route('/smile', methods=['POST', 'GET'])
def smile():
    # Histogram of smile events (event_type = 0) over the last 7 days,
    # keyed by days-ago '0'..'6', same convention as the /fall endpoint.
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.event_info WHERE event_type = 0")
        db.session.commit()
        smile_time = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        cur_time = datetime.datetime.now()
        for item in data:
            day = (cur_time - item[2]).days
            # Bug fix: the key used to be str(day + 2), which raises KeyError
            # for day >= 5 ('7'/'8' are not in the dict). Index by the day
            # offset itself, consistent with /fall, and guard against
            # negative offsets from future-dated events.
            if 0 <= day <= 6:
                smile_time[str(day)] += 1
        return json.dumps(smile_time)
@login_required
@app.route('/invade', methods=['POST', 'GET'])
def invaded():
    # Histogram of intrusion events (event_type = 2) over the last 7 days.
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.event_info WHERE event_type = 2")
        db.session.commit()
        invaded_time = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        cur_time = datetime.datetime.now()
        for item in data:
            day = (cur_time - item[2]).days
            # Bug fix: str(day + 2) overflowed the '0'..'6' keys (KeyError for
            # day >= 5); index by the day offset itself, as /fall does.
            if 0 <= day <= 6:
                invaded_time[str(day)] += 1
        # (removed a stray debug print of the payload)
        return json.dumps(invaded_time)
# NOTE(review): @app.route must be the outermost decorator or the
# login_required wrapper is bypassed (Flask registers the bare view).
@app.route('/interact', methods=['POST', 'GET'])
@login_required
def interact():
    """Return JSON day-buckets ('0'..'6') counting interaction events (type 1)."""
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute(
            "SELECT * FROM first_flask.event_info WHERE event_type = 1")
        db.session.commit()
        interact_time = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        cur_time = datetime.datetime.now()
        for item in data:
            day = (cur_time - item[2]).days
            # Bug fix: str(day + 2) raised KeyError for day in {5, 6}; only
            # count when the bucket exists (mapping for valid days unchanged).
            key = str(day + 2)
            if day <= 6 and key in interact_time:
                interact_time[key] += 1
        return json.dumps(interact_time)
@login_required
# NOTE(review): decorator order leaves the route unprotected (Flask registers
# the bare view); consider putting @app.route on top, as for the other routes.
@app.route('/fall', methods=['POST', 'GET'])
def fall():
    """Return JSON mapping day buckets '0'..'6' to counts of fall events (type 3)."""
    if request.method == 'POST' or request.method == 'GET':
        rows = db.session.execute(
            "SELECT * FROM first_flask.event_info WHERE event_type = 3")
        db.session.commit()
        fall_time = {str(bucket): 0 for bucket in range(7)}
        now = datetime.datetime.now()
        for row in rows:
            age_days = (now - row[2]).days
            if age_days <= 6:
                fall_time[str(age_days)] += 1
        return json.dumps(fall_time)
# NOTE(review): @app.route must be the outermost decorator or the
# login_required wrapper is bypassed (Flask registers the bare view).
# Function name "forbiddien" is a typo, but it also names the Flask endpoint,
# so it is kept for backward compatibility.
@app.route('/forbidden', methods=['POST', 'GET'])
@login_required
def forbiddien():
    """Return JSON day-buckets ('0'..'6') counting forbidden-zone events (type 4)."""
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute(
            "SELECT * FROM first_flask.event_info WHERE event_type = 4")
        db.session.commit()
        forbiddien_time = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        cur_time = datetime.datetime.now()
        for item in data:
            day = (cur_time - item[2]).days
            # Bug fix: str(day + 2) raised KeyError for day in {5, 6}; only
            # count when the bucket exists (mapping for valid days unchanged).
            key = str(day + 2)
            if day <= 6 and key in forbiddien_time:
                forbiddien_time[key] += 1
        return json.dumps(forbiddien_time)
@app.route('/user/<userid>')  # collect face samples for the given user id
def user(userid):
    # Delegates to the face-collection module; blocks until sampling is done.
    CollectFaces.collect(userid)
    return "ok"
@app.route('/send/<userid>')  # push a user's face archive to the peer
def sendmessage(userid):
    # send.send() prepares the transfer; on success ship the zip archive via
    # the socket client. Plain-text "ok"/"no" status is returned.
    if send.send(userid):
        # receive.receive(userid)
        send.socket_client('../images/'+userid+'.zip')
        return "ok"
    return "no"
class Camera_Connect_Object:
    # Receives a JPEG video stream over a raw TCP socket, records it with the
    # global `vout` writer, feeds frames to face recognition and caches the
    # latest decoded frame for the HTTP MJPEG stream.

    # Last decoded frame (class-level, shared by all instances and get_frame()).
    temp = None

    def __init__(self,D_addr_port=["",8880]):
        # NOTE(review): mutable default argument — the list is shared across
        # calls; acceptable only while a single instance is created.
        self.resolution=[640,480]
        self.addr_port=D_addr_port
        self.src=888+15  # frame handshake agreed with the sender; (888) is the check value
        self.interval=0  # pause between displayed images
        self.img_fps=5   # frames transmitted per second

    def Set_socket(self):
        # Fresh TCP socket with address reuse (allows quick restarts).
        self.client=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.client.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)

    def Socket_Connect(self):
        self.Set_socket()
        self.client.connect(self.addr_port)
        print("IP is %s:%d" % (self.addr_port[0],self.addr_port[1]))

    def shut_down(self):
        self.client.close()

    def RT_Image(self):
        # Receive loop: handshake, then read length-prefixed JPEG frames.
        # Pack and send the frame handshake plus the requested resolution.
        vout.open(constructPath(), fourcc, fps, sz, True)
        self.name=self.addr_port[0]+" Camera"
        # NOTE(review): struct format "lhh" is 8 bytes only where C long is
        # 4 bytes; on LP64 builds it is 12 bytes and would desync with the
        # fixed recv(8) below — confirm both ends use the same layout.
        self.client.send(struct.pack("lhh", self.src, self.resolution[0], self.resolution[1]))
        while(True):
            info=struct.unpack("lhh",self.client.recv(8))
            buf_size=info[0]  # total byte length of the incoming image
            if buf_size:
                try:
                    self.buf=b""  # bytes accumulator
                    temp_buf=self.buf
                    while(buf_size):  # keep reading until the whole image arrived
                        temp_buf=self.client.recv(buf_size)
                        buf_size-=len(temp_buf)
                        self.buf+=temp_buf  # append this chunk
                    data = numpy.fromstring(self.buf, dtype='uint8')  # bytes -> uint8 array
                    self.image = cv2.imdecode(data, 1)  # JPEG decode
                    vout.write(self.image)
                    Camera_Connect_Object.temp = self.image
                    _thread.start_new_thread(faceRegniZation, ( self.image, ))
                    cv2.imshow("Face Recongnition", self.image)
                    # cv2.imshow(self.name, self.image)  # show under the camera's name
                except:
                    # NOTE(review): bare except silently drops recv/decode
                    # errors — deliberate best-effort, but consider logging.
                    pass
                finally:
                    if(cv2.waitKey(10)==27):  # refresh every 10 ms; ESC (27) exits
                        self.client.close()
                        cv2.destroyAllWindows()
                        break

    def get_frame(self):
        # Return the most recent frame re-encoded as JPEG bytes for the MJPEG
        # HTTP stream. (Earlier per-call socket-reading variant kept below
        # for reference.)
        # info = struct.unpack("lhh", self.client.recv(8))
        # buf_size = info[0]
        # temp_buf = self.client.recv(buf_size)
        # buf_size -= len(temp_buf)
        # self.buf += temp_buf
        # data = numpy.fromstring(self.buf, dtype='uint8')
        # self.image = cv2.imdecode(data, 1)
        # ret, jpeg = cv2.imencode('.jpg', self.image)
        ret, jpeg = cv2.imencode('.jpg', Camera_Connect_Object.temp)
        return jpeg.tobytes()

    def Get_Data(self,interval):
        # Run the receive loop on a background thread.
        showThread=threading.Thread(target=self.RT_Image)
        showThread.start()
@app.route('/')  # home page
def index():
    # Jinja2 template version kept for reference (layout lives in index.html):
    # return render_template('index.html')
    return "hello world"
def gen(camera):
    """Yield an endless multipart/x-mixed-replace stream of JPEG frames."""
    part_header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        # Each part: boundary + headers, the JPEG payload, then a separator.
        yield part_header + camera.get_frame() + b'\r\n\r\n'
camera = Camera_Connect_Object()
@app.route('/video_feed')  # this endpoint returns the streaming response
def video_feed():
    # `status` acts as a one-shot latch: connect to the camera only on the
    # first request (not thread-safe; adequate for a single debug client).
    global status
    if (status == 1):
        camera.Socket_Connect()
        camera.Get_Data(camera.interval)
        status = status - 1
    # multipart/x-mixed-replace keeps the connection open and replaces the
    # image at every `--frame` boundary (MJPEG streaming).
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/release')
def release():
    # Re-arm the one-shot connect latch, finalize the recording, and close
    # the camera socket.
    global status
    status = status + 1
    vout.release()
    camera.shut_down()
    return "ok"
if __name__ == '__main__':
    app.debug = True
    # Hard-coded camera host on the local network — TODO make configurable.
    camera.addr_port[0] = "192.168.10.104"
    camera.addr_port = tuple(camera.addr_port)
    # Connect and start the background receive loop before serving HTTP.
    camera.Socket_Connect()
    camera.Get_Data(camera.interval)
    app.run(host='0.0.0.0', debug=True, port=5000)
| [
"tiamo39@vip.aa.com"
] | tiamo39@vip.aa.com |
60b20aa9d3b0bab7d87c6610f60a4be9b8b08c52 | b25fe0d0e401ef3a0ba52751326a427d575ce2bc | /GALAXY_wrapper/run_enrichment.py | 2a793bddc019621885382c8f2c38b1997ab326df | [
"LicenseRef-scancode-biopython"
] | permissive | szymczakpau/biopython | 9fd8385396073d43f4758c60f45aace9aa032e17 | 6f997cd1ea7daf89f0b70854401da4cde35d6a00 | refs/heads/master | 2021-01-26T04:40:40.853772 | 2020-02-26T16:36:28 | 2020-02-26T16:36:28 | 243,308,366 | 0 | 0 | NOASSERTION | 2020-02-26T16:20:02 | 2020-02-26T16:20:00 | null | UTF-8 | Python | false | false | 5,250 | py | #!/usr/bin/python
import sys
import os
import math
import argparse
import Bio.Ontology
import Bio.Ontology.IO as OntoIO
def read_list(filename):
    """Parse a gene-list file and return the selected gene names.

    The file is tab-separated. An optional first line starting with '!' or
    '#' is treated as a header and skipped. Each remaining row is either a
    bare gene name, or "name<TAB>1" (selected) / "name<TAB>0" (ignored).
    Names of the form "prefix_core_suffix" are trimmed to their middle
    part(s).

    Raises:
        ValueError: if the second column contains anything but '0' or '1'.
    """
    out = []
    with open(filename, 'r') as file_in:
        header = file_in.readline()
        # Robustness fix: startswith() on a tuple also copes with an empty
        # file (the original `line[0]` raised IndexError there).
        if not header.startswith(('!', '#')):
            file_in.seek(0)  # no header -> re-read the first line as data
        for line in file_in:
            content = line.strip().split('\t')
            if len(content) <= 1:
                if content[0] != "":
                    out.append(_trim_gene_name(content[0]))
            elif content[1] == '1':
                out.append(_trim_gene_name(content[0]))
            elif content[1] != '0':
                # ValueError (still an Exception subclass for old callers).
                raise ValueError(
                    "Invalid values in list of genes: second column includes"
                    " %s instead of 0 or 1" % content[1])
    return out


def _trim_gene_name(name):
    """Strip 'prefix_' and '_suffix' from underscore-joined names."""
    parts = name.split('_')
    if len(parts) < 2:
        return name
    return "_".join(parts[1:-1])
def run_term(assocs, go_graph, gene_list, corrections):
    """Run term-for-term GO enrichment on gene_list; return the result set."""
    from Bio.Ontology import TermForTermEnrichmentFinder
    ef = TermForTermEnrichmentFinder(assocs, go_graph)
    result = ef.find_enrichment(gene_list, corrections)
    return result
def run_parent_child(assocs, go_graph, gene_list, corrections, method):
    """Run parent-child GO enrichment ('union' or 'intersection' method)."""
    from Bio.Ontology import ParentChildEnrichmentFinder
    ef = ParentChildEnrichmentFinder(assocs, go_graph)
    result = ef.find_enrichment(gene_list, corrections, method)
    return result
def check_file(parser, arg, openparam):
    """Validate a file argument, reporting failures via parser.error().

    For openparam 'r' the file must already exist; for write modes the file
    must be creatable/writable.
    """
    if openparam == 'r':
        if not os.path.exists(arg):
            parser.error("The file %s does not exist!" % arg)
    else:
        try:
            # Fix: probe writability with append mode. The original opened
            # the file with the caller's mode (e.g. 'w+'), truncating any
            # existing output file during mere validation.
            f = open(arg, 'a')
            f.close()
        except (IOError, OSError):
            # Narrowed from a bare except: only I/O failures mean "cannot
            # create"; programming errors should propagate.
            parser.error("Cannot create file %s" % arg)
def main():
    """CLI entry point: parse args, run GO enrichment, write result files."""
    main_parser = argparse.ArgumentParser(description='run Gene Ontology')
    subparsers = main_parser.add_subparsers(dest='which', help='type of enrichment analysis')
    subparsers.required = True
    # Options shared by both analysis subcommands.
    parser = argparse.ArgumentParser(add_help=False)
    required = parser.add_argument_group('required named arguments')
    required.add_argument('-o', '--out', type=str, required=True, nargs = '+',
                        help='output file')
    required.add_argument('-i', '--inp', type=str, required=True,
                        help='input gene list file')
    required.add_argument('-a', '--assoc', type=str, required=True,
                        help='input associations file (.gaf)')
    required.add_argument('-g', '--gograph', type=str, required=True,
                        help='input GO graph file (.obo)')
    parser.add_argument('-f', '--outputformat', choices=["html","txt", "gml", "png", "tabular"], nargs = "+",
                        help='output file format', default = ["html"])
    parser.add_argument('-c', '--corrections', choices=["bonferroni","bh_fdr", "bonferroni,bh_fdr", "bh_fdr,bonferroni"],
                        help='multiple hypothesis testing corrections', nargs='+', default=[])
    parser1 = subparsers.add_parser("term-for-term", parents=[parser])
    parser2 = subparsers.add_parser("parent-child", parents=[parser])
    # Parent-child only: probability computation method.
    parser2.add_argument('-m', '--method', choices=["union", "intersection"],
                        help='method used to compute probabilities', default = "union")
    # validate args
    if len(sys.argv) < 2:
        main_parser.print_usage()
        sys.exit(1)
    args = main_parser.parse_args()
    # One output format per output file.
    if len(args.out) != len(args.outputformat):
        main_parser.error("Number of output files doesn't match numer of formats!")
    check_file(main_parser, args.inp, 'r')
    check_file(main_parser, args.assoc, 'r')
    check_file(main_parser, args.gograph, 'r')
    for f in args.out:
        check_file(main_parser, f, 'w+')
    # Corrections may arrive comma-joined; flatten and de-duplicate them.
    cors = []
    for cor in args.corrections:
        if "," in cor:
            cors += cor.split(",")
        else:
            cors.append(cor)
    args.corrections = list(set(cors))
    # Read inputs
    gene_list = read_list(args.inp)
    go_graph = OntoIO.read(args.gograph, "obo")
    assocs = OntoIO.read(args.assoc, "gaf", assoc_format = "in_mem_sql")
    result=None
    if args.which == "term-for-term":
        result = run_term(assocs, go_graph, gene_list, args.corrections)
    elif args.which == "parent-child":
        result = run_parent_child(assocs, go_graph, gene_list, args.corrections, args.method)
    else:
        parser.error("Method unimplemented!")
    assert result!= None, "An error occured while computing result"
    # Python 2 print statement (this script targets Python 2).
    print result
    # Write each result once per requested (file, format) pair.
    for outfilename, outputformat in zip(args.out, args.outputformat):
        with open(outfilename, 'w+') as outfile:
            if outputformat == 'html':
                # HTML output links each GO term to AmiGO.
                OntoIO.pretty_print(result, go_graph, outfile, outputformat, go_to_url="http://amigo.geneontology.org/amigo/term/")
            else:
                OntoIO.pretty_print(result, go_graph, outfile, outputformat)

if __name__ == "__main__":
    main()
| [
"julia.hermanizycka@gmail.com"
] | julia.hermanizycka@gmail.com |
270c15670e030d0104c5c652e4fe7cb418d3d976 | d659810b24ebc6ae29a4d7fbb3b82294c860633a | /aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/GetOverviewRequest.py | 45fcc3ddbf67d6dc972d5537367c6ddd7257cc6e | [
"Apache-2.0"
] | permissive | leafcoder/aliyun-openapi-python-sdk | 3dd874e620715173b6ccf7c34646d5cb8268da45 | 26b441ab37a5cda804de475fd5284bab699443f1 | refs/heads/master | 2023-07-31T23:22:35.642837 | 2021-09-17T07:49:51 | 2021-09-17T07:49:51 | 407,727,896 | 0 | 0 | NOASSERTION | 2021-09-18T01:56:10 | 2021-09-18T01:56:09 | null | UTF-8 | Python | false | false | 1,556 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class GetOverviewRequest(RpcRequest):
    """RPC request wrapper for the MSE 'GetOverview' API (version 2019-05-31)."""

    def __init__(self):
        RpcRequest.__init__(self, 'mse', '2019-05-31', 'GetOverview','mse')
        self.set_method('POST')
        # Populate endpoint metadata only when the base class exposes the slots.
        for attr, getter in (("endpoint_map", endpoint_data.getEndpointMap),
                             ("endpoint_regional", endpoint_data.getEndpointRegional)):
            if hasattr(self, attr):
                setattr(self, attr, getter())

    def get_Period(self):
        """Return the 'Period' query parameter (None when unset)."""
        return self.get_query_params().get('Period')

    def set_Period(self, Period):
        """Set the 'Period' query parameter."""
        self.add_query_param('Period', Period)

    def get_Region(self):
        """Return the 'Region' query parameter (None when unset)."""
        return self.get_query_params().get('Region')

    def set_Region(self, Region):
        """Set the 'Region' query parameter."""
        self.add_query_param('Region', Region)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
85dfa9657bf5f1207e0b7cd837ff3661aa12b093 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/2. Add Two Numbers/solution1.py | 78df80bb3d9a1d82a8d444589710b5f138669603 | [
"MIT"
] | permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as reversed-digit lists.

        Each input node holds one decimal digit, least significant first.
        Returns the sum as a new linked list in the same reversed order.
        """
        sentinel = tail = ListNode()
        node_a, node_b = l1, l2
        carry = 0
        while node_a or node_b:
            total = carry
            if node_a:
                total += node_a.val
                node_a = node_a.next
            if node_b:
                total += node_b.val
                node_b = node_b.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        if carry:
            # A final carry adds one extra most-significant digit.
            tail.next = ListNode(carry)
        return sentinel.next
| [
"info@crazysquirrel.ru"
] | info@crazysquirrel.ru |
ee0d33f5290bac527b87dc1b699c7442a39292b2 | 1214d7d393f6a8edb64e6136dc0b253e0597f4a6 | /bokeh/bokeh_label.py | e8da41d3f378ca7a65a516126efaf282ff741026 | [] | no_license | stockdata123/upgraded-journey | 047319acba7d0e822c581c40ad8b51f34cfe35f1 | 76b97c7944352170d51003be39efdd7683c4bb99 | refs/heads/master | 2023-06-28T01:06:59.493345 | 2021-08-02T01:23:02 | 2021-08-02T01:23:02 | 391,776,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label
output_file("label.html", title="label.py example")
source = ColumnDataSource(data=dict(height=[66, 71, 72, 68, 58, 62],
weight=[165, 189, 220, 141, 260, 174],
names=['Mark', 'Amir', 'Matt', 'Greg',
'Owen', 'Juan']))
p = figure(title='Dist. of 10th Grade Students at Lee High',
x_range=Range1d(140, 275))
p.scatter(x='weight', y='height', size=8, source=source)
#p.xaxis[0].axis_label = 'Weight (lbs)'
#p.yaxis[0].axis_label = 'Height (in)'
labels = LabelSet(x='weight', y='height', text='names', source=source)
#labels = LabelSet(x='weight', y='height', text='names', level='glyph',
# x_offset=5, y_offset=5, source=source, render_mode='canvas')
'''
citation = Label(x=70, y=70, x_units='screen', y_units='screen',
text='Collected by Luke C. 2016-04-01', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
'''
p.add_layout(labels)
#p.add_layout(citation)
show(p) | [
"ssophiemarceau@gmail.com"
] | ssophiemarceau@gmail.com |
2f4d3ed5aeb0dd33c30eeb4314e8635d304e8da0 | 307d3dea0a67bc779b65839fad410536d8ee636e | /data/formatted_data.py | ebc47895870b77670cc4a9e2c2c02876a690fafd | [] | no_license | PatelMohneesh/Network-Visualization-with-SigmaJS | c179a7cd321457d60be6b62fd9de3d810c6bb47e | 3b62025903837454d14093b7a4be76179b522ccf | refs/heads/master | 2021-09-24T10:50:48.065943 | 2018-10-08T16:08:32 | 2018-10-08T16:08:32 | 100,344,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 14:46:24 2017
@author: Mohneesh
"""
import json
import os
#Considering the same format of the data as provided by "Benjie", network_temp.json
filename = 'network_temp.json'
with open(filename, 'r') as f:
data = json.load(f)
# data = original data we revieve from the Rest API's
#print (data)
edge_data = data['edges']
node_data = data['nodes']
#Modifying Nodes Data
def ModNodesData(data):
    """Adapt graph-DB node dicts to the UI schema, in place.

    For every node dict: the DB 'label' becomes the UI 'type', and the UI
    'label' is replaced with the node's 'id'.

    Returns the (mutated) list itself. The original returned only the
    last-processed dict via a leaked loop variable — and raised NameError on
    an empty list — although every dict was mutated in place either way; the
    sole caller ignores the return value.
    """
    for node in data:
        node['type'] = node['label']  # graph-db 'label' => UI 'type'
        node['label'] = node['id']
    return data
# No need to modify edge data: it is already in the required format from the
# GraphDB REST APIs.
# Extracting nodes out of edges from the edge file (work in progress below).
Mod_Node = ModNodesData(node_data)  # mutates node_data in place
print(edge_data)
node_list = []  # NOTE(review): never populated — extraction is still stubbed out
for mydict in edge_data:
    print (mydict)
    #nodes = {}
    #for i in mydict:
    #nodes['id'] = i['source']
    #nodes['label'] = i['source']
    #print (nodes)
"mp3542@columbia.edu"
] | mp3542@columbia.edu |
281890c279ba18dc825d3b09570b691ceeaf4bc2 | 73b793758d0db27e4d67e6effbda40f5b550a9f4 | /clientes/comandos.py | a8025e49c2a97a46d2dbc01c81ced6a1bec07659 | [] | no_license | RubenMaier/python_crud_ventas | a696fb52ec4a2ac37e5983c211fc8ff9bc4fee00 | c95af4646bb8173c41e79118a6f53919308a0898 | refs/heads/master | 2020-08-12T06:38:02.986974 | 2019-12-29T13:39:30 | 2019-12-29T13:39:30 | 214,707,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,536 | py | import click
# modificamos los comandos dentro del grupo clientes
# definimos nuestros comandos básicos
from clientes.servicios import ServiciosClientes
from clientes.modelo import Cliente
@click.group()  # turn this function into a click command group
def clientes():
    """Administrador de ciclo de vida de clientes"""
    pass
@clientes.command()
@click.option(
    '-n',  # short flag
    '--nombre',  # full option name
    type=str,  # expected input type
    prompt=True,  # if the value is missing, prompt for it on the console
    help='El nombre del cliente')  # help message
@click.option(
    '-e',
    '--empresa',
    type=str,
    prompt=True,
    help='La empresa del cliente')
@click.option(
    '-em',
    '--email',
    type=str,
    prompt=True,
    help='El email del cliente')
@click.option(
    '-r',
    '--rol',
    type=str,
    prompt=True,
    help='El rol del cliente')
@click.pass_context
def crear(contexto, nombre, empresa, email, rol):
    """ Crea un nuevo cliente """
    # Build the domain object and persist it through the client service
    # bound to the table stored in the click context.
    cliente = Cliente(nombre, empresa, email, rol)
    servicios_cliente = ServiciosClientes(contexto.obj['tabla_clientes'])
    servicios_cliente.crear_cliente(cliente)
@clientes.command()
@click.pass_context
def listar(contexto):
    """Lista todo los clientes"""
    servicios_cliente = ServiciosClientes(contexto.obj['tabla_clientes'])
    lista_clientes = servicios_cliente.listar_clientes()
    # Use click.echo instead of print: click normalizes console output across
    # operating systems so everything renders with the same format.
    click.echo(' ID | NOMBRE | EMPRESA | EMAIL | ROL')
    click.echo('*' * 100)
    for cliente in lista_clientes:
        click.echo('{uid} | {nombre} | {empresa} | {email} | {rol}'.format(
            uid=cliente['uid'],
            nombre=cliente['nombre'],
            empresa=cliente['empresa'],
            email=cliente['email'],
            rol=cliente['rol']))
@clientes.command()
@click.argument(
    'cliente_id',
    type=str)
@click.pass_context
def actualizar(contexto, cliente_id):
    """Actualiza el cliente"""
    servicio_cliente = ServiciosClientes(contexto.obj['tabla_clientes'])
    cliente = _buscar_cliente_por_id(servicio_cliente, cliente_id)
    if cliente != None:  # a matching record dict was found
        # Interactive update flow: rehydrate the stored dict into a Cliente
        # object and prompt for each field (Enter keeps the current value).
        cliente_actualizado = _flujo_de_cliente_actualizado(
            _diccionario_a_objeto(cliente))
        servicio_cliente.actualizar_cliente(cliente_actualizado)
        click.echo('El cliente fue actualizado')
    else:
        click.echo('El cliente no fue encontrado')
@clientes.command()
@click.argument(
    'cliente_id',
    type=str)
@click.pass_context
def eliminar(contexto, cliente_id):
    """Elimina el cliente"""
    # Delete the client whose uid matches CLIENTE_ID, if it exists.
    servicio_cliente = ServiciosClientes(contexto.obj['tabla_clientes'])
    cliente = _buscar_cliente_por_id(servicio_cliente, cliente_id)
    # Fix: dropped the leftover debug `print(cliente)` (raw dict on stdout);
    # user-facing output goes through click.echo like the other commands.
    if cliente is not None:  # idiom fix: identity comparison against None
        servicio_cliente.borrar_cliente(_diccionario_a_objeto(cliente))
        click.echo('El cliente fue eliminado')
    else:
        click.echo('El cliente no fue encontrado')
def _buscar_cliente_por_id(servicio_cliente, cliente_id):
lista_clientes = servicio_cliente.listar_clientes()
# queremos al cliente de todos los clientes que se encuentren en la lista de clientes...
# que cumpla con la condición de que su id es la que nos pasaron por parametro
cliente = [
cliente for cliente in lista_clientes if cliente['uid'] == cliente_id]
if len(cliente) > 0:
return cliente[0]
return None
def _diccionario_a_objeto(cliente_dic):
    # Rehydrate a stored dict into a Cliente domain object (keys must match
    # the Cliente constructor parameters).
    return Cliente(**cliente_dic)
def _flujo_de_cliente_actualizado(cliente):
    # Interactive prompt flow: each field defaults to its current value, so
    # pressing Enter keeps it unchanged. Mutates and returns the same object.
    click.echo('Deja vacío si no quiere modificar el valor')
    cliente.nombre = click.prompt(
        'Nuevo nombre', type=str, default=cliente.nombre)
    cliente.empresa = click.prompt(
        'Nuevo empresa', type=str, default=cliente.empresa)
    cliente.email = click.prompt(
        'Nuevo email', type=str, default=cliente.email)
    cliente.rol = click.prompt(
        'Nuevo rol', type=str, default=cliente.rol)
    return cliente
comandos_declarados = clientes
| [
"ruben@MacBook-Pro-2017-15-Inch.local"
] | ruben@MacBook-Pro-2017-15-Inch.local |
74b4ed23694523deb7002963f183afb60094dad0 | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /decoding/GAD/fairseq/modules/scalar_bias.py | c96247c75914fabb8a2b7ff731bb82b588f72690 | [
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 888 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class ScalarBias(torch.autograd.Function):
    """
    Adds a vector of scalars, used in self-attention mechanism to allow
    the model to optionally attend to this vector instead of the past
    """

    @staticmethod
    def forward(ctx, input, dim, bias_init):
        # Grow `dim` by one slot; the new leading slot holds the bias value.
        out_shape = list(input.size())
        out_shape[dim] += 1
        output = input.new_full(out_shape, bias_init)
        # Copy the original tensor into positions 1..N along `dim`.
        output.narrow(dim, 1, out_shape[dim] - 1).copy_(input)
        ctx.dim = dim
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # Drop the gradient of the injected bias slot; `dim` and `bias_init`
        # receive no gradients.
        input_grad = grad_output.narrow(ctx.dim, 1, grad_output.size(ctx.dim) - 1)
        return input_grad, None, None


def scalar_bias(input, dim, bias_init=0):
    """Prepend a `bias_init`-filled slice to `input` along dimension `dim`."""
    return ScalarBias.apply(input, dim, bias_init)
| [
"tage@microsoft.com"
] | tage@microsoft.com |
44f6e5b18d17003a26d61b4f72c2690406caa75e | c9307021a54fb97eb7e1a76b4aae23d9f5206c71 | /app/external/playhouse/postgres_ext.py | ce50ca9f9b6ab78f67c2aed472b3a0d3b488b098 | [] | no_license | fitzterra/sshKeyServer | 7b331ca4fc50f66371b36d86d6d2ba64ec991784 | 6146fe9f5b945288cd186a09d277bfe13171507a | refs/heads/master | 2021-01-19T14:06:27.539059 | 2017-06-08T12:36:29 | 2017-06-08T12:36:29 | 19,774,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,455 | py | """
Collection of postgres-specific extensions, currently including:
* Support for hstore, a key/value type storage
* Support for UUID field
"""
import uuid
from peewee import *
from peewee import Expression
from peewee import logger
from peewee import Node
from peewee import Param
from peewee import QueryCompiler
from peewee import SelectQuery
from psycopg2 import extensions
from psycopg2.extensions import adapt
from psycopg2.extensions import AsIs
from psycopg2.extensions import register_adapter
from psycopg2.extras import register_hstore
try:
from psycopg2.extras import Json
except:
Json = None
class _LookupNode(Node):
    # Base query-tree node pairing a column node with a list of lookup parts
    # (JSON keys or slice indices).
    def __init__(self, node, parts):
        self.node = node
        self.parts = parts
        super(_LookupNode, self).__init__()

    def clone_base(self):
        # Copy `parts` so a cloned query cannot mutate the original's list.
        return type(self)(self.node, list(self.parts))
class JsonLookup(_LookupNode):
    """JSON path lookup; indexing appends another key to the path."""

    def __getitem__(self, value):
        deeper_path = self.parts + [value]
        return JsonLookup(self.node, deeper_path)
class ObjectSlice(_LookupNode):
    """Lookup node representing an index or slice into an array column."""

    @classmethod
    def create(cls, node, value):
        """Build a slice node from a slice object, an int, or an 'a:b' string."""
        if isinstance(value, slice):
            parts = [value.start or 0, value.stop or 0]
        elif isinstance(value, int):
            parts = [value]
        else:
            # Bug fix: wrap map() in list(). Under Python 3 map() returns a
            # one-shot iterator, which breaks clone_base() and any other code
            # that iterates `parts` more than once.
            parts = list(map(int, value.split(':')))
        return cls(node, parts)

    def __getitem__(self, value):
        # Chained indexing yields nested slices.
        return ObjectSlice.create(self, value)
class _Array(Node):
    # Wraps a Python list together with its ArrayField so the psycopg2
    # adapter (adapt_array below) can emit a properly typed array literal.
    def __init__(self, field, items):
        self.field = field
        self.items = items
        super(_Array, self).__init__()
def adapt_array(arr):
    # psycopg2 adapter: render the wrapped list as `ARRAY[...]::type[]...`,
    # using the field's column type and dimension count for the cast.
    conn = arr.field.model_class._meta.database.get_conn()
    items = adapt(arr.items)
    items.prepare(conn)  # some adapters need the connection (e.g. for quoting)
    return AsIs('%s::%s%s' % (
        items,
        arr.field.get_column_type(),
        '[]'* arr.field.dimensions))

# Register the adapter so psycopg2 can serialize _Array parameters.
register_adapter(_Array, adapt_array)
class IndexedField(Field):
    # Base class for fields that are indexed by default and can choose the
    # postgres index method (GiST, GIN, ...).
    def __init__(self, index_type='GiST', *args, **kwargs):
        kwargs.setdefault('index', True)  # By default, use an index.
        super(IndexedField, self).__init__(*args, **kwargs)
        self.index_type = index_type
class ArrayField(IndexedField):
    # Postgres array column; wraps an inner field for the element type and
    # defaults to a GIN index (suited to containment queries).
    def __init__(self, field_class=IntegerField, dimensions=1,
                 index_type='GIN', *args, **kwargs):
        self.__field = field_class(*args, **kwargs)
        self.dimensions = dimensions
        # Mirror the inner field's storage type for column generation.
        self.db_field = self.__field.get_db_field()
        super(ArrayField, self).__init__(
            index_type=index_type, *args, **kwargs)

    def __ddl_column__(self, column_type):
        # Append one '[]' per dimension to the inner field's column DDL.
        sql = self.__field.__ddl_column__(column_type)
        sql.value += '[]' * self.dimensions
        return sql

    def __getitem__(self, value):
        # Support field[idx] and field[a:b] slicing inside queries.
        return ObjectSlice.create(self, value)

    def contains(self, *items):
        # Array `@>` operator: column contains all of the given items.
        return Expression(self, OP_ACONTAINS, _Array(self, list(items)))

    def contains_any(self, *items):
        # Array `&&` operator: column overlaps any of the given items.
        return Expression(self, OP_ACONTAINS_ANY, _Array(self, list(items)))
class DateTimeTZField(DateTimeField):
    # DateTimeField stored as `timestamp with time zone` (see register_fields).
    db_field = 'datetime_tz'
class HStoreField(IndexedField):
    """Field mapping to the postgres `hstore` key/value type.

    Provides query helpers wrapping hstore functions and operators
    (akeys, avals, slice, exist, defined, delete, @>, ?&, ?, ?|, ||).
    """
    db_field = 'hash'

    def __init__(self, *args, **kwargs):
        super(HStoreField, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        # hstore `->` operator: value for a single key.
        return Expression(self, OP_HKEY, Param(key))

    def keys(self):
        return fn.akeys(self)

    def values(self):
        return fn.avals(self)

    def items(self):
        return fn.hstore_to_matrix(self)

    def slice(self, *args):
        # Sub-hstore containing only the given keys.
        return fn.slice(self, Param(list(args)))

    def exists(self, key):
        return fn.exist(self, key)

    def defined(self, key):
        return fn.defined(self, key)

    def update(self, **data):
        # hstore `||` concatenation: merge/overwrite the given pairs.
        return Expression(self, OP_HUPDATE, data)

    def delete(self, *keys):
        return fn.delete(self, Param(list(keys)))

    def contains(self, value):
        if isinstance(value, dict):
            # `@>`: contains all given key/value pairs.
            return Expression(self, OP_HCONTAINS_DICT, Param(value))
        elif isinstance(value, (list, tuple)):
            # `?&`: contains all given keys.
            return Expression(self, OP_HCONTAINS_KEYS, Param(value))
        # `?`: contains the single key.
        return Expression(self, OP_HCONTAINS_KEY, value)

    def contains_any(self, *keys):
        # Bug fix: the original referenced an undefined name `value` here,
        # raising NameError on every call; pass the collected keys instead,
        # mirroring delete()/slice().
        return Expression(self, OP_HCONTAINS_ANY_KEY, Param(list(keys)))
class JSONField(Field):
    # Postgres `json` column; values are serialized via psycopg2's Json.
    db_field = 'json'

    def __init__(self, *args, **kwargs):
        if Json is None:
            # psycopg2 < 2.5 lacks Json support (see guarded import above).
            raise Exception('Your version of psycopg2 does not support JSON.')
        super(JSONField, self).__init__(*args, **kwargs)

    def db_value(self, value):
        return Json(value)

    def __getitem__(self, value):
        # field['key'] starts a JSON path lookup (see JsonLookup).
        return JsonLookup(self, [value])
class UUIDField(Field):
    """Field storing values in a postgres `uuid` column."""
    db_field = 'uuid'

    def db_value(self, value):
        # Robustness fix: pass NULLs through rather than storing the literal
        # string 'None' for nullable columns.
        if value is None:
            return value
        return str(value)

    def python_value(self, value):
        # Robustness fix: NULLs come back as None; uuid.UUID(None) raises.
        if value is None:
            return value
        return uuid.UUID(value)
# Custom operator codes for the fields above; each is mapped to its SQL
# operator in PostgresqlExtDatabase.register_ops() below.
OP_HKEY = 'key'               # hstore ->
OP_HUPDATE = 'H@>'            # hstore || (concatenate/update)
OP_HCONTAINS_DICT = 'H?&'     # hstore @> (contains pairs)
OP_HCONTAINS_KEYS = 'H?'      # hstore ?& (contains all keys)
OP_HCONTAINS_KEY = 'H?|'      # hstore ? (contains key)
OP_HCONTAINS_ANY_KEY = 'H||'  # hstore ?| (contains any key)
OP_ACONTAINS = 'A@>'          # array @>
OP_ACONTAINS_ANY = 'A||'      # array && (overlap)
class PostgresqlExtCompiler(QueryCompiler):
    # Extends the stock compiler with index-method clauses (USING GIN/GiST)
    # and SQL generation for array slices and JSON path lookups.
    def _create_index(self, model_class, fields, unique=False):
        clause = super(PostgresqlExtCompiler, self)._create_index(
            model_class, fields, unique)
        # Allow fields to specify a type of index. HStore and Array fields
        # may want to use GiST indexes, for example.
        index_type = None
        for field in fields:
            if isinstance(field, IndexedField):
                index_type = field.index_type
        if index_type:
            clause.nodes.insert(-1, SQL('USING %s' % index_type))
        return clause

    def _parse(self, node, alias_map, conv):
        sql, params, unknown = super(PostgresqlExtCompiler, self)._parse(
            node, alias_map, conv)
        if unknown:
            # Fallbacks for node types the base compiler does not know.
            if isinstance(node, ObjectSlice):
                unknown = False
                sql, params = self.parse_node(node.node, alias_map, conv)
                # Postgresql uses 1-based indexes.
                parts = [str(part + 1) for part in node.parts]
                sql = '%s[%s]' % (sql, ':'.join(parts))
            if isinstance(node, JsonLookup):
                unknown = False
                sql, params = self.parse_node(node.node, alias_map, conv)
                lookups = [sql]
                for part in node.parts:
                    part_sql, part_params = self.parse_node(
                        part, alias_map, conv)
                    lookups.append(part_sql)
                    params.extend(part_params)
                # The last lookup should be converted to text (`->>`); the
                # intermediate steps stay json (`->`).
                head, tail = lookups[:-1], lookups[-1]
                sql = '->>'.join(('->'.join(head), tail))
        return sql, params, unknown
class PostgresqlExtDatabase(PostgresqlDatabase):
    # Postgres database with hstore registration, the extended compiler and
    # optional server-side cursors for large SELECTs.
    compiler_class = PostgresqlExtCompiler

    def __init__(self, *args, **kwargs):
        self.server_side_cursors = kwargs.pop('server_side_cursors', False)
        super(PostgresqlExtDatabase, self).__init__(*args, **kwargs)

    def get_cursor(self, name=None):
        # A *named* cursor is a server-side cursor in psycopg2.
        return self.get_conn().cursor(name=name)

    def execute_sql(self, sql, params=None, require_commit=True,
                    named_cursor=False):
        logger.debug((sql, params))
        # Use a server-side cursor when asked explicitly, or for any SELECT
        # when server_side_cursors was enabled at construction time.
        use_named_cursor = (named_cursor or (
            self.server_side_cursors and
            sql.lower().startswith('select')))
        with self.exception_wrapper():
            if use_named_cursor:
                cursor = self.get_cursor(name=str(uuid.uuid1()))
                # Presumably committing would invalidate the named cursor
                # mid-read — TODO confirm.
                require_commit = False
            else:
                cursor = self.get_cursor()
            try:
                res = cursor.execute(sql, params or ())
            except Exception as exc:
                logger.exception('%s %s', sql, params)
                # The handler may swallow the error by returning falsy.
                if self.sql_error_handler(exc, sql, params, require_commit):
                    raise
            else:
                if require_commit and self.get_autocommit():
                    self.commit()
        return cursor

    def _connect(self, database, **kwargs):
        conn = super(PostgresqlExtDatabase, self)._connect(database, **kwargs)
        # Teach psycopg2 to map hstore <-> dict on this connection.
        register_hstore(conn, globally=True)
        return conn
class ServerSideSelectQuery(SelectQuery):
    # SelectQuery variant executed with a named (server-side) cursor so large
    # result sets stream from the server instead of loading into memory.
    @classmethod
    def clone_from_query(cls, query):
        clone = ServerSideSelectQuery(query.model_class)
        return query._clone_attributes(clone)

    def _execute(self):
        sql, params = self.sql()
        return self.database.execute_sql(
            sql, params, require_commit=False, named_cursor=True)
# Map the extension field db_field codes to their postgres column types.
PostgresqlExtDatabase.register_fields({
    'datetime_tz': 'timestamp with time zone',
    'hash': 'hstore',
    'json': 'json',
    'uuid': 'uuid',
})
# Map the custom operator codes (defined above) to their SQL operators.
PostgresqlExtDatabase.register_ops({
    OP_HCONTAINS_DICT: '@>',
    OP_HCONTAINS_KEYS: '?&',
    OP_HCONTAINS_KEY: '?',
    OP_HCONTAINS_ANY_KEY: '?|',
    OP_HKEY: '->',
    OP_HUPDATE: '||',
    OP_ACONTAINS: '@>',
    OP_ACONTAINS_ANY: '&&',
})
def ServerSide(select_query):
    """Generator: iterate `select_query` through a server-side cursor.

    The whole iteration runs inside one transaction, keeping the named
    cursor alive until the generator is exhausted.
    """
    # Flag query for execution using server-side cursors.
    clone = ServerSideSelectQuery.clone_from_query(select_query)
    with clone.database.transaction():
        # Execute the query.
        query_result = clone.execute()
        # Patch QueryResultWrapper onto original query.
        select_query._qr = query_result
        # Expose generator for iterating over query.
        for obj in query_result.iterator():
            yield obj
| [
"github_subs@icave.net"
] | github_subs@icave.net |
2fd7e86a0345548fe89a360c898f938f9227bdb2 | 5b38dd549d29322ae07ad0cc68a28761989ef93a | /cc_lib/_util/_logger.py | fc66aac68804a599831a0405e5eaf400e78fd1cb | [
"Apache-2.0"
] | permissive | SENERGY-Platform/client-connector-lib | d54ea800807892600cf08d3b2a4f00e8340ab69c | e365fc4bed949e84cde81fd4b5268bb8d4f53c12 | refs/heads/master | 2022-09-03T00:03:29.656511 | 2022-08-24T11:18:22 | 2022-08-24T11:18:22 | 159,316,125 | 1 | 2 | Apache-2.0 | 2020-05-27T07:47:14 | 2018-11-27T10:15:38 | Python | UTF-8 | Python | false | false | 784 | py | """
Copyright 2019 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ('get_logger',)
import logging
# Library-wide root logger; propagation is disabled so connector log
# records are handled only by handlers attached to this logger (or its
# children), not by the application's root logger.
logger = logging.getLogger('connector')
logger.propagate = False
def get_logger(name: str) -> logging.Logger:
    """Return a child of the shared 'connector' logger named `name`."""
    return logger.getChild(name)
| [
"42994541+y-du@users.noreply.github.com"
] | 42994541+y-du@users.noreply.github.com |
8ca486bf9468da73f85b937419fe7251c45b06d1 | 56a446ab5ac2994607d92cf7e5453fc39d611e8d | /book/_build/jupyter_execute/docs/Semantic Textual Similarity.py | 52aa5708ea51b5dc4ee1fab6e21cba4cb6e9e835 | [] | no_license | Python-Repository-Hub/klue-baseline | 66dcd8d305d6be184cda31460d29f7043eb808f2 | e4c419feadb30f00a5176cb85f2620964a021531 | refs/heads/main | 2023-07-30T15:05:08.241086 | 2021-09-29T03:12:13 | 2021-09-29T03:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | #!/usr/bin/env python
# coding: utf-8
# # Semantic Textual Similarity
# ## 1. 예시
# - (본문 예시)
# - .ipynb 형식입니다.
# In[1]:
print("test")
# In[ ]:
| [
"jih020202@gmail.com"
] | jih020202@gmail.com |
e577ea4fe5dde95179497ef4002865bb86ea6907 | 3c9dbfff9e5ac88ca42f5a21f9dff8bcf40ed4b2 | /chapter03_stacks_queues/06_animal_shelter.py | 735b56f77401bf6ee88b3b8d875ff460a0762cb5 | [] | no_license | optionalg/cracking_the_coding_interview | 50834f3c9aea7d24cd372bf3022c4bf01ec90957 | 2e962b3e272074b428ace8e85e6d4a735cfe6afa | refs/heads/master | 2021-06-20T21:15:48.063618 | 2017-07-18T14:42:03 | 2017-07-18T14:42:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | from ctci.chapter02_lists.LinkedList import LinkedList
class Node:
    """A singly linked node describing one shelter animal."""

    def __init__(self, value, name):
        # `value` is the species tag ("cat" or "dog"); `name` identifies
        # the animal.  `next` is presumably managed by the owning list.
        self.next = None
        self.name = name
        self.data = value
class AnimalQueue:
    """Animal-shelter queue (CtCI 3.6): adopters may take the oldest
    animal overall, or the oldest cat/dog specifically.

    Three linked lists are kept: one per species plus a combined
    arrival-order list.  NOTE(review): the *same* Node object is added
    to two LinkedLists (`any` plus `cats`/`dogs`); if LinkedList links
    via the node's own `next` pointer this would corrupt one list --
    verify that LinkedList wraps values rather than reusing `next`.
    """
    def __init__(self):
        # Separate FIFO lists per species, plus one overall-order list.
        self.dogs = LinkedList()
        self.cats = LinkedList()
        self.any = LinkedList()
    def enqueue(self, value, name):
        """Admit an animal; `value` is the species ("cat"/"dog")."""
        new_node = Node(value, name)
        self.any.add_in_the_end(new_node)
        if value == "cat":
            self.cats.add_in_the_end(new_node)
        elif value == "dog":
            self.dogs.add_in_the_end(new_node)
    def dequeue_any(self):
        """Remove and return the name of the oldest animal overall."""
        popped = self.any.pop()
        # Keep the species list in sync by dropping its oldest entry,
        # which is the same animal just removed from `any`.
        if popped.data == "cat":
            self.cats.pop()
        elif popped.data == "dog":
            self.dogs.pop()
        return popped.name
    def dequeue_dog(self):
        """Remove and return the name of the oldest dog."""
        popped = self.dogs.pop()
        # assumes LinkedList.remove matches by name and names are unique
        # -- TODO confirm against the LinkedList implementation.
        self.any.remove(popped.name)
        return popped.name
    def dequeue_cat(self):
        """Remove and return the name of the oldest cat."""
        popped = self.cats.pop()
        self.any.remove(popped.name)
        return popped.name
# Smoke test: interleave cats and dogs, then dequeue once overall and
# once per species.  (Which animal each call prints depends on whether
# LinkedList.pop removes from the head or the tail -- not visible here.)
q = AnimalQueue()
q.enqueue('cat', 'jasper')
q.enqueue('dog', 'marcel')
q.enqueue('cat', 'alice')
q.enqueue('dog', 'pongo')
q.enqueue('cat', 'nala')
q.enqueue('dog', 'nelly')
q.enqueue('dog', 'rudy')
print(q.dequeue_any())
print(q.dequeue_cat())
print(q.dequeue_dog())
| [
"lito.kriara@disneyresearch.com"
] | lito.kriara@disneyresearch.com |
d6758b1214e18affacc304004dfb23d732194dc0 | 07cf86733b110a13224ef91e94ea5862a8f5d0d5 | /taum_and_bday/taum_and_bday.py | 2dca00a8c687bce30c4615338d881eba6f673268 | [] | no_license | karsevar/Code_Challenge_Practice | 2d96964ed2601b3beb324d08dd3692c3d566b223 | 88d4587041a76cfd539c0698771420974ffaf60b | refs/heads/master | 2023-01-23T17:20:33.967020 | 2020-12-14T18:29:49 | 2020-12-14T18:29:49 | 261,813,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | def taumBday(b, w, bc, wc, z):
# Write your code here
# create a variable named black_cost
# create a variable named white_cost
# check if bc + z is less than wc:
# if so overwrite white_cost with b * (bc + z)
# overwrite black_cost with b * (bc)
# elif wc + z is less than bc:
# if so overwrite black_cost with w * (wc + z)
# overwrite white_cost with w * (wc)
# else
# overwrite black_cost with b * (bc + z)
# overwrite white_cost with w * (wc + z)
black_cost = 0
white_cost = 0
if (bc + z) < wc:
white_cost = w * (bc + z)
black_cost = b * bc
elif (wc + z) < bc:
white_cost = w * wc
black_cost = b * (wc + z)
else:
white_cost = w * wc
black_cost = b * bc
return white_cost + black_cost | [
"masonkarsevar@gmail.com"
] | masonkarsevar@gmail.com |
f5de930cd145d2474ed04f2b3d2d810ceba3e181 | f38db79439185ab6062294e1d82f6e909d2be81e | /test/test_update_timesheet_model.py | cbf81950c118167fd9c24c13db0647e8123c0e00 | [] | no_license | ContainerSolutions/avazacli | 3a37f8500ad1f1843acbdbb413d4949e00ec6f91 | 49618314f15d8cb2bda36e6019670fdfbed1524f | refs/heads/master | 2020-06-18T18:44:58.594385 | 2019-07-11T14:23:10 | 2019-07-11T14:23:10 | 196,406,206 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | # coding: utf-8
"""
Avaza API Documentation
Welcome to the autogenerated documentation & test tool for Avaza's API. <br/><br/><strong>API Security & Authentication</strong><br/>Authentication options include OAuth2 Implicit and Authorization Code flows, and Personal Access Token. All connections should be encrypted over SSL/TLS <br/><br/>You can set up and manage your api authentication credentials from within your Avaza account. (requires Administrator permissions on your Avaza account).<br/><br/> OAuth2 Authorization endpoint: https://any.avaza.com/oauth2/authorize <br/>OAuth2 Token endpoint: https://any.avaza.com/oauth2/token<br/>Base URL for subsequent API Requests: https://api.avaza.com/ <br/><br/>Blogpost about authenticating with Avaza's API: https://www.avaza.com/avaza-api-oauth2-authentication/ <br/>Blogpost on using Avaza's webhooks: https://www.avaza.com/avaza-api-webhook-notifications/<br/>The OAuth flow currently issues Access Tokens that last 1 day, and Refresh tokens that last 180 days<br/>The Api respects the security Roles assigned to the authenticating Avaza user and filters the data return appropriately. <br/><br><strong>Support</strong><br/>For API Support, and to request access please contact Avaza Support Team via our support chat. <br/><br/><strong>User Contributed Libraries:</strong><br/>Graciously contributed by 3rd party users like you. <br/>Note these are not tested or endorsesd by Avaza. We encourage you to review before use, and use at own risk.<br/> <ul><li> - <a target='blank' href='https://packagist.org/packages/debiprasad/oauth2-avaza'>PHP OAuth Client Package for Azava API (by Debiprasad Sahoo)</a></li></ul> # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import avazacli
from avazacli.models.update_timesheet_model import UpdateTimesheetModel # noqa: E501
from avazacli.rest import ApiException
class TestUpdateTimesheetModel(unittest.TestCase):
    """UpdateTimesheetModel unit test stubs (swagger-codegen scaffold)."""
    def setUp(self):
        # No fixtures yet; placeholder kept by the code generator.
        pass
    def tearDown(self):
        pass
    def testUpdateTimesheetModel(self):
        """Test UpdateTimesheetModel"""
        # FIXME: construct object with mandatory attributes with example values
        # model = avazacli.models.update_timesheet_model.UpdateTimesheetModel()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"riccardo.cefala@container-solutions.com"
] | riccardo.cefala@container-solutions.com |
1b0b4bc4e5b5b0bc77020ca601dd1f1dabbccc3a | 23e74e0d5bd42de514544917f7b33206e5acf84a | /alumnos/58003-Martin-Ruggeri/copia.py | eb8dc2774c2bf67e0fdfa336f443d85570aba882 | [] | no_license | Martin-Ruggeri-Bio/lab | 2e19015dae657bb9c9e86c55d8355a04db8f5804 | 9a1c1d8f99c90c28c3be62670a368838aa06988f | refs/heads/main | 2023-08-01T07:26:42.015115 | 2021-09-20T20:21:31 | 2021-09-20T20:21:31 | 350,102,381 | 0 | 0 | null | 2021-03-21T19:48:58 | 2021-03-21T19:48:58 | null | UTF-8 | Python | false | false | 279 | py | #!/bin/python3
def copiaArchivos():
    """Copy the contents of a source file into a destination file.

    Both file paths are read interactively from stdin (Spanish prompts:
    source file first, then destination file).
    """
    src_path = input("ingrese archivo de origen:\n")
    dst_path = input("ingrese archivo de destino:\n")
    # Open both files in one `with` so each handle is closed -- and the
    # destination flushed -- even on error.  The original code never
    # closed the destination handle, risking a truncated/unflushed copy.
    with open(src_path, "r") as src, open(dst_path, "w") as dst:
        dst.write(src.read())
if __name__ == '__main__':
    copiaArchivos()
| [
"martinruggeri18@gmail.com"
] | martinruggeri18@gmail.com |
6a9b79f848da7a24ffbeb03eedae35d4063b8580 | d78a55788407c58ab7e5d7edc53a2887f46b20c4 | /Basic/SubList.py | ee95eab4cc76e85a710f55a883bcce91dc98e972 | [] | no_license | aakib7/python | db085e7eee10d3427906da7f32349a9f16a82a3e | 40ffa242ebdf78f042e559cecd20722949607b0b | refs/heads/main | 2023-07-01T04:24:03.472448 | 2021-08-04T20:16:39 | 2021-08-04T20:16:39 | 392,812,945 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | matrix = [[1,2,3],[4,5,6],[7,8,9]]
print(matrix[0]) # print [1,2,3]
print(matrix[0][1]) # print 2
# for i in matrix: # print [1,2,3]\n[3,4,5]\n[6,7,8]
# print(i)
count = 0
for sublist in matrix: # frist time loop exicute [1,2,3] store in sublist and so on
count +=1
print(f"{count} Sublist:")
for i in sublist: # after 1st for loop 2nd loop exicute till (elements in sublist)
print(i)
| [
"ajmehdi5@gmail.com"
] | ajmehdi5@gmail.com |
690be3e020160ec55919e9b80e9a4669bde616bf | 08b623f814a04467d5602b7fded7a5767d696763 | /music_controller/music_controller/urls.py | 3c8bfd48bdfc72bf481629cde03e5383f0fb3d55 | [
"MIT"
] | permissive | ajsnow56/react-python-music-app | 30f122e2243a01b4ef41b7615fbf2c6ecf8c255e | f44fc0040d2c2f8bf3580ba7e71c9196c2603386 | refs/heads/main | 2023-02-10T17:23:36.353584 | 2021-01-10T06:59:40 | 2021-01-10T06:59:40 | 328,294,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | """music_controller URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in Django admin site
    path('api/', include("api.urls")),  # delegate /api/ to the api app's URLconf
    path('', include("frontend.urls"))  # everything else handled by the frontend app
]
| [
"ajsnow56@gmail.com"
] | ajsnow56@gmail.com |
e0630106cd1f4d50aff9fea679ff50ae76a6ff20 | 775c92c038d418a97f483a5c6f3692562aa5e616 | /test_code.py | fed80156fec837c60c2554d0dd3844a941c063d6 | [] | no_license | seminvest/investment | 2878efd9ca8a09e5c708fec99eaea80b6e22cb0e | af5a9f4c4163bbcdc293d3ebad0d551464d96f12 | refs/heads/master | 2021-01-20T18:15:26.114699 | 2019-05-24T19:07:26 | 2019-05-24T19:07:26 | 90,911,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py |
import pandas as pd
def test_run():
start_date ='2018-08-14'
end_date='2019-01-07'
dates=pd.date_range(start_date,end_date)
#print(dates[0])
df1=pd.DataFrame(index=dates)
symbols = ['TSLA','FB','AAPL']
for symbol in symbols:
df_temp=pd.read_csv("{}.csv".format(symbol),index_col="date",parse_dates=True,usecols=['date','adjusted close'],na_values=['nan'])
df_temp = df_temp.rename(columns={"adjusted close":symbol})
df1=df1.join(df_temp,how='inner')
print(df1)
dfTSLA = pd.read_csv("./data/TSLA.csv", index_col="date",parse_dates=True,usecols=['date','adjusted close'],na_values=['nan'])
#df1=df1.join(dfTSLA,how='inner')
#df1=df1.dropna()
if __name__ == "__main__":
test_run() | [
"rtang7813@icloud.com"
] | rtang7813@icloud.com |
0ada9e39c61bd009bdf2ced7e598217a1ddcebd1 | 3318f5b9ae8a374f08c3fa8cb3db92681542ba2b | /src/GetElectroStaticEmbedding.py | b7b6b859ff06cb46592737bf8179d882d2592846 | [] | no_license | jiahao/clusterchem | 2c381f0b044f9d51653636666610d2377233b2f7 | 7b5db987c252780dffd4f213b0e14dcb48a62391 | refs/heads/master | 2021-01-22T08:48:46.776359 | 2012-07-18T20:22:30 | 2012-07-18T20:22:30 | 2,615,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,765 | py |
def LoadGromacsTopology(filename):
    """
    Parses Gromacs topology file for atomic charge definitions
    @param filename Name of Gromacs topology file (usually .top or .itp)
    to parse.
    @returns a dictionary of atomtypes with (residue, atomtype) as key
    and charge as value.
    """
    AtomTypes = {}
    mode = 'seek'
    with open(filename) as fh:  # ensure the handle is closed
        for line in fh:
            if mode == 'seek' and '[ atoms ]' in line:
                mode = 'read'
            elif mode == 'read':
                # Strip everything from the first semicolon onwards.
                # (The old slice line[:line.find(';')] returned line[:-1]
                # when no ';' was present, silently dropping the last
                # character of a final line lacking a trailing newline.)
                theline = line.split(';', 1)[0]
                assert '\\' not in theline, 'Cannot handle line continuations at this time.'
                # Blank lines and pure-comment lines (e.g. the column
                # header ";  nr type resnr residu atom cgnr charge" that
                # real .itp files place right below "[ atoms ]") are part
                # of the section; previously they aborted it.
                if not theline.strip():
                    continue
                # Expected columns: nr type resnr residu atom cgnr charge
                t = theline.split()
                try:
                    residu = t[3]
                    atom = t[4]
                    charge = float(t[6])
                except (ValueError, IndexError):
                    # Unparseable non-blank line (e.g. the next "[ ... ]"
                    # directive): assume the atoms section has ended.
                    mode = 'seek'
                    continue
                AtomTypes[residu, atom] = charge
    return AtomTypes
def LoadGromacsGeometry(h5table, filename):
    """
    Parses a Gromacs geometry (.gro) file and populates an HDF5 table.
    (The previous docstring was copy-pasted from the topology parser.)
    @param h5table HDF5 table to populate. Must be in CHARMM_CARD format.
    @param filename Name of Gromacs geometry file (usually .gro) to parse.
    """
    # .gro layout: title line, atom-count line, one fixed-width line per
    # atom, then a final box-vector line.
    mode = 'title'
    for line in open(filename):
        if mode == 'title':
            mode = 'numatoms'
        elif mode == 'numatoms':
            numatoms = int(line)
            thisnumatoms = 0
            mode = 'read'
        elif mode == 'read':
            try:
                data = h5table.row
                # Fixed .gro columns: residue number (0-5), residue name
                # (5-10), atom name (10-15), atom number (15-20).
                #The GROMACS format does not contain ResID and SegId
                #We fill them in with ResidNo.
                data['ResID'] = data['SegID'] = data['ResidNo'] = int(line[:5])
                data['Res'] = line[5:10].strip()
                data['Type'] = line[10:15].strip()
                data['AtomNo'] = int(line[15:20])
                numbers = map(float, line[20:].split())
                data['Coord'] = numbers[:3] #Discard velocities if present
                #The GROMACS format does not contain Weighting
                #Set to dummy value
                data['Weighting'] = 0
                data.append()
                thisnumatoms += 1
            except (ValueError, IndexError): #Assume this is the last line with box vectors
                #boxvectors = map(float, line.split())
                break
    # Guard against a malformed file whose atom count disagrees with the
    # number of atom lines actually parsed.
    assert thisnumatoms == numatoms, 'Wrong number of atoms read: expected %d but read %d' % (numatoms, thisnumatoms)
    h5table.flush()
| [
"jiahao@mit.edu"
] | jiahao@mit.edu |
0608ed6c0bd8ae5268945ba712871159548338c1 | bab51ba1ff7a839ca16bcc086ae55451a3a16823 | /stylesite/stylesite/settings.py | 087b2b12ef8b7185ae2fe0c599b1e11b009b3368 | [] | no_license | melissapnyc/insphairation | a96197e03556f63c2c49b4a487a5296d25224b41 | 4e3ba94430a476132da645fdbb8c17e0e87f4468 | refs/heads/master | 2020-05-05T02:54:10.174628 | 2014-04-03T17:13:04 | 2014-04-03T17:13:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,999 | py | """
Django settings for stylesite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')cws#g^5+cajokq)#zk#=f08i$^7klk9fz(i8=qbod!q#41qkd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hairstyles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'stylesite.urls'
WSGI_APPLICATION = 'stylesite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| [
"melissapallay@gmail.com"
] | melissapallay@gmail.com |
0726392c3e962800ab537f902236c9ddf78370f0 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/hinventory/account.py | 7b581f55213470c10ef7d664c6a48948edf9b960 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,184 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Account(Mo):
    """
    Auto-generated Cisco ACI (cobra) managed-object class for
    'hinventoryAccount'.  The class-level `meta` statements below are
    emitted by the code generator ("do not modify!" per the file header)
    and describe containment, naming, and property metadata.
    """
    meta = ClassMeta("cobra.model.hinventory.Account")
    # Basic class metadata: MO class name, RN format, and access flags.
    meta.moClassName = "hinventoryAccount"
    meta.rnFormat = "account-[%(name)s]"
    meta.category = MoCategory.REGULAR
    meta.label = "AInventory"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x600c101
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    # Containment: classes this MO may contain, and their RN prefixes.
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childClasses.add("cobra.model.hinventory.Region")
    meta.childClasses.add("cobra.model.hinventory.StaleResource")
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childClasses.add("cobra.model.hcloud.RtSelectorToTagParent")
    meta.childClasses.add("cobra.model.hinventory.ResourceGrp")
    meta.childNamesAndRnPrefix.append(("cobra.model.hcloud.RtSelectorToTagParent", "rtselectorToTagParent-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.hinventory.ResourceGrp", "resourcegrp-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.hinventory.Region", "region-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
    meta.childNamesAndRnPrefix.append(("cobra.model.hinventory.StaleResource", "stale"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.parentClasses.add("cobra.model.hinventory.Provider")
    # Inheritance chain as declared in the ACI model.
    meta.superClasses.add("cobra.model.hcloud.AResource")
    meta.superClasses.add("cobra.model.hinventory.AInventory")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.hcloud.ACloudBase")
    meta.superClasses.add("cobra.model.hinventory.AAInventory")
    meta.rnPrefixes = [
        ('account-', True),
        ]
    # Managed-object properties (PropMeta per attribute).
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "cloudName", "cloudName", 53279, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("cloudName", prop)
    prop = PropMeta("str", "cloudProviderId", "cloudProviderId", 54108, PropCategory.REGULAR)
    prop.label = "Resource ID in Cloud Provider"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("cloudProviderId", prop)
    prop = PropMeta("str", "configDn", "configDn", 54120, PropCategory.REGULAR)
    prop.label = "DN of object that created the resource"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("configDn", prop)
    prop = PropMeta("str", "delegateDn", "delegateDn", 53375, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("delegateDn", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "isStale", "isStale", 54109, PropCategory.REGULAR)
    prop.label = "Resource out-of-sync with current configuration"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = False
    prop.defaultValueStr = "no"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("isStale", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "name", "name", 50766, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.range = [(1, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    prop = PropMeta("str", "nameAlias", "nameAlias", 50279, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    prop = PropMeta("str", "resolvedObjDn", "resolvedObjDn", 50280, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("resolvedObjDn", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    # "name" is the naming property that parameterizes the RN format above.
    meta.namingProps.append(getattr(meta.props, "name"))
    getattr(meta.props, "name").needDelimiter = True
    def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
        """Create the MO under `parentMoOrDn`, named by `name`."""
        namingVals = [name]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
79331affbc571e2fd6380690621972ed904a93b2 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2015_11_15_5deg_FIP_db/check_db_symm_v3.py | 02760a80bd14513da9f994e3a337517bca50323a | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,154 | py | import numpy as np
import matplotlib.pyplot as plt
import euler_func as ef
import h5py
"""
check whether the database exhibits hexagonal-triclinic crystal
symmetry
first find 12 symmetric orientations in triclinic FZ
(0<=phi1<2*pi, 0<=Phi<=pi, 0<=phi2<2*pi)
for each deformation mode sample (theta), check if the value of
interest is the same for all symmetric orientations
"""
inc = 5 # degree increment for angular variables
np.random.seed() # generate seed for random
symhex = ef.symhex()
r2d = 180./np.pi
d2r = np.pi/180.
r2s = r2d/inc
n_th_max = 120/inc # number of theta samples in FOS
n_max = 360/inc # number of phi1, Phi and phi2 samples in FOS
n_hlf = 180/inc # half n_max
n_th = (60/inc)+1 # number of theta samples for FZ
n_p1 = 360/inc # number of phi1 samples for FZ
n_P = (90/inc)+1 # number of Phi samples for FZ
n_p2 = 60/inc # number of phi2 samples for FZ
print "angle space shape: %s" % str(np.array([n_th, n_p1, n_P, n_p2]))
# only look at last in series for value of interest
db = np.load("pre_fft.npy")[:n_th, ..., -1]
print "db shape: %s" % str(db.shape)
# n_FZ: total number of sampled orientations in FZ
n_FZ = n_p1*n_P*n_p2
# FZ_indx: vector of linear indices for sampled orientations in FZ
FZ_indx = np.arange(n_FZ)
print "FZ_indx shape: %s" % str(FZ_indx.shape)
# FZ_subs: array of subscripts of sampled orientations in FZ
FZ_subs = np.unravel_index(FZ_indx, (n_p1, n_P, n_p2))
FZ_subs = np.array(FZ_subs).transpose()
print "FZ_subs shape: %s" % str(FZ_subs.shape)
# FZ_euler: array of euler angles of sampled orientations in FZ
FZ_euler = np.float64(FZ_subs*inc*d2r)
# g: array of orientation matrices (sample to crystal frame rotation
# matrices) for orientations in fundamental zone
g = ef.bunge2g(FZ_euler[:, 0],
FZ_euler[:, 1],
FZ_euler[:, 2])
print "g shape: %s" % str(g.shape)
# FZ_euler_sym: array of euler angles of sampled orientations in
# FZ and their symmetric equivalents
FZ_euler_sym = np.zeros((12, n_FZ, 3))
# find the symmetric equivalents to the euler angle within the FZ
for sym in xrange(12):
op = symhex[sym, ...]
# g_sym: array of orientation matrices transformed with a
# hexagonal symmetry operator
g_sym = np.einsum('ik,...kj', op, g)
tmp = np.array(ef.g2bunge(g_sym)).transpose()
if sym == 0:
print "g_sym shape: %s" % str(g_sym.shape)
print "tmp shape: %s" % str(tmp.shape)
del g_sym
FZ_euler_sym[sym, ...] = tmp
del tmp
# convert euler angles to subscripts
FZ_subs_sym = np.int64(np.round(FZ_euler_sym*r2s))
# # make sure all of the euler angles within the appropriate
# # ranges (eg. not negative)
for ii in xrange(3):
lt = FZ_subs_sym[..., ii] < 0.0
FZ_subs_sym[..., ii] += n_max*lt
print np.sum(FZ_subs_sym < 0)
# determine the deviation from symmetry by finding the value of
# the function for symmetric locations and comparing these values
f = h5py.File('symm_check.hdf5', 'w')
error = f.create_dataset("error", (n_th, 12, n_FZ, 5))
for th in xrange(n_th):
for sym in xrange(12):
error[th, sym, :, 0:3] = FZ_subs_sym[sym, ...]*inc
origFZ = db[th,
FZ_subs_sym[0, :, 0],
FZ_subs_sym[0, :, 1],
FZ_subs_sym[0, :, 2]]
symFZ = db[th,
FZ_subs_sym[sym, :, 0],
FZ_subs_sym[sym, :, 1],
FZ_subs_sym[sym, :, 2]]
if th == 0 and sym == 0:
print "origFZ shape: %s" % str(origFZ.shape)
print "symFZ shape: %s" % str(symFZ.shape)
if th == 0:
print "operator number: %s" % sym
idcheck = np.all(FZ_euler_sym[0, ...] == FZ_euler_sym[sym, ...])
print "are Euler angles in different FZs identical?: %s" % str(idcheck)
orig_0sum = np.sum(origFZ == 0.0)
sym_0sum = np.sum(symFZ == 0.0)
if orig_0sum != 0 or sym_0sum != 0:
print "number of zero values in origFZ: %s" % orig_0sum
print "number of zero values in symFZ: %s" % sym_0sum
error[th, sym, :, 3] = symFZ
error[th, sym, :, 4] = np.abs(origFZ-symFZ)
error_sec = error[...]
f.close()
# perform error analysis
# generate random deformation mode and euler angle
# th_rand = np.int64(np.round((n_th-1)*np.random.rand()))
# g_rand = np.int64(np.round((n_FZ-1)*np.random.rand()))
badloc = np.argmax(error_sec[..., 4])
badloc = np.unravel_index(badloc, error_sec[..., 3].shape)
th_rand = badloc[0]
g_rand = badloc[2]
print "\nexample comparison:"
print "deformation mode: %s degrees" % str(np.float(th_rand*inc))
for sym in xrange(12):
print "operator number: %s" % sym
eul_rand = error_sec[th_rand, sym, g_rand, 0:3]
print "euler angles: %s (degrees)" % str(eul_rand)
val_rand = error_sec[th_rand, sym, g_rand, 3]
print "value of interest: %s" % str(val_rand)
errvec = error_sec[..., 4].reshape(error_sec[..., 4].size)
print "\noverall error metrics:"
print "mean database value: %s" % np.mean(db)
print "mean error: %s" % np.mean(errvec)
print "maximum error: %s" % np.max(errvec)
print "standard deviation of error: %s" % np.std(errvec)
print "total number of locations checked: %s" % (errvec.size)
err_count = np.sum(errvec != 0.0)
# plot the error histograms
error_indx = errvec != 0.0
print error_indx.shape
loc_hist = errvec[error_indx]
print loc_hist.shape
err_count = np.sum(loc_hist != 0.0)
print "number of locations with nonzero error: %s" % err_count
errvec_p1 = error_sec[..., 0].reshape(error_sec[..., 0].size)[error_indx]
plt.figure(num=4, figsize=[10, 6])
plt.hist(errvec_p1, 361)
errvec_P = error_sec[..., 1].reshape(error_sec[..., 1].size)[error_indx]
plt.figure(num=5, figsize=[10, 6])
plt.hist(errvec_P, 361)
errvec_p2 = error_sec[..., 0].reshape(error_sec[..., 0].size)[error_indx]
plt.figure(num=6, figsize=[10, 6])
plt.hist(errvec_p2, 361)
# plot the error histograms
plt.figure(num=1, figsize=[10, 6])
error_hist = error_sec[..., 4]
plt.hist(error_hist.reshape(error_hist.size), 100)
# plot the symmetric orientations in euler space
plt.figure(2)
plt.plot(np.array([0, 360, 360, 0, 0]), np.array([0, 0, 180, 180, 0]), 'k-')
plt.plot(np.array([0, 360]), np.array([90, 90]), 'k-')
plt.xlabel('$\phi_1$')
plt.ylabel('$\Phi$')
sc = 1.05
plt.axis([-(sc-1)*360, sc*360, -(sc-1)*180, sc*180])
plt.figure(3)
plt.plot(np.array([0, 180, 180, 0, 0]), np.array([0, 0, 360, 360, 0]), 'k-')
plt.plot(np.array([90, 90]), np.array([0, 360]), 'k-')
plt.plot(np.array([0, 180]), np.array([60, 60]), 'k-')
plt.plot(np.array([0, 180]), np.array([120, 120]), 'k-')
plt.plot(np.array([0, 180]), np.array([180, 180]), 'k-')
plt.plot(np.array([0, 180]), np.array([240, 240]), 'k-')
plt.plot(np.array([0, 180]), np.array([300, 300]), 'k-')
plt.xlabel('$\Phi$')
plt.ylabel('$\phi2$')
sc = 1.05
plt.axis([-(sc-1)*180, sc*180, -(sc-1)*360, sc*360])
eul_plt = error_sec[th_rand, :, g_rand, 0:3]
plt.figure(2)
plt.plot(eul_plt[:, 0], eul_plt[:, 1],
c='b', marker='o', linestyle='none')
plt.figure(3)
plt.plot(eul_plt[:, 1], eul_plt[:, 2],
c='b', marker='o', linestyle='none')
plt.show()
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
1be1511f6d587ac160826b59755e8c6494866c0f | 70a02a488849ac8cc15f5684564b06cfafe98192 | /src/network/pyramid.py | 10e1ac8d8e0fa5c71aed70d09cd2d88b803c5016 | [] | no_license | takedarts/resnetfamily | 9a4203ba5e2c54caf369138d23352c9f2a514ddb | ea51083c430cc27c5fd285429e6eed67b1f697bd | refs/heads/master | 2020-12-02T11:14:17.881053 | 2017-07-09T14:28:08 | 2017-07-09T14:28:08 | 96,619,601 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,693 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Residual Network (pre-activation)モジュール。
Residual Block : BN - Conv(3x3) - BN - ReLU - Conv(3x3) - BN
@author: Atsushi TAKEDA
'''
import chainer
def reshape(x, channels):
    """Match the channel dimension of `x` (N, C, H, W) to `channels`.

    Missing channels are zero-padded at the end; surplus channels are
    sliced off.  If the width already matches, `x` is returned as-is.
    """
    missing = channels - x.shape[1]
    if missing > 0:
        xp = chainer.cuda.get_array_module(x)
        pad = xp.zeros((x.shape[0], missing, x.shape[2], x.shape[3]), dtype=x.dtype)
        return chainer.functions.concat((x, pad), axis=1)
    if missing < 0:
        return x[:, :channels, :]
    return x
class ResidualUnit(chainer.Chain):
    """Pre-activation residual unit: BN - Conv3x3 - BN - ReLU - Conv3x3 - BN."""

    def __init__(self, in_channels, out_channels):
        links = {
            'norm0': chainer.links.BatchNormalization(in_channels),
            'conv1': chainer.links.Convolution2D(in_channels, out_channels, 3, pad=1),
            'norm1': chainer.links.BatchNormalization(out_channels),
            'conv2': chainer.links.Convolution2D(out_channels, out_channels, 3, pad=1),
            'norm2': chainer.links.BatchNormalization(out_channels),
        }
        super().__init__(**links)

    def __call__(self, x):
        # Same layer sequence as documented above, expressed as a chain.
        h = self.conv1(self.norm0(x))
        h = self.conv2(chainer.functions.relu(self.norm1(h)))
        return self.norm2(h)
class ResidualBlock(chainer.ChainList):
    """A stage of `depth` residual units whose channel count grows
    linearly from `in_channels` to `out_channels` (PyramidNet-style)."""
    def __init__(self, in_channels, out_channels, depth):
        # channels[i] linearly interpolates between in_channels (i=0)
        # and out_channels (i=depth); unit i maps channels[i] -> channels[i+1].
        channels = [int((in_channels * (depth - i) + out_channels * i) / depth) for i in range(depth + 1)]
        super().__init__(*[ResidualUnit(channels[i], channels[i + 1]) for i in range(depth)])
    def __call__(self, x):
        for layer in self:
            y = layer(x)
            # Shortcut connection: the input is zero-padded (or truncated)
            # by reshape() so its channel width matches the unit's output.
            y += reshape(x, y.shape[1])
            x = y
        return x
class Network(chainer.Chain):
def __init__(self, category, params):
depth, alpha = params
depth = (depth - 2) // 6
super().__init__(input=chainer.links.Convolution2D(None, 16, 3, pad=1),
norm=chainer.links.BatchNormalization(16),
block1=ResidualBlock(16 + alpha * 0 // 3, 16 + alpha * 1 // 3, depth),
block2=ResidualBlock(16 + alpha * 1 // 3, 16 + alpha * 2 // 3, depth),
block3=ResidualBlock(16 + alpha * 2 // 3, 16 + alpha * 3 // 3, depth),
output=chainer.links.Linear(16 + alpha, category))
def __call__(self, x):
x = self.input(x)
x = self.norm(x)
x = self.block1(x)
x = chainer.functions.average_pooling_2d(x, 2)
x = self.block2(x)
x = chainer.functions.average_pooling_2d(x, 2)
x = self.block3(x)
x = chainer.functions.relu(x)
x = chainer.functions.average_pooling_2d(x, x.shape[2])
x = self.output(x)
return x
| [
"atsushi@takedarts.jp"
] | atsushi@takedarts.jp |
20237d2e410ec4f79401483b11eccd032bf6a5f3 | 4a4484e61b662a7d093d72560a0a1182b680acc4 | /contact.py | 1cd6badb10d933d6665091eafcc791d9b50791ec | [] | no_license | TheRohitRahul/Disease_Spread_Simulator | 1a8a86e454f9c7fe29a89189cb8babba3626d2c0 | 3c97bb7e467028ad9b330f5709481a0423040e03 | refs/heads/master | 2021-05-21T02:49:09.063162 | 2020-04-03T08:16:07 | 2020-04-03T08:16:07 | 252,508,531 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | from living_state import *
from random import randint
import pdb
def contact(person1, person2, current_iter):
xp1, yp1 = person1.location
xp2, yp2 = person2.location
if (person1.status == INFECTED or person1.status == NO_HOSPITAL_QUARANTINED) and person2.status == UNINFECTED:
if (xp2 >= xp1 - person1.infection_cls.infect_area and xp2 <= xp1 + person1.infection_cls.infect_area) and (yp2 >= yp1 - person1.infection_cls.infect_area and yp2 <= yp1 + person1.infection_cls.infect_area):
infection_probab = person1.infection_cls.person_infect_probab*100
prob = randint(0,100)
if prob < infection_probab:
person2.infect(person1.infection_cls, current_iter)
if (person2.status == INFECTED or person2.status == NO_HOSPITAL_QUARANTINED) and person1.status == UNINFECTED:
if (xp1 >= xp2 - person2.infection_cls.infect_area and xp1 <= xp2 + person2.infection_cls.infect_area) and (yp1 >= yp2 - person2.infection_cls.infect_area and yp1 <= yp2 + person2.infection_cls.infect_area):
infection_probab = person2.infection_cls.person_infect_probab*100
prob = randint(0,100)
if prob < infection_probab:
person1.infect(person2.infection_cls, current_iter)
| [
"rohit.rahul@tcs.com"
] | rohit.rahul@tcs.com |
1783ef895c616c80015e20e025262af87331c702 | bfa58d62c3c03fd3d728d8cea5c47f36942c43b8 | /Modules/MidsagittalSurfaceMacro.py | 0215eddd74fd17fdca8fb2e784e0a09cd28b532c | [
"Apache-2.0"
] | permissive | vinay0458/MidsagittalApp | 3490ecc86e7b27077f20a4d20838463283c133bf | aa067e536df842b8ceafed787f042aa4a6ebd117 | refs/heads/master | 2020-07-22T03:34:04.338842 | 2016-01-15T14:56:37 | 2016-01-15T14:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,433 | py | from AlgorithmMacroModule.AlgorithmMacroModule import AlgorithmMacroModule, Error
from AlgorithmModule.Definitions import StatusCode
import numpy
class MidsagittalSurfaceMacro( AlgorithmMacroModule ):
def _init( self ):
self._ctx.field("MemoryCache.clear").touch()
self._ctx.field("outMsp").setVectorValue((1,0,0,0))
self._ctx.field("outMspA").setVectorValue((1,0,0,0))
self._ctx.field("outMspP").setVectorValue((1,0,0,0))
self._ctx.field("outAngle").setDoubleValue(0)
self._ctx.field("progress").setDoubleValue(0)
def _getPlane( self, imageField ):
self._ctx.field("MidsagittalPlane.input0").connectFrom(ctx.field(imageField))
self._ctx.field("MidsagittalPlane.update").touch()
return self._ctx.field("MidsagittalPlane.outPlane").vectorValue()
def __init__( self, ctx ):
AlgorithmMacroModule.__init__( self, ctx )
self._init()
def _validateInput( self ):
if not self._ctx.field("input0").isValid():
raise Error( StatusCode.ERROR_INPUT_OBJECT, u"Input image not valid.")
def _update( self ):
self._init()
self._ctx.field("progress").setDoubleValue(0.05)
# Compute the midsagittal plane
msp = self._getPlane("Bypass.output0")
self._ctx.field("MemoryCache.update").touch()
self._ctx.field("outMsp").setVectorValue(msp)
# Set the cMSPx from the plane computed on the full image
self._ctx.field("MidsagittalSurface.inCMSPx").setIntValue(self._ctx.field("MidsagittalPlane.outCMSPx").intValue())
self._ctx.field("progress").setDoubleValue(0.1)
# Do we want to use the dual-plane approach, to handle extreme asymmetric cases?
if self._ctx.field("inUseDualPlane").boolValue():
mspA = self._getPlane("Anterior.output0")
self._ctx.field("progress").setDoubleValue(0.2)
mspP = self._getPlane("Posterior.output0")
self._ctx.field("progress").setDoubleValue(0.3)
self._ctx.field("outMspA").setVectorValue(mspA)
self._ctx.field("outMspP").setVectorValue(mspP)
# Compute the in-plane angle between the two MSPs
vA = numpy.array(mspA[:2])
vP = numpy.array(mspP[:2])
# Normalize to unit length
vA = vA / numpy.linalg.norm(vA)
vP = vP / numpy.linalg.norm(vP)
# Compute angle. Special case if vectors have an angle of 0 or 180 degrees
angle = numpy.arccos(numpy.dot(vA, vP))
if numpy.isnan(angle):
if (v1_u == v2_u).all():
angle = 0.0
else:
angle = numpy.pi
self._ctx.field("outAngle").setDoubleValue(numpy.degrees(angle))
# Use dual plane ?
if numpy.degrees(angle) > self._ctx.field("inDualPlaneAngle").doubleValue():
self._ctx.field("MidsagittalSurface.inPlane1").setVectorValue(mspA)
self._ctx.field("MidsagittalSurface.inPlane2").setVectorValue(mspP)
self._ctx.field("MidsagittalSurface.inUseSecondPlane").setBoolValue(True)
else:
self._ctx.field("MidsagittalSurface.inPlane1").setVectorValue(msp)
self._ctx.field("MidsagittalSurface.inUseSecondPlane").setBoolValue(False)
self._ctx.field("progress").setDoubleValue(0.4)
self._ctx.field("MidsagittalSurface.update").touch()
self._ctx.field("progress").setDoubleValue(1)
def _clear( self ):
self._ctx.field("MidsagittalSurface.clear").touch()
self._init()
| [
"hjkuijf@users.noreply.github.com"
] | hjkuijf@users.noreply.github.com |
26acb61a5f3dc306afadcc547821d416d28cbc9b | 29daf4c05f7a604b55f9518757c70093c4dad6a8 | /decision_trees/tree_predict.py | 1107eb8ed53fcb71e51649471d488b5829002950 | [] | no_license | EthanYue/Machine_Learning | 43091a567da5bffa4bc4071237452ffc44f55bc7 | defd64db8273da00128b3f7fa5578fa60ce20a15 | refs/heads/master | 2020-04-22T10:38:26.459500 | 2019-02-22T14:14:01 | 2019-02-22T14:14:01 | 170,311,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,306 | py | from PIL import Image, ImageDraw
my_data = [['slashdot', 'USA', 'yes', 18, 'None'],
['google', 'France', 'yes', 23, 'Premium'],
['digg', 'USA', 'yes', 24, 'Basic'],
['kiwitobes', 'France', 'yes', 23, 'Basic'],
['google', 'UK', 'no', 21, 'Premium'],
['(direct)', 'New Zealand', 'no', 12, 'None'],
['(direct)', 'UK', 'no', 21, 'Basic'],
['google', 'USA', 'no', 24, 'Premium'],
['slashdot', 'France', 'yes', 19, 'None'],
['digg', 'USA', 'no', 18, 'None'],
['google', 'UK', 'no', 18, 'None'],
['kiwitobes', 'UK', 'no', 19, 'None'],
['digg', 'New Zealand', 'yes', 12, 'Basic'],
['google', 'UK', 'yes', 18, 'Basic'],
['kiwitobes', 'France', 'yes', 19, 'Basic']]
class decision_node:
def __init__(self, col=1, value=None, results=None, tb=None, fb=None):
"""
:param col: 待检验的判断条件所对应的列索引值
:param value: 为了使结果为true, 当前列必须匹配的值
:param results: 保存当前分支的结果, 是一个字典, 除了叶节点外,其他节点上该值都为None
:param tb: 结果为true时,树上相对于当前节点的子树上的节点
:param fb: 结果为false时,树上相对于当前节点的子树上的节点
"""
self.col = col
self.value = value
self.results = results
self.tb = tb
self.fb = fb
def divide_set(rows, column, value):
# 定义一个函数, 获得数据行属于第一组(返回值为true)还是第二组(返回值为false)
split_function = None
if isinstance(value, int) or isinstance(value, float):
split_function = lambda row: row[column] >= value
else:
split_function = lambda row: row[column] == value
# 将数据集拆分成两个集合
set1 = [row for row in rows if split_function(row)]
set2 = [row for row in rows if not split_function(row)]
return set1, set2
# 对各种可能的结果进行计数
def unique_counts(rows):
results = {}
for row in rows:
r = row[len(row) - 1]
if r not in results:
results[r] = 0
results[r] += 1
return results
# 随机放置的数据项出现于错误分类中的概率
def gini_impurity(rows):
total = len(rows)
counts = unique_counts(rows)
imp = 0
for k1 in counts:
p1 = float(counts[k1]) / total
for k2 in counts:
if k1 == k2:
continue
p2 = float(counts[k2]) / total
imp += p1 * p2
return imp
# 遍历所有可能的结果之后所得到的p(x)*log(p(x))之和
def entropy(rows):
"""
熵衡量结果之间差异程度的方法
:param rows:
:return:
"""
from math import log
log2 = lambda x: log(x) / log(2)
results = unique_counts(rows)
ent = 0.0
for r in results.keys():
p = float(results[r]) / len(rows)
ent = ent - p * log2(p)
return ent
def build_tree(rows, score_func=entropy):
if len(rows) == 0:
return decision_node()
current_score = score_func(rows)
best_gain = 0.0
best_criteria = None
best_sets = None
column_count = len(rows[0]) - 1
for col in range(0, column_count):
column_values = {}
for row in rows:
column_values[row[col]] = 1
for value in column_values.keys():
set1, set2 = divide_set(rows, col, value)
# 信息增益
p = float(len(set1)) / len(rows)
gain = current_score - p * score_func(set1) - (1 - p) * score_func(set2)
if gain > best_gain and len(set1) > 0 and len(set2) > 0:
best_gain = gain
best_criteria = (col, value)
best_sets = (set1, set2)
# 递归创建子分支
if best_gain > 0:
true_branch = build_tree(best_sets[0])
false_branch = build_tree(best_sets[1])
return decision_node(col=best_criteria[0], value=best_criteria[1], tb=true_branch, fb=false_branch)
else:
return decision_node(results=unique_counts(rows))
def classify(observation, tree):
if tree.results is not None:
return tree.results
else:
v = observation[tree.col]
branch = None
if isinstance(v, int) or isinstance(v, float):
if v >= tree.value:
branch = tree.tb
else:
branch = tree.fb
else:
if v == tree.value:
branch = tree.tb
else:
branch = tree.fb
return classify(observation, branch)
def print_tree(tree, indent=''):
if tree.results is not None:
print(str(tree.results))
else:
print(str(tree.col) + ':' + str(tree.value) +'? ')
print(indent + 'T->')
print_tree(tree.tb, indent+' ')
print(indent + 'F->')
print_tree(tree.fb, indent+' ')
# 得到树的总宽度
def get_width(tree):
if tree.tb is None and tree.fb is None:
return 1
return get_width(tree.tb) + get_width(tree.fb)
# 得到树的总深度
def get_depth(tree):
if tree.tb is None and tree.fb is None:
return 0
return max(get_depth(tree.tb), get_depth(tree.fb)) + 1
# 绘制树
def draw_tree(tree, jpeg='tree.jpg'):
w = get_width(tree) * 100
h = get_depth(tree) * 100 + 200
img = Image.new('RGB', (w, h), (255, 255, 255))
draw = ImageDraw.Draw(img)
draw_node(draw, tree, w/2, 20)
img.save(jpeg, 'JPEG')
# 绘制节点,右边为真
def draw_node(draw, tree, x, y):
if tree.results is None:
w1 = get_width(tree.fb) * 100
w2 = get_width(tree.tb) * 100
left = x - (w1 + w2) / 2
right = x + (w1 + w2) / 2
draw.text((x - 20, y - 10), str(tree.col) + ':' + str(tree.value), (0, 0, 0))
draw.line((x, y, left+w1/2, y+100), fill=(255, 0, 0))
draw.line((x, y, right-w2/2, y+100), fill=(255, 0, 0))
draw_node(draw, tree.fb, left+w1/2, y+100)
draw_node(draw, tree.tb, right-w2/2, y+100)
else:
txt = ' \n'.join(['%s:%d' % v for v in tree.results.items()])
draw.text((x-20, y), txt, (0, 0, 0))
# 剪枝
def prune(tree, min_gain):
# 如果分支不是叶节点则剪枝
if tree.tb.results is None:
prune(tree.tb, min_gain)
if tree.fb.results is None:
prune(tree.fb, min_gain)
# 如果两个子节点都是叶节点,则判断是否需要合并
if tree.tb.results is not None and tree.fb.results is not None:
tb, fb = [], []
for v, c in tree.tb.results.items():
tb += [[v]] * c
for v, c in tree.fb.results.items():
fb += [[v]] * c
# 判断熵的减少是否低于最小阈值
delta = entropy(tb+fb) - (entropy(tb) + entropy(fb) / 2)
if delta < min_gain:
# 合并分支
tree.tb, tree.fb = None, None
tree.results = unique_counts(tb+fb)
# 处理确实数据
def md_classify(observation, tree):
if tree.results is not None:
return tree.results
else:
v = observation[tree.col]
if v is None:
tr, fr = md_classify(observation, tree.tb), md_classify(observation, tree.fb)
t_count = sum(tr.values())
f_count = sum(fr.values())
t_weight = t_count / (t_count + f_count)
f_weight = f_count / (t_count + f_count)
result = {}
for k, v in tr.items():
result[k] = v * t_weight
for k, v in fr.items():
if k not in result:
result[k] = 0
result[k] = v * f_weight
return result
else:
if isinstance(v, int) or isinstance(v, float):
if v >= tree.value:
branch = tree.tb
else:
branch = tree.fb
else:
if v == tree.value:
branch = tree.tb
else:
branch = tree.fb
return md_classify(observation, branch)
def variance(rows):
if len(rows) == 0:
return 0
data = [float(row[len(row)-1]) for row in rows]
mean = sum(data) / len(data)
variance = sum([(d-mean) ** 2 for d in data]) / len(data)
return variance | [
"yfy17859733505@gmail.com"
] | yfy17859733505@gmail.com |
9cf0b12018962010a876c9da5885b4db43e416b4 | 640ac4564cba4836c01cccd37376bdf647c2484c | /optim_project/optim_app/migrations/0007_auto_20200604_0228.py | de9a9a98406647055d86496e801f81d5a64d8095 | [] | no_license | shchepinevg/Bachelor-s-thesis | 9c162839452347e3a8a0f10baf6aa72c3bdd3b95 | ba99c7bf7f75de993a8f6605c2dc4e348f9809df | refs/heads/master | 2023-02-10T10:51:22.914621 | 2020-07-10T09:08:27 | 2020-07-10T09:08:27 | 261,974,296 | 0 | 0 | null | 2021-01-06T03:18:41 | 2020-05-07T06:58:50 | R | UTF-8 | Python | false | false | 2,126 | py | # Generated by Django 3.0.6 on 2020-06-03 19:28
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('optim_app', '0006_auto_20200601_2234'),
]
operations = [
migrations.CreateModel(
name='OptimizationFunction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_function', models.BooleanField()),
('optimization_meth', models.CharField(max_length=64)),
('N', models.IntegerField()),
('optim_type', models.IntegerField()),
('value', models.FloatField()),
('param_func', django.contrib.postgres.fields.jsonb.JSONField()),
('param_optim', django.contrib.postgres.fields.jsonb.JSONField()),
('user_function', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='optim_app.UserFunction')),
],
),
migrations.CreateModel(
name='OptimizationParameters',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meta_optim_meth', models.CharField(max_length=64)),
('meta_N', models.IntegerField()),
('meta_value', models.FloatField()),
('meta_param_optim', django.contrib.postgres.fields.jsonb.JSONField()),
('optim_func', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='optim_app.OptimizationFunction')),
('user_function', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='optim_app.UserFunction')),
],
),
migrations.AlterField(
model_name='parameterinfo',
name='discrete_continuous',
field=models.IntegerField(),
),
migrations.DeleteModel(
name='OptimizationHistory',
),
]
| [
"shchepinevg@gmail.com"
] | shchepinevg@gmail.com |
6e6a8d9906539b1e7a5184501859be324b07a84d | 3d12da3c90cf1ebc9b04e9a8b289d82fce46cb48 | /planet_name_generator/__init__.py | 57834dad8290c8c1fe0ed939a7b85ebc7751df79 | [
"MIT"
] | permissive | dawsonren/proc_gen_universe | 14b3cf7e91c39fefece26db1f2f23f4a915602cb | 958cf628e12654cadcc858e2122591f0e0a3c84d | refs/heads/master | 2023-02-17T02:28:17.142137 | 2021-01-13T03:06:32 | 2021-01-13T03:06:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | import random
SUFFIXES = ["prime", "",
"B", "",
"alpha", "",
'proxima', "",
"V", "",
"C", "",
"X", "",
"D", "",
"", ""] # empty strings so some don't have suffixes
with open("planet_name_generator/planets.txt", "r") as f:
raw = f.read()
PLANETS = raw.split("\n")
syllables = []
for p in PLANETS:
lex = p.split("-")
for syl in lex:
if syl not in syllables:
syllables.append(syl)
size = len(syllables)
freq = [[0] * size for i in range(size)]
for p in PLANETS:
lex = p.split("-")
i = 0
while i < len(lex) - 1:
freq[syllables.index(lex[i])][syllables.index(lex[i+1])] += 1
i += 1
freq[syllables.index(lex[len(lex) - 1])][size - 1] += 1
def generate_name():
planet_name = ""
length = random.randint(2, 3)
initial = random.randint(0, size - 1)
while length > 0:
while 1 not in freq[initial]:
initial = random.randint(0, size - 1)
planet_name += syllables[initial]
initial = freq[initial].index(1)
length -= 1
suffix_index = random.randint(0, len(SUFFIXES) - 1)
planet_name += f" {SUFFIXES[suffix_index]}"
return (" ".join([s.capitalize() for s in planet_name.split(" ")])).strip()
if __name__ == "__main__":
print(generate_name())
| [
"dawsonren@gmail.com"
] | dawsonren@gmail.com |
60f89bcbfd5b5b577a09c1598ca2168403e00b62 | 136cbeb736cecca889dce4dc4abc601919b41a3c | /GUI/add_monster_popup.py | be4c83de807c4c4a52c013d0dea2d58f3cc68bfd | [] | no_license | LouisChen1013/CharacterManagementSystem | f756ac1817b577003f7bc52c1189d5d67fc8b357 | e710f04a2f95a8f4815088d9fe01168547b55fae | refs/heads/master | 2023-05-23T21:02:44.284701 | 2021-07-29T06:54:25 | 2021-07-29T06:54:25 | 210,401,012 | 0 | 0 | null | 2023-05-23T01:26:35 | 2019-09-23T16:20:29 | Python | UTF-8 | Python | false | false | 1,624 | py | import tkinter as tk
from tkinter import messagebox
import requests
import re
class AddMonsterPopup(tk.Frame):
""" Popup Frame to Add a Monster """
def __init__(self, parent, close_callback):
""" Constructor """
tk.Frame.__init__(self, parent)
self._close_cb = close_callback
self.grid(rowspan=2, columnspan=2)
tk.Label(self, text="Monster Type:").grid(row=1, column=1)
self._monster_type = tk.Entry(self)
self._monster_type.grid(row=1, column=2)
tk.Label(self, text="Monster AI Difficulty:").grid(row=2, column=1)
self._monster_ai_difficulty = tk.Entry(self)
self._monster_ai_difficulty.grid(row=2, column=2)
tk.Button(self, text="Submit", command=self._submit_cb).grid(
row=4, column=1)
tk.Button(self, text="Close", command=self._close_cb).grid(
row=4, column=2)
def _submit_cb(self):
""" Submit the Add Monster """
# Create the dictionary for the JSON request body
data = {}
data['monster_type'] = self._monster_type.get()
data['monster_ai_difficulty'] = self._monster_ai_difficulty.get()
data['type'] = "monster"
""" Adds a character to the backend server"""
headers = {"content-type": "application/json"}
response = requests.post(
"http://127.0.0.1:5000/server/characters", json=data, headers=headers)
if response.status_code == 200:
self._close_cb()
else:
messagebox.showerror(
"Error", "Add Monster Request Failed: " + response.text)
| [
"chenhonglin1013@gmail.com"
] | chenhonglin1013@gmail.com |
533b9efc9a2717c2a35f6dd0878ed0ee355c00d9 | 0cc2e39d4f288a4d07d2df5cc51b2d27037f3732 | /exercism/python/book-store/book_store.py | 144502255b204e733d633f3c73777cdc47bfc182 | [] | no_license | Sujan-Kandeepan/Exercism | 893aceb3f5b7a37b7d845dfdde3faa11fae60236 | 3491c660d7090627196c83c8f46c13cca9e0f856 | refs/heads/master | 2021-04-27T00:09:09.211345 | 2018-03-04T08:16:58 | 2018-03-04T08:16:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | def calculate_total(books):
def groupnums(num):
if num == 1:
return [[0], [1], [2]]
else:
returnlist = []
for i in groupnums(num-1):
returnlist.extend([i+[0], i+[1], i+[2]])
return returnlist
bestprice = len(books)*8
if books == []:
return 0.0
for i in groupnums(len(books)):
groups = [[], [], []]
for j in range(len(books)):
groups[i[j]].append(books[j])
if groups[0] == sorted(groups[0]) and groups[0] == list(set(groups[0])) and \
groups[1] == sorted(groups[1]) and groups[1] == list(set(groups[1])) and \
groups[2] == sorted(groups[2]) and groups[2] == list(set(groups[2])):
price = 0.0
for j in groups:
if len(j) == 5:
price += 30.0
elif len(j) == 4:
price += 25.6
elif len(j) == 3:
price += 21.6
elif len(j) == 2:
price += 15.2
elif len(j) == 1:
price += 8.0
if price < bestprice:
bestprice = price
return bestprice
| [
"kandeeps@mcmaster.ca"
] | kandeeps@mcmaster.ca |
e33fe0145613768d16866c5fc41bc2560e783bf5 | 70bee1e4e770398ae7ad9323bd9ea06f279e2796 | /test/test_istio_authorization_policy_source.py | d06474312ad2007728f5c1f1dbe3e96ba1395147 | [] | no_license | hi-artem/twistlock-py | c84b420b1e582b3c4cf3631eb72dac6d659d4746 | 9888e905f5b9d3cc00f9b84244588c0992f8e4f4 | refs/heads/main | 2023-07-18T07:57:57.705014 | 2021-08-22T04:36:33 | 2021-08-22T04:36:33 | 398,637,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | # coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.istio_authorization_policy_source import IstioAuthorizationPolicySource # noqa: E501
from openapi_client.rest import ApiException
class TestIstioAuthorizationPolicySource(unittest.TestCase):
"""IstioAuthorizationPolicySource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IstioAuthorizationPolicySource
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.istio_authorization_policy_source.IstioAuthorizationPolicySource() # noqa: E501
if include_optional :
return IstioAuthorizationPolicySource(
namespaces = [
''
],
principals = [
''
]
)
else :
return IstioAuthorizationPolicySource(
)
def testIstioAuthorizationPolicySource(self):
"""Test IstioAuthorizationPolicySource"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"aakatev@virtru.com"
] | aakatev@virtru.com |
59891f64ba1120d54b73a4d999144edc28304be6 | 652cea9eea05f9baa54f93830746fda5db13af25 | /vscode_test.py | 7e87500081095e377099a6a5b0315ca4f1d65378 | [] | no_license | linbirg/qt | 091af74b0ac4527883c8c0451135d77bdc310d23 | 4cb85944cebf903f0a43d0c2883a0e4839dcad31 | refs/heads/master | 2020-07-23T03:36:40.889622 | 2019-10-28T06:13:34 | 2019-10-28T06:13:34 | 207,435,268 | 22 | 9 | null | null | null | null | UTF-8 | Python | false | false | 42,976 | py | # coding: utf-8
##### 下方代码为 IDE 运行必备代码 #####
import jqdata
if __name__ == '__main__':
import jqsdk
params = {
'token': '88e0627cf5b11e6e988637de1f3be8e8', # 在客户端系统设置中找,字符串格式,例如 'asdf...'
'algorithmId': 6, # 在客户端我的策略中,整数型,例如:1;回测结束后在客户端此ID策略的回测列表中找对应的回测结果
'baseCapital': 100000,
'frequency': 'day',
'startTime': '2017-06-01',
'endTime': '2017-08-01',
'name': "费雪选股",
}
jqsdk.run(params)
##### 下面是策略代码编辑部分 #####
# 克隆自聚宽文章:https://www.joinquant.com/post/7029
# 标题:小费雪选股法(终)
# 作者:小兵哥
#enable_profile()
import numpy as np
import talib
import pandas
import scipy as sp
import scipy.optimize
import datetime as dt
from scipy import linalg as sla
from scipy import spatial
# from jqdata import gta
from jqdata import jy as gta
from jqdata import *
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import statsmodels.api as sm
def initialize(context):
#用沪深 300 做回报基准
set_benchmark('000300.XSHG')
# 滑点、真实价格
set_slippage(FixedSlippage(0.000))
set_option('use_real_price', True)
# 关闭部分log
log.set_level('order', 'error')
run_daily(fun_main, '10:30')
def after_code_changed(context):
# 变量都挪到 after_code_changed 里
g.quantlib = quantlib()
# 策略起停标志位
g.quantlib.fun_set_var(context, 'algo_enable', True)
# 定义风险敞口
g.quantlib.fun_set_var(context, 'riskExposure', 0.03)
# 正态分布概率表,标准差倍数以及置信率
# 1.96, 95%; 2.06, 96%; 2.18, 97%; 2.34, 98%; 2.58, 99%; 5, 99.9999%
g.quantlib.fun_set_var(context, 'confidencelevel', 1.96)
# 调仓参数
g.quantlib.fun_set_var(context, 'hold_cycle', 30)
g.quantlib.fun_set_var(context, 'hold_periods', 0)
g.quantlib.fun_set_var(context, 'stock_list', [])
g.quantlib.fun_set_var(context, 'position_price', {})
g.quantlib.fun_set_var(context, 'recal_periods', 0)
g.quantlib.fun_set_var(context, 'version', 1.0)
if context.version < 1.0:
context.hold_periods = 0
context.riskExposure = 0.03
context.version = 1.0
def before_trading_start(context):
# 定义股票池
moneyfund = ['511880.XSHG','511010.XSHG','511220.XSHG']
# 上市不足 60 天的剔除掉
context.moneyfund = g.quantlib.fun_delNewShare(context, moneyfund, 60)
def fun_main(context):
# 引用 lib
g.value_factor = value_factor_lib()
# g.quantlib = quantlib()
context.msg = ""
# 检查是否需要调仓
rebalance_flag, context.position_price, context.hold_periods, msg = \
g.quantlib.fun_needRebalance('algo', context.moneyfund, context.stock_list, context.position_price, \
context.hold_periods, context.hold_cycle, 0.25)
context.msg += msg
statsDate = context.current_dt.date() - dt.timedelta(1)
#context.algo_enable, context.recal_periods, rebalance_flag = g.quantlib.fun_check_algo(context.algo_enable, context.recal_periods, rebalance_flag, statsDate)
trade_style = False # True 会交易进行类似 100股的买卖,False 则只有在仓位变动 >25% 的时候,才产生交易
if rebalance_flag:
stock_list = []
if context.algo_enable:
#获取坏股票列表,将会剔除
# bad_stock_list = g.quantlib.fun_get_bad_stock_list(statsDate)
# 低估值策略
value_factor_stock_list = g.value_factor.fun_get_stock_list(context, 5, statsDate, None)
stock_list = value_factor_stock_list
# 分配仓位
equity_ratio, bonds_ratio = g.quantlib.fun_assetAllocationSystem(stock_list, context.moneyfund, context.confidencelevel, statsDate)
risk_ratio = 0
if len(equity_ratio.keys()) >= 1:
risk_ratio = context.riskExposure / len(equity_ratio.keys())
# 分配头寸,根据预设的风险敞口,计算交易时的比例
position_ratio = g.quantlib.fun_calPosition(equity_ratio, bonds_ratio, 1.0, risk_ratio, context.moneyfund, context.portfolio.portfolio_value, context.confidencelevel, statsDate)
trade_style = True
context.stock_list = position_ratio.keys()
# 更新待购价格
context.position_price = g.quantlib.fun_update_positions_price(position_ratio)
# 卖掉已有且不在待购清单里的股票
for stock in context.portfolio.positions.keys():
if stock not in position_ratio:
position_ratio[stock] = 0
context.position_ratio = position_ratio
print(position_ratio)
# 调仓,执行交易
g.quantlib.fun_do_trade(context, context.position_ratio, context.moneyfund, trade_style)
class value_factor_lib():
def fun_get_stock_list(self, context, hold_number, statsDate=None, bad_stock_list=[]):
relative_ps = self.fun_get_relative_ps(context, statsDate)
low_ps = self.fun_get_low_ps(context, statsDate)
good_stock_list = list(set(relative_ps) & set(low_ps))
# 取净利润增长率为正的
df = g.quantlib.get_fundamentals_sum('income', income.net_profit, statsDate)
df = df.drop(['0Q', '1Q', '2Q', '3Q'], axis=1)
df.rename(columns={'sum_value':'ttm_1y'}, inplace=True)
df1 = g.quantlib.get_fundamentals_sum('income', income.net_profit, (statsDate - dt.timedelta(365)))
df1 = df1.drop(['0Q', '1Q', '2Q', '3Q'], axis=1)
df1.rename(columns={'sum_value':'ttm_2y'}, inplace=True)
df = df.merge(df1, on='code')
df = df.fillna(value=0)
df['inc_net_profit'] = 1.0*(df['ttm_1y'] - df['ttm_2y'])
df = df[df.inc_net_profit > 0]
inc_net_profit_list = list(df.code)
good_stock_list = list(set(good_stock_list) & set(inc_net_profit_list))
print(len(good_stock_list))
# 按行业取营业收入增长率前 1/3
df = g.quantlib.get_fundamentals_sum('income', income.operating_revenue, statsDate)
df = df.drop(['0Q', '1Q', '2Q', '3Q'], axis=1)
df.rename(columns={'sum_value':'ttm_1y'}, inplace=True)
df1 = g.quantlib.get_fundamentals_sum('income', income.operating_revenue, (statsDate - dt.timedelta(365)))
df1 = df1.drop(['0Q', '1Q', '2Q', '3Q'], axis=1)
df1.rename(columns={'sum_value':'ttm_2y'}, inplace=True)
df = df.merge(df1, on='code')
df = df.fillna(value=0)
df['inc_operating_revenue'] = 1.0*(df['ttm_1y'] - df['ttm_2y']) / abs(df['ttm_2y'])
df = df.fillna(value = 0)
industry_list = g.quantlib.fun_get_industry(cycle=None)
#industry_list = g.quantlib.fun_get_industry_levelI()
inc_operating_revenue_list = []
for industry in industry_list:
stock_list = g.quantlib.fun_get_industry_stocks(industry, 2, statsDate)
df_inc_operating_revenue = df[df.code.isin(stock_list)]
df_inc_operating_revenue = df_inc_operating_revenue.sort_values(by='inc_operating_revenue', ascending=False)
inc_operating_revenue_list = inc_operating_revenue_list + list(df_inc_operating_revenue[:int(len(df_inc_operating_revenue)*0.33)].code)
good_stock_list = list(set(good_stock_list) & set(inc_operating_revenue_list))
print(len(good_stock_list))
# 指标剔除资产负债率相对行业最高的1/3的股票
df = get_fundamentals(query(balance.code, balance.total_liability, balance.total_assets), date = statsDate)
df = df.fillna(value=0)
df['liability_ratio'] = 1.0*(df['total_liability'] / df['total_assets'])
industry_list = g.quantlib.fun_get_industry(cycle=None)
#industry_list = g.quantlib.fun_get_industry_levelI()
liability_ratio_list = []
for industry in industry_list:
stock_list = g.quantlib.fun_get_industry_stocks(industry, 2, statsDate)
df_liability_ratio = df[df.code.isin(stock_list)]
df_liability_ratio = df_liability_ratio.sort_values(by='liability_ratio', ascending=True)
liability_ratio_list = liability_ratio_list + list(df_liability_ratio[:int(len(df_liability_ratio)*0.66)].code)
good_stock_list = list(set(good_stock_list) & set(liability_ratio_list))
# 剔除净利润率相对行业最低的1/3的股票;
df = get_fundamentals(query(indicator.code, indicator.net_profit_to_total_revenue ), date = statsDate)
df = df.fillna(value=0)
industry_list = g.quantlib.fun_get_industry(cycle=None)
#industry_list = g.quantlib.fun_get_industry_levelI()
profit_ratio_list = []
for industry in industry_list:
stock_list = g.quantlib.fun_get_industry_stocks(industry, 2, statsDate)
df_profit_ratio = df[df.code.isin(stock_list)]
df_profit_ratio = df_profit_ratio.sort_values('net_profit_to_total_revenue', ascending=False)
profit_ratio_list = profit_ratio_list + list(df_profit_ratio[:int(len(df_profit_ratio)*0.66)].code)
good_stock_list = list(set(good_stock_list) & set(profit_ratio_list))
stock_list = []
for stock in relative_ps:
#for stock in low_ps:
if stock in good_stock_list:
stock_list.append(stock)
print(len(good_stock_list))
positions_list = context.portfolio.positions.keys()
stock_list = g.quantlib.unpaused(stock_list, positions_list)
stock_list = g.quantlib.remove_st(stock_list, statsDate)
stock_list = g.quantlib.fun_delNewShare(context, stock_list, 30)
stock_list = stock_list[:hold_number*10]
stock_list = g.quantlib.remove_bad_stocks(stock_list, bad_stock_list)
stock_list = g.quantlib.remove_limit_up(stock_list, positions_list)
stock_list = g.quantlib.fun_diversity_by_industry(stock_list, int(hold_number*0.4), statsDate)
return stock_list[:hold_number]
def fun_get_relative_ps(self, context, statsDate=None):
def __fun_get_ps(statsDate, deltamonth):
__df = get_fundamentals(query(valuation.code, valuation.ps_ratio), date = (statsDate - dt.timedelta(30*deltamonth)))
__df.rename(columns={'ps_ratio':deltamonth}, inplace=True)
return __df
for i in range(48):
df1 = __fun_get_ps(statsDate, i)
if i == 0:
df = df1
else:
df = df.merge(df1, on='code')
df.index = list(df['code'])
df = df.drop(['code'], axis=1)
df = df.fillna(value=0, axis=0)
# 1. 计算相对市收率,相对市收率等于个股市收率除以全市场的市收率,这样处理的目的是为了剔除市场估值变化的影响
for i in range(len(df.columns)):
s = df.iloc[:,i]
median = s.median()
df.iloc[:,i] = s / median
length, stock_list, stock_dict = len(df), list(df.index), {}
# 2. 计算相对市收率N个月的移动平均值的N个月的标准差,并据此计算布林带上下轨(N个月的移动平均值+/-N个月移动平均的标准差)。N = 24
for i in range(length):
s = df.iloc[i,:]
if s.min() < 0:
pass
else:
# tmp_list 是24个月的相对市收率均值
tmp_list = []
for j in range(24):
tmp_list.append(s[j:j+24].mean())
# mean_value 是最近 24个月的相对市收率均值
mean_value = tmp_list[0]
# std_value 是相对市收率24个月的移动平均值的24个月的标准差
std_value = np.std(tmp_list)
tmp_dict = {}
# (mean_value - std_value),是布林线下轨(此处定义和一般布林线不一样,一般是 均线 - 2 倍标准差)
'''
研报原始的策略,选择 s[0] < mean_value - std_value 的标的,但因为 ps_ratio十分不稳定,跳跃很大,此区间里的测试结果非常不稳定
本策略退而求其次,选择均线-1倍标准差 和 均线 - 2 倍标准差之间的标的
大致反映策略的有效性
'''
if s[0] > (mean_value - 2.0*std_value) and s[0] < mean_value:
# 记录 相对市收率均值 / 当期相对市收率
stock_dict[stock_list[i]] = (1.0*mean_value/s[0])
stock_list = []
dict_score = stock_dict
dict_score = sorted(dict_score.items(), key=lambda d:d[1], reverse=True)
for idx in dict_score:
stock = idx[0]
stock_list.append(stock)
return stock_list
def fun_get_low_ps(self, context, statsDate=None):
    """
    Pick low price-to-sales stocks via an industry-neutralized SP factor.

    The SP factor (sales-to-price, the inverse of the PS ratio) is
    winsorized/neutralized/standardized per industry by
    g.quantlib.fun_get_factor; a stock spanning several industries keeps
    its best score.  The top half of the ranking is returned.

    Args:
        context:   strategy context (unused; uniform method signature).
        statsDate: date the fundamentals are sampled at.

    Returns:
        The best-scoring half of the stock codes, highest SP first.
    """
    fundamentals = get_fundamentals(
        query(valuation.code, valuation.ps_ratio),
        date=statsDate
    )
    fundamentals = fundamentals.fillna(value=0)
    # SP is the reciprocal of the PS ratio; drop the raw column afterwards.
    fundamentals['SP'] = 1.0 / fundamentals['ps_ratio']
    fundamentals = fundamentals.drop(['ps_ratio'], axis=1)
    # Best (maximum) neutralized SP score seen for each stock across
    # all industries it belongs to.
    best_sp = {}
    for industry in g.quantlib.fun_get_industry(cycle=None):
        factor_scores = g.quantlib.fun_get_factor(fundamentals, 'SP', industry, 2, statsDate).to_dict()
        for code, score in factor_scores.items():
            if code not in best_sp or best_sp[code] < score:
                best_sp[code] = score
    # Rank by score, best first, and keep the top half.
    ranked = [code for code, _ in sorted(best_sp.items(), key=lambda kv: kv[1], reverse=True)]
    return ranked[:int(len(ranked) * 0.5)]
# class quantlib():
# def get_fundamentals_sum(self, table_name='indicator', search='indicator.adjusted_profit', statsDate=None):
# # 取最近的五个季度财报的日期
# def __get_quarter(table_name, statsDate):
# '''
# 返回最近 n 个财报的日期
# 返回每个股票最近一个财报的日期
# '''
# # 取最新一季度的统计日期
# if table_name == 'indicator':
# q = query(indicator.code, indicator.statDate)
# elif table_name == 'income':
# q = query(income.code, income.statDate)
# elif table_name == 'cash_flow':
# q = query(cash_flow.code, cash_flow.statDate)
# elif table_name == 'balance':
# q = query(balance.code, balance.statDate)
# df = get_fundamentals(q, date = statsDate)
# stock_last_statDate = {}
# tmpDict = df.to_dict()
# for i in range(len(tmpDict['statDate'].keys())):
# # 取得每个股票的代码,以及最新的财报发布日
# stock_last_statDate[tmpDict['code'][i]] = tmpDict['statDate'][i]
# df = df.sort_values(by='statDate', ascending=False)
# # 取得最新的财报日期
# last_statDate = df.iloc[0,1]
# this_year = int(str(last_statDate)[0:4])
# this_month = str(last_statDate)[5:7]
# if this_month == '12':
# last_quarter = str(this_year) + 'q4'
# last_two_quarter = str(this_year) + 'q3'
# last_three_quarter = str(this_year) + 'q2'
# last_four_quarter = str(this_year) + 'q1'
# last_five_quarter = str(this_year - 1) + 'q4'
# elif this_month == '09':
# last_quarter = str(this_year) + 'q3'
# last_two_quarter = str(this_year) + 'q2'
# last_three_quarter = str(this_year) + 'q1'
# last_four_quarter = str(this_year - 1) + 'q4'
# last_five_quarter = str(this_year - 1) + 'q3'
# elif this_month == '06':
# last_quarter = str(this_year) + 'q2'
# last_two_quarter = str(this_year) + 'q1'
# last_three_quarter = str(this_year - 1) + 'q4'
# last_four_quarter = str(this_year - 1) + 'q3'
# last_five_quarter = str(this_year - 1) + 'q2'
# else: #this_month == '03':
# last_quarter = str(this_year) + 'q1'
# last_two_quarter = str(this_year - 1) + 'q4'
# last_three_quarter = str(this_year - 1) + 'q3'
# last_four_quarter = str(this_year - 1) + 'q2'
# last_five_quarter = str(this_year - 1) + 'q1'
# return last_quarter, last_two_quarter, last_three_quarter, last_four_quarter, last_five_quarter, stock_last_statDate
# # 查财报,返回指定值
# def __get_fundamentals_value(table_name, search, myDate):
# '''
# 输入查询日期
# 返回指定的财务数据,格式 dict
# '''
# if table_name == 'indicator':
# q = query(indicator.code, search, indicator.statDate)
# elif table_name == 'income':
# q = query(income.code, search, income.statDate)
# elif table_name == 'cash_flow':
# q = query(cash_flow.code, search, cash_flow.statDate)
# elif table_name == 'balance':
# q = query(balance.code, search, balance.statDate)
# df = get_fundamentals(q, statDate = myDate).fillna(value=0)
# tmpDict = df.to_dict()
# stock_dict = {}
# name = str(search).split('.')[-1]
# for i in range(len(tmpDict['statDate'].keys())):
# tmpList = []
# tmpList.append(tmpDict['statDate'][i])
# tmpList.append(tmpDict[name][i])
# stock_dict[tmpDict['code'][i]] = tmpList
# return stock_dict
# # 得到最近 n 个季度的统计时间
# last_quarter, last_two_quarter, last_three_quarter, last_four_quarter, last_five_quarter, stock_last_statDate = __get_quarter(table_name, statsDate)
# last_quarter_dict = __get_fundamentals_value(table_name, search, last_quarter)
# last_two_quarter_dict = __get_fundamentals_value(table_name, search, last_two_quarter)
# last_three_quarter_dict = __get_fundamentals_value(table_name, search, last_three_quarter)
# last_four_quarter_dict = __get_fundamentals_value(table_name, search, last_four_quarter)
# last_five_quarter_dict = __get_fundamentals_value(table_name, search, last_five_quarter)
# tmp_list = []
# stock_list = stock_last_statDate.keys()
# for stock in stock_list:
# tmp_dict = {}
# tmp_dict['code'] = stock
# value_list = []
# if stock in last_quarter_dict:
# if stock_last_statDate[stock] == last_quarter_dict[stock][0]:
# value_list.append(last_quarter_dict[stock][1])
# if stock in last_two_quarter_dict:
# value_list.append(last_two_quarter_dict[stock][1])
# if stock in last_three_quarter_dict:
# value_list.append(last_three_quarter_dict[stock][1])
# if stock in last_four_quarter_dict:
# value_list.append(last_four_quarter_dict[stock][1])
# if stock in last_five_quarter_dict:
# value_list.append(last_five_quarter_dict[stock][1])
# for i in range(4 - len(value_list)):
# value_list.append(0)
# tmp_dict['0Q'] = value_list[0]
# tmp_dict['1Q'] = value_list[1]
# tmp_dict['2Q'] = value_list[2]
# tmp_dict['3Q'] = value_list[3]
# tmp_dict['sum_value'] = value_list[0] + value_list[1] + value_list[2] + value_list[3]
# tmp_list.append(tmp_dict)
# df = pd.DataFrame(tmp_list)
# return df
# def fun_set_var(self, context, var_name, var_value):
# if var_name not in dir(context):
# setattr(context, var_name, var_value)
# def fun_check_price(self, algo_name, stock_list, position_price, gap_trigger):
# flag = False
# msg = ""
# if stock_list:
# h = history(1, '1d', 'close', stock_list, df=False)
# for stock in stock_list:
# curPrice = h[stock][0]
# if stock not in position_price:
# position_price[stock] = curPrice
# oldPrice = position_price[stock]
# if oldPrice != 0:
# deltaprice = abs(curPrice - oldPrice)
# if deltaprice / oldPrice > gap_trigger:
# msg = algo_name + "需要调仓: " + stock + ",现价: " + str(curPrice) + " / 原价格: " + str(oldPrice) + "\n"
# flag = True
# return flag, position_price, msg
# return flag, position_price, msg
# def fun_needRebalance(self, algo_name, moneyfund, stock_list, position_price, hold_periods, hold_cycle, gap_trigger):
# msg = ""
# rebalance_flag = False
# stocks_count = 0
# for stock in stock_list:
# if stock not in moneyfund:
# stocks_count += 1
# if stocks_count == 0:
# msg += algo_name + "调仓,因为持股数为 0 \n"
# rebalance_flag = True
# elif hold_periods == 0:
# msg += algo_name + "调仓,因为持股天数剩余为 0 \n"
# rebalance_flag = True
# if not rebalance_flag:
# rebalance_flag, position_price, msg2 = self.fun_check_price(algo_name, stock_list, position_price, gap_trigger)
# msg += msg2
# if rebalance_flag:
# hold_periods = hold_cycle
# else:
# hold_periods -= 1
# msg += algo_name + "离下次调仓还剩 " + str(hold_periods) + " 天\n"
# return rebalance_flag, position_price, hold_periods, msg
# # 更新持有股票的价格,每次调仓后跑一次
# def fun_update_positions_price(self, ratio):
# position_price = {}
# if ratio:
# h = history(1, '1m', 'close', ratio.keys(), df=False)
# for stock in ratio.keys():
# if ratio[stock] > 0:
# position_price[stock] = round(h[stock][0], 3)
# return position_price
# def fun_assetAllocationSystem(self, stock_list, moneyfund, confidencelevel, statsDate=None):
# def __fun_getEquity_ratio(__stocklist, confidencelevel, type, limit_up=1.0, limit_low=0.0, statsDate=None):
# __ratio = {}
# if __stocklist:
# if type == 1: #风险平价 历史模拟法
# # 正态分布概率表,标准差倍数以及置信率
# # 1.96, 95%; 2.06, 96%; 2.18, 97%; 2.34, 98%; 2.58, 99%; 5, 99.9999%
# __ratio = self.fun_calStockWeight_by_risk(confidencelevel, __stocklist, limit_up, limit_low, statsDate)
# elif type == 2: #马科维奇
# __ratio = self.fun_calStockWeight(__stocklist, limit_up, limit_low)
# elif type == 3: #最小方差
# __ratio = self.fun_cal_Weight_by_minvar(__stocklist, limit_up, limit_low)
# elif type == 5: # 风险平价 方差-协方差法
# __ratio = self.fun_calWeight_by_RiskParity(__stocklist, statsDate)
# else: #等权重
# for stock in __stocklist:
# __ratio[stock] = 1.0/len(__stocklist)
# return __ratio
# if stock_list:
# limit_up, limit_low = round(2.0/len(list(set(stock_list))), 4), round(0.5/len(list(set(stock_list))), 4)
# equity_ratio = __fun_getEquity_ratio(stock_list, confidencelevel, 0, limit_up, limit_low, statsDate)
# else:
# equity_ratio = {}
# bonds_ratio = __fun_getEquity_ratio(moneyfund, confidencelevel, 0, 1.0, 0.0, statsDate)
# return equity_ratio, bonds_ratio
# def fun_calPosition(self, equity_ratio, bonds_ratio, algo_ratio, risk_ratio, moneyfund, portfolio_value, confidencelevel, statsDate=None):
# '''
# equity_ratio 资产配仓结果
# bonds_ratio 债券配仓结果
# algo_ratio 策略占市值的百分比
# risk_ratio 每个标的承受的风险系数
# '''
# trade_ratio = equity_ratio # 简化
# return trade_ratio
# # 去极值
# def fun_winsorize(self, rs, type, num):
# # rs为Series化的数据
# rs = rs.dropna().copy()
# low_line, up_line = 0, 0
# if type == 1: # 标准差去极值
# mean = rs.mean()
# #取极值
# mad = num*rs.std()
# up_line = mean + mad
# low_line = mean - mad
# elif type == 2: #中位值去极值
# rs = rs.replace([-np.inf, np.inf], np.nan)
# median = rs.median()
# md = abs(rs - median).median()
# mad = md * num * 1.4826
# up_line = median + mad
# low_line = median - mad
# elif type == 3: #Boxplot 去极值
# if len(rs) < 2:
# return rs
# mc = sm.stats.stattools.medcouple(rs)
# rs.sort()
# q1 = rs[int(0.25*len(rs))]
# q3 = rs[int(0.75*len(rs))]
# iqr = q3-q1
# if mc >= 0:
# low_line = q1-1.5*np.exp(-3.5*mc)*iqr
# up_line = q3+1.5*np.exp(4*mc)*iqr
# else:
# low_line = q1-1.5*np.exp(-4*mc)*iqr
# up_line = q3+1.5*np.exp(3.5*mc)*iqr
# rs[rs < low_line] = low_line
# rs[rs > up_line] = up_line
# return rs
# #标准化
# def fun_standardize(self, s,type):
# '''
# s为Series数据
# type为标准化类型:1 MinMax,2 Standard,3 maxabs
# '''
# data=s.dropna().copy()
# if int(type)==1:
# rs = (data - data.min())/(data.max() - data.min())
# elif type==2:
# rs = (data - data.mean())/data.std()
# elif type==3:
# rs = data/10**np.ceil(np.log10(data.abs().max()))
# return rs
# #中性化
# def fun_neutralize(self, s, df, module='pe_ratio', industry_type=None, level=2, statsDate=None):
# '''
# 参数:
# s为stock代码 如'000002.XSHE' 可为list,可为str
# moduel:中性化的指标 默认为PE
# industry_type:行业类型(可选), 如果行业不指定,全市场中性化
# 返回:
# 中性化后的Series index为股票代码 value为中性化后的值
# '''
# s = df[df.code.isin(list(s))]
# s = s.reset_index(drop = True)
# s = pd.Series(s[module].values, index=s['code'])
# s = self.fun_winsorize(s,1,3)
# if industry_type:
# stocks = self.fun_get_industry_stocks(industry=industry_type, level=level, statsDate=statsDate)
# else:
# stocks = list(get_all_securities(['stock'], date=statsDate).index)
# df = df[df.code.isin(stocks)]
# df = df.reset_index(drop = True)
# df = pd.Series(df[module].values, index=df['code'])
# df = self.fun_winsorize(df,1, 3)
# rs = (s - df.mean())/df.std()
# return rs
# def fun_get_factor(self, df, factor_name, industry, level, statsDate):
# stock_list = self.fun_get_industry_stocks(industry, level, statsDate)
# rs = self.fun_neutralize(stock_list, df, module=factor_name, industry_type=industry, level=level, statsDate=statsDate)
# rs = self.fun_standardize(rs, 2)
# return rs
# def fun_diversity_by_industry(self, stock_list, max_num, statsDate):
# if not stock_list:
# return stock_list
# industry_list = self.fun_get_industry(cycle=None)
# tmpList = []
# for industry in industry_list:
# i = 0
# stocks = self.fun_get_industry_stocks(industry, 2, statsDate)
# for stock in stock_list:
# if stock in stocks: #by 行业选入 top max_num 的标的(如有)
# i += 1
# if i <= max_num:
# tmpList.append(stock) #可能一个股票横跨多个行业,会导致多次入选,但不影响后面计算
# final_stocks = []
# for stock in stock_list:
# if stock in tmpList:
# final_stocks.append(stock)
# return final_stocks
# # 根据行业取股票列表
# def fun_get_industry_stocks(self, industry, level=2, statsDate=None):
# if level == 2:
# stock_list = get_industry_stocks(industry, statsDate)
# elif level == 1:
# industry_list = self.fun_get_industry_levelI(industry)
# stock_list = []
# for industry_code in industry_list:
# tmpList = get_industry_stocks(industry_code, statsDate)
# stock_list = stock_list + tmpList
# stock_list = list(set(stock_list))
# else:
# stock_list = []
# return stock_list
# # 一级行业列表
# def fun_get_industry_levelI(self, industry=None):
# industry_dict = {
# 'A':['A01', 'A02', 'A03', 'A04', 'A05'] #农、林、牧、渔业
# ,'B':['B06', 'B07', 'B08', 'B09', 'B11'] #采矿业
# ,'C':['C13', 'C14', 'C15', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26', 'C27', 'C28', 'C29', 'C30', 'C31', 'C32',\
# 'C33', 'C34', 'C35', 'C36', 'C37', 'C38', 'C39', 'C40', 'C41', 'C42'] #制造业
# ,'D':['D44', 'D45', 'D46'] #电力、热力、燃气及水生产和供应业
# ,'E':['E47', 'E48', 'E50'] #建筑业
# ,'F':['F51', 'F52'] #批发和零售业
# ,'G':['G53', 'G54', 'G55', 'G56', 'G58', 'G59'] #交通运输、仓储和邮政业
# ,'H':['H61', 'H62'] #住宿和餐饮业
# ,'I':['I63', 'I64', 'I65'] #信息传输、软件和信息技术服务业
# ,'J':['J66', 'J67', 'J68', 'J69'] #金融业
# ,'K':['K70'] #房地产业
# ,'L':['L71', 'L72'] #租赁和商务服务业
# ,'M':['M73', 'M74'] #科学研究和技术服务业
# ,'N':['N78'] #水利、环境和公共设施管理业
# #,'O':[] #居民服务、修理和其他服务业
# ,'P':['P82'] #教育
# ,'Q':['Q83'] #卫生和社会工作
# ,'R':['R85', 'R86', 'R87'] #文化、体育和娱乐业
# ,'S':['S90'] #综合
# }
# if industry == None:
# return industry_dict
# else:
# return industry_dict[industry]
# # 行业列表
# def fun_get_industry(self, cycle=None):
# # cycle 的参数:None取所有行业,True取周期性行业,False取非周期性行业
# industry_dict = {
# 'A01':False,# 农业 1993-09-17
# 'A02':False,# 林业 1996-12-06
# 'A03':False,# 畜牧业 1997-06-11
# 'A04':False,# 渔业 1993-05-07
# 'A05':False,# 农、林、牧、渔服务业 1997-05-30
# 'B06':True, # 煤炭开采和洗选业 1994-01-06
# 'B07':True, # 石油和天然气开采业 1996-06-28
# 'B08':True, # 黑色金属矿采选业 1997-07-08
# 'B09':True, # 有色金属矿采选业 1996-03-20
# 'B11':True, # 开采辅助活动 2002-02-05
# 'C13':False, # 农副食品加工业 1993-12-15
# 'C14':False,# 食品制造业 1994-08-18
# 'C15':False,# 酒、饮料和精制茶制造业 1992-10-12
# 'C17':True,# 纺织业 1992-06-16
# 'C18':True,# 纺织服装、服饰业 1993-12-31
# 'C19':True,# 皮革、毛皮、羽毛及其制品和制鞋业 1994-04-04
# 'C20':False,# 木材加工及木、竹、藤、棕、草制品业 2005-05-10
# 'C21':False,# 家具制造业 1996-04-25
# 'C22':False,# 造纸及纸制品业 1993-03-12
# 'C23':False,# 印刷和记录媒介复制业 1994-02-24
# 'C24':False,# 文教、工美、体育和娱乐用品制造业 2007-01-10
# 'C25':True, # 石油加工、炼焦及核燃料加工业 1993-10-25
# 'C26':True, # 化学原料及化学制品制造业 1990-12-19
# 'C27':False,# 医药制造业 1993-06-29
# 'C28':True, # 化学纤维制造业 1993-07-28
# 'C29':True, # 橡胶和塑料制品业 1992-08-28
# 'C30':True, # 非金属矿物制品业 1992-02-28
# 'C31':True, # 黑色金属冶炼及压延加工业 1994-01-06
# 'C32':True, # 有色金属冶炼和压延加工业 1996-02-15
# 'C33':True, # 金属制品业 1993-11-30
# 'C34':True, # 通用设备制造业 1992-03-27
# 'C35':True, # 专用设备制造业 1992-07-01
# 'C36':True, # 汽车制造业 1992-07-24
# 'C37':True, # 铁路、船舶、航空航天和其它运输设备制造业 1992-03-31
# 'C38':True, # 电气机械及器材制造业 1990-12-19
# 'C39':False,# 计算机、通信和其他电子设备制造业 1990-12-19
# 'C40':False,# 仪器仪表制造业 1993-09-17
# 'C41':True, # 其他制造业 1992-08-14
# 'C42':False,# 废弃资源综合利用业 2012-10-26
# 'D44':True, # 电力、热力生产和供应业 1993-04-16
# 'D45':False,# 燃气生产和供应业 2000-12-11
# 'D46':False,# 水的生产和供应业 1994-02-24
# 'E47':True, # 房屋建筑业 1993-04-29
# 'E48':True, # 土木工程建筑业 1994-01-28
# 'E50':True, # 建筑装饰和其他建筑业 1997-05-22
# 'F51':False,# 批发业 1992-05-06
# 'F52':False,# 零售业 1992-09-02
# 'G53':True, # 铁路运输业 1998-05-11
# 'G54':True, # 道路运输业 1991-01-14
# 'G55':True, # 水上运输业 1993-11-19
# 'G56':True, # 航空运输业 1997-11-05
# 'G58':True, # 装卸搬运和运输代理业 1993-05-05
# 'G59':False,# 仓储业 1996-06-14
# 'H61':False,# 住宿业 1993-11-18
# 'H62':False,# 餐饮业 1997-04-30
# 'I63':False,# 电信、广播电视和卫星传输服务 1992-12-02
# 'I64':False,# 互联网和相关服务 1992-05-07
# 'I65':False,# 软件和信息技术服务业 1992-08-20
# 'J66':True, # 货币金融服务 1991-04-03
# 'J67':True, # 资本市场服务 1994-01-10
# 'J68':True, # 保险业 2007-01-09
# 'J69':True, # 其他金融业 2012-10-26
# 'K70':True, # 房地产业 1992-01-13
# 'L71':False,# 租赁业 1997-01-30
# 'L72':False,# 商务服务业 1996-08-29
# 'M73':False,# 研究和试验发展 2012-10-26
# 'M74':True, # 专业技术服务业 2007-02-15
# 'N77':False,# 生态保护和环境治理业 2012-10-26
# 'N78':False,# 公共设施管理业 1992-08-07
# 'P82':False,# 教育 2012-10-26
# 'Q83':False,# 卫生 2007-02-05
# 'R85':False,# 新闻和出版业 1992-12-08
# 'R86':False,# 广播、电视、电影和影视录音制作业 1994-02-24
# 'R87':False,# 文化艺术业 2012-10-26
# 'S90':False,# 综合 1990-12-10
# }
# industry_list = []
# if cycle == True:
# for industry in industry_dict.keys():
# if industry_dict[industry] == True:
# industry_list.append(industry)
# elif cycle == False:
# for industry in industry_dict.keys():
# if industry_dict[industry] == False:
# industry_list.append(industry)
# else:
# industry_list = industry_dict.keys()
# return industry_list
# def fun_do_trade(self, context, trade_ratio, moneyfund, trade_style):
# def __fun_tradeBond(context, stock, curPrice, Value):
# curValue = float(context.portfolio.positions[stock].total_amount * curPrice)
# deltaValue = abs(Value - curValue)
# if deltaValue > (curPrice*200):
# if Value > curValue:
# cash = context.portfolio.cash
# if cash > (curPrice*200):
# self.fun_trade(context, stock, Value)
# else:
# self.fun_trade(context, stock, Value)
# def __fun_tradeStock(context, curPrice, stock, ratio, trade_style):
# total_value = context.portfolio.portfolio_value
# if stock in moneyfund:
# __fun_tradeBond(context, stock, curPrice, total_value * ratio)
# else:
# curValue = context.portfolio.positions[stock].total_amount * curPrice
# Quota = total_value * ratio
# if Quota:
# if abs(Quota - curValue) / Quota >= 0.25 or trade_style:
# if Quota > curValue:
# #if curPrice > context.portfolio.positions[stock].avg_cost:
# self.fun_trade(context, stock, Quota)
# else:
# self.fun_trade(context, stock, Quota)
# else:
# if curValue > 0:
# self.fun_trade(context, stock, Quota)
# trade_list = trade_ratio.keys()
# myholdstock = context.portfolio.positions.keys()
# stock_list = list(set(trade_list).union(set(myholdstock)))
# total_value = context.portfolio.portfolio_value
# # 已有仓位
# holdDict = {}
# h = history(1, '1d', 'close', stock_list, df=False)
# for stock in myholdstock:
# tmp = (context.portfolio.positions[stock].total_amount * h[stock])/total_value
# # print('w:',tmp)
# tmpW = round(tmp[0], 2)
# holdDict[stock] = float(tmpW)
# # 对已有仓位做排序已有仓位做排序
# tmpDict = {}
# for stock in holdDict:
# if stock in trade_ratio:
# tmpDict[stock] = round((trade_ratio[stock] - holdDict[stock]), 2)
# tradeOrder = sorted(tmpDict.items(), key=lambda d:d[1], reverse=False)
# # 交易已有仓位的股票,从减仓的开始,腾空现金
# _tmplist = []
# for idx in tradeOrder:
# stock = idx[0]
# __fun_tradeStock(context, h[stock][-1], stock, trade_ratio[stock], trade_style)
# _tmplist.append(stock)
# # 交易新股票
# # for i in range(len(trade_list)):
# for stock in trade_list:
# # stock = trade_list[i]
# if len(_tmplist) != 0 :
# if stock not in _tmplist:
# __fun_tradeStock(context, h[stock][-1], stock, trade_ratio[stock], trade_style)
# else:
# __fun_tradeStock(context, h[stock][-1], stock, trade_ratio[stock], trade_style)
# def unpaused(self, stock_list, positions_list):
# current_data = get_current_data()
# tmpList = []
# for stock in stock_list:
# if not current_data[stock].paused or stock in positions_list:
# tmpList.append(stock)
# return tmpList
# def remove_st(self, stock_list, statsDate):
# current_data = get_current_data()
# return [s for s in stock_list if not current_data[s].is_st]
# def remove_limit_up(self, stock_list, positions_list):
# h = history(1, '1m', 'close', stock_list, df=False, skip_paused=False, fq='pre')
# h2 = history(1, '1m', 'high_limit', stock_list, df=False, skip_paused=False, fq='pre')
# tmpList = []
# for stock in stock_list:
# if h[stock][0] < h2[stock][0] or stock in positions_list:
# tmpList.append(stock)
# return tmpList
# def fun_get_bad_stock_list(self, statsDate):
# #0、剔除商誉占比 > 10% 的股票
# df = get_fundamentals(
# query(valuation.code, balance.good_will, balance.equities_parent_company_owners),
# date = statsDate
# )
# df = df.fillna(value = 0)
# df['good_will_ratio'] = 1.0*df['good_will'] / df['equities_parent_company_owners']
# list_good_will = list(df[df.good_will_ratio > 0.1].code)
# bad_stocks = list_good_will
# bad_stocks = list(set(bad_stocks))
# return bad_stocks
# def remove_bad_stocks(self, stock_list, bad_stock_list):
# tmpList = []
# for stock in stock_list:
# if stock not in bad_stock_list:
# tmpList.append(stock)
# return tmpList
# # 剔除上市时间较短的产品
# def fun_delNewShare(self, context, equity, deltaday):
# deltaDate = context.current_dt.date() - dt.timedelta(deltaday)
# tmpList = []
# for stock in equity:
# if get_security_info(stock).start_date < deltaDate:
# tmpList.append(stock)
# return tmpList
# def fun_trade(self, context, stock, value):
# self.fun_setCommission(context, stock)
# order_target_value(stock, value)
# def fun_setCommission(self, context, stock):
# if stock in context.moneyfund:
# set_order_cost(OrderCost(open_tax=0, close_tax=0, open_commission=0, close_commission=0, close_today_commission=0, min_commission=0), type='fund')
# else:
# set_order_cost(OrderCost(open_tax=0, close_tax=0.001, open_commission=0.0003, close_commission=0.0003, close_today_commission=0, min_commission=5), type='stock')
| [
"linbirg@gmail.com"
] | linbirg@gmail.com |
fb68c7639379c040a8111bd9b8d7448a5fe7d37d | 607b41ce463b9941fd6ae4fcaa46e0e9abab38f7 | /auth.py | 6206a845d3a1541c590a89ddbf7030aa688a1c7b | [
"MIT"
] | permissive | nislag/chat-bot-tornado | 45ab3ed2bd25f97bcefb0957a95a61b9f0fa5b2a | def046f5add0773362b547314e7dc3fdea74331f | refs/heads/master | 2021-01-10T05:43:46.960470 | 2015-06-04T13:38:33 | 2015-06-04T13:38:33 | 36,575,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,720 | py | # coding=UTF-8
# Tornado modules.
import tornado.auth
import tornado.web
import tornado.escape
# Import application modules.
from base import BaseHandler
# General modules.
import logging
class LoginHandler(BaseHandler, tornado.auth.GoogleOAuth2Mixin):
"""
Handler for logins with Google Open ID / OAuth
http://www.tornadoweb.org/documentation/auth.html#google
"""
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
elif self.get_argument("start_google_oauth", None):
# Set users attributes to ask for.
ax_attrs = ['name', 'email', 'language', 'username']
self.authenticate_redirect(ax_attrs=ax_attrs)
elif self.get_argument("start_direct_auth", None):
# Get form inputs.
try:
user = dict()
user["email"] = self.get_argument("email", default="")
user["name"] = self.get_argument("name", default="")
except:
# Send an error back to client.
content = "<p>There was an input error. Fill in all fields!</p>"
self.render_default("index.html", content=content)
# If user has not filled in all fields.
if not user["email"] or not user["name"]:
content = ('<h2>2. Direct Login</h2>'
+ '<p>Fill in both fields!</p>'
+ '<form class="form-inline" action="/login" method="get"> '
+ '<input type="hidden" name="start_direct_auth" value="1">'
+ '<input class="form-control" type="text" name="name" placeholder="Your Name" value="' + str(user["name"]) + '"> '
+ '<input class="form-control" type="text" name="email" placeholder="Your Email" value="' + str(user["email"]) + '"> '
+ '<input type="submit" class="btn btn-default" value="Sign in">'
+ '</form>')
self.render_default("index.html", content=content)
elif str(user.get("name")) == "Bot":
content = ('<h2>2. Direct Login</h2>'
+ '<p>Incorrect name Bot, write another!</p>'
+ '<form class="form-inline" action="/login" method="get"> '
+ '<input type="hidden" name="start_direct_auth" value="1">'
+ '<input class="form-control" type="text" name="name" placeholder="Your Name" value="' + str(user["name"]) + '"> '
+ '<input class="form-control" type="text" name="email" placeholder="Your Email" value="' + str(user["email"]) + '"> '
+ '<input type="submit" class="btn btn-default" value="Sign in">'
+ '</form>')
self.render_default("index.html", content=content)
# All data given. Log user in!
else:
self._on_auth(user)
else:
# Logins.
content = '<div class="page-header"><h1>Login</h1></div>'
content += ('<h2>1. Google Login</h2>'
+ '<form action="/login" method="get">'
+ '<input type="hidden" name="start_google_oauth" value="1">'
+ '<input type="submit" class="btn" value="Sign in with Google">'
+ '</form>')
content += ('<h2>2. Direct Login</h2>'
+ '<form class="form-inline" action="/login" method="get"> '
+ '<input type="hidden" name="start_direct_auth" value="1">'
+ '<input class="form-control" type="text" name="name" placeholder="Your Name"> '
+ '<input class="form-control" type="text" name="email" placeholder="Your Email"> '
+ '<input type="submit" class="btn btn-default" value="Sign in">'
+ '</form>')
content += ('<h2>Instructions</h2>'
+ '<div>'
+ '<p>There are 3 base rooms in Chat: Main, Rooms, Help. You can`t write in Rooms and Help.</p>'
+ '<p>If you want to change/create room you have to change url in your browser, after "http..../room/newroom"</p>'
+ '<p>There is command Bot in Chat. You can`t take name "Bot" but can give him commands.</p>'
+ '<p>!news - Bot will write 10 last news from news.ycombinator.com</p>'
+ '<p>!duck word - write 10 search resuts this word by duckduckgo</p>'
+ '<p>!sum numb1 numb2 ... - write sum=numb1+numb2...</p>'
+ '<p>!mean numb1 numb2 ... - write mean of this numbers</p>'
+ '</div>')
self.render_default("index.html", content=content)
def _on_auth(self, user):
"""
Callback for third party authentication (last step).
"""
if not user:
content = ('<div class="page-header"><h1>Login</h1></div>'
+ '<div class="alert alert-error">'
+ '<button class="close" data-dismiss="alert">×</button>'
+ '<h3>Authentication failed</h3>'
+ '<p>This might be due to a problem in Tornados GoogleMixin.</p>'
+ '</div>')
self.render_default("index.html", content=content)
return None
# @todo: Validate user data.
# Save user when authentication was successful.
def on_user_find(result, user=user):
#@todo: We should check if email is given even though we can assume.
if result == "null" or not result:
# If user does not exist, create a new entry.
self.application.client.set("user:" + user["email"], tornado.escape.json_encode(user))
else:
# Update existing user.
# @todo: Should use $set to update only needed attributes?
dbuser = tornado.escape.json_decode(result)
dbuser.update(user)
user = dbuser
self.application.client.set("user:" + user["email"], tornado.escape.json_encode(user))
# Save user id in cookie.
self.set_secure_cookie("user", user["email"])
self.application.usernames[user["email"]] = user.get("name") or user["email"]
# Closed client connection
if self.request.connection.stream.closed():
logging.warning("Waiter disappeared")
return
self.redirect("/")
dbuser = self.application.client.get("user:" + user["email"], on_user_find)
class LogoutHandler(BaseHandler):
    """Log the current user out."""

    def get(self):
        # Drop the secure "user" session cookie set at login, then
        # send the browser back to the start page.
        self.clear_cookie('user')
        self.redirect("/")
| [
"nislagg@gmail.com"
] | nislagg@gmail.com |
61094d5d3babcb4ac784998ee52b573967471ac0 | 7fc22330d96b48a425894311441c4e83cb4d2447 | /code/snakeeyes/tests/__init__.py | e207e34b2b0db2f98b137a14327de8cf795330f9 | [] | no_license | tangentstorm/snakeeyes | 5c23791adfe4511a3a97a35d725d1b2769552000 | a036884e39fe7989e8101c7f96cae8d4f3c507ea | refs/heads/master | 2021-01-22T08:23:27.661057 | 2020-11-22T05:08:56 | 2020-11-22T05:08:56 | 10,516,815 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | """
Created on Aug 1, 2009
@author: michal
"""
import sys; sys.path.append("..") # for testloop.sh
import unittest
from snakeeyes.tests.img_test import *
from snakeeyes.tests.ocr_test import *
from snakeeyes.tests.scrape_test import *
if __name__ == '__main__':
unittest.main()
| [
"michal.wallace@gmail.com"
] | michal.wallace@gmail.com |
49256118e79555242d05bc0d7a022c34619aa4ae | c86cd75be4f5b4eef605fb0f40743406ae19685f | /core/ui_test.py | cd1ce62099cf077a55dbf0934f3f6763c20bac3b | [
"Apache-2.0"
] | permissive | jyn514/oil | 3de53092c81e7f9129c9d12d51a8dfdbcacd397b | 42adba6a1668ff30c6312a6ce3c3d1f1acd529ec | refs/heads/master | 2022-02-23T08:12:48.381272 | 2019-03-15T08:54:31 | 2019-03-15T08:54:31 | 176,316,917 | 0 | 0 | Apache-2.0 | 2019-03-18T15:36:14 | 2019-03-18T15:36:13 | null | UTF-8 | Python | false | false | 279 | py | #!/usr/bin/python -S
from __future__ import print_function
"""
ui_test.py: Tests for ui.py
"""
import unittest
from core import ui # module under test
class UiTest(unittest.TestCase):
    """Tests for the core.ui module."""

    def testFoo(self):
        # Smoke test: usage() should accept an error message without
        # raising (presumably it just prints usage text — confirm in ui.py).
        ui.usage('oops')


if __name__ == '__main__':
    unittest.main()
| [
"andy@oilshell.org"
] | andy@oilshell.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.