blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4054a08fa2e0f33fc5e9e743b682974f22fac612 | bf09e7e806aec473bc6a383ef05b443ffa1e415a | /models/yolo_model.py | 572ded1357b2ccb533591a143bf955dee335016a | [] | no_license | reggiehsu111/ml_template | 50522a2528cdf50503da9515b2abbb62046595d9 | 624da8caa02fc693f117e07866db5438684247be | refs/heads/master | 2020-06-26T21:58:07.901631 | 2019-08-07T03:15:36 | 2019-08-07T03:15:36 | 199,768,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | import torch
from torch.utils.data import random_split
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from .yolo_networks import *
from util.obj_detection_utils import weights_init_normal
from .networks import *
class YOLOModel(BaseModel):
    """Object-detection model wrapping a Darknet YOLOv3 network in the
    project's BaseModel train/test interface.

    Each of the three YOLO detection layers reports a set of per-layer
    metrics (loss components, accuracy, recall, ...); their names are
    registered in self.loss_names so the training scripts can plot them.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag
                               to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        parser.set_defaults(no_dropout=True)  # default CycleGAN did not use dropout
        parser.add_argument('--model_config', type=str, default='options/yolov3.cfg',
                            help='configuration for yolov3 model')
        return parser

    def __init__(self, opt):
        """Build the Darknet network, its optimizer and the loss/visual bookkeeping."""
        BaseModel.__init__(self, opt)
        self.phase = opt.args.phase
        # Register one loss name per metric per detection layer ("layer0_loss", ...)
        # so the training scripts can fetch and plot them.
        self.loss_names = []
        self.layer_losses = ['loss', 'x', 'y', 'w', 'h', 'conf', 'cls', 'cls_acc',
                             'recall50', 'recall75', 'precision', 'conf_obj',
                             'conf_noobj', 'grid_size']
        for layer_idx in range(3):  # YOLOv3 has three detection layers
            for loss in self.layer_losses:
                self.loss_names.append("layer" + str(layer_idx) + "_" + loss)
        # Images to save/display; the training/test scripts will call <BaseModel.get_current_visuals>.
        self.visual_name = ['predictions']
        self.model_names = ['Yolo']
        self.netYolo = Darknet(opt.args.model_config).to(self.device)
        self.netYolo.apply(weights_init_normal)
        # The optimizer is created before init_net() possibly wraps the network
        # (e.g. DataParallel); both refer to the same parameter tensors.
        self.optimizer = torch.optim.Adam(self.netYolo.parameters(), lr=opt.args.lr,
                                          betas=(opt.args.beta1, 0.999))
        self.optimizers.append(self.optimizer)
        self.netYolo = init_net(self.netYolo, opt.args.init_type, opt.args.init_gain,
                                opt.args.gpu_ids)

    def set_input(self, input):
        """Unpack a data batch: (image paths, image tensor, target boxes or None)."""
        self.img_path = input[0]
        self.img = input[1].to(self.device)
        if input[2] is not None:
            self.targets = input[2].to(self.device)
        else:
            self.targets = None

    def forward(self):
        """Run the detector; when targets are set, also compute loss and layer metrics."""
        if self.targets is not None:
            self.loss, self.outputs, self.yolo_layer_metrics = self.netYolo(self.img, self.targets)
            # Expose every per-layer metric as an attribute ("loss_layer<i>_<key>")
            # matching the naming convention used by loss_names.
            for layer_idx, layer in enumerate(self.yolo_layer_metrics):
                for key in layer.keys():
                    setattr(self, "loss_layer" + str(layer_idx) + "_" + key, layer[key])
        else:
            self.outputs = self.netYolo(self.img)
        return self.outputs

    def test(self):
        """Forward function used in test time.

        This function wraps <forward> function in no_grad() so we don't save
        intermediate steps for backprop. It also calls <compute_visuals> to
        produce additional visualization results.
        """
        with torch.no_grad():
            self.forward()
            self.compute_visuals()
            return self.outputs

    def load_darknet_weights(self, weights_path):
        """Load pretrained weights stored in the original Darknet binary format."""
        print("Load weights from: ", weights_path)
        self.netYolo.load_darknet_weights(weights_path)

    def optimize_parameters(self):
        """Run one forward pass and backpropagate the detection loss.

        NOTE(review): only backward() is called here; this assumes the caller
        (or BaseModel) performs optimizer.zero_grad()/optimizer.step() --
        confirm, otherwise gradients accumulate across iterations.
        """
        self.forward()
        self.loss.backward()
| [
"reggiehsu111@gmail.com"
] | reggiehsu111@gmail.com |
c151f1cb971c5514c93deb2d3355846a22aa6971 | 6f21068b31084e81f38db304a51a2609d8af37cd | /2_Scientific_Libraries/plotsine.py | 13f08b42e470e8a434e801048a9ba254ea8288aa | [] | no_license | vickyf/eurocontrol_datascience | 374b889cac7b8d377caa78079fb57098e73bba0a | 0a7c09002e3b5f22ad563b05a6b4afe4cb6791d7 | refs/heads/master | 2020-03-19T06:03:14.864839 | 2018-06-04T07:24:25 | 2018-06-04T07:24:25 | 135,986,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np

# Sample one full period of the sine function at 100 evenly spaced points
# and draw it on screen.
theta = np.linspace(0, 2 * np.pi, 100)
sine_values = np.sin(theta)
plt.plot(theta, sine_values)
plt.show()
"vicky.froyen@infofarm.be"
] | vicky.froyen@infofarm.be |
be50f5509dae69ae85eaa02caaa1e7f12f6f3064 | c8bfbaf6ee061ea28696ba3d6e557bf684943f13 | /gameplay/models.py | 748bfe44d8fdbec4163b7611753d34723aaa6f0a | [
"MIT"
] | permissive | PyColors/django_game | b5acf83ea98f396e22899919bdea434f4a068f10 | cc7361980fadcd5e6541006cbd22e786e60c1368 | refs/heads/master | 2021-07-09T18:00:45.281095 | 2020-08-20T21:26:40 | 2020-08-20T21:26:40 | 189,754,241 | 0 | 0 | null | 2020-08-20T21:26:41 | 2019-06-01T16:24:25 | Python | UTF-8 | Python | false | false | 3,389 | py | from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.urls import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
# Every legal value of Game.status paired with its human-readable label;
# Django renders these as a drop-down in forms and the admin.
GAME_STATUS_CHOICES = (
    ('F', 'First Player To move'),
    ('S', 'Second Player To move'),
    ('W', 'First Player Wins'),
    ('L', 'Second Player Wins'),
    ('D', 'Draw'),
)
# Custom QuerySet so callers can chain game-specific filters,
# e.g. Game.objects.game_for_user(user).active().
class GameQuerySet(models.QuerySet):
    def game_for_user(self, user):
        # Games in which the given user plays either side; the two Q objects
        # are combined with a logical OR.
        either_side = Q(first_player=user) | Q(second_player=user)
        return self.filter(either_side)

    def active(self):
        # Games still in progress: one of the two players has the move.
        in_progress = Q(status='F') | Q(status='S')
        return self.filter(in_progress)
class Game(models.Model):
    """A two-player game: the players, timestamps, and current status code."""
    first_player = models.ForeignKey(User,
        related_name="games_first_player", on_delete=models.CASCADE)
    second_player = models.ForeignKey(User,
        related_name="games_second_player", on_delete=models.CASCADE)
    start_time = models.DateTimeField(auto_now_add=True)
    last_active = models.DateTimeField(auto_now=True)
    # Every new game starts in state 'F' (first player to move);
    # GAME_STATUS_CHOICES restricts the field to the known status codes.
    status = models.CharField(max_length=1, default='F',
                              choices=GAME_STATUS_CHOICES)
    objects = GameQuerySet.as_manager()

    def board(self):
        """Return a 2-D grid of Move objects (None where no move was made)."""
        # NOTE(review): BOARD_SIZE is not defined in this module as shown --
        # presumably a module-level constant elsewhere; confirm.
        grid = [[None] * BOARD_SIZE for _ in range(BOARD_SIZE)]
        for move in self.move_set.all():
            grid[move.y][move.x] = move
        return grid

    def is_user_move(self, user):
        """True when the game is currently waiting on *user* to move."""
        if self.status == 'F':
            return user == self.first_player
        if self.status == 'S':
            return user == self.second_player
        return False

    def new_move(self):
        """Return a new (unsaved) Move for whichever player has the turn."""
        if self.status not in 'FS':
            raise ValueError("Cannot make move on finished game")
        return Move(
            game=self,
            by_first_player=self.status == 'F'
        )

    def get_absolute_url(self):
        # Canonical URL for a single game instance.
        return reverse('gameplay_detail', args=[self.id])

    def __str__(self):
        # Friendly label shown e.g. in the admin instead of "Game object (3)".
        return "{0} vs {1}".format(
            self.first_player, self.second_player
        )
# One move of a game: board coordinates, an optional comment and which
# player made it. Maps to its own database table.
class Move(models.Model):
    # NOTE(review): BOARD_SIZE is not defined in this module as shown --
    # presumably a module-level constant elsewhere; confirm.
    x = models.IntegerField(
        validators=[MinValueValidator(0), MaxValueValidator(BOARD_SIZE - 1)])
    y = models.IntegerField(
        validators=[MinValueValidator(0), MaxValueValidator(BOARD_SIZE - 1)])
    comment = models.CharField(max_length=300, blank=True)
    by_first_player = models.BooleanField(editable=False)
    game = models.ForeignKey(Game, on_delete=models.CASCADE)
| [
"patweb44@gmail.com"
] | patweb44@gmail.com |
de790c45b93d28e725e2777bc3ed60430e397d06 | f6afea26e311644dbc83091c2eafe9724d08fdb7 | /pipeline.py | de279d5876f73a6697701b4574fd6cc76ee12c14 | [] | no_license | MechRams/UltimateGoalCV | 38c09b6dc0e12af3aa90816ef5d5e82feb59ff8a | 33c7e166c39b592e3ac62c5bac5e3dabcc2dae18 | refs/heads/master | 2022-12-17T21:02:55.634971 | 2020-09-20T06:07:01 | 2020-09-20T06:07:01 | 296,227,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,366 | py | import cv2
import numpy
import math
from enum import Enum
class GripPipeline:
    """
    An OpenCV pipeline generated by GRIP:
    resize -> blur -> HSV threshold -> erode -> dilate -> mask -> blob detection,
    plus a second HSV threshold feeding contour detection.

    Call process(); the results are published in the *_output attributes.

    Bug fixes versus the generated code: cv2.resize / cv2.erode / cv2.dilate
    were called with values landing positionally in the `dst` argument slot;
    they now use keyword arguments so the interpolation mode and anchor
    actually reach OpenCV.
    """

    def __init__(self):
        """Initialize every stage's parameters; all outputs start as None."""
        # -- Resize stage --
        self.__resize_image_width = 640
        self.__resize_image_height = 480
        self.__resize_image_interpolation = cv2.INTER_LINEAR
        self.resize_image_output = None
        # -- Blur stage --
        self.__blur_input = self.resize_image_output
        self.__blur_type = BlurType.Box_Blur
        self.__blur_radius = 4.716981132075474
        self.blur_output = None
        # -- HSV threshold 0 (applied to the blurred image) --
        self.__hsv_threshold_0_input = self.blur_output
        self.__hsv_threshold_0_hue = [8.446981015790435, 43.741098662849254]
        self.__hsv_threshold_0_saturation = [181.00395338142792, 253.16016738674898]
        self.__hsv_threshold_0_value = [115.04061714101688, 247.45084150389948]
        self.hsv_threshold_0_output = None
        # -- HSV threshold 1 (applied directly to the resized image) --
        self.__hsv_threshold_1_input = self.resize_image_output
        self.__hsv_threshold_1_hue = [8.474576271186441, 43.63636363636364]
        self.__hsv_threshold_1_saturation = [182.48587570621467, 243.63636363636363]
        self.__hsv_threshold_1_value = [114.70652908569495, 247.37730463782646]
        self.hsv_threshold_1_output = None
        # -- Erode stage --
        self.__cv_erode_src = self.hsv_threshold_0_output
        self.__cv_erode_kernel = None
        self.__cv_erode_anchor = (-1, -1)
        self.__cv_erode_iterations = 2.0
        self.__cv_erode_bordertype = cv2.BORDER_CONSTANT
        self.__cv_erode_bordervalue = (-1)
        self.cv_erode_output = None
        # -- Dilate stage --
        self.__cv_dilate_src = self.cv_erode_output
        self.__cv_dilate_kernel = None
        self.__cv_dilate_anchor = (-1, -1)
        self.__cv_dilate_iterations = 20.0
        self.__cv_dilate_bordertype = cv2.BORDER_CONSTANT
        self.__cv_dilate_bordervalue = (-1)
        self.cv_dilate_output = None
        # -- Mask stage --
        self.__mask_input = self.resize_image_output
        self.__mask_mask = self.cv_dilate_output
        self.mask_output = None
        # -- Blob detection --
        self.__find_blobs_input = self.mask_output
        self.__find_blobs_min_area = 800.0
        self.__find_blobs_circularity = [0.5084745762711864, 1.0]
        self.__find_blobs_dark_blobs = False
        self.find_blobs_output = None
        # -- Contour detection --
        self.__find_contours_input = self.hsv_threshold_1_output
        self.__find_contours_external_only = False
        self.find_contours_output = None

    def process(self, source0):
        """
        Runs the pipeline and sets all outputs to new values.
        """
        # Step Resize_Image0:
        self.__resize_image_input = source0
        self.resize_image_output = self.__resize_image(
            self.__resize_image_input, self.__resize_image_width,
            self.__resize_image_height, self.__resize_image_interpolation)
        # Step Blur0:
        self.__blur_input = self.resize_image_output
        self.blur_output = self.__blur(
            self.__blur_input, self.__blur_type, self.__blur_radius)
        # Step HSV_Threshold0:
        self.__hsv_threshold_0_input = self.blur_output
        self.hsv_threshold_0_output = self.__hsv_threshold(
            self.__hsv_threshold_0_input, self.__hsv_threshold_0_hue,
            self.__hsv_threshold_0_saturation, self.__hsv_threshold_0_value)
        # Step HSV_Threshold1:
        self.__hsv_threshold_1_input = self.resize_image_output
        self.hsv_threshold_1_output = self.__hsv_threshold(
            self.__hsv_threshold_1_input, self.__hsv_threshold_1_hue,
            self.__hsv_threshold_1_saturation, self.__hsv_threshold_1_value)
        # Step CV_erode0:
        self.__cv_erode_src = self.hsv_threshold_0_output
        self.cv_erode_output = self.__cv_erode(
            self.__cv_erode_src, self.__cv_erode_kernel, self.__cv_erode_anchor,
            self.__cv_erode_iterations, self.__cv_erode_bordertype,
            self.__cv_erode_bordervalue)
        # Step CV_dilate0:
        self.__cv_dilate_src = self.cv_erode_output
        self.cv_dilate_output = self.__cv_dilate(
            self.__cv_dilate_src, self.__cv_dilate_kernel, self.__cv_dilate_anchor,
            self.__cv_dilate_iterations, self.__cv_dilate_bordertype,
            self.__cv_dilate_bordervalue)
        # Step Mask0:
        self.__mask_input = self.resize_image_output
        self.__mask_mask = self.cv_dilate_output
        self.mask_output = self.__mask(self.__mask_input, self.__mask_mask)
        # Step Find_Blobs0:
        self.__find_blobs_input = self.mask_output
        self.find_blobs_output = self.__find_blobs(
            self.__find_blobs_input, self.__find_blobs_min_area,
            self.__find_blobs_circularity, self.__find_blobs_dark_blobs)
        # Step Find_Contours0:
        self.__find_contours_input = self.hsv_threshold_1_output
        self.find_contours_output = self.__find_contours(
            self.__find_contours_input, self.__find_contours_external_only)

    @staticmethod
    def __resize_image(src, width, height, interpolation):
        """Scales an image to an exact size.

        Args:
            src: A numpy.ndarray.
            width: The desired width in pixels.
            height: The desired height in pixels.
            interpolation: OpenCV enum for the type of interpolation.
        Returns:
            A numpy.ndarray of the new size.
        """
        # Bug fix: interpolation is passed by keyword; positionally it fell
        # into the fx/fy slots of cv2.resize and was ignored.
        return cv2.resize(src, (int(width), int(height)),
                          interpolation=interpolation)

    @staticmethod
    def __blur(src, blur_type, radius):
        """Softens an image using one of several filters.

        Args:
            src: The source mat (numpy.ndarray).
            blur_type: The BlurType member selecting the filter.
            radius: The radius for the blur as a float.
        Returns:
            A numpy.ndarray that has been blurred.
        """
        if blur_type is BlurType.Box_Blur:
            ksize = int(2 * round(radius) + 1)
            return cv2.blur(src, (ksize, ksize))
        elif blur_type is BlurType.Gaussian_Blur:
            ksize = int(6 * round(radius) + 1)
            return cv2.GaussianBlur(src, (ksize, ksize), round(radius))
        elif blur_type is BlurType.Median_Filter:
            ksize = int(2 * round(radius) + 1)
            return cv2.medianBlur(src, ksize)
        else:
            return cv2.bilateralFilter(src, -1, round(radius), round(radius))

    @staticmethod
    def __hsv_threshold(src, hue, sat, val):
        """Segments an image based on hue, saturation, and value ranges.

        Args:
            src: A BGR numpy.ndarray.
            hue: [min, max] hue.
            sat: [min, max] saturation.
            val: [min, max] value.
        Returns:
            A black-and-white numpy.ndarray mask.
        """
        hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
        return cv2.inRange(hsv, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))

    @staticmethod
    def __cv_erode(src, kernel, anchor, iterations, border_type, border_value):
        """Expands areas of lower value in an image.

        Bug fix: anchor is passed by keyword -- the third positional argument
        of cv2.erode is dst, so the anchor never reached OpenCV before.
        """
        return cv2.erode(src, kernel, anchor=anchor,
                         iterations=int(iterations + 0.5),
                         borderType=border_type, borderValue=border_value)

    @staticmethod
    def __cv_dilate(src, kernel, anchor, iterations, border_type, border_value):
        """Expands areas of higher value in an image.

        Bug fix: anchor is passed by keyword -- the third positional argument
        of cv2.dilate is dst, so the anchor never reached OpenCV before.
        """
        return cv2.dilate(src, kernel, anchor=anchor,
                          iterations=int(iterations + 0.5),
                          borderType=border_type, borderValue=border_value)

    @staticmethod
    def __mask(src, mask):
        """Keeps only the area of *src* selected by the binary *mask*.

        Args:
            src: A three-channel numpy.ndarray.
            mask: A black-and-white numpy.ndarray.
        Returns:
            A three-channel numpy.ndarray.
        """
        return cv2.bitwise_and(src, src, mask=mask)

    @staticmethod
    def __find_blobs(src, min_area, circularity, dark_blobs):
        """Detects groups of pixels in an image.

        Args:
            src: A numpy.ndarray.
            min_area: The minimum blob size to be found.
            circularity: [min, max] circularity.
            dark_blobs: If True look for black blobs, otherwise white.
        Returns:
            A list of cv2.KeyPoint.
        """
        params = cv2.SimpleBlobDetector_Params()
        params.filterByColor = 1
        params.blobColor = 0 if dark_blobs else 255
        params.minThreshold = 10
        params.maxThreshold = 220
        params.filterByArea = True
        params.minArea = min_area
        params.filterByCircularity = True
        params.minCircularity = circularity[0]
        params.maxCircularity = circularity[1]
        params.filterByConvexity = False
        params.filterByInertia = False
        detector = cv2.SimpleBlobDetector_create(params)
        return detector.detect(src)

    @staticmethod
    def __find_contours(src, external_only):
        """Finds contours in a binary image.

        Args:
            src: A binary numpy.ndarray.
            external_only: If True only external contours are found.
        Returns:
            A list of numpy.ndarray, one per contour.
        """
        mode = cv2.RETR_EXTERNAL if external_only else cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        # OpenCV 4 signature: findContours returns (contours, hierarchy).
        contours, _hierarchy = cv2.findContours(src, mode=mode, method=method)
        return contours
# Blur variants understood by GripPipeline (functional Enum API; member
# values are 1..4 in declaration order).
BlurType = Enum('BlurType', ['Box_Blur', 'Gaussian_Blur', 'Median_Filter', 'Bilateral_Filter'])
| [
"serivesmejia@gmail.com"
] | serivesmejia@gmail.com |
b45fc6b72d0b4ee114c9e2a39ad6ad02c04c851e | 8a0a1035f80debf66fb982931d5d397d68022a66 | /class2action.py | 0b0b985d7132c2ead2f31006b6319001bb4187b5 | [] | no_license | harry1080/scripts-2 | 14e078fd42f79d2e27540e75c590d70f0ced31a5 | 63ef7f6024d4e4b0a3699b6b0dbc9933d771c064 | refs/heads/master | 2020-05-09T23:34:32.113454 | 2019-04-08T09:49:55 | 2019-04-08T09:49:55 | 181,506,139 | 0 | 1 | null | 2019-04-15T14:39:36 | 2019-04-15T14:39:36 | null | UTF-8 | Python | false | false | 447 | py | def user(c):
def F(s,f):
return c(s,f)
return F
class Api(object):
    """Tiny demo API: a token-carrying object whose actions are dispatched
    by name through do() and wrapped by the module-level `user` decorator.

    Bug fix: the Python-2-only `print ...` statements were converted to the
    print() function so the module runs on Python 3. The original's
    `action = {}` class attribute was removed: it was immediately shadowed
    by the `action` method defined below and therefore dead code.
    """

    def __init__(self, token=None):
        self.token = token

    @classmethod
    def auth(cls, k):
        # Toy auth check: only the literal key 2 is accepted.
        if k == 2:
            return 1
        return None

    def do(self, act, kv):
        """Dispatch to the attribute named *act*, passing *kv*."""
        return getattr(self, act)(kv)

    @user
    def action(self, kv):
        # Echo the payload, then return the canned response plus the token.
        print(kv)
        return 'sdfffsgf' + self.token


print(Api('123234324').do('action', 2))
| [
"46884495+YDHCUI@users.noreply.github.com"
] | 46884495+YDHCUI@users.noreply.github.com |
d77aed3db535b4860c3a22f7c4fe9ef7e3e4f897 | 475d05030e7bb3447fd2022c97cc471a9f8623cd | /create_article_dataset.py | 79c6b54ffd865b00deb410083cd0dee76b4d047f | [] | no_license | sambeettiady/venture | 2e33cbeee02c65c80a2f08ad0e56a907d0041989 | 24e3253730632a7f7fe493bddc6369c5f37e5302 | refs/heads/master | 2020-03-30T03:54:08.581212 | 2018-09-29T07:20:22 | 2018-09-29T07:20:22 | 150,713,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | import pandas as pd
import numpy as np
import os
import pandas as pd  # imported locally so this block is self-contained

os.chdir('/Users/sambeet/Desktop/venture/')

companies = os.listdir('articles/')

# One row per (company, article): company name, article filename and the
# article text flattened to one whitespace-normalised string.
frames = []
for company in companies:
    # Robustness fix: skip every hidden entry, not only '.DS_Store'.
    if company.startswith('.'):
        continue
    articles = os.listdir('articles/' + company + '/')
    text = []
    for article in articles:
        with open('articles/' + company + '/' + article, 'r') as file:
            # Same format as before: each line stripped of literal \t/\n
            # markers and prefixed with a single space.
            cleaned = [' ' + line.replace('\\t', '').replace('\\n', '').strip()
                       for line in file.readlines()]
        text.append(''.join(cleaned))
    frames.append(pd.DataFrame({'company': [company] * len(articles),
                                'article': articles,
                                'text': text}))

# Bug fix: the original called pd.concat inside the loop (quadratic);
# build all per-company frames first and concatenate once.
df = pd.concat([pd.DataFrame(columns=['company', 'article', 'text'])] + frames,
               axis=0)
df.to_csv('articles.csv', index=False)
| [
"noreply@github.com"
] | sambeettiady.noreply@github.com |
561acaa13aab2c2f896707be1a586e74762b6442 | 72c716056fde63f0499bde1fc35c9f3eccb460d3 | /jayprakash/assignment2/Q10. program to create the multiplication table (from 1 to 10) of a number.py | a80fd2e0210fb3ffe7ff80647268bd05c1aa7b20 | [] | no_license | rapidcode-technologies-private-limited/Python | c8a5648185a760d4c4d6a0c3c43033405265f87b | 3e938cc40cde94dab4562794058e44d52fa89d38 | refs/heads/master | 2020-07-02T02:49:38.229488 | 2020-02-12T17:25:08 | 2020-02-12T17:25:08 | 201,391,965 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | while True:
a=int(input('Enter any no. of a='))
if a>0:
print(' Zero Is not multiplicatio')
for i in range(1,11):
print(a,'*',i,'='+str(a*i))
| [
"jayprakash1172000@gmail.com"
] | jayprakash1172000@gmail.com |
07bac4b0659c7151d22ec455cb5bbb340db2a1c5 | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /common/util_vietnamese_test.py | 3fa3dfa1e91ecf9cc5553850f8be6ef7c293dfd5 | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | #!/usr/bin/env python
# encoding: utf-8
import unittest
from common import util_vietnamese as uv
class TestUtilVietnamese(unittest.TestCase):
    """Unit tests for common.util_vietnamese.

    Bug fix: the deprecated `assertEquals` alias (removed in Python 3.12)
    was replaced by `assertEqual` throughout.
    """

    def testConvert2Unsign(self):
        # Vietnamese diacritics are stripped down to plain ASCII letters.
        self.assertEqual(uv.convert2Unsign(u'Dĩ độc trị độc'), u'Di doc tri doc')
        self.assertEqual(uv.convert2Unsign(u'Ông ăn ổi Ạ'), u'Ong an oi A')
        self.assertEqual(uv.convert2Unsign(u'Giầy thể thao nữ'), u'Giay the thao nu')
        self.assertEqual(uv.convert2Unsign(u'Thử xem ổn không nhé: Lưu Vĩnh Toàn, Phạm Kim Cương'), u'Thu xem on khong nhe: Luu Vinh Toan, Pham Kim Cuong')

    def testTokenized(self):
        # Punctuation and '+'/'-'/'.' split tokens; underscores do not.
        s = u'Lưu Vĩnh+Toàn, Pham; Kim.Cuong A-B. A_B'
        expect = [u'Lưu', u'Vĩnh', u'Toàn', u'Pham', u'Kim', u'Cuong', u'A', u'B', 'A_B']
        self.assertEqual(uv.tokenized(s), expect)

    def testMakePhraseToken(self):
        # Tokens are lower-cased and joined with leading underscores.
        self.assertEqual(uv.makePhraseToken(u'Lưu Vĩnh+Toàn, Pham; Kim.Cuong'), u'_lưu_vĩnh_toàn_pham_kim_cuong')
        self.assertEqual(uv.makePhraseToken(u'Toàn'), u'_toàn')
        self.assertEqual(uv.makePhraseToken(u';'), u'__')
        self.assertEqual(uv.makePhraseToken(u''), u'_')

    def testMakeSuffixNGramToken(self):
        # Expect every suffix of the phrase, in accented and unaccented form.
        expect = set()
        expect.add(u'_lưu_vĩnh_toàn_pham_kim_cuong')
        expect.add(u'_luu_vinh_toan_pham_kim_cuong')
        expect.add(u'_vĩnh_toàn_pham_kim_cuong')
        expect.add(u'_vinh_toan_pham_kim_cuong')
        expect.add(u'_toàn_pham_kim_cuong')
        expect.add(u'_toan_pham_kim_cuong')
        expect.add(u'_pham_kim_cuong')
        expect.add(u'_kim_cuong')
        expect.add(u'_cuong')
        self.assertEqual(uv.makeSuffixNGramToken(u'Lưu Vĩnh+Toàn, Pham; Kim.Cuong'), expect)

    def testMakeNGramToken(self):
        # Expect every contiguous n-gram (n = 1..4) of the phrase.
        expect = set()
        expect.add(u'_lưu_vĩnh_toàn_pham')
        expect.add(u'_vĩnh_toàn_pham_kim')
        expect.add(u'_toàn_pham_kim_cuong')
        expect.add(u'_lưu_vĩnh_toàn')
        expect.add(u'_vĩnh_toàn_pham')
        expect.add(u'_toàn_pham_kim')
        expect.add(u'_pham_kim_cuong')
        expect.add(u'_lưu_vĩnh')
        expect.add(u'_vĩnh_toàn')
        expect.add(u'_toàn_pham')
        expect.add(u'_pham_kim')
        expect.add(u'_kim_cuong')
        expect.add(u'_lưu')
        expect.add(u'_vĩnh')
        expect.add(u'_toàn')
        expect.add(u'_pham')
        expect.add(u'_kim')
        expect.add(u'_cuong')
        self.assertEqual(uv.makeNGramToken(u'Lưu Vĩnh+Toàn, Pham; Kim.Cuong'), expect)

    def testSimpleTokenized(self):
        # Whitespace, '-' and ',' act as separators.
        self.assertEqual(uv.simpleTokenized(u'hello \tw'), ['hello', 'w'])
        self.assertEqual(uv.simpleTokenized(u't-mobile'), ['t', 'mobile'])
        self.assertEqual(uv.simpleTokenized(u'o to, xe may'), ['o', 'to', 'xe', 'may'])


if __name__ == '__main__':
    unittest.main()
| [
"nguyenchungthuy.hust@gmail.com"
] | nguyenchungthuy.hust@gmail.com |
ecee26c6f963737ad632fe2f9b7806fb872c741f | 1cbfe38b16da00f90b275bd3b0886fe832aa7bc3 | /movie.py | 4e06608756343a2974fe6a317b59d559b606b9f6 | [] | no_license | CrazyLikeABEE/Movie-Trailer-Site | eb445bd3843011a09a5ebedca46d55b46f0f3696 | 4de68339565ebdeede216f7e0d4aee9f6f528939 | refs/heads/master | 2021-04-12T11:51:08.857675 | 2018-04-09T17:01:26 | 2018-04-09T17:01:26 | 126,533,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import webbrowser
class Movie():
    """Container for one movie's metadata plus a helper to open its trailer."""

    def __init__(self, title, storyline, poster, trailer):
        # Plain data attributes consumed by the trailer-site templates.
        self.title = title
        self.storyline = storyline
        self.poster_image_url = poster
        self.trailer_youtube_url = trailer

    def show_trailer(self):
        """Open the trailer URL in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
"noreply@github.com"
] | CrazyLikeABEE.noreply@github.com |
97b3bf87a736a6ba81767dac4224f7b3907fd82e | f4020b05167f88a18ca0ddf0e9eb98dae0f0d261 | /wedit/apps/orders/schema/order_mutation.py | 779c0adf38a2409494b653a95d8c9febe3645ae9 | [] | no_license | mwibutsa/wedit | 15979c748d92f53feecc5d32c651504fc31aece3 | c2ba07dca32e6bb915b8928e15176233f2a69984 | refs/heads/develop | 2022-12-10T12:11:23.088979 | 2019-10-21T19:24:21 | 2019-10-21T19:24:21 | 206,139,220 | 1 | 0 | null | 2022-12-08T06:36:36 | 2019-09-03T17:54:22 | Python | UTF-8 | Python | false | false | 1,256 | py | import graphene
from wedit.apps.orders.models import Order
from graphql_jwt.decorators import login_required
class CreateOrder(graphene.Mutation):
    """ Mutation to create orders. """
    owner = graphene.String()
    order_title = graphene.String()
    order_summary = graphene.String()
    order_file = graphene.String()
    description_file = graphene.String()

    class Arguments:
        order_title = graphene.String()
        order_summary = graphene.String()
        order_file = graphene.String()
        description_file = graphene.String()

    @login_required
    def mutate(self, info, order_title, order_summary, order_file, description_file):
        """Create and persist an Order owned by the authenticated user.

        Bug fix: the Order was previously built with order_title=order_file,
        so the file path silently overwrote the title; it now receives the
        order_title argument.
        """
        order = Order(
            order_title=order_title,
            order_summary=order_summary,
            order_file=order_file,
            description_file=description_file
        )
        order.owner = info.context.user
        order.save()
        # Echo the persisted fields back in the mutation payload.
        return CreateOrder(
            owner=order.owner.username,
            order_title=order.order_title,
            order_summary=order.order_summary,
            order_file=order.order_file,
            description_file=order.description_file
        )
class Mutation(graphene.ObjectType):
    """Root GraphQL mutation type for the orders app."""
    # Exposes CreateOrder as the `createOrder` mutation field.
    create_order = CreateOrder.Field()
| [
"mflohost@gmail.com"
] | mflohost@gmail.com |
c1b7341897c25e9f6a21ed58039268685b18bd99 | 2815666161ac8593a1282fb22f74964cdf8a573e | /VAN_API/model.py | 56f17ea79d090f0c6b73f89a968e747186882d71 | [] | no_license | vanshikagupta07/Predicting-High-Risk-Countries-for-Political-Instability-and-Conflict | 02d0c80d42cf1bf0ed684155a7011eb0a8db43b5 | 33c4d02420c7ed79edda807f42974808ceecaef8 | refs/heads/master | 2022-10-01T08:35:03.680344 | 2020-06-07T10:16:57 | 2020-06-07T10:16:57 | 270,264,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle

df = pd.read_csv('Master_refined.csv')

# 'Czechia' and 'Czech Republic' are the same country; unify the name.
df['Country'] = df['Country'].replace('Czechia', 'Czech Republic')
# 'Eswatini' and 'Swaziland' are the same country; unify the name.
df['Country'] = df['Country'].replace('Eswatini', 'Swaziland')

# 'Rank' is categorical (e.g. '113th'): extract the numeric part and the
# trailing letters separately. (Raw string fixes the invalid '\d' escape.)
df['Rank_numerical'] = df.Rank.str.extract(r'(\d+)')  # captures numerical part
df['Rank_categorical'] = df['Rank'].str[-2:]  # captures the suffix letters

# Keep only the numeric rank, as an int column.
df = df.drop(columns=['Rank', 'Rank_categorical'])
df['Rank_numerical'] = df['Rank_numerical'].astype(str).astype(int)

# Encode the categorical 'Country' column as integers.
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
df['Country'] = label_encoder.fit_transform(df['Country'])

X = df.iloc[:, :15]
y = df.iloc[:, -1]

# Decision-tree regression trained without the twelve indicator columns.
from sklearn.tree import DecisionTreeRegressor
dregressor = DecisionTreeRegressor()
dregressor.fit(X.drop(['C1: Security Apparatus',
                       'C2: Factionalized Elites',
                       'C3: Group Grievance',
                       'E1: Economy',
                       'E2: Economic Inequality',
                       'E3: Human Flight and Brain Drain',
                       'P1: State Legitimacy',
                       'P2: Public Services',
                       'P3: Human Rights',
                       'S1: Demographic Pressures',
                       'S2: Refugees and IDPs',
                       'X1: External Intervention'], axis=1), y)

# Bug fixes: removed the duplicate `import pickle` and use context managers
# so the pickle files are closed (the originals leaked open file handles).
# Saving model to disk
with open('model.pkl', 'wb') as model_file:
    pickle.dump(dregressor, model_file)
# Loading model to compare the results
with open('model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
| [
"vanshikaseby99@gmail.com"
] | vanshikaseby99@gmail.com |
d1076378ce392fc368cb0cfd65323b2257865e3a | b2c6ae2746db8b1d6e8705d441ddcf3b3b9db298 | /Demo/PieChart.py | 930433d35ced2e0dbee9a4cfedf005d2d8aff8ab | [] | no_license | karthigahariharan/The-QA | 4faf05a22ae11e78864da4feaaa89c8f48346f68 | 1ee9cf3f421a67b877abd55456cb7714f8866839 | refs/heads/master | 2021-06-14T21:30:42.891949 | 2019-08-24T19:55:48 | 2019-08-24T19:55:48 | 204,205,689 | 0 | 0 | null | 2021-06-05T09:40:16 | 2019-08-24T19:54:24 | HTML | UTF-8 | Python | false | false | 7,919 | py |
# coding: utf-8
# In[5]:
import plotly
plotly.__version__
plotly.tools.set_credentials_file(username='kyle777', api_key='aDZyXDOPFiN1j5S6btB2')
import json
import plotly.plotly as py
import plotly.graph_objs as go
from pprint import pprint
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode()
import itertools
from random import shuffle
import random
from collections import OrderedDict
with open('comparative.json') as f:
data = json.load(f)
f.close()
print(len(data))
from random import randint
# Global list of values. Each parameter will be set to one random value from each list
# CSS Colors recognized by plotly
# https://community.plot.ly/t/plotly-colours-list/11730/2
colors = ["aliceblue", "antiquewhite", "aqua", "aquamarine", "azure", "beige", "bisque", "black", "blanchedalmond", "blue", "blueviolet", "brown", "burlywood", "cadetblue", "chartreuse", "chocolate", "coral", "cornflowerblue", "cornsilk", "crimson", "cyan", "darkblue", "darkcyan", "darkgoldenrod", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta", "darkolivegreen", "darkorange", "darkorchid", "darkred", "darksalmon", "darkseagreen", "darkslateblue", "darkslategray", "darkslategrey", "darkturquoise", "darkviolet", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen", "fuchsia", "gainsboro", "ghostwhite", "gold", "goldenrod", "gray", "grey", "green", "greenyellow", "honeydew", "hotpink", "indianred", "indigo", "ivory", "khaki", "lavender", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcoral", "lightcyan", "lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightpink", "lightsalmon", "lightseagreen", "lightskyblue", "lightslategray", "lightslategrey", "lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "magenta", "maroon", "mediumaquamarine", "mediumblue", "mediumorchid", "mediumpurple", "mediumseagreen", "mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin", "navajowhite", "navy", "oldlace", "olive", "olivedrab", "orange", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru", "pink", "plum", "powderblue", "purple", "red", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver", "skyblue", "slateblue", "slategray", "slategrey", "snow", "springgreen", "steelblue", "tan", "teal", "thistle", "tomato", "turquoise", "violet", "wheat", "white", "whitesmoke", "yellow", "yellowgreen"]
markerlinecolor = colors[randint(0, len(colors) - 1)]
# Font colors
fontcolors = ["white", "black"]
# Font sizes
# https://plot.ly/python/reference/#bar-textfont-size
fontsizes = [16, 17, 18, 19, 20, 21, 22, 23, 24]
# Font families
# https://plot.ly/python/reference/#bar-textfont-family
fontfamilies = ["Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman"]
fontfamily = fontfamilies[randint(0, len(fontfamilies) - 1)]
fontcolor = fontcolors[randint(0, len(fontcolors) - 1)]
fontsize = fontsizes[randint(0, len(fontsizes) - 1)]
# TextInfo
# https://plot.ly/python/reference/#pie-textinfo
textinfos = ["label+value", "label+percent", "label+value+percent"]
# Direction
# https://plot.ly/python/reference/#pie-direction
directions = ["clockwise", "counterclockwise"]
# Pull
# https://plot.ly/python/reference/#pie-pull
pulls = [0, 0.02, 0, 0.04, 0, 0.06, 0, 0.08, 0, 0.1]
# Marker width
# https://plot.ly/python/reference/#pie-marker-line-width
markerWidths = [0, 1, 2, 3, 4]
# Values specific to data
# filename = 'filename_goes_here'
# title = "title_goes_here"
# xaxis_title = "xaxis_title_goes_here"
# yaxis_title = "yaxis_title_goes_here"
# labels = ['a','b','c','d','e','f']
# values = [randint(4, 16), randint(4, 16), randint(4, 16), randint(4, 16), randint(4, 16), randint(4, 16)]
for i in range(len(data)):
filename=""
values_dict={}
labels=[]
values1=[]
values2=[]
#print(data[i])
for key in data[i]:
filename=key
title=key
values_dict=data[i][key]
b=list(values_dict.items())
shuffle(b)
values_dict=OrderedDict(b)
value_dict={k: v for k, v in values_dict.items() if v}
values_dict={}
for key in value_dict:
y= value_dict[key].split()
if(y[0].endswith("%")):
y[0]=y[0].replace("%","")
if(float(y[0].replace(",",""))!=0):
values_dict.update({key:value_dict[key]})
values_dict=dict(itertools.islice(values_dict.items(), random.randint(5,12)))
print(values_dict)
labels=[]
values=[]
#values2=[]
multipliers = {'thousand':1000, 'million':1000000, 'billion':1000000000, 'trillion':1000000000000}
for key in values_dict:
print(key)
labels.append(key)
values2.append(key)
i=values_dict[key]
if i is None:
continue
if(type(i)==int):
value=i
else:
label=""
y=i.split()
if(y[0].endswith("%")):
value=i[:i.index("%")]
label=i[i.index("%"):]
label=label.replace('%','Percentage')
#print("value="+str(value)+", label="+label)
else:
value=y[0].replace(',','')
if len(y) > 1:
if y[1] in multipliers:
mult = multipliers[y[1]]
value=float(value)*int(mult)
if len(y) >2:
label=i[i.index(y[2]):]
else:
label=i[i.index(y[1]):]
#print("value="+str(value)+", label="+label)
values.append(value)
yaxis_title = label
# for key in values_dict:
# labels.append(key)
# print("key"+key)
# values2.append(key)
# print(values_dict[key])
# values1.append(values_dict[key])
# # Values specific to data
# filename = key
# title = key
xaxis_title = "countries"
trace = go.Pie(
name = "name1_goes_here",
labels = labels,
values = values,
textinfo = textinfos[randint(0, len(textinfos) - 1)],
textposition = "outside",
textfont = dict(
size = fontsizes[randint(0, len(fontsizes) - 1)],
color = colors[randint(0, len(colors) - 1)]
),
sort = True,
direction = directions[randint(0, len(directions) - 1)],
rotation = randint(-360, 360),
pull = pulls[randint(0, len(pulls) - 1)],
hole = 0.5 if randint(0, 1) == 0 else 0,
marker = dict(
colors = [colors[randint(0, len(colors) - 1)] for x in labels],
line = dict(
color = colors[randint(0, len(colors) - 1)],
width = markerWidths[randint(0, len(markerWidths) - 1)]
)
)
)
data_val = [trace]
layout = go.Layout(
autosize=False,
title=title,
titlefont=dict(
color="black",
family=fontfamily,
size=fontsizes[randint(0, len(fontsizes) - 1)]
),
margin=dict(
r=80,
t=80,
b=80,
l=80,
pad=2
),
plot_bgcolor=colors[randint(0, len(colors) - 1)],
showlegend=True,
height=800,
width=800,
)
fig = go.Figure(data=data_val, layout=layout)
filename=filename.replace("/","-")
print(filename)
plotly.offline.plot(fig, filename=filename, image = 'png', image_filename=filename,
output_type='file', image_width=800, image_height=600)
| [
"karthigahariharan94@gmail.com"
] | karthigahariharan94@gmail.com |
739d271656785b46f69581779f1386627452831c | 4bb399152226477a84781892d50af45abd40c5dc | /clicli.py | c2bb3c48c8f63801eec216ca4d81230390269465 | [] | no_license | senyorjou/clicli | 167b853b3c1f426e3d84fe1dacc2f1c65137f874 | 87fa32635530a8bd260add06956321d88dca9dc3 | refs/heads/main | 2023-04-25T16:33:01.353395 | 2021-05-20T05:49:59 | 2021-05-20T05:49:59 | 369,094,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import click
from dotenv import load_dotenv
from commands.ns import cmd_ns
@click.group()
def cli():
pass
cli.add_command(cmd_ns)
if __name__ == "__main__":
load_dotenv()
cli()
| [
"smyslov@gmail.com"
] | smyslov@gmail.com |
2a1962952945bb19fc590008b2c02d3ce88c2b5c | 2aefbdde1481a893153e61385d6a224a1c806731 | /launches/admin.py | 434fd0686cc21ef04d02a8649dcffae53403cc81 | [] | no_license | Felix-FE/P4-BE | f0607958e4c1345f5fe84aedb6a930e3b93e1657 | 8e8b1f5923a6900b0592cb919d8ce4b10d24414c | refs/heads/main | 2023-08-24T15:57:04.890343 | 2021-11-09T14:22:03 | 2021-11-09T14:22:03 | 404,301,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from django.contrib import admin
from .models import Launch, Update
admin.site.register(Launch)
admin.site.register(Update)
| [
"felixfennevans@gmail.com"
] | felixfennevans@gmail.com |
949a33c041675b7ddf0cd974daa9ab5cd08582b1 | f5b8d0a7719355d27f17841f71d236017e6c8dcd | /jumpy_tut12.py | 87e4b5e42459c17210f0c48b1f144832c103449a | [
"MIT"
] | permissive | ShinyShingi/Jumpy | 18216a184a1c7181fec4b7a74634a7f3c970e47f | 93687bc18cd37a553fb0a1a533afceb7d0c23a46 | refs/heads/main | 2023-08-04T22:32:30.137354 | 2021-09-14T19:42:45 | 2021-09-14T19:42:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,333 | py | #import libraries
import pygame
import random
import os
from spritesheet import SpriteSheet
from enemy import Enemy
#initialise pygame
pygame.init()
#game window dimensions
SCREEN_WIDTH = 400
SCREEN_HEIGHT = 600
#create game window
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption('Jumpy')
#set frame rate
clock = pygame.time.Clock()
FPS = 60
#game variables
SCROLL_THRESH = 200
GRAVITY = 1
MAX_PLATFORMS = 10
scroll = 0
bg_scroll = 0
game_over = False
score = 0
fade_counter = 0
if os.path.exists('score.txt'):
with open('score.txt', 'r') as file:
high_score = int(file.read())
else:
high_score = 0
#define colours
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
PANEL = (153, 217, 234)
#define font
font_small = pygame.font.SysFont('Lucida Sans', 20)
font_big = pygame.font.SysFont('Lucida Sans', 24)
#load images
jumpy_image = pygame.image.load('assets/jump.png').convert_alpha()
bg_image = pygame.image.load('assets/bg.png').convert_alpha()
platform_image = pygame.image.load('assets/wood.png').convert_alpha()
#bird spritesheet
bird_sheet_img = pygame.image.load('assets/bird.png').convert_alpha()
bird_sheet = SpriteSheet(bird_sheet_img)
#function for outputting text onto the screen
def draw_text(text, font, text_col, x, y):
img = font.render(text, True, text_col)
screen.blit(img, (x, y))
#function for drawing info panel
def draw_panel():
pygame.draw.rect(screen, PANEL, (0, 0, SCREEN_WIDTH, 30))
pygame.draw.line(screen, WHITE, (0, 30), (SCREEN_WIDTH, 30), 2)
draw_text('SCORE: ' + str(score), font_small, WHITE, 0, 0)
#function for drawing the background
def draw_bg(bg_scroll):
screen.blit(bg_image, (0, 0 + bg_scroll))
screen.blit(bg_image, (0, -600 + bg_scroll))
#player class
class Player():
def __init__(self, x, y):
self.image = pygame.transform.scale(jumpy_image, (45, 45))
self.width = 25
self.height = 40
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = (x, y)
self.vel_y = 0
self.flip = False
def move(self):
#reset variables
scroll = 0
dx = 0
dy = 0
#process keypresses
key = pygame.key.get_pressed()
if key[pygame.K_a]:
dx = -10
self.flip = True
if key[pygame.K_d]:
dx = 10
self.flip = False
#gravity
self.vel_y += GRAVITY
dy += self.vel_y
#ensure player doesn't go off the edge of the screen
if self.rect.left + dx < 0:
dx = -self.rect.left
if self.rect.right + dx > SCREEN_WIDTH:
dx = SCREEN_WIDTH - self.rect.right
#check collision with platforms
for platform in platform_group:
#collision in the y direction
if platform.rect.colliderect(self.rect.x, self.rect.y + dy, self.width, self.height):
#check if above the platform
if self.rect.bottom < platform.rect.centery:
if self.vel_y > 0:
self.rect.bottom = platform.rect.top
dy = 0
self.vel_y = -20
#check if the player has bounced to the top of the screen
if self.rect.top <= SCROLL_THRESH:
#if player is jumping
if self.vel_y < 0:
scroll = -dy
#update rectangle position
self.rect.x += dx
self.rect.y += dy + scroll
return scroll
def draw(self):
screen.blit(pygame.transform.flip(self.image, self.flip, False), (self.rect.x - 12, self.rect.y - 5))
pygame.draw.rect(screen, WHITE, self.rect, 2)
#platform class
class Platform(pygame.sprite.Sprite):
def __init__(self, x, y, width, moving):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.transform.scale(platform_image, (width, 10))
self.moving = moving
self.move_counter = random.randint(0, 50)
self.direction = random.choice([-1, 1])
self.speed = random.randint(1, 2)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def update(self, scroll):
#moving platform side to side if it is a moving platform
if self.moving == True:
self.move_counter += 1
self.rect.x += self.direction * self.speed
#change platform direction if it has moved fully or hit a wall
if self.move_counter >= 100 or self.rect.left < 0 or self.rect.right > SCREEN_WIDTH:
self.direction *= -1
self.move_counter = 0
#update platform's vertical position
self.rect.y += scroll
#check if platform has gone off the screen
if self.rect.top > SCREEN_HEIGHT:
self.kill()
#player instance
jumpy = Player(SCREEN_WIDTH // 2, SCREEN_HEIGHT - 150)
#create sprite groups
platform_group = pygame.sprite.Group()
enemy_group = pygame.sprite.Group()
#create starting platform
platform = Platform(SCREEN_WIDTH // 2 - 50, SCREEN_HEIGHT - 50, 100, False)
platform_group.add(platform)
#game loop
run = True
while run:
clock.tick(FPS)
if game_over == False:
scroll = jumpy.move()
#draw background
bg_scroll += scroll
if bg_scroll >= 600:
bg_scroll = 0
draw_bg(bg_scroll)
#generate platforms
if len(platform_group) < MAX_PLATFORMS:
p_w = random.randint(40, 60)
p_x = random.randint(0, SCREEN_WIDTH - p_w)
p_y = platform.rect.y - random.randint(80, 120)
p_type = random.randint(1, 2)
if p_type == 1 and score > 500:
p_moving = True
else:
p_moving = False
platform = Platform(p_x, p_y, p_w, p_moving)
platform_group.add(platform)
#update platforms
platform_group.update(scroll)
#generate enemies
if len(enemy_group) == 0 and score > 1500:
enemy = Enemy(SCREEN_WIDTH, 100, bird_sheet, 1.5)
enemy_group.add(enemy)
#update enemies
enemy_group.update(scroll, SCREEN_WIDTH)
#update score
if scroll > 0:
score += scroll
#draw line at previous high score
pygame.draw.line(screen, WHITE, (0, score - high_score + SCROLL_THRESH), (SCREEN_WIDTH, score - high_score + SCROLL_THRESH), 3)
draw_text('HIGH SCORE', font_small, WHITE, SCREEN_WIDTH - 130, score - high_score + SCROLL_THRESH)
#draw sprites
platform_group.draw(screen)
enemy_group.draw(screen)
jumpy.draw()
#draw panel
draw_panel()
#check game over
if jumpy.rect.top > SCREEN_HEIGHT:
game_over = True
else:
if fade_counter < SCREEN_WIDTH:
fade_counter += 5
for y in range(0, 6, 2):
pygame.draw.rect(screen, BLACK, (0, y * 100, fade_counter, 100))
pygame.draw.rect(screen, BLACK, (SCREEN_WIDTH - fade_counter, (y + 1) * 100, SCREEN_WIDTH, 100))
else:
draw_text('GAME OVER!', font_big, WHITE, 130, 200)
draw_text('SCORE: ' + str(score), font_big, WHITE, 130, 250)
draw_text('PRESS SPACE TO PLAY AGAIN', font_big, WHITE, 40, 300)
#update high score
if score > high_score:
high_score = score
with open('score.txt', 'w') as file:
file.write(str(high_score))
key = pygame.key.get_pressed()
if key[pygame.K_SPACE]:
#reset variables
game_over = False
score = 0
scroll = 0
fade_counter = 0
#reposition jumpy
jumpy.rect.center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT - 150)
#reset enemies
enemy_group.empty()
#reset platforms
platform_group.empty()
#create starting platform
platform = Platform(SCREEN_WIDTH // 2 - 50, SCREEN_HEIGHT - 50, 100, False)
platform_group.add(platform)
#event handler
for event in pygame.event.get():
if event.type == pygame.QUIT:
#update high score
if score > high_score:
high_score = score
with open('score.txt', 'w') as file:
file.write(str(high_score))
run = False
#update display window
pygame.display.update()
pygame.quit()
| [
"71890899+russs123@users.noreply.github.com"
] | 71890899+russs123@users.noreply.github.com |
d75cfec04356371e9cb361413293d295f26960ac | b49c04560012e6140692ec5ad8bdf8be8e0696d9 | /dogtor/dogtor/settings.py | fc950f5407215e94b2fda6b4262fc5cf960cf1b1 | [] | no_license | msaldeveloper/python_primera_generacion | c9abada8e24fb9a46660882b07896ca3522ff7b1 | ba21f89df0a9208093642062e3aed16b164c18f7 | refs/heads/main | 2023-03-19T17:55:45.098766 | 2021-03-19T21:59:09 | 2021-03-19T21:59:09 | 348,542,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,239 | py | """
Django settings for dogtor project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0o-5u5sa4b_gdvu80w74g1l^y6bp89zrja!m93@q$x7_vq5j#q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#custom apps
'vet',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dogtor.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dogtor.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'dogtor',
'USER': 'admin',
'PASSWORD': '1234567',
'HOST': 'localhost',
'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"msalda53@gmail.com"
] | msalda53@gmail.com |
21bdb9ebe524c8a4b44d23e8adf7fdb24c9a822e | 12d652d5f53be2481e349a1a5423e7f0d2b02fd0 | /libsaas/services/twilio/__init__.py | e04e99fdeb97eacc20fd0e41bdf7a99b2b1ae2ae | [
"MIT"
] | permissive | piplcom/libsaas | 46d4414844b2ef99e441a9c518d1e90f1adc970c | 25caa745a104c8dc209584fa359294c65dbf88bb | refs/heads/master | 2021-01-19T21:11:58.076814 | 2020-06-08T15:05:02 | 2020-06-08T15:05:02 | 88,621,980 | 1 | 1 | null | 2017-04-18T12:23:00 | 2017-04-18T12:23:00 | null | UTF-8 | Python | false | false | 28 | py | from .service import Twilio
| [
"aitorciki@gmail.com"
] | aitorciki@gmail.com |
d273d31be1fc3f312c06f5395e9b8f3ba259c66e | 45dd9a79cc3a13dc9bcf6d2e52e4ebd9f49f44fa | /migrations/versions/283fb4db0052_autenticacao_do_sistema.py | a3e376390252f4cd5c9fd0ad89fdf3cc8c41d7e1 | [] | no_license | zhasny17/medias-crud | 3a804de2a675f150c2c294a751164c3231e76590 | d77427be26b904a8528b1d7733d8377c96964269 | refs/heads/develop | 2023-01-03T05:08:27.374749 | 2020-10-26T18:47:41 | 2020-10-26T18:47:41 | 292,314,444 | 1 | 0 | null | 2020-10-26T18:47:42 | 2020-09-02T14:59:31 | null | UTF-8 | Python | false | false | 1,522 | py | """autenticacao do sistema
Revision ID: 283fb4db0052
Revises: 0f44243b2485
Create Date: 2020-09-02 14:26:15.171158
"""
from alembic import op
import sqlalchemy as sa
from datetime import datetime
# revision identifiers, used by Alembic.
revision = '283fb4db0052'
down_revision = '0f44243b2485'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'refresh_tokens',
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=False, default=datetime.utcnow),
sa.Column('valid', sa.Boolean, nullable=False, default=True),
sa.Column('expiration_date', sa.DateTime(timezone=True), nullable=False),
sa.Column('user_id', sa.String(36), sa.ForeignKey('users.id'), nullable=False),
mysql_charset='utf8mb4',
)
op.create_table(
'access_tokens',
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=False, default=datetime.utcnow),
sa.Column('valid', sa.Boolean, nullable=False, default=True),
sa.Column('expiration_date', sa.DateTime(timezone=True), nullable=False),
sa.Column('user_id', sa.String(36), sa.ForeignKey('users.id'), nullable=False),
sa.Column('refresh_token_id', sa.String(36), sa.ForeignKey('refresh_tokens.id'), nullable=False),
mysql_charset='utf8mb4',
)
def downgrade():
op.drop_table('access_tokens')
op.drop_table('refresh_tokens')
| [
"lucasluz@fortalsistemas.com.br"
] | lucasluz@fortalsistemas.com.br |
abffbf28fffc2cff9a3165f8a00e57f2989d81b9 | 3b219f97d6ad54b8d061c3d7776dad064777ba0a | /matplotlayers/backends/tk/stack_settings.py | 0a2bb5ae0664a6f831bae21c7905a590ab68b033 | [
"MIT"
] | permissive | friedrichromstedt/matplotlayers | f03e94d99d6550e1657023889ad4defe7f1eb64f | a0c883476ac5b0f457e32e8831d87f7a0ca0bb80 | refs/heads/master | 2021-01-10T21:31:36.012401 | 2011-05-24T14:38:36 | 2011-05-24T14:38:36 | 1,793,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,303 | py | # Copyright (c) 2010 Friedrich Romstedt <friedrichromstedt@gmail.com>
# See also <www.friedrichromstedt.org> (if e-mail has changed)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Developed since: Aug 2008
"""Defines the settings dialog class for accessing the details of a Stack."""
import Tkinter
import tkFileDialog
import ventry
class StackSettings(Tkinter.Toplevel):
def __init__(self, master,
stack,
callback_update):
"""STACK is the matplotlayers.Stack to act upon."""
Tkinter.Toplevel.__init__(self, master)
self.stack = stack
self.callback_update = callback_update
# Create Settings widgets ...
self.lframe_settings = Tkinter.LabelFrame(self, text = 'Settings')
self.lframe_settings.pack(side = Tkinter.LEFT, anchor = Tkinter.N)
# Create labeling widgets.
self.lframe_labeling = Tkinter.LabelFrame(self.lframe_settings,
text = 'Labeling')
self.lframe_labeling.pack(side = Tkinter.TOP, anchor = Tkinter.W)
self.frame_labeling = Tkinter.Frame(self.lframe_labeling)
self.frame_labeling.pack(side = Tkinter.TOP)
if self.stack.title is None:
initial_title = ''
else:
initial_title = self.stack.title.replace('\n',r'\n')
if self.stack.xlabel is None:
initial_xlabel = ''
else:
initial_xlabel = self.stack.xlabel.replace('\n',r'\n')
if self.stack.ylabel is None:
initial_ylabel = ''
else:
initial_ylabel = self.stack.ylabel.replace('\n',r'\n')
self.title = ventry.NamedVEntry(self.frame_labeling,
name = 'Title:',
column = 0, row = 0,
initial = initial_title,
width = 40)
self.xlabel = ventry.NamedVEntry(self.frame_labeling,
name = 'x label:',
column = 0, row = 1,
initial = initial_xlabel,
width = 40)
self.ylabel = ventry.NamedVEntry(self.frame_labeling,
name = 'y label:',
column = 0, row = 2,
initial = initial_ylabel,
width = 40)
self.title.initialise()
self.xlabel.initialise()
self.ylabel.initialise()
self.update_title()
self.button_update_labeling = Tkinter.Button(self.lframe_labeling,
text = 'Update Labeling',
command = self.tk_update_labeling)
self.button_update_labeling.pack(side = Tkinter.TOP,
fill = Tkinter.X)
# Create limit widgets.
self.lframe_limits = Tkinter.LabelFrame(self.lframe_settings,
text = 'Limits')
self.lframe_limits.pack(side = Tkinter.TOP, anchor = Tkinter.W)
self.frame_limits = Tkinter.Frame(self.lframe_limits)
self.frame_limits.pack(side = Tkinter.TOP)
(xlim0, xlim1) = self.stack.get_xlim()
(ylim0, ylim1) = self.stack.get_ylim()
self.xlim_left = ventry.NamedVEntry(self.frame_limits,
name = 'x Limits:',
column = 0, row = 0,
initial = xlim0,
validate = ventry.number)
self.xlim_right = ventry.VEntry(self.frame_limits,
initial = xlim1,
validate = ventry.number)
self.xlim_right.grid(column = 2, row = 0)
self.xlim_left.initialise()
self.xlim_right.initialise()
self.ylim_bottom = ventry.NamedVEntry(self.frame_limits,
name = 'y Limits:',
column = 0, row = 1,
initial = ylim0,
validate = ventry.number)
self.ylim_top = ventry.VEntry(self.frame_limits,
initial = ylim1,
validate = ventry.number)
self.ylim_top.grid(column = 2, row = 1)
self.ylim_bottom.initialise()
self.ylim_top.initialise()
self.autoscalex_on = Tkinter.BooleanVar(self.lframe_limits)
self.autoscaley_on = Tkinter.BooleanVar(self.lframe_limits)
self.autoscalex_on.set(self.stack.get_autoscalex_on())
self.autoscaley_on.set(self.stack.get_autoscaley_on())
self.checkbutton_autoscalex_on = Tkinter.Checkbutton(
self.lframe_limits,
text = 'x Autoscale',
command = self.tk_autoscalex_on,
variable = self.autoscalex_on)
self.checkbutton_autoscalex_on.pack(side = Tkinter.TOP)
self.checkbutton_autoscaley_on = Tkinter.Checkbutton(
self.lframe_limits,
text = 'y Autoscale',
command = self.tk_autoscaley_on,
variable = self.autoscaley_on)
self.checkbutton_autoscaley_on.pack(side = Tkinter.TOP)
self.button_update_limits = Tkinter.Button(self.lframe_limits,
text = 'Update Scales',
command = self.tk_update_limits)
self.button_update_limits.pack(side = Tkinter.TOP,
fill = Tkinter.X)
self.update_autoscalex_accessibility()
self.update_autoscaley_accessibility()
def tk_update_labeling(self):
self.stack.set_title(self.title.get().replace('\\n', '\n'))
self.stack.set_xlabel(self.xlabel.get().replace('\\n', '\n'))
self.stack.set_ylabel(self.ylabel.get().replace('\\n', '\n'))
self.callback_update()
self.update_title()
def tk_update_limits(self):
# Tells wheter an update is needed or not:
update_needed = False
if self.autoscalex_on.get():
# We are in autoscale mode, thus update the values displayed ...
(xlim0, xlim1) = self.stack.get_xlim()
self.xlim_left.set(xlim0)
self.xlim_right.set(xlim1)
else:
# We are in explicit mode, thus write the values typed in to
# the stack ...
self.stack.set_xlim(
(self.xlim_left.get(), self.xlim_right.get()))
# Only in this branch update the stack.
update_needed = True
if self.autoscaley_on.get():
# We are in autoscale mode, thus update the values displayed ...
(ylim0, ylim1) = self.stack.get_ylim()
self.ylim_bottom.set(ylim0)
self.ylim_top.set(ylim1)
else:
# We are in explicit mode thus write the values typed in to
# the stack ...
self.stack.set_ylim(
(self.ylim_bottom.get(), self.ylim_top.get()))
# Only in this branch update the stack.
update_needed = True
if update_needed:
self.callback_update()
def update_autoscalex_accessibility(self):
"""Enables / Disables widgets according to the X autoscale setting."""
if self.autoscalex_on.get():
# Disable the controls:
self.xlim_left.disable()
self.xlim_right.disable()
else:
# Enable the controls:
self.xlim_left.enable()
self.xlim_right.enable()
def update_autoscaley_accessibility(self):
"""Enables / Disables widgets according to the Y autoscale setting."""
if self.autoscaley_on.get():
# Disable the controls:
self.ylim_bottom.disable()
self.ylim_top.disable()
else:
# Enable the controls:
self.ylim_bottom.enable()
self.ylim_top.enable()
def tk_autoscalex_on(self):
"""Called on changes of the autoscale X checkbutton."""
# Update the stack's settings ...
if self.autoscalex_on.get():
self.stack.set_autoscale_on(x_on=True)
else:
self.stack.set_autoscale_on(x_on=False)
# Enable / disable the controls ...
self.update_autoscalex_accessibility()
# If the autoscaling has been disabled, update the limits
# because they may have changed due to autoscaling under the way ...
(xlim0, xlim1) = self.stack.get_xlim()
self.xlim_left.set(xlim0)
self.xlim_right.set(xlim1)
self.callback_update()
def tk_autoscaley_on(self):
"""Called on changes of the autoscale Y checkbutton."""
# Update the stack's settings ...
if self.autoscaley_on.get():
self.stack.set_autoscale_on(y_on=True)
else:
self.stack.set_autoscale_on(y_on=False)
# Enable / disable the controls ...
self.update_autoscaley_accessibility()
# If the autoscaling has been disabled, update the limits
# because they may have changed due to autoscaling under the way ...
(ylim0, ylim1) = self.stack.get_ylim()
self.ylim_bottom.set(ylim0)
self.ylim_top.set(ylim1)
self.callback_update()
def update_title(self):
"""Update the title of the window according to the title of the
stack."""
# Choose a title which is meaningful both if the title has been set
# and also if not.
self.wm_title('Stack Settings ' + self.title.get())
| [
"friedrichromstedt@gmail.com"
] | friedrichromstedt@gmail.com |
47114303d4036a4aeb4733f34ef927d7095bb970 | ac2c3e8c278d0aac250d31fd023c645fa3984a1b | /saleor/saleor/core/payments.py | 777cdcf229f3af0436638628319a4ed5f6c33a12 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | jonndoe/saleor-test-shop | 152bc8bef615382a45ca5f4f86f3527398bd1ef9 | 1e83176684f418a96260c276f6a0d72adf7dcbe6 | refs/heads/master | 2023-01-21T16:54:36.372313 | 2020-12-02T10:19:13 | 2020-12-02T10:19:13 | 316,514,489 | 1 | 1 | BSD-3-Clause | 2020-11-27T23:29:20 | 2020-11-27T13:52:33 | TypeScript | UTF-8 | Python | false | false | 1,983 | py | from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional
if TYPE_CHECKING:
# flake8: noqa
from ..checkout.models import Checkout, CheckoutLine
from ..discount import DiscountInfo
from ..payment.interface import (
PaymentData,
GatewayResponse,
TokenConfig,
CustomerSource,
PaymentGateway,
)
class PaymentInterface(ABC):
@abstractmethod
def list_payment_gateways(
self, currency: Optional[str] = None, active_only: bool = True
) -> List["PaymentGateway"]:
pass
@abstractmethod
def checkout_available_payment_gateways(
self, checkout: "Checkout",
) -> List["PaymentGateway"]:
pass
@abstractmethod
def authorize_payment(
self, gateway: str, payment_information: "PaymentData"
) -> "GatewayResponse":
pass
@abstractmethod
def capture_payment(
self, gateway: str, payment_information: "PaymentData"
) -> "GatewayResponse":
pass
@abstractmethod
def refund_payment(
self, gateway: str, payment_information: "PaymentData"
) -> "GatewayResponse":
pass
@abstractmethod
def void_payment(
self, gateway: str, payment_information: "PaymentData"
) -> "GatewayResponse":
pass
@abstractmethod
def confirm_payment(
self, gateway: str, payment_information: "PaymentData"
) -> "GatewayResponse":
pass
@abstractmethod
def token_is_required_as_payment_input(self, gateway) -> bool:
pass
@abstractmethod
def process_payment(
self, gateway: str, payment_information: "PaymentData"
) -> "GatewayResponse":
pass
@abstractmethod
def get_client_token(self, gateway: str, token_config: "TokenConfig") -> str:
pass
@abstractmethod
def list_payment_sources(
self, gateway: str, customer_id: str
) -> List["CustomerSource"]:
pass
| [
"testuser@151-248-122-3.cloudvps.regruhosting.ru"
] | testuser@151-248-122-3.cloudvps.regruhosting.ru |
790b20e763c294fc23c22ff01dde9e2435ff065d | 908a81de9a5dd27f6a0b626e9ca016ed50b98085 | /CommomClass.py | 08f6ef735ad8a9dcc3d30832f4387abd7f807371 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | autolordz/docx-content-modify | ad619b0687e74f028b7c0bee1fea292bd4d76af2 | 85c4b972f804e2513834c46791955c28c6c7ca0e | refs/heads/master | 2022-09-09T15:33:25.301672 | 2022-08-09T02:01:27 | 2022-08-09T02:01:27 | 146,964,372 | 6 | 1 | MIT | 2022-07-15T03:36:08 | 2018-09-01T03:34:26 | Python | UTF-8 | Python | false | false | 4,344 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 27 20:42:50 2022
@author: Autozhz
"""
import pandas as pd
import re,os,sys
import rsa,datetime
import win32api,win32con
from PrintLog import PrintLog
from Global import current_path
class CommomClass:
def __init__(self, *args, **kwargs):
try:
with open(os.path.join(current_path, 'verification.txt'),'rb') as verif, \
open(os.path.join(current_path, 'private.pem'),'rb') as privatefile:
content = verif.read()
p = privatefile.read()
privkey = rsa.PrivateKey.load_pkcs1(p)
self.dectex = rsa.decrypt(content, privkey).decode()
if datetime.datetime.now() > \
datetime.datetime.strptime(self.dectex,'%Y%m%d_%H%M'):
# print("APP Expired, Please Contact Admin, Exit!! ")
self.exit_msg('APP过期, 请联系管理员, 退出!')
except Exception as e:
PrintLog.log(e)
sys.exit()
self.df = None
pass
def check_expired(self):
diff_t1 = datetime.datetime.strptime(self.dectex,'%Y%m%d_%H%M')
PrintLog.log('APP过期时间',diff_t1)
def exit_msg(self, msg):
PrintLog.log(msg)
win32api.MessageBox(0,msg,'提示',win32con.MB_OK)
sys.exit()
def pop_msg(self, msg):
PrintLog.log(msg)
win32api.MessageBox(0,msg,'提示',win32con.MB_OK)
def read_file(self, **kwargs):
df = kwargs.get('df_input',None)
df_name = kwargs.get('df_input_name',None)
try:
if df is None or not df.any().any() and df_name:
df = pd.read_excel(df_name,na_filter=False)
# df = pd.read_csv(self.df_name,na_filter=False)
except Exception as e:
PrintLog.log(e)
df = pd.DataFrame()
self.df = df.copy()
return df
def open_path(self, path, **kwargs):
isOpenPath = kwargs.get('isOpenPath',0)
if isOpenPath:
if os.path.isdir(path):
os.system('explorer %s'%path)
else:
os.system('explorer %s'%os.path.dirname(path))
def sort_duplicates_data(self,df,sortlist=['']):
if set(sortlist).issubset(set(df.columns)):
df.drop_duplicates(sortlist,keep='first',inplace=True)
df.sort_values(sortlist,ascending=False,inplace=True)
df.reset_index(drop=True,inplace=True)
df.fillna('',inplace=True)
return df
def save_file(self, **kwargs):
isSave = kwargs.get('isSave',0)
# dfi = kwargs.get('df_input',None)
dfi = self.df
df = kwargs.get('df_output',None)
save_name = kwargs.get('df_output_name',None)
try:
if isSave and save_name and df is not None:
if dfi is not None and df.equals(dfi):
print('No Updated with File %s'%os.path.relpath(save_name))
else:
df.to_excel(save_name,index=0)
# df.to_csv(self.df_name,encoding='utf_8_sig',index=0)
PrintLog.log('更新 %s 数据 【%s】 条'%(os.path.basename(save_name),len(df)))
return 1
except Exception as e:
PrintLog.log(e)
return 0
def check_df_expand(self, **kwargs):
df = self.read_file(**kwargs)
if not df['联系'].any():
df_name = kwargs.get('df_input_name',None)
self.pop_msg('正在打开 df_expand,请补充代理人、联系电话等信息')
ret = os.system('start excel %s'%df_name)
if ret > 0:
self.pop_msg('没有安装打开表格的 excel,请手动打开df_expand')
os.system('explorer %s'%os.path.dirname(df_name))
sys.exit()
else:
print('Continute to Export')
def split_list(self, regex,L):
return list(filter(None,re.split(regex,L)))
if __name__ == '__main__':
CommomClass1 = CommomClass()
CommomClass1.check_expired()
| [
"noreply@github.com"
] | autolordz.noreply@github.com |
20a937c65dfbea6b05584dc147b820093b524b73 | e72ce489ebb26c4cb24aaf123ba59148573aebdb | /creatures/physics.py | 34a3374e4332f0f78983c3f1836678b5e021a0bb | [] | no_license | SimonCarryer/mutant_flowers | 6f4e2016a5871214c01b1a79c89ca1c25fcca059 | 4646663410251f6db27861b63f1eacd56c3c1604 | refs/heads/master | 2023-07-13T02:39:55.218853 | 2021-08-21T06:43:56 | 2021-08-21T06:43:56 | 353,199,343 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import numpy as np
def normalise_vector(a, axis=-1, order=2):
l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
l2[l2 == 0] = 1
return (a / np.expand_dims(l2, axis))[0]
def magnitude_vector(vector):
return np.linalg.norm(vector)
def distance_between_points(current, target):
return np.linalg.norm(target - current)
| [
"simoncarryer@gmail.com"
] | simoncarryer@gmail.com |
15b3cfcd42a20500ce5b34dabd3cf34ef53a67b1 | e890fd6988845f95423b00c73cd73b94173881eb | /mainpage/__init__.py | 2fbd40a0f44eb768ac2f4fb0bd04868858873c1a | [] | no_license | wanwanbree/mywork | e541639becadafc7d64358242a7ec62af43a4756 | cb094596b96dc71635e2fba56ba5d92bfa1be349 | refs/heads/master | 2023-03-13T02:14:36.305718 | 2021-03-01T17:39:10 | 2021-03-01T17:39:10 | 333,817,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | # -- coding: utf-8 --
# @ModuleName:__init__.py
# @Author:wanwan
# @Time:2021/3/1 23:51 | [
"[15578303150@163.com]"
] | [15578303150@163.com] |
e015be57d3fba1d0cdea0f917b20355a047ff2fe | d1d03bf8b00698efc420e0ebb1e06df0eb6c6379 | /Course1/Part5/5.1.py | 79dfd7971d9a4715eed30a5a352985b2072dddab | [] | no_license | supersowbow/P4E | fb86b1b5819058188e3a81962794772c473903cf | 0749f643a6aff491689c178f7b67f0c6c91eaf24 | refs/heads/master | 2021-08-16T07:04:27.432137 | 2017-11-19T07:50:56 | 2017-11-19T07:50:56 | 111,253,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | """Exercise 1: Write a program which repeatedly reads numbers until the user enters "done". Once "done" is entered, print out the total, count, and average of the numbers. If the user enters anything other than a number, detect their mistake using try and except and print an error message and skip to the next number."""
num = 0
total = 0.0
while True:
sval = input("Enter a number or Type 'done' if finished: ")
if sval == "done":
break
try:
fval = float(sval)
except:
print("Please enter a number or type done.")
continue
num = num + 1
total = total + fval
print("Done :)")
print(num, total, total/num)
| [
"kybow@protonmail.com"
] | kybow@protonmail.com |
541a20915197d5eb3a7603ea78ed0660a5337347 | b92ac6cde56fc83562653486e80d6056bf8435da | /vts/examples_parser.py | 807ed3cb793423d87d13300f07608123828ce82e | [] | no_license | grantpassmore/Real-Algebra-Playground | 3aa800979db44d3b9031a2729c475c0ec12e5421 | be72cb44ea62372266f531c5f81d2003bf6df016 | refs/heads/master | 2020-12-24T16:23:45.835458 | 2014-04-21T15:16:00 | 2014-04-21T15:16:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,160 | py | """Module for parsing GRF format examples.
Supports faster parsing from pickled python objects.
"""
import sys
import os
import errno
import pickle
import itertools
import sympy
import grf_parser as gparser
import isym
x = sympy.var('x')
# "With pyparsing, the default sys recursion limit must be increased for
# parsing w.r.t. even relatively small recursive grammars."
# -- https://github.com/grantpassmore/Real-Algebra-Playground/blob/master/GRF/smt2.py
sys.setrecursionlimit(2 ** 20)
def write_sympy_parseable_examples():
"""Parses examples form ../GRF/Examples directory and pickles them to
./Examples directory
"""
dir_examples = examples_preformat_uni()
for dir in dir_examples:
try:
os.makedirs(os.path.join('Examples', dir))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise exception
for file in dir_examples[dir]:
print file
# could do it manually (which might make it faster)
with open(os.path.join('Examples', dir, file + '.pkl'), 'wb') as f:
pickle.dump(dir_examples[dir][file], f)
def examples_preformat_uni():
"""Converts examples to be univariate
Keyword arguments:
return -- map of maps where first level key is directory name,
second level keys are file names and values are lists of examples in that file.
Single example consists of tuples (antecent, succedent)
"""
dirResults = parse_example_dirs()
dirFormulas = {}
for dir in dirResults:
fileFormulas = {}
for file in dirResults[dir]:
file_examples = []
for example in dirResults[dir][file]:
#print example
#print example['antecedent']['fmls']
uni_ante = make_univariate(example['antecedent']['fmls'])
uni_succ = make_univariate(example['succedent']['fmls'])
file_examples.append((uni_ante, uni_succ))
#formulas = vts_preformat(uni_ante, uni_succ)
fileFormulas[file] = file_examples
dirFormulas[dir] = fileFormulas
return dirFormulas
def make_univariate(relations):
"""Converts a list of relations to univariate ones
and transforms right hand sides of relations to zero.
Keyword arguments:
relations -- list of sympy relations (multivariate, arbitrary relations)
return -- lists of univariate relations that have right hand side of zero
"""
acc = []
for relation in relations:
old_relation = relation.lhs - relation.rhs
vars = old_relation.free_symbols
zipped_vars = zip(vars, map(lambda p: x, range(len(vars))))
new_lhs = old_relation.subs(zipped_vars).expand()
cls = relation.__class__
acc.append(cls(new_lhs, 0))
return acc
def unpickle_dirs_gen():
base_dir = os.path.join('Examples')
met_dir = os.path.join(base_dir, 'MetiTarski')
hid_dir = os.path.join(base_dir, 'HidingProblems')
dirGen = itertools.chain( os.walk(met_dir), os.walk(hid_dir))
all_names = []
for dirpath, dirnames, filenames in dirGen:
all_names += map(lambda s: os.path.join(dirpath, s), filenames)
def unpickle_file(filename):
with open(filename, 'rb') as f:
shorter = os.path.basename(filename)
# print
sys.stdout.write("parsing %s" %shorter)
sys.stdout.flush()
content = pickle.load(f)
sys.stdout.write("\rdone parsing %s\n" %shorter)
sys.stdout.flush()
return content
return itertools.imap(unpickle_file, all_names)
def unpickle_dirs():
"""Loads python objects from "./Examples" directory
Keyword arguemtns:
return -- map of maps where first level key is directory name,
second level keys are file names and values are lists of examples in that file.
Single example consists of tuples (antecent, succedent)
"""
base_dir = os.path.join('Examples')
met_dir = os.path.join(base_dir, 'MetiTarski')
hid_dir = os.path.join(base_dir, 'HidingProblems')
met_results = unpickle_from_dir(met_dir)
hid_results = unpickle_from_dir(hid_dir)
return {
'Metitarski':met_results,
'HidingProblems':hid_results
}
def unpickle_from_dir(dir):
"""Unpickles files in directory
Keyword arguments:
dir -- directory where files are located
return -- map with file names as keys and unpickled objects as values
"""
print "unpickling examples from %s" %dir
ret = {}
for dirpath, dirnames, filenames in \
os.walk(dir):
for filename in filenames:
print filename
with open(os.path.join(dirpath, filename), 'rb') as f:
content = pickle.load(f)
ret[filename] = content
# break
return ret
def parse_example_dirs():
"""Parses examples in ../GRF/Examples/Metitarski and
../GRF/Examples/HidingProblems.
Keyword arguments:
return -- two level where first level keys are directories, second level keys
are file names and values are examples in those files.
"""
base_dir = os.path.join('..', 'GRF', 'Examples')
met_dir = os.path.join(base_dir, 'MetiTarski')
hid_dir = os.path.join(base_dir, 'HidingProblems')
met_results = parse_files_from_dir(met_dir)
#hid_results = parse_files_from_dir(hid_dir)
return {
'Metitarski':met_results,
#'HidingProblems':hid_results
}
def parse_files_from_dir(dir):
"""Parses files from directory
Keyword arguments:
dir -- directory where files are located
return -- map with filenames as keys and examples in those files as values.
"""
print "parsing examples from %s" %dir
ret = {}
count = 0
for dirpath, dirnames, filenames in \
os.walk(dir):
for filename in filenames:
count += 1
#if not count in range(50, 90):
# continue
print filename
f = open(os.path.join(dirpath, filename))
content = f.read()
#print "len: %d" %len(list(gparser.lit_sequent.scanString(content)))
#print "len: %d" %len(list(gparser.cmd.scanString(content)))
resultList = []
for results, start, end in gparser.cmd.scanString(content):
#print results
resultList.append(results)
ret[filename] = resultList
#break #TODO delete when parsing all files
return ret
| [
"kristjan.liiva1@gmail.com"
] | kristjan.liiva1@gmail.com |
95d9264aa5c9795340c3d30446927184fb413a67 | dbf35ec4f640799ffa25d0d30c002fb4e7c82efa | /test/gw/input.py | 80c1972c29062cec0989d99fb31c26c8da2e3a11 | [] | no_license | Shibu778/LaDa | 8962553c2f62882960b402f1a0761a9b80410ca6 | 9c0ab667f94dc4629404a8ec99cbeaa323f0c8b3 | refs/heads/master | 2023-03-16T19:58:44.411851 | 2013-06-16T10:59:11 | 2013-06-16T10:59:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | from pylada.crystal.binary import zinc_blende
from pylada.mpi import world
lattice = zinc_blende()
lattice.scale = 5.45
for site in lattice.sites: site.type = 'Si'
functional = GWVasp()
""" VASP functional """
functional.kpoints = "Automatic generation\n0\nMonkhorst\n2 2 2\n0 0 0"
functional.precision = "accurate"
functional.ediff = 1e-5
functional.encut = 0.8
functional.lorbit = 10
functional.npar = 2
functional.lplane = True
functional.addgrid = True
functional.set_smearing = "metal", 0.01
functional.relaxation = "static"
functional.nbands = 20
functional.add_specie = "Si", "pseudos/Si"
result = functional(lattice.to_structure(), comm=world)
| [
"mayeul.davezac@nrel.gov"
] | mayeul.davezac@nrel.gov |
cb8577c3dcf6e03871afeb5044afd9a6a6e5b22c | a5e6e0e1a126dadb5b9dd47dd8674c01c6bfb0d3 | /debugComparisonOperatorDrills.py | 4e04206a266b7752e6a45e03b6f68efff2bf6020 | [] | no_license | bvanboxtel/variableDrill | 55bbde205fdd7d6bca3040656516af61ce72fdf3 | 95551f2f1b93b093f9b2d7c7ad15731422f4e10a | refs/heads/master | 2020-09-03T00:39:27.472275 | 2019-11-27T04:46:06 | 2019-11-27T04:46:06 | 219,342,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | '''
For this assignment you should look at the code created below and find the error.
For each task, there will be one error that you must find and correct.
Sometimes there will be an explanation of the problem and/or tips that can help you
complete the tasks.
EXAMPLE TASK:
'''
#EX)
# )Broken:
# )5 = 5
# )Correct:
5 == 5
'''
END EXAMPLE
'''
'''
START HERE
'''
#1)
# )Broken:
# )4 = 4
# )Correct:4==4
#2)
# )Broken:
# )3 = 3
# )Correct:3==3
#3)
# )Broken:
# )5 = = 5
# )Correct:5==5
#4)
# )Broken:
# )4 = = 4
# )Correct:4==4
#5)
# )Broken:
# )a = 4
# )a = 3
# )Correct:
a=4
b=3
#6)
# )Broken:
# )a = 4
# )b = 3
# )a = b
# )Correct:a==b
#7)
# )Broken:
# )a = 3
# )a < 4
# )PROBLEM: I want it to be true when a is less than OR equal to 4
# )Correct:a<=4
#8)
# )Broken:
# )a = 3
# )a > 4
# )PROBLEM: I want it to be true when a is greater than OR equal to 4
# )Correct:a>=4
#9)
# )Broken:
# )a = 3
# )b = 3
# )a < 3
# )PROBLEM: I want it to be true when a is less than OR equal to b
# )Correct: a<=b
#10)
# )Broken:
# )a = 3
# )b = 2
# )a > b
# )PROBLEM: I want it to be true when a is greater than OR equal to b
# )Correct:a>=b
| [
"57278831+bvanboxtel@users.noreply.github.com"
] | 57278831+bvanboxtel@users.noreply.github.com |
09e35450b6520f6def9cc7c4b3196fd617f912dc | f7b3c098db4dcea347eac5ee18fc19b84cbf2059 | /scrubadub/scrubbers.py | fa06a388bb3e10f0b9bdd5a8bc93ad220ffe8f15 | [
"MIT"
] | permissive | jb08/scrubadub | f625a4bc265dfb743ab91f0a1449629392233cb2 | 7e7b6acc3938ded1e596960b6f095b7e79ae503e | refs/heads/master | 2021-01-16T22:03:02.271663 | 2016-01-14T20:25:32 | 2016-01-14T20:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,748 | py | import re
import operator
import textblob
import nltk
from . import exceptions
from . import detectors
from .filth import Filth, MergedFilth
class Scrubber(object):
"""The Scrubber class is used to clean personal information out of dirty
dirty text. It manages a set of ``Detector``'s that are each responsible
for identifying their particular kind of ``Filth``.
"""
def __init__(self, *args, **kwargs):
super(Scrubber, self).__init__(*args, **kwargs)
# instantiate all of the detectors
self.detectors = {}
for type, detector_cls in detectors.types.iteritems():
self.detectors[type] = detector_cls()
def clean(self, text, **kwargs):
"""This is the master method that cleans all of the filth out of the
dirty dirty ``text``. All keyword arguments to this function are passed
through to the ``Filth.replace_with`` method to fine-tune how the
``Filth`` is cleaned.
"""
if not isinstance(text, unicode):
raise exceptions.UnicodeRequired
clean_chunks = []
filth = Filth()
for next_filth in self.iter_filth(text):
clean_chunks.append(text[filth.end:next_filth.beg])
clean_chunks.append(next_filth.replace_with(**kwargs))
filth = next_filth
clean_chunks.append(text[filth.end:])
return u''.join(clean_chunks)
def iter_filth(self, text):
"""Iterate over the different types of filth that can exist.
"""
# currently doing this by aggregating all_filths and then sorting
# inline instead of with a Filth.__cmp__ method, which is apparently
# much slower http://stackoverflow.com/a/988728/564709
#
# NOTE: we could probably do this in a more efficient way by iterating
# over all detectors simultaneously. just trying to get something
# working right now and we can worry about efficiency later
all_filths = []
for detector in self.detectors.itervalues():
for filth in detector.iter_filth(text):
if not isinstance(filth, Filth):
raise TypeError('iter_filth must always yield Filth')
all_filths.append(filth)
all_filths.sort(key=operator.attrgetter("beg"))
# this is where the Scrubber does its hard work and merges any
# overlapping filths.
if not all_filths:
raise StopIteration
filth = all_filths[0]
for next_filth in all_filths[1:]:
if filth.end < next_filth.beg:
yield filth
filth = next_filth
else:
filth = filth.merge(next_filth)
yield filth
| [
"dean.malmgren@datascopeanalytics.com"
] | dean.malmgren@datascopeanalytics.com |
dc40bd6b6e3a7c1a27ac23ecaac487a6ac1ddf4e | 362e5473d2d2f154c30e5530c73346d67ee6953e | /leetcode/python/4_findNumberIn2DArray.py | 92be17f270bc6cdaaef5dc609bfe7caacad86208 | [] | no_license | greebear/MyTechnologyStack | 467a0f5abf96e852644502a5c3ef29876eb35860 | 007b323f99e99d762e64fa0ce8fcac6eee89cb76 | refs/heads/main | 2023-06-06T10:52:53.364722 | 2021-06-21T02:12:43 | 2021-06-21T02:12:43 | 354,377,470 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
if len(matrix) == 0: return False
i, j = 0, len(matrix[0]) - 1
while i < len(matrix) and j >= 0:
if matrix[i][j] == target:
return True
elif matrix[i][j] < target:
i += 1
else:
j -= 1
return False
| [
"greebear@qq.com"
] | greebear@qq.com |
75bcca176acd9a0af9f6600087c1dea6d24709af | f37b8af090a8da00654a8d94654c9596d6b8df00 | /Data_processing_python/pandas/ftpDownloader.py | 4e7c6661f194f9c92c85a7e93146d6e2cfd19d82 | [] | no_license | harshitkakkar/PythonBasics | 0cfb67242e397b5f5485b1a513586b0b1aa2d428 | 5b6d15a0c6ea1ce686aa8a187d6cbadbfb22bc2d | refs/heads/master | 2021-07-19T20:15:04.574352 | 2020-04-30T08:45:31 | 2020-04-30T08:45:31 | 149,562,901 | 0 | 0 | null | 2020-04-30T08:45:32 | 2018-09-20T06:35:09 | null | UTF-8 | Python | false | false | 1,632 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 12 12:31:46 2020
@author: hkacker
"""
from ftplib import FTP , error_perm
import os
import glob
#import patoolib
def ftpdownloader(stationID,startYear,endYear,
host="ftp.pyclass.com",user="student@pyclass.com",password="student123") :
ftp = FTP(host)
ftp.login(user,password)
# print(ftp.nlst())
ftp.cwd("Data")
if not os.path.exists("C:\\Users\\hkacker\\Documents\\Python_Scripts\\input"):
os.makedirs("C:\\Users\\hkacker\\Documents\\Python_Scripts\\input")
os.chdir("C:\\Users\\hkacker\\Documents\\Python_Scripts\\input")
for year in range (startYear,endYear+11):
fullpath='/Data/%s/%s-%s.gz' %(year,stationID,year)
filename = os.path.basename(fullpath)
try:
with open(filename, 'wb') as file :
ftp.retrbinary('RETR %s' %fullpath, file.write)
print("%s succesfully downloaded" % filename)
except error_perm:
print("%s is not available" % filename)
os.remove(filename)
ftp.close()
def extractedfiles(indir="C:\\Users\\hkacker\\Documents\\Python_Scripts\\input",
out="C:\\Users\\hkacker\\Documents\\Python_Scripts\\input\\Extracted"):
os.chdir(indir)
archives = glob.glob("*.gz*")
if not os.path.exists(out):
os.makedirs(out)
files = os.listdir("Extracted")
for archive in archives :
if archive[:-3] not in files :
patoolib.extract_archive(archive, outdir=out)
| [
"hkacker@deloitte.com"
] | hkacker@deloitte.com |
1db87ae81b0c2047cf759aee09108a756205d110 | d50ba03d9968598669ac0e702ce6e976f2b76960 | /InserirDeletarCards.py | 60b8317c59d92e1b34e70d03f831587926908bd4 | [] | no_license | MelicHamtes/EMA | 6749af41f9a2c0367128462719e4bb38a38f0803 | da97493a8449c5bc17fc3b067f73d815f54cb1e3 | refs/heads/master | 2023-08-19T09:24:44.616832 | 2021-10-07T12:31:00 | 2021-10-07T12:31:00 | 275,916,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,638 | py | import tkinter
from tkinter import messagebox
from Banco_armazenamento.Banco_dados import Banco_dados
from Deck import Deck
import sys
class Editar_cartoes:
def __init__(self, nome_deck):
self.janela = tkinter.Tk()
self.janela.title('Configurações de card')
self.janela.geometry("460x190+850+150")
self.janela.resizable(0,0)
self.janela.protocol('WM_DELETE_WINDOW', lambda: self.janela.destroy())
# Chama da classe controladora de dados
self.control = Deck()
self.control.deck = nome_deck
bd = Banco_dados()
bd.conectar()
self.codigo_deck = bd.puxar_codigo_deck(nome_deck)
fram=tkinter.Frame(self.janela)
fr=tkinter.Frame(fram)
frameF=tkinter.LabelFrame(fr)
lb1 = tkinter.Label(frameF, text='Frente:')
lb1.pack(side=tkinter.LEFT)
self.ent1 = tkinter.Text(frameF, width = 30, height=4)
self.ent1.pack()
frameF.pack()
frameV= tkinter.LabelFrame(fr)
lb2 =tkinter.Label(frameV, text='Verso: ')
lb2.pack(side=tkinter.LEFT)
self.ent2 =tkinter.Text(frameV, width=30, height = 4)
self.ent2.pack()
frameV.pack()
framebt=tkinter.LabelFrame(fr)
#self.btM=tkinter.Button(framebt,text="Mostar Cards",width=10,height=1, command=self.mostrar_deck)
#self.btM.pack(side=tkinter.LEFT)
btL=tkinter.Button(framebt,text="Limpar",width=10,height=1, command=self.limpar_entries)
btL.pack(side=tkinter.RIGHT)
framebt.pack(expand=1,fill=tkinter.BOTH)
fr.pack(side=tkinter.LEFT)
frameBot=tkinter.LabelFrame(fram,text="Cartãoº")
btADIC=tkinter.Button(frameBot,text="Adicionar", command=self.adicionar)
btADIC.pack(side=tkinter.BOTTOM,expand=1,fill=tkinter.BOTH)
btDelet=tkinter.Button(frameBot,text="Deletar", command=self.remover)
btDelet.pack(side=tkinter.BOTTOM,expand=1,fill=tkinter.BOTH)
btALt=tkinter.Button(frameBot,text="Alterar",command=self.alterar)
btALt.pack(side=tkinter.BOTTOM,expand=1,fill=tkinter.BOTH)
frameBot.pack(side=tkinter.RIGHT,anchor=tkinter.N)
fram.pack(anchor=tkinter.CENTER)
self.mostrar_deck()
self.janela.mainloop()
# método adicionador de cards
def adicionar(self):
try:
if self.control.frente == '' or self.control.verso == '':
raise Exception('Vazio')
self.fechar_toplevel()
f = self.ent1.get(1.0, tkinter.END)
v = self.ent2.get(1.0, tkinter.END)
self.control.frente = f
self.control.verso = v
#print(f,v) # TESTE
bd = Banco_dados()
bd.conectar()
bd.inserir_card(self.control.frente, self.control.verso, self.codigo_deck)
self.mostrar_deck()
self.limpar_entries()
bd.fechar_banco()
#print('Sucesso') # TESTE
except (AttributeError):
self.fechar_toplevel()
f = self.ent1.get(1.0, tkinter.END)
v = self.ent2.get(1.0, tkinter.END)
self.control.frente = f
self.control.verso = v
#print(f,v) # TESTE
bd = Banco_dados()
bd.conectar()
bd.inserir_card(self.control.frente, self.control.verso, self.codigo_deck)
bd.fechar_banco()
self.mostrar_deck()
self.limpar_entries()
#print('Sucesso') # TESTE
except (Exception):
messagebox.showerror('Botão: Adicionar','ERRO: Frente ou verso estão vazios')
# método removedor de cards
def remover(self):
try:
f = self.ent1.get(1.0, tkinter.END)
self.control.frente = f
bd = Banco_dados()
bd.conectar()
c = bd.puxar_codigo_card(self.control.frente)
bd.deletar_card(c)
self.fechar_toplevel()
self.mostrar_deck()
self.limpar_entries()
bd.fechar_banco()
#print('Sucesso') # TESTE
except (TypeError):
messagebox.showerror('Botão: Remover', 'ERRO: o código não corresponde, possiveis erros:\n1- verifique se o valor é um número\n2- verifique se a caixa não está vazia\n3- verifique se o codigo existe.')
except (AttributeError):
self.fechar_toplevel()
bd = Banco_dados()
bd.conectar()
f = self.ent1.get(1.0, tkinter.END)
c = bd.puxar_codigo_card(self.control.frente)
bd.deletar_card(c)
self.limpar_entries()
bd.fechar_banco()
self.mostrar_deck()
#print('Sucesso') # TESTE
# método alterador de cards
def alterar(self):
try:
f = self.ent1.get(1.0, tkinter.END)
v = self.ent2.get(1.0, tkinter.END)
self.control.frente = f
self.control.verso = v
if self.control.frente == '' or self.control.verso == '':
raise Exception('Vazio')
bd = Banco_dados()
bd.conectar()
#self.control.codigo = c
if self.frente_antigo:
c = bd.puxar_codigo_card(self.frente_antigo)
bd.alterar_card(c, self.control.frente, self.control.verso)
self.fechar_toplevel()
self.atualizar()
self.mostrar_deck()
self.limpar_entries()
bd.fechar_banco()
#print('Sucesso') # TESTE
except (Exception):
messagebox.showerror('Botão: Alterar', 'ERRO: o código não corresponde, possiveis erros:\n1- verifique se o valor é um número\n2- verifique se a caixa não está vazia\n3- verifique se o codigo existe.')
except (AttributeError):
self.fechar_toplevel()
f = self.ent1.get(1.0, tkinter.END)
v = self.ent2.get(1.0, tkinter.END)
self.control.frente = f
self.control.verso = v
bd = Banco_dados()
bd.conectar()
c = bd.puxar_codigo_card(self.frente_antigo)
bd.alterar_card(c, self.control.frente, self.control.verso)
self.mostrar_deck()
self.limpar_entries()
bd.fechar_banco()
#print('Sucesso') # TESTE
#except (Exception):
#messagebox.showerror('Botão: Alterar','ERRO: Frente ou verso estão vazios')
# método de aviso sobre remoção de cards
def adendo(self):
tkinter.messagebox.showinfo('Adendo', '*Para remover ou pesquisar um card, insira somente o codigo do mesmo*')
def limpar_entries(self):
self.ent1.delete(1.0, tkinter.END)
self.ent2.delete(1.0, tkinter.END)
#self.ent3.delete(0,tkinter.END)
def mostrar_deck(self):
self.janela.geometry('460x190+550+150')
self.toplevel = tkinter.Toplevel(self.janela)
self.toplevel.group(self.janela)
self.toplevel.protocol('WM_DELETE_WINDOW', lambda: messagebox.showinfo('Não','bobão, não pode fechar')) # Reescreve o método de fechar janela (x)
self.toplevel.minsize(400,300)
self.toplevel.maxsize(400,300)
#self.btM['state'] = tkinter.DISABLED
# Chamada de banco de dados
self.bd = Banco_dados(self.control.deck)
self.bd.conectar()
self.codigo_deck = self.bd.puxar_codigo_deck(self.control.deck)
self.listbox = tkinter.Listbox(self.toplevel, font='-size 10', width=40, height=30, bd=0)
self.listbox.bind('<Button-1>', self.card_selecionado)
self.listbox.pack()
try:
self.deck = self.bd.puxar_deck(self.codigo_deck)
deck_keys = list(self.deck.keys())
i = 0
i_2 = 1
for chave, valor in self.deck.items():
cod = self.bd.puxar_codigo_card(deck_keys[i])
a = str(deck_keys[i])+':'+ str(self.deck[chave])
self.listbox.insert(i, a)
i_2 += 1
i += 1
except:
self.listbox.insert(1,'ERRO: Não é possível exibir')
def fechar_toplevel(self):
self.toplevel.destroy()
self.janela.geometry('460x190+550+150')
#self.janela.geometry('400x200+100+100')
# self.btM['state'] = tkinter.NORMAL
self.deck = self.bd.puxar_deck(self.codigo_deck)
self.frente_antigo = ''
def card_selecionado(self, *args):
self.janela.geometry("460x190+350+150")
self.limpar_entries()
card_codigo = self.listbox.curselection()
card = self.listbox.get(card_codigo[0])
for i in range(len(card)):
if card[i] == ':':
i_2 = i + 1
self.frente_antigo = card[:i]
self.verso_antigo = card[i_2:]
self.ent1.insert(1.0, self.frente_antigo)
self.ent2.insert(1.0, self.verso_antigo)
self.janela.lift()
def atualizar(self):
pass
if __name__ == '__main__':
idc = IDC('teste')
| [
"sdml.16@outlook.com"
] | sdml.16@outlook.com |
a50ab7354bd04c8263af34a4f7c90352a755304e | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/scipy/sparse/lil.py | c70f816d9a9118c4cec72b2b2917dca5086450f2 | [
"MIT",
"BSD-3-Clause-Open-MPI",
"BSD-3-Clause",
"GPL-3.0-or-later",
"Apache-2.0",
"Qhull",
"BSD-2-Clause",
"GCC-exception-3.1",
"Python-2.0",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 17,782 | py | """LInked List sparse matrix class
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['lil_matrix','isspmatrix_lil']
from bisect import bisect_left
import numpy as np
from scipy._lib.six import xrange, zip
from .base import spmatrix, isspmatrix
from .sputils import (getdtype, isshape, isscalarlike, IndexMixin,
upcast_scalar, get_index_dtype, isintlike, check_shape,
check_reshape_kwargs)
from . import _csparsetools
class lil_matrix(spmatrix, IndexMixin):
"""Row-based linked list sparse matrix
This is a structure for constructing sparse matrices incrementally.
Note that inserting a single item can take linear time in the worst case;
to construct a matrix efficiently, make sure the items are pre-sorted by
index, per row.
This can be instantiated in several ways:
lil_matrix(D)
with a dense matrix or rank-2 ndarray D
lil_matrix(S)
with another sparse matrix S (equivalent to S.tolil())
lil_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
LIL format data array of the matrix
rows
LIL format row index array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the LIL format
- supports flexible slicing
- changes to the matrix sparsity structure are efficient
Disadvantages of the LIL format
- arithmetic operations LIL + LIL are slow (consider CSR or CSC)
- slow column slicing (consider CSC)
- slow matrix vector products (consider CSR or CSC)
Intended Usage
- LIL is a convenient format for constructing sparse matrices
- once a matrix has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- consider using the COO format when constructing large matrices
Data Structure
- An array (``self.rows``) of rows, each of which is a sorted
list of column indices of non-zero elements.
- The corresponding nonzero values are stored in similar
fashion in ``self.data``.
"""
format = 'lil'
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a LIL matrix from a sparse matrix, a shape tuple, or dense data.

        Parameters
        ----------
        arg1 : spmatrix, (M, N) tuple, or array_like
            Source to construct from.  A sparse matrix is converted to LIL
            format; a shape tuple creates an empty M-by-N matrix; anything
            else is interpreted as a dense matrix.
        shape : tuple, optional
            Must not be combined with a shape tuple in ``arg1`` (raises
            ValueError if both are given).
        dtype : dtype, optional
            Data type of the matrix.  Defaults to the dtype inferred from
            ``arg1`` (float for a bare shape tuple).
        copy : bool, optional
            When ``arg1`` is already a lil_matrix, copy its row/data lists
            instead of sharing them.
        """
        spmatrix.__init__(self)
        self.dtype = getdtype(dtype, arg1, default=float)
        # First get the shape
        if isspmatrix(arg1):
            # Sparse input: convert (or copy) to LIL and adopt its storage.
            if isspmatrix_lil(arg1) and copy:
                A = arg1.copy()
            else:
                A = arg1.tolil()
            if dtype is not None:
                A = A.astype(dtype)
            self._shape = check_shape(A.shape)
            self.dtype = A.dtype
            self.rows = A.rows
            self.data = A.data
        elif isinstance(arg1,tuple):
            if isshape(arg1):
                if shape is not None:
                    raise ValueError('invalid use of shape parameter')
                M, N = arg1
                self._shape = check_shape((M, N))
                self.rows = np.empty((M,), dtype=object)
                self.data = np.empty((M,), dtype=object)
                # np.empty leaves object arrays filled with None; each row
                # needs its own independent Python list.
                for i in range(M):
                    self.rows[i] = []
                    self.data[i] = []
            else:
                raise TypeError('unrecognized lil_matrix constructor usage')
        else:
            # assume A is dense
            try:
                A = np.asmatrix(arg1)
            except TypeError:
                raise TypeError('unsupported matrix type')
            else:
                # Round-trip through CSR to get a properly structured LIL copy.
                from .csr import csr_matrix
                A = csr_matrix(A, dtype=dtype).tolil()
                self._shape = check_shape(A.shape)
                self.dtype = A.dtype
                self.rows = A.rows
                self.data = A.data
def __iadd__(self,other):
self[:,:] = self + other
return self
def __isub__(self,other):
self[:,:] = self - other
return self
def __imul__(self,other):
if isscalarlike(other):
self[:,:] = self * other
return self
else:
return NotImplemented
def __itruediv__(self,other):
if isscalarlike(other):
self[:,:] = self / other
return self
else:
return NotImplemented
# Whenever the dimensions change, empty lists should be created for each
# row
def getnnz(self, axis=None):
    """Number of stored values, including explicit zeros.

    Parameters
    ----------
    axis : None, 0, or 1, optional
        None (default) gives the total count; 0 counts per column;
        1 counts per row. Negative axes count from the end.

    Raises
    ------
    ValueError
        If the normalized axis is not 0 or 1.
    """
    if axis is None:
        # Generator expression: no need to materialize a list just to sum.
        return sum(len(rowvals) for rowvals in self.data)
    if axis < 0:
        axis += 2
    if axis == 0:
        # Per-column counts: each row's sorted index list bumps the
        # counters of the columns it touches.
        out = np.zeros(self.shape[1], dtype=np.intp)
        for row in self.rows:
            out[row] += 1
        return out
    elif axis == 1:
        # Per-row counts are just the row-list lengths.
        return np.array([len(rowvals) for rowvals in self.data],
                        dtype=np.intp)
    else:
        raise ValueError('axis out of bounds')
def count_nonzero(self):
    """Number of stored entries whose value is actually non-zero."""
    total = 0
    for rowvals in self.data:
        total += np.count_nonzero(rowvals)
    return total
getnnz.__doc__ = spmatrix.getnnz.__doc__
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def __str__(self):
    """Triplet-style listing: one '(i, j)\\tvalue' line per stored entry."""
    pieces = []
    for i, row in enumerate(self.rows):
        for pos, j in enumerate(row):
            pieces.append(" %s\t%s\n" % (str((i, j)), str(self.data[i][pos])))
    # Drop the trailing newline (empty matrix yields an empty string).
    return ''.join(pieces)[:-1]
def getrowview(self, i):
    """Returns a view of the 'i'th row (without copying)."""
    view = lil_matrix((1, self.shape[1]), dtype=self.dtype)
    # Share the underlying lists: writes through the view mutate self.
    view.rows[0] = self.rows[i]
    view.data[0] = self.data[i]
    return view
def getrow(self, i):
    """Returns a copy of the 'i'th row."""
    i = self._check_row_bounds(i)
    row_copy = lil_matrix((1, self.shape[1]), dtype=self.dtype)
    # Shallow-copy the lists so the result is detached from self.
    row_copy.rows[0] = list(self.rows[i])
    row_copy.data[0] = list(self.data[i])
    return row_copy
def _check_row_bounds(self, i):
    """Normalize a (possibly negative) row index; raise when out of range."""
    n_rows = self.shape[0]
    if i < 0:
        i += n_rows
    if not (0 <= i < n_rows):
        raise IndexError('row index out of bounds')
    return i
def _check_col_bounds(self, j):
    """Normalize a (possibly negative) column index; raise when out of range."""
    n_cols = self.shape[1]
    if j < 0:
        j += n_cols
    if not (0 <= j < n_cols):
        raise IndexError('column index out of bounds')
    return j
def __getitem__(self, index):
    """Return the element(s) index=(i, j), where j may be a slice.
    This always returns a copy for consistency, since slices into
    Python lists return copies.
    """
    # Scalar fast path first
    if isinstance(index, tuple) and len(index) == 2:
        i, j = index
        # Use isinstance checks for common index types; this is
        # ~25-50% faster than isscalarlike. Other types are
        # handled below.
        if ((isinstance(i, int) or isinstance(i, np.integer)) and
                (isinstance(j, int) or isinstance(j, np.integer))):
            # Single-element lookup delegated to the Cython kernel.
            v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
                                       self.rows, self.data,
                                       i, j)
            return self.dtype.type(v)
    # Utilities found in IndexMixin
    i, j = self._unpack_index(index)
    # Proper check for other scalar index types
    i_intlike = isintlike(i)
    j_intlike = isintlike(j)
    if i_intlike and j_intlike:
        v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
                                   self.rows, self.data,
                                   i, j)
        return self.dtype.type(v)
    elif j_intlike or isinstance(j, slice):
        # column slicing fast path
        # Normalize i to a range and j to a slice so _get_row_ranges
        # can walk the selected rows in order.
        if j_intlike:
            j = self._check_col_bounds(j)
            j = slice(j, j+1)
        if i_intlike:
            i = self._check_row_bounds(i)
            i = xrange(i, i+1)
            i_shape = None
        elif isinstance(i, slice):
            i = xrange(*i.indices(self.shape[0]))
            i_shape = None
        else:
            i = np.atleast_1d(i)
            i_shape = i.shape
        if i_shape is None or len(i_shape) == 1:
            return self._get_row_ranges(i, j)
    # General fancy indexing: broadcast i and j into matching arrays.
    i, j = self._index_to_arrays(i, j)
    if i.size == 0:
        return lil_matrix(i.shape, dtype=self.dtype)
    new = lil_matrix(i.shape, dtype=self.dtype)
    i, j = _prepare_index_for_memoryview(i, j)
    _csparsetools.lil_fancy_get(self.shape[0], self.shape[1],
                                self.rows, self.data,
                                new.rows, new.data,
                                i, j)
    return new
def _get_row_ranges(self, rows, col_slice):
    """
    Fast path for indexing in the case where column index is slice.
    This gains performance improvement over brute force by more
    efficient skipping of zeros, by accessing the elements
    column-wise in order.
    Parameters
    ----------
    rows : sequence or xrange
        Rows indexed. If xrange, must be within valid bounds.
    col_slice : slice
        Columns indexed
    """
    # Resolve the slice against the actual column count.
    j_start, j_stop, j_stride = col_slice.indices(self.shape[1])
    col_range = xrange(j_start, j_stop, j_stride)
    nj = len(col_range)
    new = lil_matrix((len(rows), nj), dtype=self.dtype)
    # The Cython kernel fills the result row by row from the selection.
    _csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1],
                                     self.rows, self.data,
                                     new.rows, new.data,
                                     rows,
                                     j_start, j_stop, j_stride, nj)
    return new
def __setitem__(self, index, x):
    """Assign x at index=(i, j); supports scalar, slice and fancy indexing."""
    # Scalar fast path first
    if isinstance(index, tuple) and len(index) == 2:
        i, j = index
        # Use isinstance checks for common index types; this is
        # ~25-50% faster than isscalarlike. Scalar index
        # assignment for other types is handled below together
        # with fancy indexing.
        if ((isinstance(i, int) or isinstance(i, np.integer)) and
                (isinstance(j, int) or isinstance(j, np.integer))):
            x = self.dtype.type(x)
            if x.size > 1:
                # Triggered if input was an ndarray
                raise ValueError("Trying to assign a sequence to an item")
            _csparsetools.lil_insert(self.shape[0], self.shape[1],
                                     self.rows, self.data, i, j, x)
            return
    # General indexing
    i, j = self._unpack_index(index)
    # shortcut for common case of full matrix assign:
    if (isspmatrix(x) and isinstance(i, slice) and i == slice(None) and
            isinstance(j, slice) and j == slice(None)
            and x.shape == self.shape):
        # Adopt the converted matrix's row/data lists wholesale.
        x = lil_matrix(x, dtype=self.dtype)
        self.rows = x.rows
        self.data = x.data
        return
    i, j = self._index_to_arrays(i, j)
    if isspmatrix(x):
        x = x.toarray()
    # Make x and i into the same shape
    x = np.asarray(x, dtype=self.dtype)
    x, _ = np.broadcast_arrays(x, i)
    if x.shape != i.shape:
        raise ValueError("shape mismatch in assignment")
    # Set values
    i, j, x = _prepare_index_for_memoryview(i, j, x)
    _csparsetools.lil_fancy_set(self.shape[0], self.shape[1],
                                self.rows, self.data,
                                i, j, x)
def _mul_scalar(self, other):
    """Scalar multiplication helper; always returns a new matrix."""
    if other == 0:
        # Multiplying by zero: every entry vanishes, so return empty.
        return lil_matrix(self.shape, dtype=self.dtype)
    res_dtype = upcast_scalar(self.dtype, other)
    scaled = self.copy().astype(res_dtype)
    # Scale the stored values row by row.
    for j, rowvals in enumerate(scaled.data):
        scaled.data[j] = [val * other for val in rowvals]
    return scaled
def __truediv__(self, other):  # self / other
    """Scalar division stays in LIL; anything else defers to CSR."""
    if not isscalarlike(other):
        return self.tocsr() / other
    quotient = self.copy()
    # Divide every stored value by the scalar, row by row.
    for j, rowvals in enumerate(quotient.data):
        quotient.data[j] = [val / other for val in rowvals]
    return quotient
def copy(self):
    from copy import deepcopy
    # Deep-copy the per-row lists so the clone is fully independent.
    duplicate = lil_matrix(self.shape, dtype=self.dtype)
    duplicate.data = deepcopy(self.data)
    duplicate.rows = deepcopy(self.rows)
    return duplicate
copy.__doc__ = spmatrix.copy.__doc__
def reshape(self, *args, **kwargs):
    """Reshape to a size-compatible shape; see spmatrix.reshape for details."""
    shape = check_shape(args, self.shape)
    order, copy = check_reshape_kwargs(kwargs)
    # Return early if reshape is not required
    if shape == self.shape:
        if copy:
            return self.copy()
        else:
            return self
    new = lil_matrix(shape, dtype=self.dtype)
    if order == 'C':
        # Row-major: the flat position of (i, j) is i * ncols + j.
        ncols = self.shape[1]
        for i, row in enumerate(self.rows):
            for col, j in enumerate(row):
                new_r, new_c = np.unravel_index(i * ncols + j, shape)
                new[new_r, new_c] = self[i, j]
    elif order == 'F':
        # Column-major: the flat position of (i, j) is i + j * nrows.
        nrows = self.shape[0]
        for i, row in enumerate(self.rows):
            for col, j in enumerate(row):
                new_r, new_c = np.unravel_index(i + j * nrows, shape, order)
                new[new_r, new_c] = self[i, j]
    else:
        raise ValueError("'order' must be 'C' or 'F'")
    return new
reshape.__doc__ = spmatrix.reshape.__doc__
def resize(self, *shape):
    """Resize in place, truncating or zero-padding rows and columns."""
    shape = check_shape(shape)
    new_M, new_N = shape
    M, N = self.shape
    if new_M < M:
        # Shrink: drop surplus rows entirely.
        self.rows = self.rows[:new_M]
        self.data = self.data[:new_M]
    elif new_M > M:
        # Grow: extend the object arrays and give each new row its own
        # fresh (index list, value list) pair.
        self.rows = np.resize(self.rows, new_M)
        self.data = np.resize(self.data, new_M)
        for i in range(M, new_M):
            self.rows[i] = []
            self.data[i] = []
    if new_N < N:
        # Column indices per row are sorted, so bisect locates the
        # first index >= new_N; everything from there is truncated.
        for row, data in zip(self.rows, self.data):
            trunc = bisect_left(row, new_N)
            del row[trunc:]
            del data[trunc:]
    self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
def toarray(self, order=None, out=None):
    """Materialize the matrix as a dense array."""
    dense = self._process_toarray_args(order, out)
    # Scatter every stored entry into the dense result.
    for i, row in enumerate(self.rows):
        for pos, j in enumerate(row):
            dense[i, j] = self.data[i][pos]
    return dense
toarray.__doc__ = spmatrix.toarray.__doc__
def transpose(self, axes=None, copy=False):
    """Transpose via CSR (which owns a fast transpose), then convert back."""
    as_csr = self.tocsr(copy=copy)
    return as_csr.transpose(axes=axes, copy=False).tolil(copy=False)
transpose.__doc__ = spmatrix.transpose.__doc__
def tolil(self, copy=False):
    """Already in LIL format: return self, or a deep copy when requested."""
    if not copy:
        return self
    return self.copy()
tolil.__doc__ = spmatrix.tolil.__doc__
def tocsr(self, copy=False):
    """Convert to CSR by flattening the per-row index/value lists."""
    # Row lengths give the CSR indptr via a running sum.
    row_lengths = [len(r) for r in self.rows]
    idx_dtype = get_index_dtype(maxval=max(self.shape[1], sum(row_lengths)))
    indptr = np.cumsum([0] + row_lengths, dtype=idx_dtype)
    # Concatenate all row lists into flat indices/data arrays.
    indices = np.array([j for row in self.rows for j in row], dtype=idx_dtype)
    data = np.array([v for vals in self.data for v in vals], dtype=self.dtype)
    from .csr import csr_matrix
    return csr_matrix((data, indices, indptr), shape=self.shape)
tocsr.__doc__ = spmatrix.tocsr.__doc__
def _prepare_index_for_memoryview(i, j, x=None):
    """
    Convert index and data arrays to form suitable for passing to the
    Cython fancy getset routines.
    The conversions are necessary to (i) ensure the integer
    index arrays are in one of the accepted types, and (ii) ensure
    the arrays are writable so that Cython memoryview support doesn't
    choke on them.
    Parameters
    ----------
    i, j
        Index arrays
    x : optional
        Data arrays
    Returns
    -------
    i, j, x
        Re-formatted arrays (x is omitted, if input was None)
    """
    # Promote both index arrays to the wider of the two dtypes.
    if i.dtype > j.dtype:
        j = j.astype(i.dtype)
    elif i.dtype < j.dtype:
        i = i.astype(j.dtype)
    # Cython memoryviews require writable int32/int64 buffers;
    # astype always produces a fresh writable array.
    if not i.flags.writeable or i.dtype not in (np.int32, np.int64):
        i = i.astype(np.intp)
    if not j.flags.writeable or j.dtype not in (np.int32, np.int64):
        j = j.astype(np.intp)
    if x is not None:
        if not x.flags.writeable:
            x = x.copy()
        return i, j, x
    else:
        return i, j
def isspmatrix_lil(x):
    """Is x of lil_matrix type?
    Parameters
    ----------
    x
        object to check for being a lil matrix
    Returns
    -------
    bool
        True if x is a lil matrix, False otherwise
    Examples
    --------
    >>> from scipy.sparse import lil_matrix, isspmatrix_lil
    >>> isspmatrix_lil(lil_matrix([[5]]))
    True
    >>> from scipy.sparse import lil_matrix, csr_matrix, isspmatrix_lil
    >>> isspmatrix_lil(csr_matrix([[5]]))
    False
    """
    # isinstance also accepts subclasses of lil_matrix.
    return isinstance(x, lil_matrix)
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
621de074d9e048060460d7b5562145e37a568939 | ac3c3886cdf6899b4e16e3368d0aba39ce5812d3 | /units_list/rangers.py | 2420d9d79d4533726f2b55d53ef1393063e99435 | [] | no_license | karolSuszczynski/GamesName | 786e68f91610fe3d330b79ac3c2c04446b346a64 | 6b4d300b23850c1bb19db881481e7fa0b44469d4 | refs/heads/main | 2023-07-17T15:23:33.309174 | 2021-08-29T13:52:39 | 2021-08-29T13:52:39 | 331,026,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | from PIL import Image, ImageTk
from units.base_unit import BaseUnit
from units.unit_with_ammo import UnitWithAmmo
from units.possible_action_map import AirbornActionMapGenerator
from units.attacks import get_sword_attack
class Peasant(UnitWithAmmo):
def __init__(self):
super().__init__("img/peasant.png", speed=1, hp=77, rest_ability=4, attack=get_sword_attack(10), reach=1, healing=0, ammo=1, ammo_reach=4, ammo_attack=299)
def try_attack_position(self,x,y,special):
result = super().try_attack_position(x,y,special)
if self.ammo == 0:
self.current_attack=8
image_path="img/hick.png"
img = Image.open(image_path)
img = img.resize((50,50), Image.ANTIALIAS)
self.image = ImageTk.PhotoImage(img)
return result
class Bowman(UnitWithAmmo):
    """Basic archer: weak in melee, moderate ranged attack with 12 arrows."""
    def __init__(self):
        super().__init__(
            "img/bowman.png",
            speed=1, hp=50, rest_ability=4,
            attack=get_sword_attack(5), reach=1, healing=0,
            ammo=12, ammo_reach=5, ammo_attack=get_sword_attack(10))
class Archer(UnitWithAmmo):
    """Improved archer: longer reach and more arrows than the Bowman."""
    def __init__(self):
        super().__init__(
            "img/archer.png",
            speed=1, hp=50, rest_ability=6,
            attack=get_sword_attack(7), reach=1, healing=0,
            ammo=18, ammo_reach=6, ammo_attack=get_sword_attack(12))
class Xbowman(UnitWithAmmo):
    """Crossbowman: tougher and harder-hitting ranged unit."""
    def __init__(self):
        super().__init__(
            "img/xbowman.png",
            speed=1, hp=75, rest_ability=8,
            attack=get_sword_attack(8), reach=1, healing=0,
            ammo=20, ammo_reach=6, ammo_attack=get_sword_attack(20))
class Rifleman(UnitWithAmmo):
    """Fast, hard-hitting shooter with few rounds and minor self-healing."""
    def __init__(self):
        super().__init__(
            "img/rifleman.png",
            speed=2, hp=75, rest_ability=9,
            attack=get_sword_attack(8), reach=1, healing=1,
            ammo=7, ammo_reach=6, ammo_attack=get_sword_attack(50))
class Catapult(UnitWithAmmo):
    """Immobile siege engine: fragile, long melee reach, heavy projectiles."""
    def __init__(self):
        super().__init__(
            "img/catapult.png",
            speed=0, hp=25, rest_ability=0.1,
            attack=get_sword_attack(74), reach=3, healing=0,
            ammo=5, ammo_reach=5, ammo_attack=get_sword_attack(49))
"karol@suszczynski.eu"
] | karol@suszczynski.eu |
9e8d55b19f819bc5d3bd1235d4e62225b2271730 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /pyedit/pyedit-032/pyedlib/pedync.py | 0ba8b937ecb2f51e498cc3516a5f9b0a422ebcc7 | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,693 | py | #!/usr/bin/env python
# Prompt Handler for pyedit
import os, string, gtk, gobject
import pyedlib.pedconfig
# ------------------------------------------------------------------------
def yes_no_cancel(title, message, cancel = True):
    """Run a modal Yes/No(/Cancel) dialog and return a gtk response code.

    reject/close/delete-event responses are all collapsed to
    gtk.RESPONSE_CANCEL so callers only need to handle YES / NO / CANCEL.
    """
    dialog = gtk.Dialog(title,
                        None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)
    dialog.set_default_response(gtk.RESPONSE_YES)
    dialog.set_position(gtk.WIN_POS_CENTER)
    # Spacer labels pad the message on both sides.
    sp = " "
    label = gtk.Label(message);
    label2 = gtk.Label(sp); label3 = gtk.Label(sp)
    hbox = gtk.HBox() ; hbox.pack_start(label2);
    hbox.pack_start(label); hbox.pack_start(label3)
    dialog.vbox.pack_start(hbox)
    dialog.add_button("_Yes", gtk.RESPONSE_YES)
    dialog.add_button("_No", gtk.RESPONSE_NO)
    if cancel:
        dialog.add_button("_Cancel", gtk.RESPONSE_CANCEL)
    # area_key maps y/n (and c when enabled) keypresses onto responses.
    dialog.connect("key-press-event", area_key, cancel)
    #dialog.connect("key-release-event", area_key, cancel)
    dialog.show_all()
    response = dialog.run()
    # Convert all responses to cancel
    if response == gtk.RESPONSE_CANCEL or \
       response == gtk.RESPONSE_REJECT or \
       response == gtk.RESPONSE_CLOSE or \
       response == gtk.RESPONSE_DELETE_EVENT:
        response = gtk.RESPONSE_CANCEL
    dialog.destroy()
    return response
def area_key(win, event, cancel):
    """Map y/n (and c when cancel is allowed) keypresses to dialog responses."""
    key = event.keyval
    if key in (gtk.keysyms.y, gtk.keysyms.Y):
        win.response(gtk.RESPONSE_YES)
    if key in (gtk.keysyms.n, gtk.keysyms.N):
        win.response(gtk.RESPONSE_NO)
    if cancel and key in (gtk.keysyms.c, gtk.keysyms.C):
        win.response(gtk.RESPONSE_CANCEL)
# ------------------------------------------------------------------------
# Show About dialog:
import platform
def about():
    """Show the About dialog with version/platform info and the GTK logo."""
    dialog = gtk.AboutDialog()
    dialog.set_name(" PyEdit - Python Editor ")
    dialog.set_version(str(pyedlib.pedconfig.conf.version));
    comm = "\nPython based easily configurable editor.\n"\
           "\nRunning PyGtk %d.%d.%d" % gtk.pygtk_version +\
           "\nRunning GTK %d.%d.%d\n" % gtk.gtk_version +\
           "\nRunning Python %s\n" % platform.python_version()
    dialog.set_comments(comm);
    dialog.set_copyright("Portions \302\251 Copyright Peter Glen\n"
                         "Project placed in the Public Domain.")
    # The logo is optional: a missing image only prints a warning.
    img_dir = os.path.join(os.path.dirname(__file__), 'images')
    img_path = os.path.join(img_dir, 'gtk-logo-rgb.gif')
    try:
        pixbuf = gtk.gdk.pixbuf_new_from_file(img_path)
        #print "loaded pixbuf"
        dialog.set_logo(pixbuf)
    except gobject.GError, error:
        print "Cannot load logo for about dialog";
    #dialog.set_website("")
    ## Close dialog on user response
    dialog.connect ("response", lambda d, r: d.destroy())
    # Alt+X also closes the dialog (see about_key).
    dialog.connect("key-press-event", about_key)
    dialog.show()
def about_key(win, event):
    """Close the About dialog when Alt+X is pressed."""
    #print "about_key", event
    if event.type == gtk.gdk.KEY_PRESS:
        pressed_x = event.keyval in (gtk.keysyms.x, gtk.keysyms.X)
        if pressed_x and (event.state & gtk.gdk.MOD1_MASK):
            win.destroy()
# Show a regular message:
def message(strx, title = None, icon = gtk.MESSAGE_INFO):
    """Show a simple message dialog that destroys itself on any response."""
    dialog = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT,
                               icon, gtk.BUTTONS_CLOSE, strx)
    # Fall back to the application name when no title was given.
    dialog.set_title(title if title else "pyedit")
    # Close dialog on user response
    dialog.connect("response", lambda d, r: d.destroy())
    dialog.show()
| [
"peterglen99@gmail.com"
] | peterglen99@gmail.com |
a58a9d7303bef7ea14954d5a6376cf8f18b14d02 | fe91ffa11707887e4cdddde8f386a8c8e724aa58 | /chrome/test/enterprise/e2e/policy/safe_browsing/safe_browsing_ui_test.py | 296faf0623b41a371544722ac0962d719d89d5de | [
"BSD-3-Clause"
] | permissive | akshaymarch7/chromium | 78baac2b45526031846ccbaeca96c639d1d60ace | d273c844a313b1e527dec0d59ce70c95fd2bd458 | refs/heads/master | 2023-02-26T23:48:03.686055 | 2020-04-15T01:20:07 | 2020-04-15T01:20:07 | 255,778,651 | 2 | 1 | BSD-3-Clause | 2020-04-15T02:04:56 | 2020-04-15T02:04:55 | null | UTF-8 | Python | false | false | 2,371 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import test_util
import time
from absl import app
from selenium import webdriver
from pywinauto.application import Application
UnsafePageLink = "http://testsafebrowsing.appspot.com/s/malware.html"
UnsafePageLinkTabText = "Security error"
UnsafeDownloadLink = "http://testsafebrowsing.appspot.com/s/badrep.exe"
UnsafeDownloadTextRe = ".* is dangerous,\s*so\s*Chrom.* has blocked it"
def visit(window, url, wait=10):
    """Visit a specific URL through pywinauto.Application.

    SafeBrowsing intercepts HTTP requests & hangs WebDriver.get(), which
    prevents us from getting the page source. Using pywinauto to visit the
    pages instead.

    Args:
      window: pywinauto window wrapper exposing an Edit (omnibox) control.
      url: address to type into the omnibox.
      wait: seconds to sleep after navigating so the page can settle;
        the default of 10 preserves the original behavior.
    """
    window.Edit.set_edit_text(url).type_keys("%{ENTER}")
    time.sleep(wait)
def main(argv):
    """End-to-end check that SafeBrowsing blocks both an unsafe page and an
    unsafe download, printing RESULTS.* lines for the harness to parse."""
    # Keep background networking enabled so SafeBrowsing lists can download.
    exclude_switches = ["disable-background-networking"]
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_experimental_option("excludeSwitches", exclude_switches)
    driver = test_util.create_chrome_webdriver(chrome_options=chrome_options)
    # NOTE: this local 'app' shadows the module-level absl 'app' import.
    app = Application(backend="uia")
    app.connect(title_re='.*Chrome|.*Chromium')
    window = app.top_window()
    # Wait for Chrome to download SafeBrowsing lists in the background.
    # There's no trigger to force this operation or synchronize on it, but quick
    # experiments have shown 3-4 minutes in most cases, so 5 should be plenty.
    time.sleep(60 * 5)
    print "Visiting unsafe page: %s" % UnsafePageLink
    visit(window, UnsafePageLink)
    unsafe_page = False
    # Scan every visible UI element for the interstitial's tab text.
    for desc in app.top_window().descendants():
        if desc.window_text():
            print "unsafe_page.item: %s" % desc.window_text()
            if UnsafePageLinkTabText in desc.window_text():
                unsafe_page = True
                break
    print "Downloading unsafe file: %s" % UnsafeDownloadLink
    visit(window, UnsafeDownloadLink)
    unsafe_download = False
    # Same scan, this time for the blocked-download warning text.
    for desc in app.top_window().descendants():
        if desc.window_text():
            print "unsafe_download.item: %s" % desc.window_text()
            if re.search(UnsafeDownloadTextRe, desc.window_text()):
                unsafe_download = True
                break
    print "RESULTS.unsafe_page: %s" % unsafe_page
    print "RESULTS.unsafe_download: %s" % unsafe_download
    driver.quit()
app.run(main)
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
61a13c91993c787854fd3d4282aba7d225cc06e8 | 4b32cbc767e8cc0d61e7771b8a6d5c7c9b324a97 | /Python/1676 (팩토리얼 0의 개수,수학).py | 1f1b2191894cf9d0e4754ced7350cb220d5c75dc | [] | no_license | Jongminfire/Baekjoon | 45e554c983fa583ca7a1709e1ac435e1a38e075b | 8dc0ec58ddc43de2dd44b3b1af9346f708c1208e | refs/heads/master | 2023-07-29T10:35:52.648825 | 2021-09-14T15:12:22 | 2021-09-14T15:12:22 | 247,514,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | n = int(input())
print(n//5+n//25+n//125)
# 팩토리얼 0의 개수는 5의 x제곱일 때 x만큼 늘어난다
| [
"51112542+Jongminfire@users.noreply.github.com"
] | 51112542+Jongminfire@users.noreply.github.com |
a8b6f5e111d6183e5069a6819cb3177032881f29 | 672c454454cc62a49d0caf74558c265c7db4228f | /Model.py | 428fe5396ea2642e37d6dd7c1f008ff649c0a003 | [] | no_license | nmaypeter/project_nw_200505 | cd0ce01ce07c41683ca96b0dac3960bfac6b8ffd | f0871506f8ed4dbdd8e1b1a0dcbfa20196a652b7 | refs/heads/master | 2022-07-29T23:16:10.271862 | 2020-05-17T16:37:07 | 2020-05-17T16:37:07 | 261,580,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,600 | py | from SeedSelection import *
from Evaluation import *
import time
import copy
import math
class Model:
def __init__(self, mn_list, data_key, prod_key, cas_key, wallet_key=0):
self.model_name = get_model_name(mn_list)
self.r_flag = mn_list[1]
self.mn_list = mn_list
self.data_name = dataset_name_dict[data_key]
self.prod_name = product_name_dict[prod_key]
self.cas_name = cascade_model_dict[cas_key]
self.data_key = data_key
self.prod_key = prod_key
self.cas_key = cas_key
self.wallet_type = wallet_distribution_type_dict[wallet_key]
self.wallet_key = wallet_key
self.wd_seq = [wd for wd in wallet_distribution_type_dict.keys() if wd != 0]
self.budget_iteration = [i for i in range(10, 6, -1)]
self.monte_carlo = 100
def model_dag(self):
    """CELF-style greedy seed selection using a DAG-based profit estimator.

    Runs the largest budget first and snapshots the greedy state
    (temp_sequence) just before the budget is exceeded, so each smaller
    budget resumes from the partial solution instead of restarting.
    """
    ini = Initialization(self.data_key, self.prod_key, self.cas_key, self.wallet_key)
    seed_cost_dict = ini.constructSeedCostDict()
    graph_dict = ini.constructGraphDict()
    product_list, epw_list = ini.constructProductList()
    num_product = len(product_list)
    total_cost = sum(seed_cost_dict[i] for i in seed_cost_dict)
    # -1 marks "not yet computed" slots; backfilled at the end.
    seed_set_sequence = [-1 for _ in range(len(self.budget_iteration))]
    ss_time_sequence = [-1 for _ in range(len(self.budget_iteration))]
    seed_data_sequence = [-1 for _ in range(len(self.budget_iteration))]
    # DAG class (1 or 2) is encoded as the last character of the method name.
    dag_class = int(list(model_dict['method'][self.mn_list[0]])[-1])
    ssmioa_model = SeedSelectionMIOA(graph_dict, seed_cost_dict, product_list, epw_list, dag_class, self.r_flag)
    ss_start_time = time.time()
    bud_iteration = self.budget_iteration.copy()
    now_b_iter = bud_iteration.pop(0)
    now_budget, now_profit = 0.0, 0.0
    # One seed set per product.
    seed_set = [set() for _ in range(num_product)]
    wd_seq = [self.wallet_key] if wallet_distribution_type_dict[self.wallet_key] else self.wd_seq
    mioa_dict = ssmioa_model.generateMIOA()
    celf_heap = ssmioa_model.generateCelfHeap(mioa_dict)
    ss_acc_time = round(time.time() - ss_start_time, 4)
    # Stack of greedy-state snapshots: [acc_time, budget, profit, seeds, heap].
    temp_sequence = [[ss_acc_time, now_budget, now_profit, seed_set, celf_heap]]
    temp_seed_data = [['time\tk_prod\ti_node\tnow_budget\tnow_profit\tseed_num\n']]
    while temp_sequence:
        ss_start_time = time.time()
        now_bi_index = self.budget_iteration.index(now_b_iter)
        total_budget = safe_div(total_cost, 2 ** now_b_iter)
        [ss_acc_time, now_budget, now_profit, seed_set, celf_heap] = temp_sequence.pop()
        seed_data = temp_seed_data.pop()
        print('@ selection\t' + self.model_name + ' @ ' + self.data_name + '_' + self.cas_name +
              '\t' + self.wallet_type + '_' + self.prod_name + '_bi' + str(now_b_iter) + ', budget = ' + str(total_budget))
        celf_heap_c = []
        while now_budget < total_budget and celf_heap:
            mep_item = heap.heappop_max(celf_heap)
            mep_mg, mep_k_prod, mep_i_node, mep_flag = mep_item
            sc = seed_cost_dict[mep_i_node]
            # About to exhaust this budget: copy the heap for the snapshot.
            # NOTE(review): this condition is evaluated twice (here and
            # below) with the same inputs -- presumably to snapshot the
            # heap before seed_set_length is computed.
            if round(now_budget + sc, 4) >= total_budget and bud_iteration and not temp_sequence:
                celf_heap_c = copy.deepcopy(celf_heap)
            seed_set_length = sum(len(seed_set[k]) for k in range(num_product))
            if round(now_budget + sc, 4) >= total_budget and bud_iteration and not temp_sequence:
                # Snapshot current state so the next (smaller) budget
                # iteration can resume from here.
                ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
                now_b_iter = bud_iteration.pop(0)
                temp_sequence.append([ss_time, now_budget, now_profit, copy.deepcopy(seed_set), celf_heap_c])
                temp_seed_data.append(seed_data.copy())
            if round(now_budget + sc, 4) > total_budget:
                # Too expensive for the remaining budget: skip this node.
                continue
            if mep_flag == seed_set_length:
                # CELF: the cached gain is up to date -- commit the seed.
                seed_set[mep_k_prod].add(mep_i_node)
                now_budget = round(now_budget + sc, 4)
                # With r_flag the heap stores gain/cost, so multiply back.
                now_profit = round(now_profit + (mep_mg * (sc if self.r_flag else 1.0)), 4)
                seed_data.append(str(round(time.time() - ss_start_time + ss_acc_time, 4)) + '\t' + str(mep_k_prod) + '\t' + str(mep_i_node) + '\t' +
                                 str(now_budget) + '\t' + str(now_profit) + '\t' + str([len(seed_set[k]) for k in range(num_product)]) + '\n')
            else:
                # Stale entry: recompute the marginal gain on a trial seed
                # set and push it back with the current flag.
                seed_set_t = copy.deepcopy(seed_set)
                seed_set_t[mep_k_prod].add(mep_i_node)
                dag_dict = [{} for _ in range(num_product)]
                if dag_class == 1:
                    dag_dict = ssmioa_model.generateDAG1(mioa_dict, seed_set_t)
                elif dag_class == 2:
                    dag_dict = ssmioa_model.generateDAG2(mioa_dict, seed_set_t)
                ep_t = ssmioa_model.calculateExpectedProfit(dag_dict, seed_set_t)
                mg_t = safe_div(round(ep_t - now_profit, 4), sc if self.r_flag else 1.0)
                flag_t = seed_set_length
                if mg_t > 0:
                    celf_item_t = (mg_t, mep_k_prod, mep_i_node, flag_t)
                    heap.heappush_max(celf_heap, celf_item_t)
        ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
        print('ss_time = ' + str(ss_time) + 'sec, cost = ' + str(now_budget) + ', seed_set_length = ' + str([len(s_set_k) for s_set_k in seed_set]))
        seed_set_sequence[now_bi_index] = seed_set
        ss_time_sequence[now_bi_index] = ss_time
        seed_data_sequence[now_bi_index] = seed_data
        # Persist the per-step seed log for every wallet distribution.
        for wd in wd_seq:
            seed_data_path = 'seed_data/' + self.data_name + '_' + self.cas_name
            if not os.path.isdir(seed_data_path):
                os.mkdir(seed_data_path)
            seed_data_path0 = seed_data_path + '/' + wallet_distribution_type_dict[wd] + '_' + self.prod_name + '_bi' + str(self.budget_iteration[now_bi_index])
            if not os.path.isdir(seed_data_path0):
                os.mkdir(seed_data_path0)
            seed_data_file = open(seed_data_path0 + '/' + self.model_name + '.txt', 'w')
            for sd in seed_data:
                seed_data_file.write(sd)
            seed_data_file.close()
    # Budgets that never got their own run inherit the previous result.
    while -1 in seed_data_sequence:
        no_data_index = seed_data_sequence.index(-1)
        seed_set_sequence[no_data_index] = seed_set_sequence[no_data_index - 1]
        ss_time_sequence[no_data_index] = ss_time_sequence[no_data_index - 1]
        seed_data_sequence[no_data_index] = seed_data_sequence[no_data_index - 1]
    eva_model = EvaluationM(self.mn_list, self.data_key, self.prod_key, self.cas_key)
    for bi in self.budget_iteration:
        now_bi_index = self.budget_iteration.index(bi)
        if wallet_distribution_type_dict[self.wallet_key]:
            eva_model.evaluate(bi, self.wallet_key, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
        else:
            # Unspecified wallet: evaluate under every distribution.
            for wd in self.wd_seq:
                eva_model.evaluate(bi, wd, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
def model_spbp(self):
    """Seed selection via the SPBP heuristic (dict-based lazy greedy).

    Unlike model_dag, each budget iteration is run from scratch; stale
    gains are only refreshed for the neighbors (ps_dict) of a committed
    seed, and a single-seed fallback (max_s) is kept in case it beats
    the greedy solution.
    """
    ini = Initialization(self.data_key, self.prod_key, self.cas_key, self.wallet_key)
    seed_cost_dict = ini.constructSeedCostDict()
    graph_dict = ini.constructGraphDict()
    product_list, epw_list = ini.constructProductList()
    num_product = len(product_list)
    total_cost = sum(seed_cost_dict[i] for i in seed_cost_dict)
    # -1 marks "not yet computed" slots; backfilled at the end.
    seed_set_sequence = [-1 for _ in range(len(self.budget_iteration))]
    ss_time_sequence = [-1 for _ in range(len(self.budget_iteration))]
    seed_data_sequence = [-1 for _ in range(len(self.budget_iteration))]
    # DAG class (1 or 2) is encoded as the last character of the method name.
    dag_class = int(list(model_dict['method'][self.mn_list[0]])[-1])
    ssmioa_model = SeedSelectionMIOA(graph_dict, seed_cost_dict, product_list, epw_list, dag_class, self.r_flag)
    ssspbp_model = SeedSelectionSPBP(graph_dict, seed_cost_dict, product_list, epw_list, dag_class, self.r_flag)
    for now_b_iter in self.budget_iteration:
        ss_start_time = time.time()
        now_budget, now_profit = 0.0, 0.0
        now_bi_index = self.budget_iteration.index(now_b_iter)
        total_budget = safe_div(total_cost, 2 ** now_b_iter)
        seed_set = [set() for _ in range(num_product)]
        wd_seq = [self.wallet_key] if wallet_distribution_type_dict[self.wallet_key] else self.wd_seq
        mioa_dict, ps_dict = ssspbp_model.generateMIOA()
        celf_dict, max_s = ssspbp_model.generateCelfDict(mioa_dict, total_budget)
        seed_data = ['time\tk_prod\ti_node\tnow_budget\tnow_profit\tseed_num\n']
        print('@ selection\t' + get_model_name(self.mn_list) + ' @ ' + dataset_name_dict[self.data_key] + '_' + cascade_model_dict[self.cas_key] +
              '\t' + wallet_distribution_type_dict[self.wallet_key] + '_' + product_name_dict[self.prod_key] + '_bi' + str(now_b_iter) + ', budget = ' + str(total_budget))
    while now_budget < total_budget and celf_dict:
            # Pop the (product, node) pair with the largest cached gain.
            mep_k_prod, mep_i_node = max(celf_dict, key=celf_dict.get)
            mep_mg = max(celf_dict.values())
            del celf_dict[(mep_k_prod, mep_i_node)]
            sc = seed_cost_dict[mep_i_node]
            if round(now_budget + sc, 4) > total_budget:
                continue
            seed_set[mep_k_prod].add(mep_i_node)
            now_budget = round(now_budget + sc, 4)
            # With r_flag the dict stores gain/cost, so multiply back.
            now_profit = round(now_profit + (mep_mg * (sc if self.r_flag else 1.0)), 4)
            seed_data.append(str(round(time.time() - ss_start_time, 4)) + '\t' + str(mep_k_prod) + '\t' + str(mep_i_node) + '\t' +
                             str(now_budget) + '\t' + str(now_profit) + '\t' + str([len(seed_set[k]) for k in range(num_product)]) + '\n')
            # Refresh cached gains only for entries influenced by the new
            # seed, and only while their stale gain still exceeds the best
            # refreshed gain so far (delta_max pruning).
            delta_max = 0.0
            for (k, i) in ps_dict[mep_k_prod][mep_i_node]:
                if i in seed_set[k]:
                    continue
                if (k, i) not in celf_dict:
                    continue
                if celf_dict[(k, i)] > delta_max:
                    # NOTE(review): the trial set adds the just-committed
                    # (mep_k_prod, mep_i_node) -- already in seed_set --
                    # rather than the candidate (k, i) being re-scored;
                    # verify this is intentional.
                    seed_set_t = copy.deepcopy(seed_set)
                    seed_set_t[mep_k_prod].add(mep_i_node)
                    dag_dict = [{} for _ in range(num_product)]
                    if dag_class == 1:
                        dag_dict = ssmioa_model.generateDAG1(mioa_dict, seed_set_t)
                    elif dag_class == 2:
                        dag_dict = ssmioa_model.generateDAG2(mioa_dict, seed_set_t)
                    ep_t = ssmioa_model.calculateExpectedProfit(dag_dict, seed_set_t)
                    mg_t = round(ep_t - now_profit, 4)
                    mg_t = safe_div(mg_t, sc) if self.r_flag else mg_t
                    celf_dict[(k, i)] = mg_t
                    delta_max = mg_t if mg_t > delta_max else delta_max
        if max_s[0] > now_profit and max_s[-1] != '-1':
            # Single best seed beats the greedy set: use it instead.
            seed_set = [set() for _ in range(num_product)]
            seed_set[max_s[1]].add(max_s[2])
        ss_time = round(time.time() - ss_start_time, 4)
        print('ss_time = ' + str(ss_time) + 'sec, cost = ' + str(now_budget) + ', seed_set_length = ' + str([len(s_set_k) for s_set_k in seed_set]))
        seed_set_sequence[now_bi_index] = seed_set
        ss_time_sequence[now_bi_index] = ss_time
        seed_data_sequence[now_bi_index] = seed_data
        # Persist the per-step seed log for every wallet distribution.
        for wd in wd_seq:
            seed_data_path = 'seed_data/' + self.data_name + '_' + self.cas_name
            if not os.path.isdir(seed_data_path):
                os.mkdir(seed_data_path)
            seed_data_path0 = seed_data_path + '/' + wallet_distribution_type_dict[wd] + '_' + self.prod_name + '_bi' + str(self.budget_iteration[now_bi_index])
            if not os.path.isdir(seed_data_path0):
                os.mkdir(seed_data_path0)
            seed_data_file = open(seed_data_path0 + '/' + self.model_name + '.txt', 'w')
            for sd in seed_data:
                seed_data_file.write(sd)
            seed_data_file.close()
    # Budgets that never got their own run inherit the previous result.
    while -1 in seed_data_sequence:
        no_data_index = seed_data_sequence.index(-1)
        seed_set_sequence[no_data_index] = seed_set_sequence[no_data_index - 1]
        ss_time_sequence[no_data_index] = ss_time_sequence[no_data_index - 1]
        seed_data_sequence[no_data_index] = seed_data_sequence[no_data_index - 1]
    eva_model = EvaluationM(self.mn_list, self.data_key, self.prod_key, self.cas_key)
    for bi in self.budget_iteration:
        now_bi_index = self.budget_iteration.index(bi)
        if wallet_distribution_type_dict[self.wallet_key]:
            eva_model.evaluate(bi, self.wallet_key, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
        else:
            # Unspecified wallet: evaluate under every distribution.
            for wd in self.wd_seq:
                eva_model.evaluate(bi, wd, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
def model_ng(self):
ini = Initialization(self.data_key, self.prod_key, self.cas_key, self.wallet_key)
seed_cost_dict = ini.constructSeedCostDict()
graph_dict = ini.constructGraphDict()
product_list, epw_list = ini.constructProductList()
num_product = len(product_list)
total_cost = sum(seed_cost_dict[i] for i in seed_cost_dict)
seed_set_sequence = [-1 for _ in range(len(self.budget_iteration))]
ss_time_sequence = [-1 for _ in range(len(self.budget_iteration))]
seed_data_sequence = [-1 for _ in range(len(self.budget_iteration))]
ssng_model = SeedSelectionNG(graph_dict, seed_cost_dict, product_list, epw_list, self.r_flag)
ss_start_time = time.time()
bud_iteration = self.budget_iteration.copy()
now_b_iter = bud_iteration.pop(0)
now_budget, now_profit = 0.0, 0.0
seed_set = [set() for _ in range(num_product)]
wd_seq = [self.wallet_key] if wallet_distribution_type_dict[self.wallet_key] else self.wd_seq
celf_heap = ssng_model.generateCelfHeap()
ss_acc_time = round(time.time() - ss_start_time, 4)
temp_sequence = [[ss_acc_time, now_budget, now_profit, seed_set, celf_heap]]
temp_seed_data = [['time\tk_prod\ti_node\tnow_budget\tnow_profit\tseed_num\n']]
while temp_sequence:
ss_start_time = time.time()
now_bi_index = self.budget_iteration.index(now_b_iter)
total_budget = safe_div(total_cost, 2 ** now_b_iter)
[ss_acc_time, now_budget, now_profit, seed_set, celf_heap] = temp_sequence.pop()
seed_data = temp_seed_data.pop()
print('@ selection\t' + get_model_name(self.mn_list) + ' @ ' + dataset_name_dict[self.data_key] + '_' + cascade_model_dict[self.cas_key] +
'\t' + wallet_distribution_type_dict[self.wallet_key] + '_' + product_name_dict[self.prod_key] + '_bi' + str(now_b_iter) + ', budget = ' + str(total_budget))
celf_heap_c = []
while now_budget < total_budget and celf_heap:
mep_item = heap.heappop_max(celf_heap)
mep_mg, mep_k_prod, mep_i_node, mep_flag = mep_item
sc = seed_cost_dict[mep_i_node]
if round(now_budget + sc, 4) >= total_budget and bud_iteration and not temp_sequence:
celf_heap_c = copy.deepcopy(celf_heap)
seed_set_length = sum(len(seed_set[k]) for k in range(num_product))
if round(now_budget + sc, 4) >= total_budget and bud_iteration and not temp_sequence:
ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
now_b_iter = bud_iteration.pop(0)
temp_sequence.append([ss_time, now_budget, now_profit, copy.deepcopy(seed_set), celf_heap_c])
temp_seed_data.append(seed_data.copy())
if round(now_budget + sc, 4) > total_budget:
continue
if mep_flag == seed_set_length:
seed_set[mep_k_prod].add(mep_i_node)
now_budget = round(now_budget + sc, 4)
now_profit = ssng_model.getSeedSetProfit(seed_set)
seed_data.append(str(round(time.time() - ss_start_time + ss_acc_time, 4)) + '\t' + str(mep_k_prod) + '\t' + str(mep_i_node) + '\t' +
str(now_budget) + '\t' + str(now_profit) + '\t' + str([len(seed_set[k]) for k in range(num_product)]) + '\n')
else:
seed_set_t = copy.deepcopy(seed_set)
seed_set_t[mep_k_prod].add(mep_i_node)
ep_t = ssng_model.getSeedSetProfit(seed_set_t)
mg_t = round(ep_t - now_profit, 4)
if self.r_flag:
mg_t = safe_div(mg_t, sc)
flag_t = seed_set_length
if mg_t > 0:
celf_item_t = (mg_t, mep_k_prod, mep_i_node, flag_t)
heap.heappush_max(celf_heap, celf_item_t)
ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
print('ss_time = ' + str(ss_time) + 'sec, cost = ' + str(now_budget) + ', seed_set_length = ' + str([len(s_set_k) for s_set_k in seed_set]))
seed_set_sequence[now_bi_index] = seed_set
ss_time_sequence[now_bi_index] = ss_time
seed_data_sequence[now_bi_index] = seed_data
for wd in wd_seq:
seed_data_path = 'seed_data/' + self.data_name + '_' + self.cas_name
if not os.path.isdir(seed_data_path):
os.mkdir(seed_data_path)
seed_data_path0 = seed_data_path + '/' + wallet_distribution_type_dict[wd] + '_' + self.prod_name + '_bi' + str(self.budget_iteration[now_bi_index])
if not os.path.isdir(seed_data_path0):
os.mkdir(seed_data_path0)
seed_data_file = open(seed_data_path0 + '/' + self.model_name + '.txt', 'w')
for sd in seed_data:
seed_data_file.write(sd)
seed_data_file.close()
while -1 in seed_data_sequence:
no_data_index = seed_data_sequence.index(-1)
seed_set_sequence[no_data_index] = seed_set_sequence[no_data_index - 1]
ss_time_sequence[no_data_index] = ss_time_sequence[no_data_index - 1]
seed_data_sequence[no_data_index] = seed_data_sequence[no_data_index - 1]
eva_model = EvaluationM(self.mn_list, self.data_key, self.prod_key, self.cas_key)
for bi in self.budget_iteration:
now_bi_index = self.budget_iteration.index(bi)
if wallet_distribution_type_dict[self.wallet_key]:
eva_model.evaluate(bi, self.wallet_key, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
else:
for wd in self.wd_seq:
eva_model.evaluate(bi, wd, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
def model_hd(self):
ini = Initialization(self.data_key, self.prod_key, self.cas_key, self.wallet_key)
seed_cost_dict = ini.constructSeedCostDict()
graph_dict = ini.constructGraphDict()
product_list, epw_list = ini.constructProductList()
num_product = len(product_list)
total_cost = sum(seed_cost_dict[i] for i in seed_cost_dict)
seed_set_sequence = [-1 for _ in range(len(self.budget_iteration))]
ss_time_sequence = [-1 for _ in range(len(self.budget_iteration))]
seed_data_sequence = [-1 for _ in range(len(self.budget_iteration))]
sshd_model = SeedSelectionHD(graph_dict, product_list)
ss_start_time = time.time()
bud_iteration = self.budget_iteration.copy()
now_b_iter = bud_iteration.pop(0)
now_budget = 0.0
seed_set = [set() for _ in range(num_product)]
wd_seq = [self.wallet_key] if wallet_distribution_type_dict[self.wallet_key] else self.wd_seq
degree_heap = sshd_model.generateDegreeHeap()
ss_acc_time = round(time.time() - ss_start_time, 4)
temp_sequence = [[ss_acc_time, now_budget, seed_set, degree_heap]]
temp_seed_data = [['time\tk_prod\ti_node\tnow_budget\tnow_profit\tseed_num\n']]
while temp_sequence:
ss_start_time = time.time()
now_bi_index = self.budget_iteration.index(now_b_iter)
total_budget = safe_div(total_cost, 2 ** now_b_iter)
[ss_acc_time, now_budget, seed_set, degree_heap] = temp_sequence.pop()
seed_data = temp_seed_data.pop()
print('@ selection\t' + get_model_name(self.mn_list) + ' @ ' + dataset_name_dict[self.data_key] + '_' + cascade_model_dict[self.cas_key] +
'\t' + wallet_distribution_type_dict[self.wallet_key] + '_' + product_name_dict[self.prod_key] + '_bi' + str(now_b_iter) + ', budget = ' + str(total_budget))
degree_heap_c = []
while now_budget < total_budget and degree_heap:
mep_item = heap.heappop_max(degree_heap)
mep_deg, mep_k_prod, mep_i_node = mep_item
sc = seed_cost_dict[mep_i_node]
if round(now_budget + sc, 4) >= total_budget and bud_iteration and not temp_sequence:
degree_heap_c = copy.deepcopy(degree_heap)
if round(now_budget + sc, 4) >= total_budget and bud_iteration and not temp_sequence:
ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
now_b_iter = bud_iteration.pop(0)
temp_sequence.append([ss_time, now_budget, copy.deepcopy(seed_set), degree_heap_c])
temp_seed_data.append(seed_data.copy())
if round(now_budget + sc, 4) > total_budget:
continue
seed_set[mep_k_prod].add(mep_i_node)
now_budget = round(now_budget + sc, 4)
seed_data.append(str(round(time.time() - ss_start_time + ss_acc_time, 4)) + '\t' + str(mep_k_prod) + '\t' + str(mep_i_node) + '\t' +
str(now_budget) + '\t' + str([len(seed_set[k]) for k in range(num_product)]) + '\n')
ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
print('ss_time = ' + str(ss_time) + 'sec, cost = ' + str(now_budget) + ', seed_set_length = ' + str([len(s_set_k) for s_set_k in seed_set]))
seed_set_sequence[now_bi_index] = seed_set
ss_time_sequence[now_bi_index] = ss_time
seed_data_sequence[now_bi_index] = seed_data
for wd in wd_seq:
seed_data_path = 'seed_data/' + self.data_name + '_' + self.cas_name
if not os.path.isdir(seed_data_path):
os.mkdir(seed_data_path)
seed_data_path0 = seed_data_path + '/' + wallet_distribution_type_dict[wd] + '_' + self.prod_name + '_bi' + str(self.budget_iteration[now_bi_index])
if not os.path.isdir(seed_data_path0):
os.mkdir(seed_data_path0)
seed_data_file = open(seed_data_path0 + '/' + self.model_name + '.txt', 'w')
for sd in seed_data:
seed_data_file.write(sd)
seed_data_file.close()
while -1 in seed_data_sequence:
no_data_index = seed_data_sequence.index(-1)
seed_set_sequence[no_data_index] = seed_set_sequence[no_data_index - 1]
ss_time_sequence[no_data_index] = ss_time_sequence[no_data_index - 1]
seed_data_sequence[no_data_index] = seed_data_sequence[no_data_index - 1]
eva_model = EvaluationM(self.mn_list, self.data_key, self.prod_key, self.cas_key)
for bi in self.budget_iteration:
now_bi_index = self.budget_iteration.index(bi)
if wallet_distribution_type_dict[self.wallet_key]:
eva_model.evaluate(bi, self.wallet_key, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
else:
for wd in self.wd_seq:
eva_model.evaluate(bi, wd, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
def model_r(self):
ini = Initialization(self.data_key, self.prod_key, self.cas_key, self.wallet_key)
seed_cost_dict = ini.constructSeedCostDict()
graph_dict = ini.constructGraphDict()
product_list, epw_list = ini.constructProductList()
num_product = len(product_list)
total_cost = sum(seed_cost_dict[i] for i in seed_cost_dict)
seed_set_sequence = [-1 for _ in range(len(self.budget_iteration))]
ss_time_sequence = [-1 for _ in range(len(self.budget_iteration))]
seed_data_sequence = [-1 for _ in range(len(self.budget_iteration))]
ss_start_time = time.time()
bud_iteration = self.budget_iteration.copy()
now_b_iter = bud_iteration.pop(0)
now_budget = 0.0
seed_set = [set() for _ in range(num_product)]
wd_seq = [self.wallet_key] if wallet_distribution_type_dict[self.wallet_key] else self.wd_seq
random_node_list = [(k, i) for i in graph_dict for k in range(num_product)]
random.shuffle(random_node_list)
ss_acc_time = round(time.time() - ss_start_time, 4)
temp_sequence = [[ss_acc_time, now_budget, seed_set, random_node_list]]
temp_seed_data = [['time\tk_prod\ti_node\tnow_budget\tnow_profit\tseed_num\n']]
while temp_sequence:
ss_start_time = time.time()
now_bi_index = self.budget_iteration.index(now_b_iter)
total_budget = safe_div(total_cost, 2 ** now_b_iter)
[ss_acc_time, now_budget, seed_set, random_node_list] = temp_sequence.pop()
seed_data = temp_seed_data.pop()
print('@ selection\t' + get_model_name(self.mn_list) + ' @ ' + dataset_name_dict[self.data_key] + '_' + cascade_model_dict[self.cas_key] +
'\t' + wallet_distribution_type_dict[self.wallet_key] + '_' + product_name_dict[self.prod_key] + '_bi' + str(now_b_iter) + ', budget = ' + str(total_budget))
random_node_list_c = []
while now_budget < total_budget and random_node_list:
mep_item = random_node_list.pop(0)
mep_k_prod, mep_i_node = mep_item
sc = seed_cost_dict[mep_i_node]
if round(now_budget + sc, 4) >= total_budget and bud_iteration and not temp_sequence:
random_node_list_c = copy.deepcopy(random_node_list)
if round(now_budget + sc, 4) >= total_budget and bud_iteration and not temp_sequence:
ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
now_b_iter = bud_iteration.pop(0)
temp_sequence.append([ss_time, now_budget, copy.deepcopy(seed_set), random_node_list_c])
temp_seed_data.append(seed_data.copy())
if round(now_budget + sc, 4) > total_budget:
continue
seed_set[mep_k_prod].add(mep_i_node)
now_budget = round(now_budget + sc, 4)
seed_data.append(str(round(time.time() - ss_start_time + ss_acc_time, 4)) + '\t' + str(mep_k_prod) + '\t' + str(mep_i_node) + '\t' +
str(now_budget) + '\t' + str([len(seed_set[k]) for k in range(num_product)]) + '\n')
ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
print('ss_time = ' + str(ss_time) + 'sec, cost = ' + str(now_budget) + ', seed_set_length = ' + str([len(s_set_k) for s_set_k in seed_set]))
seed_set_sequence[now_bi_index] = seed_set
ss_time_sequence[now_bi_index] = ss_time
seed_data_sequence[now_bi_index] = seed_data
for wd in wd_seq:
seed_data_path = 'seed_data/' + self.data_name + '_' + self.cas_name
if not os.path.isdir(seed_data_path):
os.mkdir(seed_data_path)
seed_data_path0 = seed_data_path + '/' + wallet_distribution_type_dict[wd] + '_' + self.prod_name + '_bi' + str(self.budget_iteration[now_bi_index])
if not os.path.isdir(seed_data_path0):
os.mkdir(seed_data_path0)
seed_data_file = open(seed_data_path0 + '/' + self.model_name + '.txt', 'w')
for sd in seed_data:
seed_data_file.write(sd)
seed_data_file.close()
while -1 in seed_data_sequence:
no_data_index = seed_data_sequence.index(-1)
seed_set_sequence[no_data_index] = seed_set_sequence[no_data_index - 1]
ss_time_sequence[no_data_index] = ss_time_sequence[no_data_index - 1]
seed_data_sequence[no_data_index] = seed_data_sequence[no_data_index - 1]
eva_model = EvaluationM(self.mn_list, self.data_key, self.prod_key, self.cas_key)
for bi in self.budget_iteration:
now_bi_index = self.budget_iteration.index(bi)
if wallet_distribution_type_dict[self.wallet_key]:
eva_model.evaluate(bi, self.wallet_key, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
else:
for wd in self.wd_seq:
eva_model.evaluate(bi, wd, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
    def model_pmis(self):
        """Seed selection with PMIS: CELF snapshots + knapsack combination.

        A CELF greedy runs once per budget iteration, recording every
        intermediate seed set (s_matrix) and its cost (c_matrix); solveMCPK
        then picks the best recorded combination that fits the budget.
        No seed_data files are written by this model.
        """
        ini = Initialization(self.data_key, self.prod_key, self.cas_key, self.wallet_key)
        seed_cost_dict = ini.constructSeedCostDict()
        graph_dict = ini.constructGraphDict()
        product_list, epw_list = ini.constructProductList()
        num_product = len(product_list)
        total_cost = sum(seed_cost_dict[i] for i in seed_cost_dict)
        # -1 marks "no result yet" per budget iteration; back-filled at the end.
        seed_set_sequence = [-1 for _ in range(len(self.budget_iteration))]
        ss_time_sequence = [-1 for _ in range(len(self.budget_iteration))]
        seed_data_sequence = [-1 for _ in range(len(self.budget_iteration))]
        # ssng_model is used below only through getSeedSetProfit().
        ssng_model = SeedSelectionNG(graph_dict, seed_cost_dict, product_list, epw_list, True)
        sspmis_model = SeedSelectionPMIS(graph_dict, seed_cost_dict, product_list, epw_list)
        ss_start_time = time.time()
        celf_heap_o = sspmis_model.generateCelfHeap()
        ss_acc_time = round(time.time() - ss_start_time, 4)
        for now_b_iter in self.budget_iteration:
            ss_start_time = time.time()
            now_bi_index = self.budget_iteration.index(now_b_iter)
            total_budget = safe_div(total_cost, 2 ** now_b_iter)
            # Work on a private copy so each budget iteration starts from the
            # same initial heap.
            celf_heap = copy.deepcopy(celf_heap_o)
            print('@ selection\t' + get_model_name(self.mn_list) + ' @ ' + dataset_name_dict[self.data_key] + '_' + cascade_model_dict[self.cas_key] +
                  '\t' + wallet_distribution_type_dict[self.wallet_key] + '_' + product_name_dict[self.prod_key] + '_bi' + str(now_b_iter) + ', budget = ' + str(total_budget))
            # -- initialization for each sample --
            now_budget, now_profit = 0.0, 0.0
            seed_set = [set() for _ in range(num_product)]
            # s_matrix[j]/c_matrix[j]: seed set and its cost after the j-th commit.
            s_matrix, c_matrix = [[set() for _ in range(num_product)]], [0.0]
            while now_budget < total_budget and celf_heap:
                mep_item = heap.heappop_max(celf_heap)
                mep_mg, mep_k_prod, mep_i_node, mep_flag = mep_item
                sc = seed_cost_dict[mep_i_node]
                seed_set_length = sum(len(seed_set[k]) for k in range(num_product))
                if round(now_budget + sc, 4) > total_budget:
                    # Unaffordable node: skip and keep popping.
                    continue
                if mep_flag == seed_set_length:
                    # Gain is up to date: commit the seed and snapshot the state.
                    seed_set[mep_k_prod].add(mep_i_node)
                    now_budget = round(now_budget + sc, 4)
                    now_profit = ssng_model.getSeedSetProfit(seed_set)
                    s_matrix.append(copy.deepcopy(seed_set))
                    c_matrix.append(now_budget)
                else:
                    # Stale gain (CELF lazy evaluation): recompute and push back.
                    seed_set_t = copy.deepcopy(seed_set)
                    seed_set_t[mep_k_prod].add(mep_i_node)
                    ep_t = ssng_model.getSeedSetProfit(seed_set_t)
                    mg_t = round(ep_t - now_profit, 4)
                    flag_t = seed_set_length
                    if mg_t > 0:
                        celf_item_t = (mg_t, mep_k_prod, mep_i_node, flag_t)
                        heap.heappush_max(celf_heap, celf_item_t)
            # NOTE(review): [s_matrix] * num_product repeats references to the
            # same matrix object for every product -- fine if solveMCPK only
            # reads its inputs; confirm it does not mutate them.
            seed_set = sspmis_model.solveMCPK(total_budget, [s_matrix] * num_product, [c_matrix] * num_product)
            now_budget = sum(seed_cost_dict[i] for k in range(num_product) for i in seed_set[k])
            ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
            print('ss_time = ' + str(ss_time) + 'sec, cost = ' + str(now_budget) + ', seed_set_length = ' + str([len(s_set_k) for s_set_k in seed_set]))
            seed_set_sequence[now_bi_index] = seed_set
            ss_time_sequence[now_bi_index] = ss_time
            # PMIS stores the final seed set itself (no per-step seed_data rows).
            seed_data_sequence[now_bi_index] = seed_set
        # Budgets that never produced output reuse the previous iteration's result.
        while -1 in seed_data_sequence:
            no_data_index = seed_data_sequence.index(-1)
            seed_set_sequence[no_data_index] = seed_set_sequence[no_data_index - 1]
            ss_time_sequence[no_data_index] = ss_time_sequence[no_data_index - 1]
            seed_data_sequence[no_data_index] = seed_data_sequence[no_data_index - 1]
        eva_model = EvaluationM(self.mn_list, self.data_key, self.prod_key, self.cas_key)
        for bi in self.budget_iteration:
            now_bi_index = self.budget_iteration.index(bi)
            if wallet_distribution_type_dict[self.wallet_key]:
                # A specific wallet distribution was requested: evaluate only it.
                eva_model.evaluate(bi, self.wallet_key, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
            else:
                # Otherwise evaluate every wallet distribution type.
                for wd in self.wd_seq:
                    eva_model.evaluate(bi, wd, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
    def model_bcs(self):
        """Seed selection with BCS: two-channel greedy plus annealed swaps.

        Two CELF heaps (popped from celf_heap_list) each produce a greedy
        seed set; seed_set_list[0] ("billboard") is then refined by swapping
        its seeds for cheaper seeds from seed_set_list[1] ("handbill"),
        accepting losing swaps with a simulated-annealing probability.
        No seed_data files are written by this model.
        """
        ini = Initialization(self.data_key, self.prod_key, self.cas_key, self.wallet_key)
        seed_cost_dict = ini.constructSeedCostDict()
        graph_dict = ini.constructGraphDict()
        product_list, epw_list = ini.constructProductList()
        num_product = len(product_list)
        total_cost = sum(seed_cost_dict[i] for i in seed_cost_dict)
        # -1 marks "no result yet" per budget iteration; back-filled at the end.
        seed_set_sequence = [-1 for _ in range(len(self.budget_iteration))]
        ss_time_sequence = [-1 for _ in range(len(self.budget_iteration))]
        seed_data_sequence = [-1 for _ in range(len(self.budget_iteration))]
        ssbcs_model = SeedSelectionBCS(graph_dict, seed_cost_dict, product_list, epw_list)
        ss_start_time = time.time()
        celf_heap_list_o = ssbcs_model.generateCelfHeap()
        ss_acc_time = round(time.time() - ss_start_time, 4)
        for now_b_iter in self.budget_iteration:
            ss_start_time = time.time()
            now_bi_index = self.budget_iteration.index(now_b_iter)
            total_budget = safe_div(total_cost, 2 ** now_b_iter)
            # Fresh copy so each budget iteration starts from the same heaps.
            celf_heap_list = copy.deepcopy(celf_heap_list_o)
            print('@ selection\t' + get_model_name(self.mn_list) + ' @ ' + dataset_name_dict[self.data_key] + '_' + cascade_model_dict[self.cas_key] +
                  '\t' + wallet_distribution_type_dict[self.wallet_key] + '_' + product_name_dict[self.prod_key] + '_bi' + str(now_b_iter) + ', budget = ' + str(total_budget))
            seed_set_list = []
            while celf_heap_list:
                celf_heap = celf_heap_list.pop()
                now_budget, now_profit = 0.0, 0.0
                seed_set = [set() for _ in range(num_product)]
                while now_budget < total_budget and celf_heap:
                    mep_item = heap.heappop_max(celf_heap)
                    mep_mg, mep_k_prod, mep_i_node, mep_flag = mep_item
                    sc = seed_cost_dict[mep_i_node]
                    seed_set_length = sum(len(seed_set[k]) for k in range(num_product))
                    if round(now_budget + sc, 4) > total_budget:
                        # Unaffordable node: skip and keep popping.
                        continue
                    if mep_flag == seed_set_length:
                        seed_set[mep_k_prod].add(mep_i_node)
                        now_budget = round(now_budget + sc, 4)
                        # While heaps remain in the list, gains are cost ratios
                        # (see the safe_div below), so scale by sc to accumulate
                        # absolute profit.
                        now_profit = round(now_profit + mep_mg * (sc if len(celf_heap_list) else 1.0), 4)
                    else:
                        # Stale gain (CELF lazy evaluation): recompute, push back.
                        seed_set_t = copy.deepcopy(seed_set)
                        seed_set_t[mep_k_prod].add(mep_i_node)
                        ep_t = ssbcs_model.getSeedSetProfit(seed_set_t)
                        mg_t = round(ep_t - now_profit, 4)
                        if len(celf_heap_list):
                            mg_t = safe_div(mg_t, sc)
                        flag_t = seed_set_length
                        if mg_t > 0:
                            celf_item_t = (mg_t, mep_k_prod, mep_i_node, flag_t)
                            heap.heappush_max(celf_heap, celf_item_t)
                seed_set_list.insert(0, seed_set)
            # seed_set_list[0] is refined below using seeds from seed_set_list[1].
            final_seed_set = copy.deepcopy(seed_set_list[0])
            final_bud = sum(seed_cost_dict[i] for k in range(num_product) for i in final_seed_set[k])
            final_ep = ssbcs_model.getSeedSetProfit(seed_set_list[0])
            for k in range(num_product):
                Handbill_counter = 0
                # Annealing temperature and its per-step decrement.
                AnnealingScheduleT, detT = 1000000, 1000
                for s in seed_set_list[0][k]:
                    # -- first level: replace billboard seed by handbill seed --
                    final_seed_set_t = copy.deepcopy(final_seed_set)
                    final_seed_set_t[k].remove(s)
                    final_bud_t = final_bud - seed_cost_dict[s]
                    # NOTE(review): the generator variable k shadows the outer
                    # loop's k (the scan covers every product) -- confirm intended.
                    Handbill_seed_set = set((k, i) for k in range(num_product) for i in seed_set_list[1][k] if i not in final_seed_set_t[k])
                    if Handbill_seed_set:
                        min_Handbill_cost = min(seed_cost_dict[Handbill_item[1]] for Handbill_item in Handbill_seed_set)
                        while total_budget - final_bud_t >= min_Handbill_cost and Handbill_seed_set:
                            k_prod, i_node = Handbill_seed_set.pop()
                            if seed_cost_dict[i_node] <= total_budget - final_bud_t:
                                final_seed_set_t[k_prod].add(i_node)
                                final_bud_t += seed_cost_dict[i_node]
                                Handbill_counter += 1
                    final_ep_t = ssbcs_model.getSeedSetProfit(final_seed_set_t)
                    final_mg_t = final_ep_t - final_ep
                    # -- second level: replace handbill seed by handbill seed --
                    # Metropolis acceptance: keep improvements; keep losses with
                    # probability exp(loss / T).
                    if final_mg_t >= 0 or math.exp(safe_div(final_mg_t, AnnealingScheduleT)) > random.random():
                        final_seed_set = final_seed_set_t
                        final_bud = final_bud_t
                        final_ep = final_ep_t
                for q in range(min(Handbill_counter, 10)):
                    final_seed_set_t = copy.deepcopy(final_seed_set)
                    final_Handbill_seed_set = set((k, i) for k in range(num_product) for i in final_seed_set_t[k] if i in seed_set_list[1][k])
                    if final_Handbill_seed_set:
                        k_prod, i_node = final_Handbill_seed_set.pop()
                        final_seed_set_t[k_prod].remove(i_node)
                        final_bud_t = final_bud - seed_cost_dict[i_node]
                        Handbill_seed_set = set((k, i) for k in range(num_product) for i in seed_set_list[1][k] if i not in final_seed_set_t[k])
                        # NOTE(review): min() raises ValueError on an empty set
                        # (unlike the guarded branch above) -- confirm this set
                        # can never be empty here.
                        min_Handbill_cost = min(seed_cost_dict[Handbill_item[1]] for Handbill_item in Handbill_seed_set)
                        while total_budget - final_bud_t >= min_Handbill_cost and Handbill_seed_set:
                            k_prod, i_node = Handbill_seed_set.pop()
                            if seed_cost_dict[i_node] <= total_budget - final_bud_t:
                                final_seed_set_t[k_prod].add(i_node)
                                final_bud_t += seed_cost_dict[i_node]
                        final_ep_t = ssbcs_model.getSeedSetProfit(final_seed_set_t)
                        final_mg_t = final_ep_t - final_ep
                        if final_mg_t >= 0 or math.exp(safe_div(final_mg_t, AnnealingScheduleT)) > random.random():
                            final_seed_set = final_seed_set_t
                            final_bud = final_bud_t
                            final_ep = final_ep_t
                    # Cool down after every swap attempt.
                    AnnealingScheduleT -= detT
            seed_set = copy.deepcopy(final_seed_set)
            ss_time = round(time.time() - ss_start_time + ss_acc_time, 4)
            print('ss_time = ' + str(ss_time) + 'sec, cost = ' + str(final_bud) + ', seed_set_length = ' + str([len(s_set_k) for s_set_k in seed_set]))
            seed_set_sequence[now_bi_index] = seed_set
            ss_time_sequence[now_bi_index] = ss_time
            seed_data_sequence[now_bi_index] = final_seed_set
        # Budgets that never produced output reuse the previous iteration's result.
        while -1 in seed_data_sequence:
            no_data_index = seed_data_sequence.index(-1)
            seed_set_sequence[no_data_index] = seed_set_sequence[no_data_index - 1]
            ss_time_sequence[no_data_index] = ss_time_sequence[no_data_index - 1]
            seed_data_sequence[no_data_index] = seed_data_sequence[no_data_index - 1]
        eva_model = EvaluationM(self.mn_list, self.data_key, self.prod_key, self.cas_key)
        for bi in self.budget_iteration:
            now_bi_index = self.budget_iteration.index(bi)
            if wallet_distribution_type_dict[self.wallet_key]:
                # A specific wallet distribution was requested: evaluate only it.
                eva_model.evaluate(bi, self.wallet_key, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
            else:
                # Otherwise evaluate every wallet distribution type.
                for wd in self.wd_seq:
                    eva_model.evaluate(bi, wd, seed_set_sequence[now_bi_index], ss_time_sequence[now_bi_index])
"37822464+nmaypeter@users.noreply.github.com"
] | 37822464+nmaypeter@users.noreply.github.com |
1871dc5a236ac1574119220acd73a37bc860d137 | 2f48a4adb131621c7ad4d772af67b2c40f2f41d7 | /Dev/cfehome/src/cfehome/settings/__init__.py | c0df761f17f1f9e599cc3252e83b97aee7a7a926 | [] | no_license | akashbijwe/Python | ad0f6ae36e41e08a4ed24bfdcefde3287cefeb14 | 17948ece6835e240584d6835cce66eb75c4f1b22 | refs/heads/master | 2022-11-02T00:13:19.858003 | 2018-07-27T11:45:13 | 2018-07-27T11:45:13 | 141,707,337 | 2 | 1 | null | 2022-10-21T13:18:59 | 2018-07-20T12:05:30 | Python | UTF-8 | Python | false | false | 103 | py | from .base import *
# from .production import *
# try:
# from .local import *
# except:
# pass
| [
"mr.akashbijwe@gmail.com"
] | mr.akashbijwe@gmail.com |
b1a541ae2823325189c5b0f803ec117c9df66d07 | de69d99db8be567d97060149481091c25907d4ef | /src/trees/binary_trees.py | 84f2bcde422555256b4619c0ba4e877f5b7f152d | [] | no_license | chalam/Pynaconda | 0dd5acdb19c38352ee5d4b92c002d05bd75e452d | e24600d26afbc685e3853a6037f50dfc3fe077d2 | refs/heads/master | 2021-01-10T13:37:54.811250 | 2018-10-13T20:48:44 | 2018-10-13T20:48:44 | 36,340,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,556 | py | class Node:
"""
Class Node
"""
def __init__(self, value):
self.left = None # No self-referential Node in python
self.data = value
self.right = None
class Tree:
    """Binary search tree (BST) utility class.

    The tree is represented by its root node; every method takes the
    relevant (sub)tree root explicitly and returns the (possibly new) root,
    so callers must rebind: ``root = tree.insert(root, x)``.  Duplicate
    keys are ignored on insert.
    """

    def createNode(self, data):
        """Create and return a new detached Node holding *data*."""
        return Node(data)

    def insert(self, node, data):
        """Insert *data* into the subtree rooted at *node*.

        Returns the (possibly new) subtree root.  Duplicate keys are not
        inserted; the subtree is returned unchanged.
        """
        if node is None:
            # Empty subtree: the new node becomes its root.
            return self.createNode(data)
        if data < node.data:
            node.left = self.insert(node.left, data)
        elif data > node.data:
            node.right = self.insert(node.right, data)
        # data == node.data: duplicate, ignore.
        return node

    def search(self, node, data):
        """Return the node holding *data*, or None if absent."""
        if node is None or node.data == data:
            return node
        if node.data < data:
            return self.search(node.right, data)
        return self.search(node.left, data)

    def deleteNode(self, node, data):
        """Delete *data* from the subtree rooted at *node*; return new root.

        Bug fix: the original ``del node`` for a leaf only unbound the local
        name and then dereferenced it (UnboundLocalError); it also never
        handled a node with two children.  Now all three cases work: a leaf
        or single-child node is replaced by its (possibly None) child, and a
        two-child node takes its in-order successor's key, which is then
        removed from the right subtree.
        """
        if node is None:
            return None
        if data < node.data:
            node.left = self.deleteNode(node.left, data)
        elif data > node.data:
            node.right = self.deleteNode(node.right, data)
        else:
            # Found the node to remove.
            if node.left is None:
                # Covers both the leaf case (right is None too) and a
                # right-only child.
                return node.right
            if node.right is None:
                return node.left
            # Two children: copy the in-order successor's key here, then
            # delete that successor from the right subtree.
            successor = node.right
            while successor.left is not None:
                successor = successor.left
            node.data = successor.data
            node.right = self.deleteNode(node.right, successor.data)
        return node

    def traverseInorder(self, root):
        """Print the keys in ascending (in-order) sequence."""
        if root is not None:
            self.traverseInorder(root.left)
            print(root.data)
            self.traverseInorder(root.right)

    def traversePreorder(self, root):
        """Print the keys in pre-order (root, left, right)."""
        if root is not None:
            print(root.data)
            self.traversePreorder(root.left)
            self.traversePreorder(root.right)

    def traversePostorder(self, root):
        """Print the keys in post-order (left, right, root).

        Bug fix: the original recursed via traversePreorder, so the
        subtrees were printed in pre-order instead of post-order.
        """
        if root is not None:
            self.traversePostorder(root.left)
            self.traversePostorder(root.right)
            print(root.data)
def main():
    """Build a small demo BST and print its three depth-first traversals."""
    tree = Tree()
    root = tree.insert(None, 10)
    print(root)
    # Subsequent inserts mutate the existing tree; the returned root is the
    # same object, so the result can be ignored.
    for key in (20, 30, 40, 70, 60, 80):
        tree.insert(root, key)
    for label, traverse in (
            ("Traverse Inorder", tree.traverseInorder),
            ("Traverse Preorder", tree.traversePreorder),
            ("Traverse Postorder", tree.traversePostorder),
    ):
        print(label)
        traverse(root)
# Run the demo only when executed directly, not when imported as a module.
if __name__ == "__main__":
    main()
| [
"dim2dip@gmail.com"
] | dim2dip@gmail.com |
c5ee772862e7f91cab0caa6ad73a7ad70504357b | 6ea3058efb25c490dcb6ea2e6ce519ec4c2cc7b0 | /locust/lib/example_functions.py | 15b0fc6ecfbb85795c8d26e80660ed355fbebc67 | [] | no_license | studio-design/distributed-load-testing-using-kubernetes-locust | c921738e4a4bd9a0ace83776395f27fbd29ca5c7 | d15bf00a491ca8fca05e0fff955c0f3e9e2321e4 | refs/heads/master | 2023-08-24T10:55:58.914353 | 2021-10-31T21:39:46 | 2021-10-31T21:45:57 | 423,021,392 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | # -*- coding: utf-8 -*-
import random
def choose_random_page():
pages = [
'/'
]
return random.choice(pages)
| [
"yasuyuki.takeo@liferay.com"
] | yasuyuki.takeo@liferay.com |
c9d62cd28eb6a98c113b079864bf0553c983be35 | 284f4f56aed56573eb5516aa67c99bf41e595522 | /Leetcode/Arrays/p3574.py | 4261a42c73d1469fdff5a35d33f807e57238da87 | [] | no_license | rohangoli/PythonAdvanced | 537a05eff9ec305a6ec32fa2d0962a64976cd097 | 6448a5f0d82c7e951b5e476638e15a3c34966cd9 | refs/heads/develop | 2023-07-20T04:33:50.764104 | 2023-07-14T04:04:18 | 2023-07-14T04:04:18 | 126,811,520 | 0 | 0 | null | 2022-06-10T23:07:10 | 2018-03-26T10:20:16 | Jupyter Notebook | UTF-8 | Python | false | false | 514 | py | ## Squares of a Sorted Array
# Example 1:
# Input: nums = [-4,-1,0,3,10]
# Output: [0,1,9,16,100]
# Explanation: After squaring, the array becomes [16,1,0,9,100].
# After sorting, it becomes [0,1,9,16,100].
# Example 2:
# Input: nums = [-7,-3,2,3,11]
# Output: [4,9,9,49,121]
class Solution:
    """LeetCode 977 -- Squares of a Sorted Array."""

    def sortedSquares(self, nums: List[int]) -> List[int]:
        """Return the squares of *nums* in non-decreasing order.

        *nums* is given sorted in non-decreasing order, so the largest
        square always sits at one of the two ends.  A two-pointer sweep
        fills the result back-to-front in O(n) time, improving on the
        original square-then-sort O(n log n) approach and leaving the
        caller's list unmodified.
        """
        result = [0] * len(nums)
        lo, hi = 0, len(nums) - 1
        # Fill from the largest slot downwards, consuming whichever end
        # currently holds the bigger square.
        for pos in range(len(nums) - 1, -1, -1):
            left_sq = nums[lo] * nums[lo]
            right_sq = nums[hi] * nums[hi]
            if left_sq > right_sq:
                result[pos] = left_sq
                lo += 1
            else:
                result[pos] = right_sq
                hi -= 1
        return result
"rohanr27@gmail.com"
] | rohanr27@gmail.com |
0eb2aee59c25c67c7a7ebb47b287de264c836ccb | 083f983919df3c8637668f6e4339742aa9fde761 | /docker/generate_makefile.py | 205da5d6297749ea38cd5b05044cdd97ad6f7dac | [
"Apache-2.0"
] | permissive | liumuqing/fuzzbench | 97174149a7b76b04ccada8010ec5198f36ab4080 | 140e69107d725611290a05d6369f159337d61bdf | refs/heads/master | 2022-11-17T17:12:46.448307 | 2020-07-17T03:33:58 | 2020-07-17T03:33:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,956 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple generator for local Makefile rules."""
import os
BASE_TAG = 'gcr.io/fuzzbench'
BENCHMARKS_DIR = os.path.join(os.path.dirname(__file__), os.pardir,
'benchmarks')
FUZZERS_DIR = os.path.join(os.path.dirname(__file__), os.pardir, 'fuzzers')
BOILERPLATE = """
cache_from = $(if ${RUNNING_ON_CI},--cache-from {fuzzer},)
"""
FUZZER_TEMPLATE = """
.{fuzzer}-builder: base-builder
docker build \\
--tag {base_tag}/builders/{fuzzer} \\
--file fuzzers/{fuzzer}/builder.Dockerfile \\
$(call cache_from,{base_tag}/builders/{fuzzer}) \\
fuzzers/{fuzzer}
.pull-{fuzzer}-builder: pull-base-builder
docker pull {base_tag}/builders/{fuzzer}
"""
FUZZER_BENCHMARK_RUN_TARGETS_TEMPLATE = """
build-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner
pull-{fuzzer}-{benchmark}: .pull-{fuzzer}-{benchmark}-runner
run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner
docker run \\
--cpus=1 \\
--cap-add SYS_NICE \\
--cap-add SYS_PTRACE \\
-e FUZZ_OUTSIDE_EXPERIMENT=1 \\
-e TRIAL_ID=1 \\
-e FUZZER={fuzzer} \\
-e BENCHMARK={benchmark} \\
-it {base_tag}/runners/{fuzzer}/{benchmark}
test-run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner
docker run \\
--cap-add SYS_NICE \\
--cap-add SYS_PTRACE \\
-e FUZZ_OUTSIDE_EXPERIMENT=1 \\
-e TRIAL_ID=1 \\
-e FUZZER={fuzzer} \\
-e BENCHMARK={benchmark} \\
-e MAX_TOTAL_TIME=20 \\
-e SNAPSHOT_PERIOD=10 \\
{base_tag}/runners/{fuzzer}/{benchmark}
debug-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-runner
docker run \\
--cpus=1 \\
--cap-add SYS_NICE \\
--cap-add SYS_PTRACE \\
-e FUZZ_OUTSIDE_EXPERIMENT=1 \\
-e TRIAL_ID=1 \\
-e FUZZER={fuzzer} \\
-e BENCHMARK={benchmark} \\
--entrypoint "/bin/bash" \\
-it {base_tag}/runners/{fuzzer}/{benchmark}
"""
FUZZER_BENCHMARK_TEMPLATE = """
.{fuzzer}-{benchmark}-builder: .{fuzzer}-builder
docker build \\
--tag {base_tag}/builders/{fuzzer}/{benchmark} \\
--build-arg fuzzer={fuzzer} \\
--build-arg benchmark={benchmark} \\
$(call cache_from,{base_tag}/builders/{fuzzer}/{benchmark}) \\
--file docker/benchmark-builder/Dockerfile \\
.
.pull-{fuzzer}-{benchmark}-builder: .pull-{fuzzer}-builder
docker pull {base_tag}/builders/{fuzzer}/{benchmark}
ifeq (,$(filter {fuzzer},coverage coverage_source_based))
.{fuzzer}-{benchmark}-intermediate-runner: base-runner
docker build \\
--tag {base_tag}/runners/{fuzzer}/{benchmark}-intermediate \\
--file fuzzers/{fuzzer}/runner.Dockerfile \\
$(call cache_from,{base_tag}/runners/{fuzzer}/{benchmark}-intermediate) \\
fuzzers/{fuzzer}
.pull-{fuzzer}-{benchmark}-intermediate-runner: pull-base-runner
docker pull {base_tag}/runners/{fuzzer}/{benchmark}-intermediate
.{fuzzer}-{benchmark}-runner: .{fuzzer}-{benchmark}-builder .{fuzzer}-{benchmark}-intermediate-runner
docker build \\
--tag {base_tag}/runners/{fuzzer}/{benchmark} \\
--build-arg fuzzer={fuzzer} \\
--build-arg benchmark={benchmark} \\
$(call cache_from,{base_tag}/runners/{fuzzer}/{benchmark}) \\
--file docker/benchmark-runner/Dockerfile \\
.
.pull-{fuzzer}-{benchmark}-runner: .pull-{fuzzer}-{benchmark}-builder .pull-{fuzzer}-{benchmark}-intermediate-runner
docker pull {base_tag}/runners/{fuzzer}/{benchmark}
""" + FUZZER_BENCHMARK_RUN_TARGETS_TEMPLATE + """
else
# Coverage builds don't need runners.
build-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-builder
pull-{fuzzer}-{benchmark}: .pull-{fuzzer}-{benchmark}-builder
endif
"""
OSS_FUZZER_BENCHMARK_RUN_TARGETS_TEMPLATE = """
build-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner
pull-{fuzzer}-{benchmark}: .pull-{fuzzer}-{benchmark}-oss-fuzz-runner
run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner
docker run \\
--cpus=1 \\
--cap-add SYS_NICE \\
--cap-add SYS_PTRACE \\
-e FUZZ_OUTSIDE_EXPERIMENT=1 \\
-e FORCE_LOCAL=1 \\
-e TRIAL_ID=1 \\
-e FUZZER={fuzzer} \\
-e BENCHMARK={benchmark} \\
-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\
-it {base_tag}/runners/{fuzzer}/{benchmark}
test-run-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner
docker run \\
--cap-add SYS_NICE \\
--cap-add SYS_PTRACE \\
-e FUZZ_OUTSIDE_EXPERIMENT=1 \\
-e FORCE_LOCAL=1 \\
-e TRIAL_ID=1 \\
-e FUZZER={fuzzer} \\
-e BENCHMARK={benchmark} \\
-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\
-e MAX_TOTAL_TIME=20 \\
-e SNAPSHOT_PERIOD=10 \\
{base_tag}/runners/{fuzzer}/{benchmark}
debug-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-runner
docker run \\
--cpus=1 \\
--cap-add SYS_NICE \\
--cap-add SYS_PTRACE \\
-e FUZZ_OUTSIDE_EXPERIMENT=1 \\
-e FORCE_LOCAL=1 \\
-e TRIAL_ID=1 \\
-e FUZZER={fuzzer} \\
-e BENCHMARK={benchmark} \\
-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\
--entrypoint "/bin/bash" \\
-it {base_tag}/runners/{fuzzer}/{benchmark}
"""
OSS_FUZZER_BENCHMARK_TEMPLATE = """
.{fuzzer}-{benchmark}-oss-fuzz-builder-intermediate:
docker build \\
--tag {base_tag}/builders/{fuzzer}/{benchmark}-intermediate \\
--file=fuzzers/{fuzzer}/builder.Dockerfile \\
--build-arg parent_image=gcr.io/fuzzbench/oss-fuzz/$({benchmark}-project-name)@sha256:$({benchmark}-oss-fuzz-builder-hash) \\
$(call cache_from,{base_tag}/builders/{fuzzer}/{benchmark}-intermediate) \\
fuzzers/{fuzzer}
.pull-{fuzzer}-{benchmark}-oss-fuzz-builder-intermediate:
docker pull {base_tag}/builders/{fuzzer}/{benchmark}-intermediate
.{fuzzer}-{benchmark}-oss-fuzz-builder: .{fuzzer}-{benchmark}-oss-fuzz-builder-intermediate
docker build \\
--tag {base_tag}/builders/{fuzzer}/{benchmark} \\
--file=docker/oss-fuzz-builder/Dockerfile \\
--build-arg parent_image={base_tag}/builders/{fuzzer}/{benchmark}-intermediate \\
--build-arg fuzzer={fuzzer} \\
--build-arg benchmark={benchmark} \\
$(call cache_from,{base_tag}/builders/{fuzzer}/{benchmark}) \\
.
.pull-{fuzzer}-{benchmark}-oss-fuzz-builder: .pull-{fuzzer}-{benchmark}-oss-fuzz-builder-intermediate
docker pull {base_tag}/builders/{fuzzer}/{benchmark}
ifeq (,$(filter {fuzzer},coverage coverage_source_based))
.{fuzzer}-{benchmark}-oss-fuzz-intermediate-runner: base-runner
docker build \\
--tag {base_tag}/runners/{fuzzer}/{benchmark}-intermediate \\
--file fuzzers/{fuzzer}/runner.Dockerfile \\
$(call cache_from,{base_tag}/runners/{fuzzer}/{benchmark}-intermediate) \\
fuzzers/{fuzzer}
.pull-{fuzzer}-{benchmark}-oss-fuzz-intermediate-runner: pull-base-runner
docker pull {base_tag}/runners/{fuzzer}/{benchmark}-intermediate
.{fuzzer}-{benchmark}-oss-fuzz-runner: .{fuzzer}-{benchmark}-oss-fuzz-builder .{fuzzer}-{benchmark}-oss-fuzz-intermediate-runner
docker build \\
--tag {base_tag}/runners/{fuzzer}/{benchmark} \\
--build-arg fuzzer={fuzzer} \\
--build-arg benchmark={benchmark} \\
$(call cache_from,{base_tag}/runners/{fuzzer}/{benchmark}) \\
--file docker/oss-fuzz-runner/Dockerfile \\
.
.pull-{fuzzer}-{benchmark}-oss-fuzz-runner: .pull-{fuzzer}-{benchmark}-oss-fuzz-builder .pull-{fuzzer}-{benchmark}-oss-fuzz-intermediate-runner
docker pull {base_tag}/runners/{fuzzer}/{benchmark}
""" + OSS_FUZZER_BENCHMARK_RUN_TARGETS_TEMPLATE + """
else
build-{fuzzer}-{benchmark}: .{fuzzer}-{benchmark}-oss-fuzz-builder
pull-{fuzzer}-{benchmark}: .pull-{fuzzer}-{benchmark}-oss-fuzz-builder
endif
"""
def generate_fuzzer(fuzzer, benchmarks, oss_fuzz_benchmarks):
"""Output make rules for a single fuzzer."""
# Generate build rules for the fuzzer itself.
print(FUZZER_TEMPLATE.format(fuzzer=fuzzer, base_tag=BASE_TAG))
# Generate rules for fuzzer-benchmark pairs.
for benchmark in benchmarks:
print(
FUZZER_BENCHMARK_TEMPLATE.format(fuzzer=fuzzer,
benchmark=benchmark,
base_tag=BASE_TAG))
for benchmark in oss_fuzz_benchmarks:
print(
OSS_FUZZER_BENCHMARK_TEMPLATE.format(fuzzer=fuzzer,
benchmark=benchmark,
base_tag=BASE_TAG))
# Generate rules for building/pulling all target/benchmark pairs.
all_benchmarks = benchmarks + oss_fuzz_benchmarks
all_build_targets = ' '.join([
'build-{0}-{1}'.format(fuzzer, benchmark)
for benchmark in all_benchmarks
])
all_pull_targets = ' '.join([
'pull-{0}-{1}'.format(fuzzer, benchmark) for benchmark in all_benchmarks
])
print('build-{fuzzer}-all: {all_targets}'.format(
fuzzer=fuzzer, all_targets=all_build_targets))
print('pull-{fuzzer}-all: {all_targets}'.format(
fuzzer=fuzzer, all_targets=all_pull_targets))
def main():
"""Main entry point."""
# Output boilerplate used by other templates and generated rules.
print(BOILERPLATE)
# Compute the list of benchmarks. OSS-Fuzz benchmarks are built
# differently from standard benchmarks.
benchmarks = []
oss_fuzz_benchmarks = []
for benchmark in os.listdir(BENCHMARKS_DIR):
benchmark_path = os.path.join(BENCHMARKS_DIR, benchmark)
if not os.path.isdir(benchmark_path):
continue
if os.path.exists(os.path.join(benchmark_path, 'build.sh')):
benchmarks.append(benchmark)
if os.path.exists(os.path.join(benchmark_path, 'oss-fuzz.yaml')):
oss_fuzz_benchmarks.append(benchmark)
# Generate the build rules for fuzzer/benchmark pairs.
fuzzers = []
for fuzzer in os.listdir(FUZZERS_DIR):
# Skip non-directory files. These do not represent fuzzers.
fuzzer_dir = os.path.join(FUZZERS_DIR, fuzzer)
if not os.path.isdir(fuzzer_dir):
continue
generate_fuzzer(fuzzer, benchmarks, oss_fuzz_benchmarks)
fuzzers.append(fuzzer)
# Generate rules to build all known targets.
all_build_targets = ' '.join(
['build-{0}-all'.format(name) for name in fuzzers])
all_pull_targets = ' '.join(
['pull-{0}-all'.format(name) for name in fuzzers])
print('build-all: {all_targets}'.format(all_targets=all_build_targets))
print('pull-all: {all_targets}'.format(all_targets=all_pull_targets))
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | liumuqing.noreply@github.com |
cf12b9fec72682fc2aa7ad9307da65aab512a315 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/operations/_virtual_network_gateway_nat_rules_operations.py | 8cc37a0f84e06af9a668517eea78cd7432103909 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 22,954 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkGatewayNatRulesOperations(object):
"""VirtualNetworkGatewayNatRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
nat_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkGatewayNatRule"
"""Retrieves the details of a nat rule.
:param resource_group_name: The resource group name of the Virtual Network Gateway.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the gateway.
:type virtual_network_gateway_name: str
:param nat_rule_name: The name of the nat rule.
:type nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkGatewayNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.VirtualNetworkGatewayNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkGatewayNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/natRules/{natRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
nat_rule_name, # type: str
nat_rule_parameters, # type: "_models.VirtualNetworkGatewayNatRule"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkGatewayNatRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(nat_rule_parameters, 'VirtualNetworkGatewayNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGatewayNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGatewayNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/natRules/{natRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
nat_rule_name, # type: str
nat_rule_parameters, # type: "_models.VirtualNetworkGatewayNatRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkGatewayNatRule"]
"""Creates a nat rule to a scalable virtual network gateway if it doesn't exist else updates the
existing nat rules.
:param resource_group_name: The resource group name of the Virtual Network Gateway.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the gateway.
:type virtual_network_gateway_name: str
:param nat_rule_name: The name of the nat rule.
:type nat_rule_name: str
:param nat_rule_parameters: Parameters supplied to create or Update a Nat Rule.
:type nat_rule_parameters: ~azure.mgmt.network.v2021_02_01.models.VirtualNetworkGatewayNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkGatewayNatRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_02_01.models.VirtualNetworkGatewayNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
nat_rule_name=nat_rule_name,
nat_rule_parameters=nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGatewayNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/natRules/{natRuleName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
nat_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/natRules/{natRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
nat_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a nat rule.
:param resource_group_name: The resource group name of the Virtual Network Gateway.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the gateway.
:type virtual_network_gateway_name: str
:param nat_rule_name: The name of the nat rule.
:type nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
nat_rule_name=nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/natRules/{natRuleName}'} # type: ignore
def list_by_virtual_network_gateway(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualNetworkGatewayNatRulesResult"]
"""Retrieves all nat rules for a particular virtual network gateway.
:param resource_group_name: The resource group name of the virtual network gateway.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualNetworkGatewayNatRulesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.ListVirtualNetworkGatewayNatRulesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualNetworkGatewayNatRulesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_virtual_network_gateway.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualNetworkGatewayNatRulesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_virtual_network_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/natRules'} # type: ignore
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
48e5cb61a39dc6de5e2779ebd8c76b7464dea846 | 1002160fec10d11ded23ffe3b555cb382a3568ca | /PY4E/exercisesANS/ex10_1.py | 9a1ad3c10fb9b947ee93a73956c6fe6efb4afe19 | [] | no_license | AREKKUSU-hyper/Python-Libraries | 8358a1093f9083286dd02e263f6c9d11d5221bd3 | 07bada98d250d8370706294d78bce04c6b39b0e4 | refs/heads/master | 2022-11-17T02:13:59.933905 | 2020-06-28T09:26:43 | 2020-06-28T09:26:43 | 275,550,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # The top ten most common words # tuple用value排序
name=input("Enter file:")
handle=open(name)
counts=dict()
for line in handle:
words=line.split()
for word in words:
counts[word]=counts.get(word,0)+1
lst=list()
for key,val in counts.items():
newtuple=(val,key)
lst.append(newtuple)
lst=sorted(lst,reverse=True) # 從大到小排序
for val,key in lst[:10]:
print(key,val) | [
"ettoday.alex@gmail.com"
] | ettoday.alex@gmail.com |
367db01aab0f8c4d48fb74c5b3ca470707f4ab2c | 69c5e02afaa3c8b7547b4bdbf2f094a83bd994ce | /AdvLnUtils.py | 9cf8c4e6d6596078a302c9b149ac71ca6b7ff2fc | [] | no_license | yelled1/CarND-Advanced-Lane-Lines | c1e6755a5d73e0b19a9eed5c84b2a789833c6c43 | 78b48f914c2da03cc01f88d94e3ae42586e676e2 | refs/heads/master | 2021-01-22T05:47:02.641520 | 2017-02-14T02:56:55 | 2017-02-14T02:56:55 | 81,703,752 | 0 | 0 | null | 2017-02-12T04:53:26 | 2017-02-12T04:53:26 | null | UTF-8 | Python | false | false | 14,169 | py | import numpy as np
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import imgUtils as iu
from moviepy.editor import VideoFileClip
def absSobelThresh(img, orient='x', sobel_kernel=3, thresh=(20, 255), dbg=0):
if dbg: print('dirThresh=', orient, thresh)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ox, oy = 1,0
if orient != 'x': ox, oy = 0,1 # (the 0, 1 at the end denotes y-direction)
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, ox, oy, ksize=sobel_kernel))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
gradBinary = np.zeros_like(scaled_sobel)
gradBinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return gradBinary
def magThresh(img, sobel_kernel=3, mThresh=(20, 255), dbg=0):
if dbg: print('magThresh=', mThresh)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1,0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0,1, ksize=sobel_kernel)
abs_sobel = np.sqrt(sobelx**2+sobely**2)
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
magBinary = np.zeros_like(scaled_sobel)
magBinary[(scaled_sobel >= mThresh[0]) & (scaled_sobel <= mThresh[1])] = 1
return magBinary
def dirThresh(img, sobel_kernel=3, thresh=(0.7, np.pi/2), dbg=0):
    """Binary mask of gradient *direction* (radians in [0, pi/2]).

    Keeps pixels whose gradient angle atan2(|gy|, |gx|) lies within thresh;
    used to select near-vertical lane edges.
    """
    if dbg: print('dirThresh=', thresh)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1,0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0,1, ksize=sobel_kernel)
    dirGradient = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    dirBinary = np.zeros_like(dirGradient) # NOTE No np.int8 (8bit conversion)!
    dirBinary[(dirGradient >= thresh[0]) & (dirGradient <= thresh[1])] = 1
    return dirBinary
def hlsSelect(img, sel='S', thresh=(90, 255), dbg=0):
    """Threshold a single HLS channel of an RGB image into a 0/1 mask.

    sel: 'H', 'L' or 'S' (case-insensitive) -- the channel to threshold.
    thresh: keep pixels with thresh[0] < value <= thresh[1].
    """
    if dbg: print('hlsSelect', sel, thresh)
    hlsSel={'H':0, 'L':1, 'S':2,}
    # BUG FIX: np.float was removed in NumPy 1.24; it was only an alias of
    # the builtin float, which behaves identically here.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(float)
    X = hls[:,:, hlsSel[sel.upper()]] # Apply a threshold to the chosen HLS channel
    binary = np.zeros_like(X)
    binary[(X > thresh[0]) & (X <= thresh[1])] = 1
    return binary
def combGradMagDir(Img, ksize=3, dbg=0):
    """Combine x/y gradient, magnitude and direction masks into one binary mask.

    A pixel is set when (x-grad AND y-grad) OR (magnitude AND direction)
    masks agree. ksize is the Sobel kernel size used by all sub-masks.
    """
    # Apply each of the thresholding functions
    gradx = absSobelThresh(Img, orient='x', sobel_kernel=ksize, thresh=(20, 255), dbg=dbg)
    grady = absSobelThresh(Img, orient='y', sobel_kernel=ksize, thresh=(20, 255), dbg=dbg)
    mag_binary = magThresh(Img, sobel_kernel=ksize, mThresh=(20, 255), dbg=dbg)
    dir_binary = dirThresh(Img, sobel_kernel=ksize, )#thresh=(.5, 1.)) #np.pi/1.2))
    combined = np.zeros_like(dir_binary)
    combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
    return combined
def combGradMagDirWYmask(Img, ksize=3, dbg=0):
    """Gradient/magnitude/direction combination gated by a white+yellow colour mask.

    NOTE(review): because `&` binds tighter than `|`, the yellow/white mask
    currently only ANDs with the (magnitude AND direction) term, not with the
    (gradx AND grady) term -- confirm whether the mask was meant to gate the
    whole expression.
    """
    # Apply each of the thresholding functions
    gradx = absSobelThresh(Img, orient='x', sobel_kernel=ksize, thresh=(20, 100), dbg=dbg)
    grady = absSobelThresh(Img, orient='y', sobel_kernel=ksize, thresh=(20, 100), dbg=dbg)
    mag_binary = magThresh(Img, sobel_kernel=ksize, mThresh=(35, 100), dbg=dbg)
    dir_binary = dirThresh(Img, sobel_kernel=ksize, thresh=(.7, 1.3)) #np.pi/1.2))
    combined = np.zeros_like(dir_binary)
    combined[ ((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) &
              ( iu.addMask_yellowMask(Img) == 1) ] = 1
    return combined
def pipeline(Img, s_thresh=(170, 255), sx_thresh=(20, 100), kernelSz=3, dbg=0):
    """Colour + gradient binary pipeline.

    Returns a 3-channel image: green = gradient mask computed from the HLS
    lightness channel, blue = S-channel colour mask, red channel all zeros.
    NOTE: sx_thresh is currently not forwarded (combGradMagDir applies its
    own internal thresholds); the parameter is kept for interface
    compatibility.
    """
    img = np.copy(Img)
    s_binary = hlsSelect(img, sel='S', thresh=s_thresh)
    # Build an image whose green channel holds HLS lightness so the grayscale
    # conversion inside combGradMagDir effectively sees the L channel.
    l_layer = np.zeros_like(img)
    # np.float was removed in NumPy 1.24; builtin float is the same alias.
    l_layer[:,:,1] = cv2.cvtColor(Img, cv2.COLOR_RGB2HLS)[:,:,1].astype(float)
    # BUG FIX: combGradMagDir() has no 'thresh' parameter -- passing
    # thresh=sx_thresh raised TypeError on every call.
    sxbinary = combGradMagDir(l_layer, ksize=kernelSz, dbg=dbg)
    # Stack each channel.
    # Note color_binary[:, :, 0] is all 0s, effectively an all black image. It might
    # be beneficial to replace this channel with something else.
    color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary))
    return color_binary
def outlineRegionOfIntest(Img, vertices):
    """Mask Img so only the polygon defined by *vertices* is kept.

    Pixels outside the polygon become 0 (black). Works for both grayscale
    and multi-channel images.
    """
    mask = np.zeros_like(Img)
    # Fill colour must match the channel count of the input image
    if len(Img.shape) > 2:
        fill_color = (255,) * Img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon interior, then keep only those pixels of Img
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(Img, mask)
def warp(Img):
    """Perspective-warp the lane region of Img to a bird's-eye view.

    Returns (warped_image, Minv) where Minv maps bird's-eye coordinates back
    to the original camera perspective (used when drawing lane overlays).
    """
    imgSz = (Img.shape[1], Img.shape[0])
    y_bot = Img.shape[0] #=720
    src = { 'tR': [730, 460], 'tL': [570, 460],
            'bR': [1180, y_bot], 'bL': [180, y_bot] }
    vertices = np.array([[ src['bL'], src['tL'], src['tR'], src['bR'] ]], dtype=np.int32)
    regionOfInterest = outlineRegionOfIntest(Img, vertices)
    # src coordinates
    # BUG FIX: np.float32(...) is a scalar-type constructor and does not
    # accept a dtype keyword; np.float32([...], dtype=np.int32) raised
    # TypeError. getPerspectiveTransform needs float32 points anyway.
    src = np.float32([ src['tR'], src['bR'], src['bL'], src['tL'] ])
    dst = { 'tR': [980, 0], 'tL': [320, 0],
            'bR': [960, y_bot],'bL': [320, y_bot],}
    # NOTE(review): dst top-right x (980) differs from bottom-right x (960);
    # possibly a typo -- confirm the intended destination rectangle.
    Dst = np.float32([dst['tR'], dst['bR'], dst['bL'], dst['tL'] ])
    # perspective transform Calc
    M = cv2.getPerspectiveTransform(src, Dst)
    # the inverse matrix Calc (will be used in the last steps)
    Minv = cv2.getPerspectiveTransform(Dst, src)
    # Create warped image but keep the same size as the input image
    warped = cv2.warpPerspective(regionOfInterest, M, imgSz, flags=cv2.INTER_LINEAR)
    return warped, Minv
def combineGradientColor(Img):
    """OR-combine the gradient+colour-mask filter with the HLS S-channel mask.

    Img is expected in BGR order (converted to RGB first).
    Returns a 0/1 binary image highlighting likely lane-line pixels.
    """
    ## Combine gradient thresholds & color space to better detect the lines
    rgbImg = cv2.cvtColor(Img, cv2.COLOR_BGR2RGB)
    hlsImg = hlsSelect(rgbImg, thresh=(90, 255))
    result = np.zeros_like(hlsImg)
    result[(combGradMagDirWYmask(rgbImg) == 1) | (hlsImg == 1) ] = 1
    return result
def onScrnCurvatureMeasrs(Img, left_curverad, right_curverad, dst_frm_center):
    """Overlay left/right curvature radii and centre offset text onto Img.

    Modifies Img in place and also returns it.
    """
    CLR = (255,255,255)

    def put(text, pos, scale=1.5):
        # all labels share font face 5, white colour and thickness 2
        cv2.putText(Img, text, pos, fontFace=5, fontScale=scale, color=CLR, thickness=2)

    # left and right radius on each side of the image
    put('Left Radius', (50, 600))
    put('{}m'.format(int(left_curverad)), (70, 650))
    put('RightRadius', (1000, 600))
    put('{}m'.format(int(right_curverad)), (1070, 650))
    # distance from centre at the top centre of the image
    put('CenterOffSet', (530, 100))
    put('{0:.2f}m'.format(dst_frm_center), (560, 160), scale=2)
    return Img
def getPolynomialsCurve(Img, dbg=0):
    """Fit second-order polynomials x = f(y) to the left/right lane pixels.

    Img: binary warped (bird's-eye) image.
    Returns (left_fit, right_fit) polynomial coefficient arrays.
    """
    # BUG FIX: leftx_base / rightx_base were never computed, so this function
    # raised NameError on every call. Locate the starting x position of each
    # lane line from the column histogram of the lower image half.
    histogram = np.sum(Img[Img.shape[0]//2:, :], axis=0)
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Id the x & y positions of All nonzero pixels in the image
    nonzero = Img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    margin = 100 # Set the width of the windows +/- margin
    minpix = 50 # Set minimum number of pixels found to recenter window
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Slice the image in 10 horizonally
    slices = iu.sliceImage(Img)
    # np.int was removed in NumPy 1.24 -- the builtin int is equivalent here
    window_height = int(slices[0].shape[0])
    for i in range(0, len(slices)):
        win_y_low = Img.shape[0] - (i+1)*window_height
        win_y_high = Img.shape[0] - i*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
        (nonzerox >= win_xleft_low) &  (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
        (nonzerox >= win_xright_low) &  (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    if dbg:
        # Generate x and y values for plotting
        ploty = np.linspace(0, Img.shape[0]-1, Img.shape[0] )
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        # BUG FIX: out_img was never created in the debug branch (NameError);
        # build an RGB visualization image from the binary input.
        out_img = np.dstack((Img, Img, Img)) * 255
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        plt.imshow(out_img)
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
        plt.show()
    return left_fit, right_fit
def getLineCurvature(Img, left_fit, right_fit, dbg=0):
    """Radius of curvature of both lane lines (metres) plus centre offset.

    Returns (left_radius_m, right_radius_m, offset_m); a positive offset
    means the image centre lies to the right of the mean lane position.
    """
    # sample the polynomials along the full image height (pixel space)
    ys = np.linspace(0, Img.shape[0]-1, Img.shape[0])
    y_eval = np.max(ys)  # evaluate curvature at the image bottom
    # conversions from pixel space to metres
    ym_per_pix = 30/720 # meters per pixel in y dim
    xm_per_pix = 3.7/700 # meters per pixel in x dim

    def radius_m(xs):
        # refit in metre space, then apply the curvature formula at y_eval
        cr = np.polyfit(ys*ym_per_pix, xs*xm_per_pix, 2)
        return ((1 + (2*cr[0]*y_eval*ym_per_pix + cr[1])**2)**1.5) / np.absolute(2*cr[0])

    xs_left = left_fit[0]*ys**2 + left_fit[1]*ys + left_fit[2]
    xs_right = right_fit[0]*ys**2 + right_fit[1]*ys + right_fit[2]
    left_curverad = radius_m(xs_left)
    right_curverad = radius_m(xs_right)
    # offset of the image centre from the mean lane position, in metres
    image_center_m = (Img.shape[1] / 2) * xm_per_pix
    lane_center_m = np.mean([xs_left, xs_right]) * xm_per_pix
    return left_curverad, right_curverad, image_center_m - lane_center_m
def drawLines(origImg, warpdImg, left_fit, right_fit, Minv):
    """Paint the detected lane area back onto the original camera image.

    origImg: undistorted camera frame; warpdImg: bird's-eye binary image
    (only its shape is used); left_fit/right_fit: x=f(y) polynomials;
    Minv: inverse perspective matrix from warp().
    Returns origImg blended with a translucent green lane polygon.
    """
    ploty = np.linspace(0, warpdImg.shape[0]-1, warpdImg.shape[0])
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warpdImg).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    # (right side is flipped so the polygon outline runs down one edge and up the other)
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (warpdImg.shape[1], warpdImg.shape[0]))
    # Combine the result w/ the original image
    result = cv2.addWeighted(origImg, 1, newwarp, 0.3, 0)
    return result
def processImg(image, dbg=0):
    """Full lane-detection pipeline for a single camera frame.

    undistort -> gradient/colour filter -> bird's-eye warp -> polynomial
    fit -> curvature/offset measurement -> lane overlay + on-screen labels.
    Returns the annotated image.
    """
    undistImg = iu.undistortImage(image)
    filtrdImg = combineGradientColor(undistImg)
    warpedImg, Minv = warp(filtrdImg) #Bird Eye view
    # Get polynomial coeff fitting the curvature of the lane lines
    left_fit, right_fit = getPolynomialsCurve(warpedImg, dbg=dbg)
    # Measure the curvature of the two lines, and get the distance from the center
    left_curvrad, right_curvrad, dst_from_center = getLineCurvature(warpedImg, left_fit, right_fit, dbg=dbg)
    if dbg: print(warpedImg.shape)
    # Draw the detected lines on the input image
    ImgWlines = drawLines(undistImg, warpedImg, left_fit, right_fit, Minv)
    #if dbg: iu.plt1Image(ImgWlines)
    # put the Curvature Measures on Screen
    ImgWlnsLbls = onScrnCurvatureMeasrs(ImgWlines, left_curvrad, right_curvrad, dst_from_center)
    return ImgWlnsLbls
def proccessVideo(inClipFnm, outClipFnm='./output_images/outPut.mp4'):
    """Run the lane-finding pipeline on every frame of a video file.

    inClipFnm: path of the input clip; outClipFnm: path of the annotated
    output clip (written without audio).
    """
    source_clip = VideoFileClip(inClipFnm)
    annotated = source_clip.fl_image(processImg)
    annotated.write_videofile(outClipFnm, audio=False)
if __name__ == '__main__':
    # Ad-hoc test drivers; uncomment the one you want to run.
    #image = mpimg.imread('signs_vehicles_xygrad.jpg')
    #image = mpimg.imread('bridge_shadow.jpg')
    #combin = warp(image)[0]
    #iu.plt2Images(image, combin)
    #iu.plt1Image( processImg( mpimg.imread('./test_images/straight_lines1.jpg'), 1))
    #proccessVideo("./project_video.mp4")
    #proccessVideo("./challenge_video.mp4", outClipFnm='./output_images/ChallengeOutPut.mp4')
    #proccessVideo("./harder_challenge_video.mp4", outClipFnm='./output_images/harderChallengeOutPut.mp4')
    # Default: process one still frame with debug plotting enabled
    xx = processImg( mpimg.imread('./test_images/straight_lines1.jpg'), dbg=1)
| [
"mailhyoon@gmail.com"
] | mailhyoon@gmail.com |
ef00affcca72b6372fc92fd1d4ac34052967c4d8 | b186dcdf7e429997ea11c9e8cfc22077c060e489 | /experiment/experiment1/tools_psychopy.py | b628bb6bca5672d9f79afea9e78c98af0e064ceb | [] | no_license | behinger/fixdur | 841599cbd231052dbc77ed0213d9a95c0d7faa1e | 8f6f4a8837b4ca1dd0dfcf9a96fddccc568e51cf | refs/heads/master | 2021-03-27T19:08:37.449607 | 2017-09-25T10:41:58 | 2017-09-25T10:41:58 | 68,902,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,976 | py | import numpy as np
import pygame, random, scipy, os
from psychopy import visual, core, event, monitors
from math import atan2, degrees,sqrt,atan,sin,cos,exp,log
from scipy import stats
from pygame.locals import *
from collections import deque
try:
import fixdur_tracker as tracker
import pylink
except ImportError:
print 'pylink and fixdur_tracker cannot be imported'
#import collections
#from numpy.compat import long
TRIAL_LENGTH = 50#2000 #how long do we want to wait for a saccade:
TRACKING_FREQ = 500
PPD = 50
#PPD = 76
MAT = 154
def paths():
    """Return (path_to_fixdur_files, path_to_fixdur_code) for this machine.

    Checks the known install locations in order and returns the first whose
    data directory exists.
    BUG FIX: previously, if no location existed the function fell through to
    the return statement and raised an opaque UnboundLocalError; now it
    raises IOError/OSError with a clear message.
    """
    candidates = [
        ('/home_local/tracking/experiments/fixdur/',
         '/home_local/tracking/experiments/fixdur/expcode/'),
        ('/net/store/nbp/fixdur/',
         '/home/student/l/lkaufhol/fixdur/expcode/'),
        ('/home/lkaufhol/Dokumente/studium/master/master_thesis/fixdur/',
         '/home/lkaufhol/Dokumente/studium/master/master_thesis/fixdur/expcode/'),
    ]
    for path_to_fixdur_files, path_to_fixdur_code in candidates:
        if os.path.exists(path_to_fixdur_files):
            return path_to_fixdur_files, path_to_fixdur_code
    raise IOError('no known fixdur data directory exists on this machine')
def deg_2_px(visual_degree):
    """Convert a stimulus size in visual degrees to screen pixels.

    Geometry is hard-coded for this setup: 1080-px-high monitor with
    0.276 mm pixels, viewed from 80 cm.
    """
    vertical_res = 1080                      # vertical resolution of the monitor (px)
    screen_h_cm = (.276*vertical_res)/10     # monitor height in cm (1px = 0.276mm)
    view_dist_cm = 80                        # distance between monitor and participant (cm)
    # visual degrees spanned by a single pixel
    deg_per_px = degrees(atan2(.5*screen_h_cm, view_dist_cm)) / (.5*vertical_res)
    return visual_degree / deg_per_px
''' generize, save and return randomization for given subject'''
def randomization(subject, trial_time):
    """Generate, save and return the per-subject trial randomization matrix.

    Each row is (image, trial_type, num_bubbles, display_time); bubble
    display times are drawn from an exponential (mean 295 ms) until the
    cumulative time exceeds trial_time for each image. The matrix is also
    written to data/<subject>/rand_<subject>.npy.
    """
    # ratio of number of bubbles: 1=50, 2=25, 3 =12.5, 4=6.25, 5=3.125 -> normalization factor to get to 100%: x=100/sum(ratios)
    x=100/96.875
    # custom distribution for ratios:
    xk = np.arange(5)+1
    pk = (0.5*x,0.25*x,0.125*x,0.0625*x,0.03125*x)
    custm = stats.rv_discrete(name='custm', values=(xk, pk))
    # num_of_bubbles_ratio = [[i] for i in [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,4,4,5]]
    images = os.listdir(path_to_fixdur_files+'stimuli/single_bubble_images/')
    np.random.shuffle(images)
    # 1/4 of images get the 'all' (all bubbles at once) condition, 3/4 'seq'
    types = []
    for a in range(int(len(images)/4)):
        types.append('all')
    for a in range(int((len(images)/4)*3)):
        types.append('seq')
    random.shuffle(types)
    # dimensions of output array:
    trials = [] # image number
    trial_type = [] # all_bubbles or sequential
    num_bubbles = [] # number of bubbles
    disp_time = [] # time of bubble display
    #bubble_sums = []
    #fix_nums = []
    a = 0;
    for image in images:
        # reset counter
        time = 0
        #bubble_sum = 0
        #fix_num = 0
        while time<trial_time:
            # image
            trials = np.append(trials,image)
            # if new trial beginns
            try:
                if (time == 0):
                    trial_type = np.append(trial_type,types[0])
                    types.remove(types[0])
                    a = a+1
                # if we are still in the same trial
                else:
                    trial_type = np.append(trial_type,trial_type[-1])
            except:
                # NOTE(review): this bare except swallows every error and the
                # bare `IndexError` expression below is a no-op -- presumably
                # `except IndexError: pass` was intended (types[] exhausted).
                IndexError
            # num of bubbles '''
            num_bubble = custm.rvs(size=1)
            num_bubbles = np.append(num_bubbles,num_bubble[0])
            #bubble_sum = bubble_sum + num_bubble[0]
            # display time of bubble
            disp = scipy.random.exponential(295,1)
            disp_time = np.append(disp_time,int(disp))
            # increase counter
            if int(disp) == 0:
                disp = 1
            time = time + int(disp)
            #fix_num = fix_num + 1
        #bubble_sums.append(bubble_sum)
        #fix_nums.append(fix_num)
    # assemble the four columns into one (n, 4) matrix of strings
    trials = np.reshape(trials,(len(trials),1))
    trial_type = np.reshape(trial_type,(len(trial_type),1))
    num_bubbles = np.reshape(num_bubbles,(len(num_bubbles),1))
    disp_time = np.reshape(disp_time,(len(disp_time),1))
    trial_mat = np.append(trials,trial_type,axis=1)
    trial_mat = np.append(trial_mat,num_bubbles,axis=1)
    trial_mat = np.append(trial_mat,disp_time,axis=1)
    np.save(path_to_fixdur_code+'/data/'+str(subject)+'/rand_'+str(subject),trial_mat)
    return trial_mat
#get center bubble location relative to center of screen
def get_bubble_pos(bubble):
x,y = int(bubble.split('_',1)[1].split('_')[0]),int(bubble.split('_',1)[1].split('_')[1].split('.')[0])
x = x + 320 + 77 - 960
y = y + 60 + 77 - 540
return x,y
''' copied from Simons tool file'''
def slideshow(surf, ims):
    """Show a list of images one at a time, navigated with left/right arrows.

    surf: psychopy window; ims: image objects convertible via tostring().
    Right arrow advances (returns after the last image); left arrow goes back.
    """
    ims = [visual.SimpleImageStim(surf,im.tostring()) for im in ims]
    i = 0
    while True:
        ims[i].draw()
        surf.flip()
        #key = wait_for_key(keylist = [276, 275])
        key = event.waitKeys(keyList=['left', 'right'])
        if key == ['right']:
            # advance, or finish after the last slide
            if (i == len(ims)-1):
                return
            else:
                i += 1
        elif (key == ['left'] and i>0):
            i -= 1
def wait_for_key(keylist = None):
    """Block until a key is pressed, polling pygame's event queue.

    If *keylist* is given, only those key codes terminate the wait.
    Returns the pygame KEYDOWN event object.
    """
    while 1:
        for ev in pygame.event.get():
            if ev.type == pygame.KEYDOWN:
                # accept any key when no keylist was given
                if keylist is None or ev.key in keylist:
                    return ev
        pygame.time.delay(50)
'''display memory task, return displayed bubbles and decision'''
def memory_task(all_bubbles,loaded_bubbles,bubble_image,memory_image,surf):
    """Run the two-alternative forced-choice memory probe.

    Shows one bubble cut from the just-presented image (bubble_image) and
    one bubble from a different image, left/right of centre; the subject
    picks the seen one with the left/right arrow key.
    Returns [correct, left_bubble, right_bubble] where correct is True when
    the chosen side held the bubble from the presented image.
    (all_bubbles and loaded_bubbles are currently unused; kept for callers.)
    """
    correct = 'No valid answer yet'
    # positions for bubble display: one slot left, one right of centre
    positions = [(-150,0),(150,0)]
    pos_same = random.choice(positions)
    positions.remove(pos_same)
    pos_other = positions[0]
    # load a random bubble from the image that was just shown
    all_bubbles_same = os.listdir(path_to_fixdur_files+'stimuli/single_bubble_images/'+bubble_image)
    same_mem = random.choice(all_bubbles_same)
    same_mem_loc = [int(same_mem.split('_',1)[1].split('_')[0]),int(same_mem.split('_',1)[1].split('_')[1].split('.')[0])]
    # BUG FIX: the chosen filename is `same_mem`; the old code referenced an
    # undefined name `bubble` here, raising NameError on every call.
    same_mem_loaded = visual.SimpleImageStim(surf, image = path_to_fixdur_files+'stimuli/single_bubble_images/'+bubble_image+'/'+same_mem, pos=pos_same)
    # distractor bubble from another image
    bubble_mat = np.load(path_to_fixdur_code+'all_bubble_mat.npy')
    other_mem_loc = random.choice(bubble_mat)
    # make sure it's from another image
    while other_mem_loc[0] == bubble_image:
        other_mem_loc = random.choice(bubble_mat)
    # load distractor bubble
    other_mem_loaded = visual.SimpleImageStim(surf,path_to_fixdur_files+'stimuli/single_bubble_images/'+other_mem_loc[0]+'/'+other_mem_loc[1], pos=pos_other)
    memory_image.draw(surf)
    same_mem_loaded.draw(surf)
    other_mem_loaded.draw(surf)
    surf.flip()
    key = event.waitKeys(keyList=['left','right'])
    # correct iff the chosen side is the side holding the seen bubble
    if (((pos_same == (-150,0)) and (key == ['left'])) or
            ((pos_same == (150,0)) and (key == ['right']))):
        correct = True
    else:
        correct = False
    if pos_same == (-150,0):
        left_bubble = same_mem_loc
        right_bubble = other_mem_loc
    if pos_same == (150,0):
        left_bubble = other_mem_loc
        right_bubble = same_mem_loc
    return [correct,left_bubble,right_bubble]
def wait_for_saccade(el):
    """Watch the eye-tracker stream until one saccade completes or TRIAL_LENGTH elapses.

    el: pylink eye-tracker connection. Velocity is computed over a 3-sample
    window; onset threshold 70 deg/s, offset threshold 30 deg/s, with
    refractory periods against re-triggering. Fixation coordinates are
    tagged into the tracker metadata as they are found.
    Returns (fixbufferx, fixbuffery) with start and landing fixation, or
    ([-1,-1], [-1,-1]) on timeout.
    """
    start = pylink.currentTime()
    bufferx, buffery = deque(maxlen=3), deque(maxlen=3)
    saccade = False
    lastFixTime = -50
    lastSaccadeOnset = -20
    fixbufferx = []
    fixbuffery = []
    while (pylink.currentTime() - start) < TRIAL_LENGTH:
        # Query which type of data is currently waiting in the pipe.
        i = el.getNextData()
        # If it is anything other than RAW_DATA (=200), start the next loop iteration
        if i!=200: continue
        lastSampleTime = pylink.currentTime()
        # current gaze position straight from the eye-tracker
        x, y = el.getNewestSample().getLeftEye().getGaze()
        if pylink.currentTime()-lastSampleTime > 15: # if no new sample points for too long, restart with fresh buffers
            bufferx, buffery = deque(maxlen=3), deque(maxlen=3)
            bufferx.append(x)
            buffery.append(y)
            continue
        bufferx.append(x)
        buffery.append(y)
        if len(fixbufferx)<1:
            fixbufferx.append(x)
            fixbuffery.append(y)
            el.trialmetadata("FIXATION", 0.0) # Take first sample as first fix.
            el.trialmetadata("FIXCOOX", x) # tag the fixation in the eye-tracking data
            el.trialmetadata("FIXCOOY", y)
        # Compute velocity in degrees per second
        v = np.mean(((np.diff(np.array(bufferx))**2+np.diff(np.array(buffery))**2)**.5) * TRACKING_FREQ)/float(PPD)
        ## Saccade onset
        if v > 70 and not saccade and (pylink.currentTime() - lastFixTime) > 50:
            lastSaccadeOnset = pylink.currentTime()
            saccade = True
            el.trialmetadata("SACCADE", v)
        ## Saccade offset
        if v < 30 and saccade and (pylink.currentTime() - lastSaccadeOnset) > 20:
            saccade = False
            lastFixTime = pylink.currentTime()
            el.trialmetadata("FIXATION", v)
            # Calculate the angle of the current saccade
            el.trialmetadata("FIXCOOX", x)
            el.trialmetadata("FIXCOOY", y)
            fixbufferx.append(x)
            fixbuffery.append(y)
            return fixbufferx,fixbuffery
    return [-1,-1],[-1,-1]
'''return displayed bubble closest to fixation'''
def get_fixated_bubble(used_bubble,fix_x,fix_y):
    """Return the displayed bubble whose centre lies closest to the fixation.

    Bubble coordinates are upper-left corners in stimulus space: +MAT/2
    moves to the bubble centre and +320/+60 converts to full-screen
    coordinates before comparing with the landing fixation (fix_x[1],
    fix_y[1]).
    """
    fx, fy = fix_x[1], fix_y[1]

    def dist_to_fix(b):
        # Euclidean distance from the bubble centre to the fixation point
        return sqrt(((b[0]+(MAT/2)+320) - fx)**2 + ((b[1]+(MAT/2)+60) - fy)**2)

    return min(used_bubble, key=dist_to_fix)
'''return bubble when fixation on bubble and velocity of saccade<30'''
def wait_for_fix(el,used_bubble):
    """Wait until gaze settles (<30 deg/s) inside one of the displayed bubbles.

    el: pylink eye-tracker connection; used_bubble: list of (x, y) bubble
    upper-left corners. Returns the fixated bubble, or a random bubble if
    none is fixated within TRIAL_LENGTH ms.
    """
    #print "-----"
    bufferx, buffery = deque(maxlen=3), deque(maxlen=3)
    start = pylink.currentTime()
    while (pylink.currentTime() - start) < TRIAL_LENGTH:
        i = el.getNextData()
        # If it is anything other than RAW_DATA (=200), start the next loop iteration
        if i!=200: continue
        lastSampleTime = pylink.currentTime()
        # current gaze position straight from the eye-tracker
        x, y = el.getNewestSample().getLeftEye().getGaze()
        bufferx.append(x)
        buffery.append(y)
        # Compute velocity in degrees per second
        v = np.mean(((np.diff(np.array(bufferx))**2+np.diff(np.array(buffery))**2)**.5) * TRACKING_FREQ)/float(PPD)
        if v<30:
            for bubble in used_bubble:
                #add 77 to get center of bubble, add 320/60 for higher monitor resolution
                # if ((sqrt((((bubble[0]+(MAT/2)+320)-x)**2) + (((bubble[1]+(MAT/2)+60)-y)**2))) < 77):
                #     print "Bubble Detected, current speed: %f - %f"%(v,lastSampleTime)
                if ((sqrt((((bubble[0]+(MAT/2)+320)-x)**2) + (((bubble[1]+(MAT/2)+60)-y)**2))) < 77):
                    return bubble
    return random.choice(used_bubble) #if no fixation on bubble during trial_length
'''
predict saccade end point
return bubble if in distance of diameter(MAT) of bubble center
'''
def sacc_detection(el,used_bubble):
    """Detect a saccade toward one of the displayed bubbles and return it.

    el: pylink eye-tracker connection; used_bubble: list of (x, y) bubble
    upper-left corners. Tracks a 3-sample velocity estimate; records
    saccade start/end coordinates and velocities as trial metadata, and
    returns a bubble as soon as a sample falls inside (or, after saccade
    onset, near) a bubble. Falls back to a random bubble after
    TRIAL_LENGTH ms.
    """
    #buffer for x coordiante, y coordinate, velocity
    bufferx, buffery, bufferv = deque(maxlen=3), deque(maxlen=3), deque(maxlen=4)
    start = pylink.currentTime()
    saccade = 0
    #start_time = []
    while (pylink.currentTime() - start) < TRIAL_LENGTH:
        i = el.getNextData()
        # If it is anything other than RAW_DATA (=200), start the next loop iteration
        if i!=200: continue
        # current gaze position straight from the eye-tracker
        x, y = el.getNewestSample().getLeftEye().getGaze()
        bufferx.append(float(x))
        buffery.append(float(y))
        # Compute velocity in degrees per second
        bufferv.append(np.mean(((np.diff(np.array(bufferx))**2+np.diff(np.array(buffery))**2)**.5) * TRACKING_FREQ)/float(PPD))
        #saccade_onset
        if bufferv[-1] < 30:
            saccade = 0
            #check if sample already in next bubble
            for bubble in used_bubble:
                if ((sqrt((((bubble[0]+(MAT/2)+320)-x)**2) + (((bubble[1]+(MAT/2)+60)-y)**2))) < MAT/2):
                    el.trialmetadata('start_x', bufferx[-1])
                    el.trialmetadata('start_y', buffery[-1])
                    el.trialmetadata('start_velocity', bufferv[-1])
                    el.trialmetadata('end_x', bufferx[-1])
                    el.trialmetadata('end_y', buffery[-1])
                    el.trialmetadata('end_velocity', bufferv[-1])
                    el.trialmetadata('sacc_detection', 'start_in_bubble')
                    return bubble
        # saccade onset: velocity exceeds 70 deg/s while no saccade is in progress
        if saccade == 0 and bufferv[-1]>70:
            start_x = float(bufferx[-1])
            start_y = float(buffery[-1])
            #start_time = pylink.currentTime()
            saccade = 1
            el.trialmetadata('start_x', start_x)
            el.trialmetadata('start_y', start_y)
            el.trialmetadata('start_velocity', bufferv[-1])
            #continue
        '''
        #saccade end
        if start_time and np.all(np.diff(bufferv)<0):
            #if abs((start_x - bufferx[-1])) < 0.00000001:
            #    alpha = 3.1415926535897931 / 2 # pi/2 = 90deg
            #else:
            alpha = atan2((buffery[-1]-start_y),(bufferx[-1]-start_x))
            predLength = exp((log(bufferv[0]) - 4.6)/.55)*PPD
            predX = start_x + cos(alpha) * predLength
            predY = start_y + sin(alpha) * predLength
            el.trialmetadata('predX', predX)
            el.trialmetadata('predY', predY)
            el.trialmetadata('end_velocity', bufferv)
            start_time = []
            for bubble in used_bubble:
                if ((sqrt((((bubble[0]+(MAT/2)+320)-predX)**2) + (((bubble[1]+(MAT/2)+60)-predY)**2))) < MAT):
                    print "predicted bubble found"
                    return bubble
        '''
        # saccade deceleration: velocity dropped below 50 deg/s during a saccade
        if bufferv[-1] < 50 and saccade:
            for bubble in used_bubble:
                # check if sample near a bubble centre (within 2/3 of MAT)
                if ((sqrt((((bubble[0]+(MAT/2)+320)-x)**2) + (((bubble[1]+(MAT/2)+60)-y)**2))) < 2*MAT/3):
                    el.trialmetadata('end_x', bufferx[-1])
                    el.trialmetadata('end_y', buffery[-1])
                    el.trialmetadata('end_velocity', bufferv[-1])
                    el.trialmetadata('sacc_detection', 'pred_in_bubble')
                    return bubble
    #check if sample near bubble (in distance of 2 * radius MAT/2)
    #print "random bubble returned"
    el.trialmetadata('sacc_detection', 'random')
    return random.choice(used_bubble) #if no prediction on bubble during trial_length
# Resolve the data/code directories once at import time.
path_to_fixdur_files, path_to_fixdur_code = paths()

def debug_time(dispstr,start):
    # Print elapsed milliseconds since *start* (a pygame tick count),
    # labelled with *dispstr*. (Python 2 print statement.)
    print "%s : %.2f"%(dispstr,pygame.time.get_ticks()-start)
| [
"behinger@uos.de"
] | behinger@uos.de |
6c68f9b39b16ce655d14903518141908ac505150 | 9be2ec73d639d3a481dcec50a0496e529de3b105 | /venv/Scripts/day7/lists.py | 0c2208c3e9f8b29e5973e67ac610511d21e1c354 | [] | no_license | huyangh/Learn-python-100-days | 3fd0a107da4d10db0f39db30c64ab610dcd8b123 | 923d5bbcaa28bafdb6b8d0a5d413c1ac17dba3f5 | refs/heads/master | 2022-11-06T04:56:30.809629 | 2020-06-20T07:24:59 | 2020-06-20T07:24:59 | 271,553,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,479 | py | """
Time : 2020/6/11 15:47
Author : huyangh
Demonstrates the use of lists
"""
import sys
def list_basic():
    """
    Demonstrate basic list operations
    """
    list1 = [1, 3, 5, 7, 100]
    print(list1) # [1, 3, 5, 7, 100]
    # the * operator repeats the list's elements
    list2 = ['hello'] * 3
    print(list2) # ['hello', 'hello', 'hello']
    # compute the list length (number of elements)
    print(len(list1)) # 5
    # subscript (index) access
    print(list1[0]) # 1
    print(list1[4]) # 100
    # print(list1[5]) # IndexError: list index out of range
    print(list1[-1]) # 100
    print(list1[-3]) # 5
    list1[2] = 300
    print(list1) # [1, 3, 300, 7, 100]
    # iterate over the list elements by index in a loop
    for index in range(len(list1)):
        print(list1[index])
    # iterate over the list elements directly with a for loop
    for elem in list1:
        print(elem)
    # wrapping the list with enumerate yields index and value together
    for index, elem in enumerate(list1):
        print(index, elem)
    print()
def list_methods():
    """
    Demonstrate adding and removing elements
    """
    list1 = [1, 3, 5, 7, 100]
    # add elements
    list1.append(200)
    list1.insert(1, 400)
    # merge two lists
    # list1.extend([1000, 2000])
    list1 += [1000, 2000]
    print(list1) # [1, 400, 3, 5, 7, 100, 200, 1000, 2000]
    print(len(list1)) # 9
    # use the membership operator to check whether the element is in the list,
    # and remove it only if it is present
    if 3 in list1:
        list1.remove(3)
    if 1234 in list1:
        list1.remove(1234)
    print(list1) # [1, 400, 5, 7, 100, 200, 1000, 2000]
    # remove elements at specific positions
    list1.pop(0)
    list1.pop(len(list1) - 1)
    print(list1) # [400, 5, 7, 100, 200, 1000]
    # remove all elements from the list
    list1.clear()
    print(list1) # []
def list_slice():
    """
    Demonstrate list slicing operations
    """
    fruits = ['grape', 'apple', 'strawberry', 'waxberry']
    fruits += ['pitaya', 'pear', 'mango']
    # list slice
    fruits2 = fruits[1:4]
    print(fruits2) # apple strawberry waxberry
    # a full slice copies the whole list
    fruits3 = fruits[:]
    print(fruits3) # ['grape', 'apple', 'strawberry', 'waxberry', 'pitaya', 'pear', 'mango']
    fruits4 = fruits[-3:-1]
    print(fruits4) # ['pitaya', 'pear']
    # a reverse slice yields a reversed copy of the list
    fruits5 = fruits[::-1]
    print(fruits5) # ['mango', 'pear', 'pitaya', 'waxberry', 'strawberry', 'apple', 'grape']
def list_sort():
    """
    Demonstrate list sorting operations
    """
    list1 = ['orange', 'apple', 'zoo', 'internationalization', 'blueberry']
    list2 = sorted(list1)
    # sorted() returns a sorted copy and does not modify the list passed in;
    # functions should, like sorted(), avoid side effects whenever possible
    list3 = sorted(list1, reverse=True)
    # the key keyword argument sorts by string length instead of the
    # default alphabetical order
    list4 = sorted(list1, key=len)
    print(list1)
    print(list2)
    print(list3)
    print(list4)
    # send the sort message to the list object to sort it in place
    list1.sort(reverse=True)
    print(list1)
def list_generator():
    """
    Demonstrate list comprehensions and generators
    """
    f = [x for x in range(1, 10)]
    print(f)
    f = [x + y for x in 'ABCDE' for y in '1234567']
    print(f)
    # create a list container with a list comprehension;
    # all elements are materialized immediately, so more memory is used
    f = [x ** 2 for x in range(1, 1000)]
    print(sys.getsizeof(f)) # number of bytes the object occupies in memory
    print(f)
    # note: the code below creates a generator object, not a list;
    # a generator yields the data without storing it, computing each value
    # on demand (which costs extra time instead of space)
    f = (x ** 2 for x in range(1, 1000))
    print(sys.getsizeof(f)) # unlike the comprehension, the generator stores no data
    print(f)
    for val in f:
        print(val, end=' ')
def fib(n):
    """
    Generator for the first *n* Fibonacci numbers (1, 1, 2, 3, 5, ...).

    :param n: how many terms of the sequence to yield
    """
    prev, curr = 0, 1
    count = 0
    while count < n:
        prev, curr = curr, prev + curr
        yield prev
        count += 1
def main():
    """Entry point: uncomment a demo to run it; defaults to printing fib(20)."""
    # list_basic()
    # list_methods()
    # list_slice()
    # list_sort()
    # list_generator()
    for val in fib(20):
        print(val)


if __name__ == '__main__':
    main()
| [
"huyangh@sina.com"
] | huyangh@sina.com |
fa8bc5d8dc16ab1a75229528b86ee19088cb0fbb | 675a3767def4356c44c01e75297cca352667a887 | /hw3/load.py | 599fd56336baaa89cdf0dec937dad5f34e7b771e | [] | no_license | XuKaze/NYCdatascience | c63d3f386abba50e4c7b78e95ea9120172ed2b2b | 8d20b19a42746d7c8b00eb350c35d239fa81b5ca | refs/heads/main | 2023-02-28T07:45:52.061059 | 2021-01-31T11:01:36 | 2021-01-31T11:01:36 | 320,242,082 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | import os
import numpy as np
import pickle
import random
'''
Note: This code is just a hint for people who are not familiar with text processing in python. There is no obligation to use this code, though you may if you like.
'''
def folder_list(path, label):
    '''
    Read every review file in the local folder *path* and tag each word
    list with *label* (+1 for positive, -1 for negative).

    Returns a list of labelled word lists.
    '''
    reviews = []
    for fname in os.listdir(path):
        words = read_data(os.path.join(path, fname))
        words.append(label)
        reviews.append(words)
    return reviews
def read_data(file):
    '''
    Read a review file into a list of cleaned word strings.

    The text is split on spaces, punctuation/symbol characters are stripped
    from each token, and empty tokens are dropped.
    Example:
    ["it's", 'a', 'curious', 'thing', "i've", 'found', ...]
    '''
    symbols = '${}()[].,:;+-*/&|<>=~" '
    # BUG FIX: close the file (the old code leaked the handle) and return a
    # real list -- callers such as folder_list() call .append() on the
    # result, which fails on the lazy filter object Python 3 returns.
    with open(file) as f:
        lines = f.read().split(' ')
    table = str.maketrans("", "", symbols)
    return [w for w in (token.translate(table).strip() for token in lines) if w]
###############################################
######## YOUR CODE STARTS FROM HERE. ##########
###############################################
def shuffle_data():
    '''
    Load the positive (label 1) and negative (label -1) reviews,
    concatenate and shuffle them, and return the shuffled list.

    pos_path is where you save positive review data.
    neg_path is where you save negative review data.
    '''
    pos_path = "data/pos"
    neg_path = "data/neg"
    pos_review = folder_list(pos_path,1)
    neg_review = folder_list(neg_path,-1)
    review = pos_review + neg_review
    random.shuffle(review)
    # BUG FIX: the shuffled list was built and then discarded (the function
    # implicitly returned None); return it so callers can use it.
    # TODO: optionally persist the result with pickle, as the original
    # comment suggested (https://wiki.python.org/moin/UsingPickle).
    return review
| [
"46765635+XuKaze@users.noreply.github.com"
] | 46765635+XuKaze@users.noreply.github.com |
1525fa01ca88e86a1491f6968ca7daf25bda962c | c086a38a366b0724d7339ae94d6bfb489413d2f4 | /PythonEnv/Lib/site-packages/win32com/server/exception.py | f84cccdf5e349025e91ae2f9bdf4e87a0bb9e8d9 | [] | no_license | FlowkoHinti/Dionysos | 2dc06651a4fc9b4c8c90d264b2f820f34d736650 | d9f8fbf3bb0713527dc33383a7f3e135b2041638 | refs/heads/master | 2021-03-02T01:14:18.622703 | 2020-06-09T08:28:44 | 2020-06-09T08:28:44 | 245,826,041 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,452 | py | """Exception Handling
Exceptions
To better support COM exceptions, the framework allows for an instance to be
raised. This instance may have a certain number of known attributes, which are
translated into COM exception details.
This means, for example, that Python could raise a COM exception that includes details
on a Help file and location, and a description for the user.
This module provides a class which provides the necessary attributes.
"""
import sys, pythoncom
# Note that we derive from com_error, which derives from exceptions.Exception
# Also note that we dont support "self.args", as we dont support tuple-unpacking
class COMException(pythoncom.com_error):
    """A COM-aware exception understood by the win32com server framework.

    When the framework catches an instance of this class it reads the
    well-known attributes (scode, description, source, helpfile,
    helpcontext) and forwards them to the caller as rich COM error
    information, either via COM exceptions or ISupportErrorInfo.
    """

    def __init__(self, description=None, scode=None,
                 source=None, helpfile=None, helpContext=None,
                 desc=None, hresult=None):
        """Build the exception.

        description / desc -- human readable description of the error.
        scode / hresult -- integer result code returned to the server;
            the pythoncom framework defaults it to DISP_E_EXCEPTION.
        source -- string identifying the source of the error.
        helpfile / helpContext -- help file location and integer context.
        """
        code = scode or hresult
        # Convert a plain WIN32 error into an HRESULT, but never remap
        # S_FALSE (1).  This mirrors HRESULT_FROM_WIN32().
        if code and code != 1 and -32768 <= code < 32768:
            code = -2147024896 | (code & 0x0000FFFF)
        self.scode = code
        self.description = description or desc
        if not self.description:
            if code == 1:
                self.description = "S_FALSE"
            elif code:
                self.description = pythoncom.GetScodeString(code)
        self.source = source
        self.helpfile = helpfile
        self.helpcontext = helpContext
        # todo - fill in the exception value
        pythoncom.com_error.__init__(self, code, self.description, None, -1)

    def __repr__(self):
        return "<COM Exception - scode={}, desc={}>".format(self.scode, self.description)
# Old name for the COMException class.
# Do NOT use the name Exception, as it is now a built-in
# COMException is the new, official name.
# Kept only for backwards compatibility; note that it shadows the
# builtin `Exception` inside this module's namespace.
Exception = COMException
def IsCOMException(t=None):
    """Return true if *t* is a COM error type.

    When *t* is omitted, the type of the exception currently being handled
    (sys.exc_info()[0]) is tested instead.  Any subclass of
    pythoncom.com_error counts, which includes COMException.
    """
    if t is None:
        t = sys.exc_info()[0]
    try:
        return issubclass(t, pythoncom.com_error)
    except TypeError: # 1.5 in -X mode?
        # Bug fix: this previously read `pythoncon.com_error` (a typo),
        # so the legacy string-exception fallback raised NameError
        # instead of returning a result.
        return t is pythoncom.com_error
def IsCOMServerException(t=None):
    """Return a true value if *t* (default: the exception currently being
    handled) is COMException or a subclass of it; 0 otherwise."""
    exc_type = t if t is not None else sys.exc_info()[0]
    try:
        return issubclass(exc_type, COMException)
    except TypeError:  # legacy string exception
        return 0
| [
"="
] | = |
a92c0b47b1e9b1abfaeb5b067f6adb15500dcacc | 615185b5584e3031e3b78498104e52adeb0c49d6 | /example/mpii_kd.py | 683121b3221891c75e3e53c66adec26e5ef1595c | [] | no_license | Alixing/Fast_Human_Pose_Estimation_Pytorch | 55548c195a36f37238829c2a5f13824bbbaf8d15 | 2fcec179084d47b585ba0a1e2b46d36a9f1576fa | refs/heads/master | 2020-04-18T17:46:38.492222 | 2019-01-25T11:32:50 | 2019-01-25T11:32:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,672 | py | from __future__ import print_function, absolute_import
import os
import argparse
import time
import matplotlib.pyplot as plt
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torchvision.datasets as datasets
from pose import Bar
from pose.utils.logger import Logger, savefig
from pose.utils.evaluation import accuracy, AverageMeter, final_preds
from pose.utils.misc import save_checkpoint, save_pred, adjust_learning_rate
from pose.utils.osutils import mkdir_p, isfile, isdir, join
from pose.utils.imutils import batch_with_heatmap
from pose.utils.transforms import fliplr, flip_back
import pose.models as models
import pose.datasets as datasets
# Lowercase, public, callable entries of pose.models: the set of valid
# --arch choices (e.g. 'hg' for the stacked hourglass network).
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
# MPII joint indices scored by the accuracy metric -- presumably the
# standard PCKh joint subset; TODO confirm against pose.utils.evaluation.
idx = [1,2,3,4,5,6,11,12,15,16]
# Best validation accuracy observed so far; updated inside main().
best_acc = 0
def load_teacher_network(arch, stacks, blocks, t_checkpoint):
    """Build the teacher network and restore its trained weights.

    The teacher is always constructed with 16 keypoint classes and the
    full (non-mobile) blocks, wrapped in DataParallel on the GPU, and
    switched to eval mode so it only supplies soft targets during
    distillation.
    """
    teacher = models.__dict__[arch](num_stacks=stacks, num_blocks=blocks,
                                    num_classes=16, mobile=False)
    teacher = torch.nn.DataParallel(teacher).cuda()
    state = torch.load(t_checkpoint)
    teacher.load_state_dict(state['state_dict'])
    teacher.eval()
    return teacher
def main(args):
    """Train (or evaluate) a student hourglass network on MPII with
    knowledge distillation from a pre-trained teacher.

    Builds teacher and student, optionally resumes from a checkpoint,
    then per epoch: train, validate, log, and checkpoint, tracking the
    best validation accuracy in the module-global `best_acc`.
    """
    global best_acc

    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # load teacher network (frozen; provides soft targets for distillation)
    print("==> creating teacher model '{}', stacks={}, blocks={}".format(args.arch, args.teacher_stack, args.blocks))
    tmodel = load_teacher_network(args.arch, args.teacher_stack, args.blocks, args.teacher_checkpoint)

    # create model (the student that is actually optimised)
    print("==> creating model '{}', stacks={}, blocks={}".format(args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks, num_blocks=args.blocks, num_classes=args.num_classes, mobile=args.mobile)
    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    # NOTE(review): MSELoss(size_average=True) is the deprecated spelling of
    # reduction='mean' on modern PyTorch -- confirm target torch version.
    criterion = torch.nn.MSELoss(size_average=True).cuda()

    optimizer = torch.optim.RMSprop(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    title = 'mpii-' + args.arch
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'), title=title, resume=True)
        else:
            # NOTE(review): when the resume path is missing, `logger` stays
            # unbound and the training loop below raises NameError --
            # confirm this fail-fast behaviour is intended.
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

    cudnn.benchmark = True
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))

    # Data loading code: heatmap resolution is 1/4 of the input resolution
    train_loader = torch.utils.data.DataLoader(
        datasets.Mpii('data/mpii/mpii_annotations.json', 'data/mpii/images',
                      sigma=args.sigma, label_type=args.label_type,
                      inp_res=args.in_res, out_res=args.in_res//4),
        batch_size=args.train_batch, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.Mpii('data/mpii/mpii_annotations.json', 'data/mpii/images',
                      sigma=args.sigma, label_type=args.label_type, train=False,
                      inp_res=args.in_res, out_res=args.in_res // 4),
        batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        # evaluation-only mode: run validation once and dump predictions
        print('\nEvaluation only')
        loss, acc, predictions = validate(val_loader, model, criterion, args.num_classes, args.in_res//4, args.debug, args.flip)
        save_pred(predictions, checkpoint=args.checkpoint)
        return

    lr = args.lr
    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule, args.gamma)
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # decay sigma (narrows the ground-truth heatmap peaks over time)
        if args.sigma_decay > 0:
            train_loader.dataset.sigma *= args.sigma_decay
            val_loader.dataset.sigma *= args.sigma_decay

        # train for one epoch
        train_loss, train_acc = train(train_loader, model, tmodel, criterion, optimizer, args.kdloss_alpha, args.debug, args.flip)

        # evaluate on validation set
        valid_loss, valid_acc, predictions = validate(val_loader, model, criterion, args.num_classes,
                                                      args.in_res//4, args.debug, args.flip)

        # append logger file
        logger.append([epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])

        # remember best acc and save checkpoint
        is_best = valid_acc > best_acc
        best_acc = max(valid_acc, best_acc)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc': best_acc,
            'optimizer' : optimizer.state_dict(),
        }, predictions, is_best, checkpoint=args.checkpoint)

    logger.close()
    logger.plot(['Train Acc', 'Val Acc'])
    savefig(os.path.join(args.checkpoint, 'log.eps'))
def train(train_loader, model, tmodel, criterion, optimizer, kdloss_alpha, debug=False, flip=True):
    """Run one epoch of knowledge-distillation training.

    The student `model` is optimised against a weighted sum of two MSE
    losses, each summed over every hourglass stack: one against the
    ground-truth heatmaps and one against the detached output of the
    teacher `tmodel`:

        total_loss = kdloss_alpha * tsloss + (1 - kdloss_alpha) * gtloss

    Args:
        train_loader: DataLoader yielding (inputs, target, meta) batches.
        model: student network, updated in place.
        tmodel: teacher network used for soft targets (no gradients).
        criterion: per-stack loss module (MSE).
        optimizer: optimizer for the student's parameters.
        kdloss_alpha: weight of the teacher-student loss term.
        debug: if True, visualise ground-truth / prediction / teacher maps.
        flip: unused here; kept for signature symmetry with validate().

    Returns:
        (average total loss, average accuracy) for the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()    # combined distillation loss
    tslosses = AverageMeter()  # teacher-vs-student loss
    gtlosses = AverageMeter()  # ground-truth loss
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()

    # Debug visualisation handles.  Bug fix: `pred_teacher` was previously
    # referenced without ever being initialised and only avoided a
    # NameError through boolean short-circuiting; initialise all three.
    gt_win, pred_win, pred_teacher = None, None, None
    bar = Bar('Processing', max=len(train_loader))
    for i, (inputs, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input_var = torch.autograd.Variable(inputs.cuda())
        # Bug fix: `async=True` is a SyntaxError on Python >= 3.7 (async is
        # a keyword); PyTorch renamed the argument to `non_blocking`.
        target_var = torch.autograd.Variable(target.cuda(non_blocking=True))

        # compute student output; score_map is the last stack's heatmaps
        output = model(input_var)
        score_map = output[-1].data.cpu()

        # compute teacher network output (detached: no gradient to teacher)
        toutput = tmodel(input_var)
        toutput = toutput[-1].detach()

        # lmse : student vs ground truth, summed over all stacks
        gtloss = criterion(output[0], target_var)
        for j in range(1, len(output)):
            gtloss += criterion(output[j], target_var)

        # loss from teacher, student vs teacher, summed over all stacks
        tsloss = criterion(output[0], toutput)
        for j in range(1, len(output)):
            tsloss += criterion(output[j], toutput)

        total_loss = kdloss_alpha * tsloss + (1 - kdloss_alpha) * gtloss
        acc = accuracy(score_map, target, idx)

        if debug:  # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            teacher_batch_img = batch_with_heatmap(inputs, toutput)
            if not gt_win or not pred_win or not pred_teacher:
                ax1 = plt.subplot(131)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(132)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
                ax2 = plt.subplot(133)
                ax2.title.set_text('teacher')
                pred_teacher = plt.imshow(teacher_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
                pred_teacher.set_data(teacher_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        gtlosses.update(gtloss.item(), inputs.size(0))
        tslosses.update(tsloss.item(), inputs.size(0))
        losses.update(total_loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} ' \
                     '| Loss: {loss:.4f} | TsLoss:{tsloss:.4f}| GtLoss:{gtloss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            tsloss=tslosses.avg,
            gtloss=gtlosses.avg,
            acc=acces.avg
        )
        bar.next()
    bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, num_classes, out_res, debug=False, flip=True):
    """Evaluate the model on the validation set.

    Args:
        val_loader: DataLoader yielding (inputs, target, meta) batches.
        model: network to evaluate (switched to eval mode).
        criterion: per-stack loss module, used only for reporting.
        num_classes: number of keypoints (sizes the predictions tensor).
        out_res: heatmap resolution used when decoding coordinates.
        debug: if True, visualise ground-truth vs predicted heatmaps.
        flip: if True, average the score map with the response of the
            horizontally flipped input (test-time augmentation).

    Returns:
        (average loss, average accuracy, predictions) where predictions is
        a (num_samples, num_classes, 2) tensor of keypoint coordinates in
        the original image frame.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions for the whole dataset, written at meta['index'] positions
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    for i, (inputs, target, meta) in enumerate(val_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # Bug fix: `async=True` is a SyntaxError on Python >= 3.7 (async is
        # a keyword); PyTorch renamed the argument to `non_blocking`.
        target = target.cuda(non_blocking=True)

        # NOTE(review): Variable(volatile=True) is a no-op on PyTorch >= 0.4;
        # wrapping the loop in `with torch.no_grad():` is the modern form.
        input_var = torch.autograd.Variable(inputs.cuda(), volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        # compute output; score_map is the last stack's heatmaps
        output = model(input_var)
        score_map = output[-1].data.cpu()

        if flip:
            # test-time augmentation: run the mirrored image and add its
            # un-mirrored response onto the score map
            flip_input_var = torch.autograd.Variable(
                torch.from_numpy(fliplr(inputs.clone().numpy())).float().cuda(),
                volatile=True
            )
            flip_output_var = model(flip_input_var)
            flip_output = flip_back(flip_output_var[-1].data.cpu())
            score_map += flip_output

        # intermediate-supervision loss summed over all stacks
        loss = 0
        for o in output:
            loss += criterion(o, target_var)
        acc = accuracy(score_map, target.cpu(), idx)

        # generate predictions in original-image coordinates
        preds = final_preds(score_map, meta['center'], meta['scale'], [out_res, out_res])
        for n in range(score_map.size(0)):
            predictions[meta['index'][n], :, :] = preds[n, :, :]

        if debug:
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            if not gt_win or not pred_win:
                plt.subplot(121)
                gt_win = plt.imshow(gt_batch_img)
                plt.subplot(122)
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1,
            size=len(val_loader),
            data=data_time.val,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=acces.avg
        )
        bar.next()
    bar.finish()
    return losses.avg, acces.avg, predictions
if __name__ == '__main__':
    # Command-line interface.  The description string is inherited from the
    # upstream PyTorch ImageNet example; this script trains MPII pose.
    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
    # Model structure
    parser.add_argument('--arch', '-a', metavar='ARCH', default='hg',
                        choices=model_names,
                        help='model architecture: ' +
                            ' | '.join(model_names) +
                            ' (default: resnet18)')
    parser.add_argument('-s', '--stacks', default=8, type=int, metavar='N',
                        help='Number of hourglasses to stack')
    parser.add_argument('--features', default=256, type=int, metavar='N',
                        help='Number of features in the hourglass')
    parser.add_argument('-b', '--blocks', default=1, type=int, metavar='N',
                        help='Number of residual modules at each location in the hourglass')
    parser.add_argument('--num-classes', default=16, type=int, metavar='N',
                        help='Number of keypoints')
    parser.add_argument('--mobile', default=False, type=bool, metavar='N',
                        help='use depthwise convolution in bottneck-block')
    # Training strategy
    parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--epochs', default=90, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('--train-batch', default=6, type=int, metavar='N',
                        help='train batchsize')
    parser.add_argument('--test-batch', default=6, type=int, metavar='N',
                        help='test batchsize')
    parser.add_argument('--lr', '--learning-rate', default=2.5e-4, type=float,
                        metavar='LR', help='initial learning rate')
    parser.add_argument('--momentum', default=0, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=0, type=float,
                        metavar='W', help='weight decay (default: 0)')
    parser.add_argument('--schedule', type=int, nargs='+', default=[60, 90],
                        help='Decrease learning rate at these epochs.')
    parser.add_argument('--gamma', type=float, default=0.1,
                        help='LR is multiplied by gamma on schedule.')
    # Data processing
    parser.add_argument('-f', '--flip', dest='flip', action='store_true',
                        help='flip the input during validation')
    parser.add_argument('--sigma', type=float, default=1,
                        help='Groundtruth Gaussian sigma.')
    parser.add_argument('--sigma-decay', type=float, default=0,
                        help='Sigma decay rate for each epoch.')
    parser.add_argument('--label-type', metavar='LABELTYPE', default='Gaussian',
                        choices=['Gaussian', 'Cauchy'],
                        help='Labelmap dist type: (default=Gaussian)')
    parser.add_argument('--in_res', default=256, type=int,
                        choices=[256, 192],
                        help='input resolution for network')
    # Knowledge-distillation options: teacher weights, teacher depth and
    # the weighting between teacher-student and ground-truth losses.
    parser.add_argument('--teacher_checkpoint', required=True, type=str,
                        help='teacher network')
    parser.add_argument('--teacher_stack', default=8, type=int,
                        help='teacher network stack')
    parser.add_argument('--kdloss_alpha', default=0.5, type=float,
                        help='weight of kdloss')
    # Miscs
    parser.add_argument('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
                        help='path to save checkpoint (default: checkpoint)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true',
                        help='show intermediate results')
main(parser.parse_args()) | [
"yuanyuan.li85@gmail.com"
] | yuanyuan.li85@gmail.com |
ca07900e223be5a28c11b92692e858889ed02102 | af26b01d2f308ece3bcd7124d65f854fc0d29a76 | /const.py | 00ff1e5880f26786b6771204117107738557043f | [] | no_license | Xm798/kotlin2docset | ec0f01078aadb9e5b5f57cfe11afa86ad7c8ca51 | c40e4a8d4b093daeae3e7ca1f63cc948ad653f85 | refs/heads/master | 2023-07-19T05:19:15.988644 | 2021-05-05T21:22:43 | 2021-05-05T21:22:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | WEB_DOCS_URL = 'https://kotlinlang.org/api/latest/jvm/stdlib/index.html'
# SQLite search index that Dash/Zeal read to resolve docset entries.
DATABASE_PATH = "kotlin.docset/Contents/Resources/docSet.dsidx"
# Bundled static assets copied into the generated docset.
STATIC_ASSET_ICON_PATH = "./static/icon.ico"
STATIC_ASSET_PLIST_PATH = "./static/Info.plist"
# Destination folders inside the kotlin.docset bundle.
DOCSET_DOCUMENT_PATH = "kotlin.docset/Contents/Resources/Documents/"
DOCSET_PLIST_PATH = "kotlin.docset/Contents/"
DOCSET_ICON_PATH = "kotlin.docset/"
| [
"igorkurek96@gmail.com"
] | igorkurek96@gmail.com |
2f664839edc5b9cc1165a9ba2088d827fb8ab9da | 316632e975cd61d8f7868bba5e28deb95d242944 | /lib/movie.py | 07a7aaa42110af8518d853cf4f4591692fd1f032 | [] | no_license | HumasLin/RingDNA_coding_challenge | 62fefb581da67d8bfba820c14935af921759880e | 5e7a56609f76f1c9e8429f207f59e169568bb482 | refs/heads/main | 2023-05-09T23:11:15.261290 | 2021-05-21T21:25:37 | 2021-05-21T21:25:37 | 369,649,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | from lib.utils import *
""" Define the class of movie """
class Movie:
    """A rentable movie identified by its title and a pricing category.

    ``price_code`` must be a key of ``logic["amount"]`` (from lib.utils);
    any other value raises ValueError at construction time.
    """

    def __init__(self, title: str, price_code: int):
        self.title = title
        # avoid invalid price code
        if price_code not in logic["amount"]:
            raise ValueError("Movie type doesn't exist!")
        self.price_code = price_code

    def set_price_code(self, price_code: int):
        """Change the movie's pricing category.

        Bug fix: the original signature omitted ``self``, so calling this
        method on an instance raised TypeError.  NOTE(review): unlike
        __init__, no validation against logic["amount"] happens here --
        confirm whether that is intended.
        """
self.price_code = price_code | [
"humaslin97@hotmail.com"
] | humaslin97@hotmail.com |
fbd25b29bc32a6570a96d03a9301b261600cabff | f92ae857495064956c039d61f006cde8763dbcfe | /api/views.py | 5ed6c8c869b166d250ca914ceddc6ad70d00a2f3 | [] | no_license | jona-young/django_react | bcfda24a21c9fa1493ee72ab5012092782d4e69e | d43d45930938ccedcd6bc482aed6f2346b0df8d5 | refs/heads/master | 2022-11-26T06:42:29.903611 | 2020-08-11T05:02:08 | 2020-08-11T05:02:08 | 286,154,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Task
from .serializers import TaskSerializer
# Create your views here.
@api_view(['GET'])
def api_overview(request):
    """Describe the task endpoints exposed by this API."""
    endpoints = {
        'List': '/task-list/',
        'Detail View': '/task-detail/<str:pk>/',
        'Create': '/task-create/',
        'Update': '/task-update/<str:pk>/',
        'Delete': '/task-delete/<str:pk>/',
    }
    return Response(endpoints)
@api_view(['GET'])
def task_list(request):
    """Return every Task, serialized as a list."""
    queryset = Task.objects.all()
    return Response(TaskSerializer(queryset, many=True).data)
@api_view(['GET'])
def task_detail(request, pk):
    """Return a single Task by primary key.

    Improvement: an unknown pk used to raise an unhandled
    Task.DoesNotExist (HTTP 500); it now yields a 404 response.
    """
    try:
        task = Task.objects.get(id=pk)
    except Task.DoesNotExist:
        return Response({'detail': 'Task not found.'}, status=404)
    serializer = TaskSerializer(task, many=False)
    return Response(serializer.data)
@api_view(['POST'])
def task_create(request):
    """Create a Task from the request payload.

    Improvement: an invalid payload used to return HTTP 200 echoing the
    unsaved input; it now returns 400 with the validation errors.
    """
    serializer = TaskSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors, status=400)
@api_view(['POST'])
def task_update(request, pk):
    """Update an existing Task from the request payload.

    Improvements: an unknown pk now yields 404 (was an unhandled
    Task.DoesNotExist / HTTP 500) and an invalid payload yields 400 with
    the validation errors (was a 200 echoing the input).
    """
    try:
        task = Task.objects.get(id=pk)
    except Task.DoesNotExist:
        return Response({'detail': 'Task not found.'}, status=404)
    serializer = TaskSerializer(instance=task, data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors, status=400)
@api_view(['DELETE'])
def task_delete(request, pk):
    """Delete a Task by primary key.

    Improvement: an unknown pk used to raise an unhandled
    Task.DoesNotExist (HTTP 500); it now yields a 404 response.
    """
    try:
        task = Task.objects.get(id=pk)
    except Task.DoesNotExist:
        return Response({'detail': 'Task not found.'}, status=404)
    task.delete()
    return Response('Item Successfully deleted!')
| [
"young.jon@icloud.com"
] | young.jon@icloud.com |
f1de4f284f6ae6dcbf0e216dae4bd4020b7fe948 | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/Mastering-Machine-Learning-scikit-learn/NumPy-Cookbook/NumPy Cookbook 2nd Edition_CodeBundle/Final Code/0945OS_05_Final Code/ch5code/sobel.py | 7a60c93500bba9e0a6d9825f564f9b66bfa7ba43 | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 623 | py | import scipy
import scipy.misc  # bug fix: scipy.misc is used below but was never imported
import scipy.ndimage
import matplotlib.pyplot as plt

# Demo: apply Sobel edge filters to a sample image and show a 2x2 grid.
# NOTE(review): scipy.misc.lena() was removed in SciPy 1.0; on modern SciPy
# substitute scipy.misc.ascent() / scipy.datasets.ascent().
lena = scipy.misc.lena()

plt.subplot(221)
plt.imshow(lena)
plt.title('Original')
plt.axis('off')

# Sobel X filter (gradient along axis 0)
sobelx = scipy.ndimage.sobel(lena, axis=0, mode='constant')
plt.subplot(222)
plt.imshow(sobelx)
plt.title('Sobel X')
plt.axis('off')

# Sobel Y filter (gradient along axis 1)
sobely = scipy.ndimage.sobel(lena, axis=1, mode='constant')
plt.subplot(223)
plt.imshow(sobely)
plt.title('Sobel Y')
plt.axis('off')

# Default Sobel filter
default = scipy.ndimage.sobel(lena)
plt.subplot(224)
plt.imshow(default)
plt.title('Default Filter')
plt.axis('off')

plt.show()
| [
"GreenJedi@protonmail.com"
] | GreenJedi@protonmail.com |
ecfe77500cfc272df2fb4419bc0d4d2ae359e5c6 | c0289248e92097f7225e641c3dbdc080b5e4a9f6 | /project/apps/courses/migrations/0001_initial.py | 2d257c104c65850bc7147f803b2eb89ef4d33342 | [] | no_license | q897586834/MXOnline | fc37f0b97806c1b7277b10ed6fd9f90d01529fd5 | 90d1c604ba87ce044e453dbb6f87179ce414f8b7 | refs/heads/master | 2020-04-06T09:02:22.822156 | 2018-11-20T06:59:47 | 2018-11-20T06:59:47 | 157,326,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,811 | py | # Generated by Django 2.1.1 on 2018-11-15 16:48
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the courses app: Course, CourseResource, Lesson
    and Video tables.

    Bug fix: Course.detail was declared as
    ``models.TextField(max_length='课程详情')`` -- max_length must be a
    positive integer, and the string clearly was the intended
    verbose_name (every sibling field follows that pattern), so it is
    now passed as ``verbose_name``.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='课程名')),
                ('desc', models.CharField(max_length=300, verbose_name='课程描述')),
                ('detail', models.TextField(verbose_name='课程详情')),
                ('degree', models.CharField(choices=[('cj', '初级'), ('zj', '中级'), ('gj', '高级')], max_length=2)),
                ('learn_times', models.IntegerField(default=0, verbose_name='学习时长(分钟数)')),
                ('students', models.IntegerField(default=0, verbose_name='学习人数')),
                ('fav_nums', models.IntegerField(default=0, verbose_name='收藏人数')),
                ('image', models.ImageField(upload_to='courses/%Y/%m', verbose_name='封面')),
                ('click_nums', models.IntegerField(default=0, verbose_name='课程点击数')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '课程',
                'verbose_name_plural': '课程',
            },
        ),
        migrations.CreateModel(
            name='CourseResource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='章节名')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
                ('download', models.FileField(upload_to='courses/%Y/%m', verbose_name='封面')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='课程')),
            ],
            options={
                'verbose_name': '课程资源',
                'verbose_name_plural': '课程资源',
            },
        ),
        migrations.CreateModel(
            name='Lesson',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='章节名')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='课程')),
            ],
            options={
                'verbose_name': '章节',
                'verbose_name_plural': '章节',
            },
        ),
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='视频名')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
                ('lesson', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Lesson', verbose_name='章节')),
            ],
            options={
                'verbose_name': '视频',
                'verbose_name_plural': '视频',
            },
        ),
    ]
| [
"897586834@qq.com"
] | 897586834@qq.com |
bc72caa9c796f2dc83450ced23422f91699e8492 | 9a32ef47da65f9776c8c64b95ececf716b30e11e | /tile base template/Bullet.py | 771dd038a6277fd557d1753dfb1e38abbb3caac5 | [] | no_license | Ericsb52/2021-pygame-projects | 900ca54a81a0131e11966fef520c8b85868eb595 | 2b338c8b4c2ffbfbd0a89f0a65268f37ffdf9b58 | refs/heads/main | 2023-04-19T04:36:04.046921 | 2021-05-04T13:22:02 | 2021-05-04T13:22:02 | 356,379,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # Pygame template - skeleton for a new pygame project
import pygame as pg
import random
import os
from settings import *
# Shorthand for pygame's 2D vector type, used for positions and velocities.
vec = pg.math.Vector2
class Bullet(pg.sprite.Sprite):
    """A projectile sprite that flies until it hits a wall or expires."""

    def __init__(self, game, pos, dir):
        """Spawn a bullet at *pos* travelling roughly along *dir*."""
        self.groups = game.all_sprites, game.bullet_group
        super().__init__(self.groups)
        self.g = game
        self.image = self.g.bullet_img
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.hit_rect = self.rect
        self.pos = vec(pos)
        self.rect.center = self.pos
        # Random angular deviation simulates gun inaccuracy.
        self.spread = random.uniform(-GUN_SPREAD, GUN_SPREAD)
        self.vel = dir.rotate(self.spread) * BULLET_SPEED
        self.spawn_time = pg.time.get_ticks()

    def update(self):
        """Advance the bullet; despawn on wall contact or after its lifetime."""
        self.pos += self.vel * self.g.dt
        self.rect.center = self.pos
        hit_wall = pg.sprite.spritecollideany(self, self.g.walls_group)
        expired = pg.time.get_ticks() - self.spawn_time > BULLET_LIFETIME
        if hit_wall or expired:
            self.kill()
| [
"ebroadbent@tooeleschools.org"
] | ebroadbent@tooeleschools.org |
1efa0ea37b215cf4aef1d922effc760f4f26fe1e | 76909159ed818b35f121c549a7dccf5494cf814a | /guvi/python pip/intro.py | 0cd522e6be77b333d2acb80e1b9f10dd1c4c93ec | [] | no_license | kiranbakale/guvipysamples | 74dc7760765039af01a97f633137aeea5493401e | 426cd81e79c8666a84f19101ecbe05aaa7cd8ca5 | refs/heads/master | 2023-04-10T11:48:25.253536 | 2021-04-26T08:46:15 | 2021-04-26T08:46:15 | 361,656,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | /packages
# NumPy
# Pendulum
# MoviePy
# Requests
# Tkinter
# PyQt
# Pandas
# py32
# pyTest
| [
"kiranbakale9@gmail.com"
] | kiranbakale9@gmail.com |
b60dc9b090445d8a1dd0b386f24ac12b84b75313 | 9eeb904f78da5b20f388392cd03932a9ac8b12a3 | /simplebot.py | 15dfdb5575db086d6c9923f4f852ea10d1ada0d3 | [] | no_license | jorgerpo/simplebot | 544adc8ee8310c205a26e7b6d771bbb20a350a44 | fab3b655959d7c5b9a60da7284e13eef9c5a10de | refs/heads/master | 2023-03-07T00:26:14.142833 | 2021-02-19T14:01:44 | 2021-02-19T14:01:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | """ Simple rule base chatbot with emotional monitoring """
from textblob import TextBlob
import twily_classifier as cl
import stop_words as stopwords
import json
# Load the scripted conversation corpus; twilybot.json maps
# "conversations" to a list of sentence lists (one list per exchange).
with open('twilybot.json', 'r') as f:
    array = json.load(f)
CONVERSATION = array["conversations"]
BOT_NAME = 'Twily'
STOP_WORDS = stopwords.sw_list
# Running history of negative-sentiment probabilities, one per user message.
neg_distribution = []
def sentiment(u_input):
    """Classify *u_input*, append its negative-sentiment probability to the
    module-level ``neg_distribution`` history, and return that probability
    rounded to two decimals."""
    distribution = cl.trainer().prob_classify(u_input)
    neg_prob = round(distribution.prob("neg"), 2)
    neg_distribution.append(neg_prob)
    return neg_prob
def simplebot(user):
    """Rule-based reply for the raw *user* string.

    The input is lower-cased, tokenised and stop-word-filtered; every
    conversation in CONVERSATION whose sentences share a word with the
    filtered input contributes all of its sentences as candidates.  The
    longest candidate is returned, or an apology when nothing matched.
    """
    tokens = TextBlob(user).lower().words
    keywords = [w for w in tokens if w not in STOP_WORDS]
    candidates = set()
    for conversation in CONVERSATION:
        for sentence in conversation:
            if set(keywords).intersection(sentence.split()):
                candidates.update(conversation)
    if candidates:
        return max(candidates, key=len)
    return "I am sorry, I don't have an answer, ask again"
def escalation(uinput):
    """Monitor user sentiment and decide who answers.

    Scores *uinput* via sentiment() (which appends the negative-sentiment
    probability to the module-level ``neg_distribution`` history).  Once
    more than three messages have been scored, the conversation is handed
    to a live representative when the third-to-last score exceeds 0.40 and
    did not decrease on the following message; otherwise (and in every
    earlier turn) the rule-based simplebot() reply is returned.

    Bug fix: the hand-off message was a single f-string continued with
    backslashes, which embedded the source indentation (long runs of
    spaces) into the user-visible text; it is now assembled with implicit
    string concatenation.
    """
    live_rep = (f"We apologize {BOT_NAME} is unable to assist "
                "you, we are getting a live representative for you, "
                "please stay with us ...")
    sentiment(uinput)
    list_len = len(neg_distribution)
    bot_response = simplebot(uinput)
    if list_len > 3:
        last_3 = neg_distribution[-3:]
        # NOTE(review): only the first two of the last three scores are
        # compared; the `last_3[2]` term was commented out upstream --
        # confirm the intended escalation rule.
        if last_3[0] > .40 and last_3[0] <= last_3[1]:
            return live_rep
    return bot_response
if __name__ == '__main__':
    # Simple REPL: score each message, print the bot/escalation response
    # and the running negative-sentiment history until interrupted.
    while True:
        try:
            user_input = input('You: ')
            print(escalation(user_input))
            print(neg_distribution)
        except (KeyboardInterrupt, EOFError, SystemExit):
            break
| [
"bruzual.enrique@outlook.com"
] | bruzual.enrique@outlook.com |
cfb6bb053299436ff3650f8d3757f18b2d75b66f | 98ad82f686b4f3557705c9124dc77334b99d472a | /Lesson 4/Задача 6.py | 918ae8053eb1ac3f1805d47691f53770aa2a60f6 | [] | no_license | BaranovTOP1/Python | b7cc2e6b86d0432e03996f31a85c5f09da1f5840 | dcb251e36b955422ca8915ac2c72d1a38ef441dd | refs/heads/master | 2023-07-13T17:02:57.389664 | 2021-08-13T20:34:06 | 2021-08-13T20:34:06 | 367,494,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | kostya = int(input('Кубик Кости: '))
vlad = int(input('Кубик владельца: '))
suma = kostya + vlad
suma1 = kostya - vlad
if kostya >= vlad:
print(suma1)
print('Костя платит!')
print('Игра окончена.')
else:
print(suma)
print('Владелиц платит!')
print('Игра окончена.')
| [
"sanbaranoff@yandex.ru"
] | sanbaranoff@yandex.ru |
51af0996835493174bf340129078926632a85db3 | f2802e4bbf492553544f8d84ecc52716e72e7d49 | /finxiapp/models.py | bee44acf1b259736b21f0e4852dd658b09c87194 | [] | no_license | leilaapsilva/TesteFinxi | 6bc8b6ac7c7fdb22ea60ba91558293848e1e1ff9 | 7a555495ed4977d1eec05af02ef58f9a210cc116 | refs/heads/master | 2022-11-28T05:20:39.638186 | 2020-08-06T02:48:21 | 2020-08-06T02:48:21 | 285,158,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | from django.db import models
from django.contrib.auth.models import User
#from django.core.exceptions import ValidationError
#from django.utils.translation import gettext_lazy as _
MAX_LENGTH = 200 # Constante que define o tamanho máximo padrão dos campos de texto
class Administrador(models.Model):
    """Administrator role: a one-to-one profile attached to an auth User."""
    # Deleting the User cascades here; reachable as user.administrador.
    user = models.OneToOneField(User, related_name="administrador", on_delete=models.CASCADE)
    def __str__(self):
        return self.user.username
class Anunciante(models.Model):
    """Advertiser role: a one-to-one profile attached to an auth User."""
    # Deleting the User cascades here; reachable as user.anunciante.
    user = models.OneToOneField(User, related_name="anunciante", on_delete=models.CASCADE)
    def __str__(self):
        return self.user.username
#def valida_telefone(tel): #exemplo
# if value % 2 != 0:
# raise ValidationError(
# _('%(value)s is not an even number'),
# params={'value': value},
# )
# https://docs.djangoproject.com/en/3.0/ref/validators/
class DemandaDePecas(models.Model):
    """A parts request posted by an advertiser (anunciante).

    Holds a free-text description, contact details, an address and an
    open/closed status.  Most fields are nullable -- presumably to allow
    incremental form filling; TODO confirm against the forms/views.
    """
    descricao = models.CharField(max_length=200, verbose_name="Descrição", null=True)
    # Contact data: email and/or phone.
    contato_email = models.EmailField(max_length=200, verbose_name="Email de contato", null=True) #email/telefone
    contato_telefone = models.CharField(max_length=50, verbose_name="Telefone", null=True)
    # anunciante = models.ForeignKey(Anunciante, on_delete=models.CASCADE, null=True)
    user_anunciante = models.ForeignKey('auth.User', related_name='demanda', on_delete=models.CASCADE, null=True)
    # False/0 = open (aberta), True/1 = finished (finalizada).
    status = models.BooleanField( null=True) #aberta 0/finalizada 1
    endereco_rua = models.CharField(max_length=MAX_LENGTH, verbose_name="Rua", null=True)
    endereco_numero = models.CharField(max_length=10, verbose_name="Número", null=True)
    endereco_complemento = models.CharField(max_length=MAX_LENGTH, verbose_name="Complemento", null=True)
    endereco_bairro = models.CharField(max_length=MAX_LENGTH, verbose_name="Bairro", null=True)
    endereco_cidade = models.CharField(max_length=MAX_LENGTH, verbose_name="Cidade", null=True)
    # Two-letter state abbreviation (sigla).
    endereco_estado = models.CharField(max_length=2, verbose_name="Estado", null=True) # (Sigla)
    def __str__(self):
return self.descricao | [
"leilaapsilva26@gmail.com"
] | leilaapsilva26@gmail.com |
d9703a6be1c3daea019c9d1cbd396380fa794f67 | 6537df8cee33033f21afcee1cac0d8b266025a83 | /poems/models.py | c744e4902e28c49dc9c4940a5ae9b52d6d1df5d4 | [] | no_license | AceKnightWalker/Majesty-Link | 66a1caed81e875ad890137f9632ac87148fd926c | c98e272fe1a0c2f7b8ae3914e05572437627d510 | refs/heads/master | 2021-04-10T20:32:26.396350 | 2020-03-21T11:53:06 | 2020-03-21T11:53:06 | 248,963,284 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
from django.utils import timezone
from django.urls import reverse
from django.utils.text import slugify
class Poem(models.Model):
title = models.CharField(max_length=200, unique=True)
slug = models.SlugField(default='', blank=True, unique=True)
author = models.CharField(max_length=200, unique=True)
thumbnail = models.ImageField(blank=True)
uploaded_date = models.DateTimeField(default=timezone.now)
text = RichTextUploadingField()
class Meta:
ordering = ['-uploaded_date']
def save(self):
self.uploaded_date = timezone.now()
self.slug = slugify(self.title)
super(Poem, self).save()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('poem:detail', kwargs={'slug': self.slug})
class Comment(models.Model):
name = models.CharField(max_length=200)
text = models.TextField()
post = models.ForeignKey(Poem, on_delete=models.CASCADE)
created_date = models.DateField(default=timezone.now)
moderation = models.BooleanField(default=True)
def __str__(self):
return self.text
| [
"lawalj99@gmail.com"
] | lawalj99@gmail.com |
2abe407839c5f726bf995257481c7bf77080119a | d24ce7c81aa297c2cd5a8383413481ce418d2aa6 | /petshop/urls.py | 1865af22d538240ed43f958aafe4d781e3c3d917 | [] | no_license | usac201314832/ProyectoDjango | 3f22a57cb6abc35abaa088754c81dc3a7ba55de4 | e5b51827d314f7f5de775677f87fd5d1b7610406 | refs/heads/main | 2023-01-07T21:52:08.537992 | 2020-11-10T05:05:54 | 2020-11-10T05:05:54 | 309,443,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | """petshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from petshop import views as general
from users import views as users
urlpatterns = [
path('', general.homepage, name = "homepage"),
path('admin/', admin.site.urls, name = "administration"),
path('login/', users.logon, name = "logon"),
path('logout/', users.logout_view, name = "logout"),
] + static(settings.MEDIA_URL, document_root =settings.MEDIA_ROOT)
| [
"2661546380101@ingenieria.usac.edu.gt"
] | 2661546380101@ingenieria.usac.edu.gt |
061a748ff2d46a97b47e03985daf0b96c6b6f048 | fec5ef5564e303e618472192c90a2705bc1cfcc9 | /checkout/apps.py | e15de7f8bd606597b1f84dcfea3118a3dce49349 | [] | no_license | marcin-kli/MS4 | 87b7cbc99eda1aa218ab94ce2865050f43a85c09 | b36c8e719192213d5698b5d30b10cd6221c1fdf7 | refs/heads/master | 2023-05-02T14:51:33.431263 | 2021-05-30T07:02:20 | 2021-05-30T07:02:20 | 345,659,870 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | from django.apps import AppConfig
class CheckoutConfig(AppConfig):
name = 'checkout'
def ready(self):
import checkout.signals # this is imported to get signals working
| [
"klimaszewski.m@gmail.com"
] | klimaszewski.m@gmail.com |
9cf95f6dafcc794e9f8a8ef7e764f0e06a0d831b | cd00ece473a09a75754613c83862d4dfaf54cd61 | /CMSSW_10_5_0/src/L5_Cor/Data/test/crabConfig_Data_2018A_JERCL5_cfg.py | 4e4cdff1713429757fa625b15a51587b2c4af4ac | [] | no_license | Sumantifr/ElectronicTopTagger_CMS | ae578c4b6911570516f55b661e14c5035e3b5947 | 76d31243d8595549655d9ca015a94629564e3f84 | refs/heads/master | 2023-01-21T05:22:57.239808 | 2021-04-21T16:08:55 | 2021-04-21T16:08:55 | 232,769,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'crab_L5JERC_SingleMuon_2018A_Autumn18_JECV19'
config.General.workArea = 'crab_L5JERC_SingleMuon_2018A_Autumn18_JECV19'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'JEC_Data_2018A_MINIAOD_cfg.py'
config.JobType.inputFiles = ['Autumn18_RunA_V19_DATA','Autumn18_V7_MC_SF_AK8PFPuppi.txt','Autumn18_V7_MC_SF_AK4PFchs.txt','Autumn18_V7_MC_PtResolution_AK4PFchs.txt','Autumn18_V7_MC_PtResolution_AK8PFPuppi.txt']
config.JobType.disableAutomaticOutputCollection = True
config.JobType.outputFiles = ['hist_jerc_l5.root','rootuple_jerc_l5.root']
config.JobType.maxJobRuntimeMin = 2700
#config.JobType.maxMemoryMB = 2480
config.JobType.allowUndistributedCMSSW = True
config.Data.inputDataset = '/SingleMuon/Run2018A-17Sep2018-v2/MINIAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 10
config.Data.lumiMask ='/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/ReReco/Cert_314472-325175_13TeV_17SeptEarlyReReco2018ABC_PromptEraD_Collisions18_JSON.txt'
#config.Data.runRange = '258214-258287' # '193093-194075'
config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = False
#config.Data.publishDataName = 'May2015_Data_analysis'
config.Site.storageSite = 'T2_IN_TIFR'
#config.Site.whitelist = ["T2_IN_TIFR"]
| [
"s7384705218@gmail.com"
] | s7384705218@gmail.com |
909311dad73eabab7545e458675d1fa25cb23fde | ada2e259d86145dbdafecccfc66cc518d099b999 | /primal_dual_interior_point_NT_scaling.py | d18387156eef101a5f7bd48d7ae0c8f9b3bb87ed | [] | no_license | goodfish94/optimization_non_commuting_variables | 6f32d329c924925f20449bf965a985b86827089a | fa7f9c4bf1ff5a716823660937de4d3c20d8e347 | refs/heads/master | 2022-12-09T04:14:56.812431 | 2020-09-04T23:16:16 | 2020-09-04T23:16:16 | 292,963,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,227 | py | from dual import dual
from primal import primal
from constraint import constraint
import numpy as np
import time
from numpy.linalg import inv
from numpy.linalg import eigvals
from numpy.linalg import cholesky
from numpy.linalg import svd
from numpy.linalg import eig
from aux_function import solve
from aux_function import vectorize, get_vec_index, transform_vec_to_matrix, generate_mat_basis, print_mat
class primal_dual_interior_point_for_nc():
' interior point solver '
def __init__(self, L, ham_vec,t, U,max_iter):
self.L = L
self.max_iter = max_iter
self.t_ = t
self.U_ = U
self.time_start = time.time()
g1_vec = np.zeros(2 * self.L, dtype=np.float32)
z1_vec = np.zeros(2 * self.L, dtype=np.float32)
g2_ph_vec = np.zeros(4 * L * L * L, dtype=np.float32)
z2_ph_vec = np.zeros(4 * L * L * L, dtype=np.float32)
g2_pair1_vec = np.zeros(L * L * L, dtype=np.float32)
z2_pair1_vec = np.zeros(L * L * L, dtype=np.float32)
g2_pair2_vec = np.zeros(L * L * L, dtype=np.float32)
z2_pair2_vec = np.zeros(L * L * L, dtype=np.float32)
self.primal_var = primal(L, g1_vec, g2_ph_vec, g2_pair1_vec, g2_pair2_vec)
self.dual_var = dual(L, z1_vec, z2_ph_vec, z2_pair1_vec, z2_pair2_vec)
self.const_var = constraint(L)
self.const_var.generate_constraint_matrix()
#self.const_var.generate_constraint_matrix_test()
self.duality_gap = np.zeros(5 * L)
self.dim_var = len(g1_vec) + len(g2_ph_vec) + len(g2_pair1_vec) + len(g2_pair2_vec)
self.dim_const = self.const_var.dim_const
self.w = np.zeros( (self.dim_var,self.dim_var), dtype=np.float32 )
self.inv_wT = np.zeros( (self.dim_var,self.dim_var), dtype=np.float32 )
self.vec_rx = np.zeros(self.dim_var, dtype=np.float32)
self.vec_ry = np.zeros(self.dim_const, dtype=np.float32)
self.vec_rz = np.zeros(self.dim_var, dtype=np.float32)
self.vec_rd = np.zeros(self.dim_var, dtype=np.float32)
self.wt_lambda_inv_I = np.zeros(self.dim_var,dtype=np.float32)
self.var_y = np.zeros(self.dim_const, dtype=np.float32)
self.del_x = np.zeros(self.dim_var, dtype=np.float32)
self.del_z = np.zeros(self.dim_var, dtype=np.float32)
self.del_y = np.zeros(self.const_var.dim_const, dtype=np.float32)
self.vec_ham = ham_vec
self.debug = False
if (self.debug):
self.eig_val_x = np.zeros(2 * self.L + self.L * self.L * 2 + self.L * self.L + self.L * self.L,
dtype=np.complex64)
self.eig_val_z = np.zeros(2 * self.L + self.L * self.L * 2 + self.L * self.L + self.L * self.L,
dtype=np.complex64)
self.if_combined_direction = True # if calculate combined direction
self.convg_criteria = 0.001
self.prev_error = 1000.0
self.truncation = 0.001
def single_block_solver(self):
"""
:param self:
:return: set up single particle duality gap, x_coeff,z_coeff , vec_rz
"""
for p in range(0, self.L):
xplus = self.primal_var.get_g1(p)
xminus = self.primal_var.get_g1(p + self.L)
zplus = self.dual_var.get_z1(p)
zminus = self.dual_var.get_z1(p + self.L)
if (self.debug):
if (xplus < 0.0 or xminus < 0.0 or zplus < 0.0 or zminus < 0.0):
print("single block")
print(xplus, xminus, zplus, zminus)
exit(2)
self.eig_val_x[p] = xplus
self.eig_val_x[p + self.L] = xminus
self.eig_val_z[p] = zplus
self.eig_val_z[p + self.L] = zminus
self.duality_gap[p] = np.real( xplus * zplus)
self.duality_gap[p + self.L] = np.real(xminus * zminus)
self.w[p,p]=np.sqrt(xplus/zplus)
self.inv_wT[p,p] = 1.0/self.w[p,p]
self.wt_lambda_inv_I[p] =1.0/zplus #* self.duality_gap[p]
self.w[p+self.L, p+self.L] =np.sqrt( xminus / zminus )
self.inv_wT[p+self.L,p+self.L] = 1.0/self.w[p+self.L, p+self.L]
self.wt_lambda_inv_I[p+self.L] = 1.0/zminus #* self.duality_gap[p+self.L]
self.vec_rz[p] += self.duality_gap[p]
self.vec_rz[p + self.L] += self.duality_gap[p + self.L]
def two_ph_block_solver(self):
"""
:param self:
:return: set up two_ph, duality gap, x_coeff,z_coeff
"""
for p in range(0, (self.L)):
x_mat = self.primal_var.generate_two_ph_primal_matrix(p)
z_mat = self.dual_var.generate_two_ph_dual_zmatrix(p)
if (self.debug):
try:
np.linalg.cholesky(x_mat)
except:
print(" two ph xmat")
exit()
try:
np.linalg.cholesky(z_mat)
except:
print(" two ph zmat")
exit()
ind_st = 2 * self.L + 2 * self.L * p
ind_en = 2 * self.L + 2 * self.L * (p + 1)
eig_tmp = eigvals(x_mat)
self.eig_val_x[ind_st:ind_en] += eig_tmp[:]
if (np.min(eig_tmp) < 0.001):
print("two ph xmat, too small eigvalue")
print(eig_tmp)
print("xmat")
print(x_mat)
exit()
eig_tmp = eigvals(z_mat)
self.eig_val_z[ind_st:ind_en] += eig_tmp[:]
if (np.min(eig_tmp) < 0.001):
print("two ph zmat, too small eigvalue")
print(eig_tmp)
print("zmat")
print(x_mat)
exit()
xz_dot_product = np.dot(x_mat, z_mat)
xz_dot_product += np.transpose(np.conj(xz_dot_product))
self.duality_gap[2 * self.L + p] += 0.5*np.real(np.trace(xz_dot_product)) / (2.0 * self.L)
[block_w,block_inv_wT] = self.cal_scaling_mat(x_mat,z_mat,2*self.L)
index_st = 2*self.L + 4*self.L*self.L * p
index_en = 2*self.L + 4*self.L*self.L * (p+1)
self.w[index_st:index_en, index_st:index_en] += block_w[:,:]
self.inv_wT[index_st:index_en, index_st:index_en] += block_inv_wT[:, :]
rz_mat = np.identity(2 * self.L, dtype=np.complex64)
vec_d_block = vectorize(rz_mat, 2 * self.L)
self.vec_rz[index_st: index_en] += self.duality_gap[2 * self.L + p] *vec_d_block[:]
vec_lambda = np.dot(block_w, self.dual_var.vec_z[index_st:index_en])
for i in range(0,2*self.L):
rz_mat[i,i] = rz_mat[i,i]/vec_lambda[i]
self.wt_lambda_inv_I[index_st: index_en] += np.dot(np.transpose(block_w),vectorize(rz_mat, 2 * self.L))
def two_pair1_block_solver(self):
"""
:param self:
:return: set up two_ph, duality gap, x_coeff,z_coeff
"""
for p in range(0, (self.L)):
x_mat = self.primal_var.generate_two_pair1_primal_matrix(p)
z_mat = self.dual_var.generate_two_pair1_dual_zmatrix(p)
if (self.debug):
try:
np.linalg.cholesky(x_mat)
except:
print(" two pair1 xmat")
exit()
try:
np.linalg.cholesky(z_mat)
except:
print(" two pair1 zmat")
exit()
ind_st = 2 * self.L + 2 * self.L * self.L + self.L * p
ind_en = 2 * self.L + 2 * self.L * self.L + self.L * (p + 1)
eig_tmp = eigvals(x_mat)
self.eig_val_x[ind_st:ind_en] += eig_tmp[:]
if (np.min(eig_tmp) < 0.00001):
print("two pair1 xmat, too small eigvalue")
print(eig_tmp)
print("xmat")
print(x_mat)
print(z_mat)
w,v=eig(x_mat)
print("eigval")
print(w)
print("eigvec")
print(v)
exit()
eig_tmp = eigvals(z_mat)
self.eig_val_z[ind_st:ind_en] += eig_tmp[:]
if (np.min(eig_tmp) < 0.001):
print("two pair2 zmat, too small eigvalue")
print(eig_tmp)
print("zmat")
print(x_mat)
exit()
xz_dot_product = np.dot(x_mat, z_mat)
xz_dot_product += np.transpose(np.conj(xz_dot_product))
self.duality_gap[2 * self.L + self.L + p] = 0.5*np.real(np.trace(xz_dot_product)) / float(self.L)
[block_w,block_inv_wT] = self.cal_scaling_mat(x_mat, z_mat, self.L)
index_st = 2 * self.L + 4 * self.L * self.L * self.L + (p)*self.L*self.L
index_en = 2 * self.L + 4 * self.L * self.L * self.L + (p+1)*self.L*self.L
self.w[index_st:index_en, index_st:index_en] += block_w[:, :]
self.inv_wT[index_st:index_en, index_st:index_en] += block_inv_wT[:, :]
rz_mat = np.identity(self.L,dtype=np.complex64)
vec_d_block = vectorize(rz_mat, self.L)
self.vec_rz[index_st:index_en] += self.duality_gap[2 * self.L + self.L + p] *vec_d_block[:]
vec_lambda = np.dot(block_w, self.dual_var.vec_z[index_st:index_en])
for i in range(0, self.L):
rz_mat[i, i] = rz_mat[i, i] / vec_lambda[i]
self.wt_lambda_inv_I[index_st: index_en] += np.dot( np.transpose(block_w),vectorize(rz_mat, self.L) )
def two_pair2_block_solver(self):
"""
:param self:
:return: set up two_ph, duality gap, x_coeff,z_coeff
"""
for p in range(0, (self.L)):
x_mat = self.primal_var.generate_two_pair2_primal_matrix(p)
z_mat = self.dual_var.generate_two_pair2_dual_zmatrix(p)
if (self.debug):
try:
np.linalg.cholesky(x_mat)
except:
print(" two pair2 xmat")
exit()
try:
np.linalg.cholesky(z_mat)
except:
print(" two pair2 zmat")
exit()
ind_st = 2 * self.L + 3 * self.L * self.L + self.L * p
ind_en = 2 * self.L + 3 * self.L * self.L + self.L * (p + 1)
eig_tmp = eigvals(x_mat)
self.eig_val_x[ind_st:ind_en] += eig_tmp[:]
if (np.min(eig_tmp) < 0.001):
print("two pair2 xmat, too small eigvalue")
print(eig_tmp)
print("xmat")
print(x_mat)
exit()
eig_tmp = eigvals(z_mat)
self.eig_val_z[ind_st:ind_en] += eig_tmp[:]
if (np.min(eig_tmp) < 0.001):
print("two pair2 zmat, too small eigvalue")
print(eig_tmp)
print("zmat")
print(x_mat)
exit()
xz_dot_product = np.dot(x_mat, z_mat)
xz_dot_product += np.transpose(np.conj(xz_dot_product))
self.duality_gap[2 * self.L + self.L + self.L + p] = 0.5*np.real(np.trace(xz_dot_product)) / float(self.L)
[block_w,block_inv_wT] = self.cal_scaling_mat(x_mat, z_mat, self.L)
index_st = 2 * self.L + 5 * self.L * self.L * self.L + p * self.L * self.L
index_en = 2 * self.L + 5 * self.L * self.L * self.L + (p+1) * self.L * self.L
self.w[index_st:index_en, index_st:index_en] += block_w[:, :]
self.inv_wT[index_st:index_en, index_st:index_en] += block_inv_wT[:, :]
rz_mat = np.identity(self.L,dtype=np.complex64)
vec_d_block = vectorize(rz_mat, self.L)
self.vec_rz[index_st:index_en] += self.duality_gap[2 * self.L + self.L + self.L + p] *vec_d_block[:]
vec_lambda = np.dot(block_w, self.dual_var.vec_z[index_st:index_en])
for i in range(0, self.L):
rz_mat[i, i] = rz_mat[i, i] / vec_lambda[i]
self.wt_lambda_inv_I[index_st: index_en] += np.dot( np.transpose(block_w),vectorize(rz_mat, self.L))
def cal_scaling_mat(self, xmat,zmat,blocksize):
block_w = np.zeros( (blocksize*blocksize,blocksize*blocksize), dtype=np.float32)
block_inv_wT = np.zeros((blocksize * blocksize, blocksize * blocksize), dtype=np.float32)
L1 = cholesky(xmat)
L2 = cholesky(zmat)
u, s, vh = svd(np.dot( np.conj(np.transpose(L2)), L1 ))
R = np.dot(L1, np.transpose(np.conj(vh)))
Rinv = np.dot( np.transpose(np.conj(u)), np.transpose(np.conj(L2)))
for i in range(0,len(s)):
Rinv[i,:] = Rinv[i,:]/np.sqrt(s[i])
Rinvdag = np.transpose(np.conj(Rinv))
for i in range(0,len(s)):
R[:,i] = R[:,i] /np.sqrt(s[i])
Rdag = np.transpose(np.conj(R))
for alpha in range(0,blocksize*blocksize):
basis_alpha = generate_mat_basis(alpha,blocksize)
for gamma in range(0,blocksize*blocksize):
basis_gamma = generate_mat_basis(gamma,blocksize)
block_w[alpha,gamma] += np.real(np.trace( np.dot(np.dot( np.dot(Rdag,basis_gamma),R ), basis_alpha) ))
block_inv_wT[alpha,gamma] += np.real(np.trace( np.dot(np.dot( np.dot(Rinv,basis_gamma),Rinvdag ), basis_alpha) ))
return [block_w,block_inv_wT]
def initialization(self):
L=self.L
mu=0.1
linear_eq_A=np.dot(self.const_var.mat_C,np.transpose(self.const_var.mat_C))
linear_eq_b = -self.const_var.vec_b - np.dot( self.const_var.mat_C, self.vec_ham)
self.var_y = solve(linear_eq_A, linear_eq_b)
temp_x = -np.dot(np.transpose(self.const_var.mat_C),self.var_y)-self.vec_ham
self.var_y = self.var_y
temp_x = temp_x
for i in range(0,2*L):
if(temp_x[i]>0.0):
self.primal_var.vec_x[i]=temp_x[i]
self.dual_var.vec_z[i] = mu/temp_x[i]
else:
self.primal_var.vec_x[i] = 1.0
self.dual_var.vec_z[i] = mu
for p in range(0,L):
index_st=2*L+p*(L*L)*4
index_en=index_st + 4*L*L
temp_mat = transform_vec_to_matrix(temp_x[index_st:index_en],2*L)
eig = eigvals(temp_mat)
eig_min = np.min(eig)
if(eig_min>self.truncation):
self.primal_var.vec_x[index_st:index_en] = temp_x[index_st:index_en].copy()
else:
temp_mat = temp_mat + (1.0+abs(eig_min))*np.identity(2*L,dtype=np.complex64)
self.primal_var.vec_x[index_st:index_en] = vectorize(temp_mat,2*L)
self.dual_var.vec_z[index_st:index_en] = vectorize(mu*inv(temp_mat),2*L)
for p in range(0, L):
index_st = 2 * L +4* L*L*L + p * (L * L)
index_en = index_st + (L * L)
temp_mat = transform_vec_to_matrix(temp_x[index_st:index_en], L)
eig = eigvals(temp_mat)
eig_min = np.min(eig)
if (eig_min > self.truncation):
self.primal_var.vec_x[index_st:index_en] = temp_x[index_st:index_en].copy()
else:
temp_mat = temp_mat + (1.0 + abs(eig_min)) * np.identity( L, dtype=np.complex64)
self.primal_var.vec_x[index_st:index_en] = vectorize(temp_mat, L)
self.dual_var.vec_z[index_st:index_en] = vectorize(mu*inv(temp_mat), L)
for p in range(0, L):
index_st = 2 * L +5* L*L*L + p * (L * L)
index_en = index_st + (L * L)
temp_mat = transform_vec_to_matrix(temp_x[index_st:index_en], L)
eig = eigvals(temp_mat)
eig_min = np.min(eig)
if (eig_min > self.truncation):
self.primal_var.vec_x[index_st:index_en] = temp_x[index_st:index_en].copy()
else:
temp_mat = temp_mat + (1.0 + abs(eig_min)) * np.identity( L, dtype=np.complex64)
self.primal_var.vec_x[index_st:index_en] = vectorize(temp_mat, L)
self.dual_var.vec_z[index_st:index_en] = vectorize(mu*inv(temp_mat), L)
rx = self.vec_ham + np.dot(np.transpose(self.const_var.mat_C), (self.var_y )) - ( self.dual_var.vec_z )
ry = -self.const_var.vec_b + np.dot(self.const_var.mat_C, (self.primal_var.vec_x ))
def if_del_solution(self):
x = self.primal_var.vec_x +self.del_x
z = self.dual_var.vec_z + self.del_z
y = self.var_y + self.del_y
rx = self.vec_ham + np.dot(np.transpose(self.const_var.mat_C), y ) - z
ry = np.dot(self.const_var.mat_C,x) - self.const_var.vec_b
print("rx,ry if_solution")
print(np.max(np.abs(rx)), np.max(np.abs(ry)))
def test_direction(self,alpha):
print("###################################")
print("rx,ry,dual previous")
rx_prev = self.vec_ham + np.dot(np.transpose(self.const_var.mat_C), (self.var_y )) - ( self.dual_var.vec_z )
ry_prev = -self.const_var.vec_b + np.dot(self.const_var.mat_C, (self.primal_var.vec_x ))
print(np.max(np.abs(rx_prev)),np.max(np.abs(ry_prev)))
rx = self.vec_ham + np.dot(np.transpose(self.const_var.mat_C), (self.var_y + alpha*self.del_y)) - ( self.dual_var.vec_z + alpha*self.del_z)
ry = -self.const_var.vec_b + np.dot(self.const_var.mat_C, (self.primal_var.vec_x + alpha*self.del_x))
print("rx,ry,alpha=%f"%alpha)
print(np.max(np.abs(rx)), np.max(np.abs(ry)))
rx = self.vec_ham + np.dot(np.transpose(self.const_var.mat_C), (self.var_y+self.del_y)) - (self.dual_var.vec_z+self.del_z)
ry = -self.const_var.vec_b + np.dot(self.const_var.mat_C, (self.primal_var.vec_x+self.del_x))
print("rx,ry,alpha=1")
print(np.max(np.abs(rx)),np.max(np.abs(ry)))
if(self.if_debug and np.max(np.abs(rx)) - np.max(np.abs(rx_prev)) > 10.0*self.convg_criteria and np.max(np.abs(rx))>self.convg_criteria):
print("rx failed")
print(np.max(np.abs(rx)) - np.max(np.abs(rx_prev) ))
self.if_del_solution()
exit()
if (self.if_debug and np.max(np.abs(ry)) - np.max(np.abs(ry_prev)) > 10.0*self.convg_criteria and np.max(np.abs(ry))>self.convg_criteria):
print("ry failed")
print(np.max(np.abs(ry)) - np.max(np.abs(ry_prev)))
self.if_del_solution()
exit()
print("###################################")
def each_iter_solver(self):
self.duality_gap.fill(0.0)
self.w.fill(0.0)
self.inv_wT.fill(0.0)
self.wt_lambda_inv_I.fill(0.0)
self.vec_rx.fill(0.0)
self.vec_ry.fill(0.0)
self.vec_rz.fill(0.0)
self.vec_rd.fill(0.0)
self.del_x.fill(0.0)
self.del_y.fill(0.0)
self.del_z.fill(0.0)
if (self.debug):
self.eig_val_x.fill(0.0)
self.eig_val_z.fill(0.0)
self.single_block_solver()
self.two_ph_block_solver()
self.two_pair1_block_solver()
self.two_pair2_block_solver()
if (self.debug):
print("eig for x", np.min(self.eig_val_x))
print("eig for z", np.min(self.eig_val_z))
print("eig pos for x", np.argmin(self.eig_val_x))
print("eig pos for z", np.argmin(self.eig_val_z))
self.vec_rx += self.vec_ham + np.dot(np.transpose(self.const_var.mat_C), self.var_y) - self.dual_var.vec_z
self.vec_ry += -self.const_var.vec_b + np.dot(self.const_var.mat_C, self.primal_var.vec_x)
convg_error = np.max([np.max(self.duality_gap), np.max(np.abs(self.vec_rx)), np.max(np.abs(self.vec_ry))])
if(self.debug and convg_error-self.prev_error>self.convg_criteria):
print(convg_error,self.prev_error)
print(np.max(self.duality_gap))
print("failed update")
exit()
self.prev_error = convg_error
self.solve_kkt(self.vec_rx, self.vec_ry, self.primal_var.vec_x)
alpha_x = self.primal_var.find_feasibile_alpha(self.del_x, 30,0.0001*self.convg_criteria)
alpha_z = self.dual_var.find_feasibile_alpha(self.del_z, 30,0.0001*self.convg_criteria)
alpha = np.min([alpha_x,alpha_z])
if(self.debug):
self.test_direction(alpha)
if(self.if_combined_direction):
rho = np.dot(self.primal_var.vec_x+ self.del_x *alpha, self.dual_var.vec_z+ self.del_z *alpha)
rho = rho/np.dot(self.primal_var.vec_x , self.dual_var.vec_z )
sigma = np.min([1.0,rho])
sigma = np.max([0.0,sigma])
sigma = np.power(sigma,3)
mu = np.sum(self.duality_gap[0:2*self.L])+np.sum(self.duality_gap[2*self.L:3*self.L])*2*self.L + np.sum( self.duality_gap[3*self.L:5*self.L])*self.L
mu = mu/(2*self.L+ 4*self.L*self.L)
self.vec_rd = self.primal_var.vec_x - sigma*self.wt_lambda_inv_I*mu
self.res = np.max( [np.max(np.abs(self.vec_rx)), np.max(np.abs(self.vec_ry)) ] )
self.vec_rx = (1.0-sigma) * self.vec_rx
self.vec_ry = (1.0-sigma)*self.vec_ry
self.solve_kkt(self.vec_rx,self.vec_ry,self.vec_rd)
alpha_x = self.primal_var.find_feasibile_alpha(self.del_x, 100,0.0001*self.convg_criteria)
alpha_z = self.dual_var.find_feasibile_alpha(self.del_z, 100,0.0001*self.convg_criteria)
alpha = np.min([alpha_x, alpha_z])
if(self.debug):
self.test_direction(alpha)
print("error=",convg_error)
if(convg_error<self.convg_criteria):
print("solved")
print("time of calculation = %f"%(time.time()-self.time_start) )
self.print_solution()
self.print_density_density()
exit()
self.primal_var.vec_x += alpha*0.9 * self.del_x
self.dual_var.vec_z += alpha*0.9 * self.del_z
self.var_y += alpha*0.9 * self.del_y
def main_solver(self):
self.initialization()
self.vec_rx.fill(0.0)
self.vec_ry.fill(0.0)
self.vec_rx += self.vec_ham + np.dot(np.transpose(self.const_var.mat_C), self.var_y) - self.dual_var.vec_z
self.vec_ry += -self.const_var.vec_b + np.dot(self.const_var.mat_C, self.primal_var.vec_x)
file = open("iteration_log.txt","w+")
file.write("iteration_number \t maximum_of_duality_gap \t maximum_of_residues \t time \n ")
for i in range(1, self.max_iter):
print("iteration ", i)
self.each_iter_solver()
time_now = time.time() - self.time_start
file.write(str(i) + "\t" + str(np.max(self.duality_gap)) + "\t" + str(self.res)+ "\t" + str(time_now) + "\n" )
if( i%10 ==0 ):
self.print_solution()
file.close()
print("Doesn't converge after %d iterations"%self.max_iter)
def solve_kkt(self,rx,ry,rz):
wTw = np.dot(np.transpose(self.w),self.w)
mat_A = np.zeros([2 * self.dim_var + self.dim_const, 2 * self.dim_var + self.dim_const], dtype=np.float32)
mat_A[0:self.dim_var, self.dim_var:self.dim_var + self.dim_const] += np.transpose(self.const_var.mat_C)
index_st = self.dim_var + self.dim_const
index_en = 2 * self.dim_var + self.dim_const
mat_A[0:self.dim_var, index_st:index_en] += -np.identity(self.dim_var, dtype=np.float32)
index_st = self.dim_var
index_en = self.dim_var + self.dim_const
mat_A[index_st:index_en, 0:self.dim_var] += self.const_var.mat_C
index_st = self.dim_var + self.dim_const
index_en = 2 * self.dim_var + self.dim_const
mat_A[index_st:index_en, 0:self.dim_var] += np.identity(self.dim_var, dtype=np.float32)
index_st = self.dim_var + self.dim_const
index_en = index_st + self.dim_var
mat_A[index_st:index_en, index_st:index_en] = wTw
vec_b = -rx
vec_b = np.hstack( (vec_b,-ry) )
vec_b = np.hstack( (vec_b,-rz) )
x = solve(mat_A,vec_b)
if(self.debug):
tmp = np.dot(mat_A,x)-vec_b
print("linear solver accuracy:",np.max(np.abs(tmp)))
self.del_x.fill(0.0)
self.del_y.fill(0.0)
self.del_z.fill(0.0)
self.del_x += x[0:self.dim_var]
self.del_y += x[self.dim_var : self.dim_var + self.dim_const]
self.del_z += x[self.dim_var+self.dim_const: 2*self.dim_var + self.dim_const]
return 1
#
# def cal_Mehrotra_correction(self):
#
# L= self.L
#
# m= np.zeros(self.dim_var,dtype=np.float32)
#
# winvTdel_x = np.dot(self.inv_wT,self.del_x)
# wdel_z = np.dot(self.w, self.del_z)
#
#
# lambda_ = np.dot(self.w, self.dual_var.vec_z)
#
#
# for i in range(0,2*self.L):
# m[i] += wdel_z[i]*winvTdel_x[i]
# m[i] = m[i]/lambda_[i]
#
# for p in range(0,L):
#
# index_st = 2*L+4*p*L*L
# index_en = index_st + 4*L*L
#
# z_mat = transform_vec_to_matrix(wdel_z[index_st:index_en],2*L)
# x_mat = transform_vec_to_matrix(winvTdel_x[index_st:index_en],2*L)
#
# xz_mat =0.5 * (np.dot(z_mat,x_mat) + np.dot(x_mat,z_mat))
#
# vec_lambda = lambda_[index_st:index_st+2*L]
#
# for alpha in range(0,2*L):
# for gamma in range(0,2*L):
# xz_mat[alpha,gamma] = xz_mat[alpha,gamma]*2.0/ (vec_lambda[alpha] + vec_lambda[gamma])
#
# m[index_st:index_en] += vectorize(xz_mat, 2*L)
#
# for p in range(0, L):
#
# index_st = 2 * L + 4 * L * L * L + p*L*L
# index_en = index_st + L*L
#
# z_mat = transform_vec_to_matrix(wdel_z[index_st:index_en], L)
# x_mat = transform_vec_to_matrix(winvTdel_x[index_st:index_en], L)
#
# xz_mat = 0.5 * (np.dot(z_mat, x_mat) + np.dot(x_mat, z_mat))
#
# vec_lambda = lambda_[index_st:index_st + L]
#
# for alpha in range(0,L):
# for gamma in range(0,L):
# xz_mat[alpha,gamma] = xz_mat[alpha,gamma]*2.0/ (vec_lambda[alpha] + vec_lambda[gamma])
#
#
#
# m[index_st:index_en] += vectorize(xz_mat, L)
#
# for p in range(0, L):
#
# index_st = 2 * L + 5 * L * L * L + p*L*L
# index_en = index_st + L*L
#
# z_mat = transform_vec_to_matrix(wdel_z[index_st:index_en], L)
# x_mat = transform_vec_to_matrix(winvTdel_x[index_st:index_en], L)
#
# xz_mat = 0.5 * (np.dot(z_mat, x_mat) + np.dot(x_mat, z_mat))
#
#
# vec_lambda = lambda_[index_st:index_st + L]
#
# for alpha in range(0,L):
# for gamma in range(0,L):
# xz_mat[alpha,gamma] = xz_mat[alpha,gamma]*2.0/ (vec_lambda[alpha] + vec_lambda[gamma])
#
#
#
# m[index_st:index_en] = vectorize(xz_mat, L)
#
#
#
# m = np.dot(np.transpose(self.w),m)
#
# return m
#
#
#
#
#
#
def print_solution(self):
L= self.L
file = open("solution.txt","w+")
if( not file):
print( " Can't create solution.txt file" )
file.write("Spinless Fermi Hubbard Model\n")
file.write("Lattice size L = %d \n"%self.L)
file.write("Hopping amplitude t = %f \n "% self.t_)
file.write("Hubbard interaction U = %f \n"%self.U_)
energy = np.dot( self.primal_var.vec_x,self.vec_ham )
file.write( "p^star(lowerst energy) = %f"%energy)
file.write("\n\n\n")
file.write("******************************************\n")
file.write("\n\n\n")
file.write("One particle green's function\n\n")
for i in range(0,L):
file.write("<a^dag(p) a(p)>=%f\n"%self.primal_var.vec_x[i])
for i in range(0, L):
file.write("<a(p) a^dag (p)>=%f \n" % self.primal_var.vec_x[i+L])
file.write("\n\n\n\n")
file.write("******************************************************")
file.write("\n\n\n\n")
file.write("Two particle green's function in the particle hole channel with basis { a^dag_{i}a_{i-p} , a_{i}a_{i+p}^dag }\n \n\n" )
for p in range(0,L):
file.write("\n\n---------At momentum p=%f------------ \n\n"% p)
mat = self.primal_var.generate_two_ph_primal_matrix(p)
for i in range(0,2*L):
for j in range(0,2*L):
file.write(str(mat[i][j]) + " ")
file.write("\n")
file.write("\n\n\n\n")
file.write("******************************************************")
file.write("\n\n\n\n")
file.write("Two particle green's function in the pariticle-partilce channel 1 with basis { a^dag_{i}a^\dag_{p-i} } \n\n")
for p in range(0, L):
file.write("\n\n---------At momentum p=%f------------ \n\n"% p)
mat = self.primal_var.generate_two_pair1_primal_matrix(p)
for i in range(0, L):
for j in range(0, L):
file.write(str(mat[i][j]) + " ")
file.write("\n")
file.write("\n\n\n\n")
file.write("******************************************************")
file.write("\n\n\n\n")
file.write( "Two particle green's function in the pariticle-partilce channel 2 with basis { a_{i}a_{p-i} } \n\n")
for p in range(0, L):
file.write("\n\n---------At momentum p=%f------------ \n\n"% p)
mat = self.primal_var.generate_two_pair2_primal_matrix(p)
for i in range(0, L):
for j in range(0, L):
file.write(str(mat[i][j]) + " ")
file.write("\n")
file.close()
def print_density_density(self):
L=self.L
Pi = 3.141592653
nn = np.zeros(L,dtype=np.float32)
for p in range(0,L):
xmat = self.primal_var.generate_two_ph_primal_matrix(p)
for x in range(0,L):
for alpha in range(0,L):
for gamma in range(0,L):
nn[x] += np.real(xmat[alpha,gamma])*np.cos(2*Pi/float(L) * p*x)
file = open("nn.txt","w+")
for i in range(0,L):
file.write( str(i) + " " + str(nn[i]/float(L)) + "\n")
file.close()
| [
"noreply@github.com"
] | goodfish94.noreply@github.com |
f997fd9069d35aaaa8ecba2736f34ac589f5ecc0 | f7cf204ba2def42a3b5916b95133ac00c75a03ff | /myprojectvenv/bin/easy_install-3.8 | 0afa2864e46f80881695edb95b8bacef9d6d871d | [] | no_license | nishi1231/myproject_adocare | 4789271e129459ff8e74f3f4d6043b4ff56207a8 | b97272e729f77f2139421e7871827c1226dccc97 | refs/heads/master | 2023-07-31T09:52:58.269336 | 2021-09-30T06:30:06 | 2021-09-30T06:30:06 | 260,941,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | 8 | #!/Users/nishiyamashota/myproject/myprojectvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"n.i.s.h.i.4869@gmail.com"
] | n.i.s.h.i.4869@gmail.com |
c2712213a260a6c740b3ea4534a5daea6bd5ff27 | 202fab9762530adfd642ed52fa8088f13bbb7a44 | /Fima/app2/admin.py | 7a2800d355467981f956caed9ec77cd0abe47937 | [] | no_license | Prateek1337/Financial_Management_Webapp | 3d06d820ed4dace2fd07edac88088711c1efe2d3 | 71bf8401ffeaf6ab041cdb4031a545fc04950303 | refs/heads/master | 2020-08-24T17:29:08.075742 | 2020-06-10T16:03:22 | 2020-06-10T16:03:22 | 216,871,542 | 0 | 2 | null | 2019-10-27T16:25:15 | 2019-10-22T17:28:39 | Python | UTF-8 | Python | false | false | 148 | py | from django.contrib import admin
from app1.models import CurrentTransaction
# Register your models here.
# admin.site.register(CurrentTransaction)
| [
"saharshsonu40@gmail.com"
] | saharshsonu40@gmail.com |
0f34936dc8754dd1b30e5c0928f7b5b5f17b7bcd | 9f798471407af5567c9daf3ffc64922142808404 | /Programming_a2/quadratic.py | 4c4937a675331fa4fa5c4bf868e133c1ba2d1334 | [] | no_license | juliafox8/cm-codes | 4440cd75cfa143c35edcafd87330c0940b0d2532 | 7386eb7359a881dc8c9d8a275f7f53d2c4c5ce33 | refs/heads/main | 2023-07-13T22:22:09.959756 | 2021-08-30T15:42:38 | 2021-08-30T15:42:38 | 401,397,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | import math
def quadratic(a, b, c):
x1 = (-b + ((b ** 2) - (4 * a * c))) / (2 * a)
x2 = (-b - ((b ** 2) - (4 * a * c))) / (2 * a)
print ("[x1: ", x1, ", x2: ", x2,"]", sep = "")
| [
"noreply@github.com"
] | juliafox8.noreply@github.com |
2ca20828dac11e26d8d290a5d42bdc6d292f2dd4 | a3aabb5536ed0d71f61b7a99265020652c939af2 | /timetable/management/commands/getdata.py | 41c6fe8d345f34f90cfb317813e322dbae031e8f | [] | no_license | riddlore/InterTime | 911a8c9444f93fc3150fe341f8e2f1f80ecca806 | 565bb56c934106edf64ad03f07d639483fac9841 | refs/heads/master | 2021-01-24T22:02:19.825321 | 2014-01-23T22:17:48 | 2014-01-23T22:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | from django.core.management.base import BaseCommand, CommandError
from timetable.models import Event, Modul, Source, Sportkurs, SportkursEvent
from timetable.parser import process
class Command(BaseCommand):
    """Management command that feeds every stored ``Source`` through the
    timetable parser and reports how many rows each table gained."""
    args = ''
    help = 'Parser all sources.'

    def handle(self, *args, **options):
        # Snapshot row counts before parsing so the deltas can be reported.
        kurse_before = Sportkurs.objects.count()
        sevents_before = SportkursEvent.objects.count()
        modul_before = Modul.objects.count()
        events_before = Event.objects.count()
        # Run the parser over every registered source.
        for src in Source.objects.all():
            process.process_source(src)
        # Compute how many new rows each table gained.
        kurse = Sportkurs.objects.count() - kurse_before
        sevents = SportkursEvent.objects.count() - sevents_before
        modul = Modul.objects.count() - modul_before
        events = Event.objects.count() - events_before
        self.stdout.write('Added ' + str(kurse) + ' Kurse.')
        self.stdout.write('Added ' + str(sevents) + ' Sporkurs events.')
        self.stdout.write('Added ' + str(modul) + ' Module.')
        self.stdout.write('Added ' + str(events) + ' Events.')
"jnphilipp@gmail.com"
] | jnphilipp@gmail.com |
c339690c353b42c303c4c6cbac2d545ed7d08393 | bf574334a21f94fd012acf8b8672e7981454c981 | /alert.py | b3f6f0ed5e5a2375389465407369816494a6cfbe | [] | no_license | The-Turing-Machine/Erectus | 27d1ef2f39478cf6d865900bbb5bb233bbd12504 | d54a131a82df706238642f47aa32bf5e653b02b5 | refs/heads/master | 2021-01-10T15:10:05.561764 | 2016-04-11T17:00:57 | 2016-04-11T17:00:57 | 55,841,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | import urllib
# NOTE: Python 2 script (print statement, urllib.urlencode/urlopen).
# Sends a one-off SMS alert via the Nexmo REST API.
# SECURITY NOTE(review): API key/secret and phone numbers are hardcoded in
# source; they should come from environment variables or a config file.
params = {
    'api_key': 'c59837ab',
    'api_secret': '4ff06601e5826199',
    'to': 917838968853,
    'from': 918588926652,
    'text': 'WARNING! Your area may experience Earthquake after shocks. Kindly move to the government setted relief camps for your saftey. Nearest Relief Camp : [77.227420,28.656295]'
}
# Nexmo accepts the message parameters as a URL-encoded query string.
url = 'https://rest.nexmo.com/sms/json?' + urllib.urlencode(params)
response = urllib.urlopen(url)
print response.read()
"ashishgupta.3197@gmail.com"
] | ashishgupta.3197@gmail.com |
3061d0751ab9b36b5cf5fa87cc94c7401bd2f4ab | a0bc8b2a66903d32550a620e679f644aefc629e1 | /app/views.py | e2502439034acc42d6fbf72329e109f182badf47 | [] | no_license | Saharsh904/razor_pay | 6a2ced740d5e34b5f3c1f764b2b8bc8173e4db68 | f5e818dac57ef07337a54bf843f02eb13017a626 | refs/heads/master | 2023-06-19T12:41:43.036927 | 2021-07-21T01:19:42 | 2021-07-21T01:19:42 | 387,865,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | from django.shortcuts import render
import razorpay
from .models import Donation
# Create your views here.
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
def home(request):
    """Render the donation page; on POST, create a Razorpay order and a
    matching ``Donation`` row, then re-render the page with the payment
    details so the client-side checkout can proceed.

    NOTE(review): amount is converted to paise (x100) as Razorpay expects.
    SECURITY NOTE(review): the Razorpay key/secret are hardcoded here; they
    should be read from Django settings or environment variables instead.
    """
    if request.method=="POST":
        name=request.POST.get("name")
        email=request.POST.get("email")
        # Razorpay amounts are in the smallest currency unit (paise).
        amount=int(request.POST.get("amount"))*100
        client=razorpay.Client(auth=("rzp_test_79DzZck4nL0Nmp","vJRxYu3ShkhyNhvevmqsVzAv"))
        payment=client.order.create({"amount":amount,"currency":"INR","payment_capture":'1'})
        print(payment)
        # Persist the pending donation keyed by the Razorpay order id so the
        # success callback can find and mark it as paid.
        donation=Donation(name=name,amount=amount,email=email,order_id=payment['id'])
        donation.save()
        return render(request,"index.html",{"payment":payment})
    return render(request,"index.html")
@csrf_exempt
def success(request):
    """Razorpay callback view: locate the Donation matching the posted
    ``razorpay_order_id``, flag it as paid, and render the success page.
    CSRF is exempted because the POST originates from Razorpay, not our form.
    """
    if request.method == "POST":
        posted = request.POST
        order_id = ''
        # Pull the order id out of the callback payload.
        for field, value in posted.items():
            if field == "razorpay_order_id":
                order_id = value
                break
        donation = Donation.objects.filter(order_id=order_id).first()
        donation.paid = True
        donation.save()
    return render(request, "success.html")
"kumarsaharsh440@gmail.com"
] | kumarsaharsh440@gmail.com |
3f2079b1e4c24c815959e7a54257986eb1c35628 | 82199bfad7b77d62aa265c8ea463e20df6901801 | /global_variables.py | 0349063285f925772377b500255d2fdee5a359ce | [] | no_license | hyzcn/interactive-behaviour-design | 6119f8685b91226916f06678735fcfea5e6c27ab | 26faa63f0d1494dedd7dd9c3757ab08ec6473119 | refs/heads/master | 2020-05-16T09:04:42.342957 | 2019-04-22T19:26:27 | 2019-04-22T19:38:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # ALE is generally safe to use from multiple threads, but we do need to be careful about
# two threads creating environments at the same time:
# https://github.com/mgbellemare/Arcade-Learning-Environment/issues/86
# Any thread which creates environments (which includes restoring from a reset state)
# should acquire this lock before attempting the creation.
# Module-level globals initialised to None; presumably assigned once at
# startup by the main entry point before worker threads run — TODO confirm.
env_creation_lock = None  # lock guarding ALE environment creation (see note above)
segment_save_mode = None  # policy for saving recorded segments — semantics set by caller
max_segs = None  # cap on the number of stored segments — semantics set by caller
"matthew.rahtz@gmail.com"
] | matthew.rahtz@gmail.com |
142ba9e50afd87b782e27f5d0b0ed1509ac430e6 | 3f2d72e80a44044fccaeaab7073d24d95e6c62f1 | /exercise1/reverse_complement.py | 380c090d3215432aa973391f850fa7c63217aaa2 | [
"CC-BY-4.0",
"MIT"
def complement(base, material = 'DNA'):
    """Return the Watson-Crick complement of a single base.

    Accepts upper- or lower-case input; always returns an upper-case base.
    'A' complements to 'T' for DNA and 'U' for RNA; 'T'/'U' -> 'A',
    'G' <-> 'C' regardless of material.

    Improvement over the original: unknown bases or materials previously fell
    through and silently returned None (causing a confusing TypeError later
    in callers that concatenate the result); now they raise ValueError.

    Args:
        base: single-character base ('A', 'T', 'U', 'G', 'C', any case).
        material: 'DNA' (default) or 'RNA'; only affects the complement of 'A'.

    Returns:
        The complementary base as an upper-case one-character string.

    Raises:
        ValueError: if ``base`` or ``material`` is not recognized.
    """
    if base in 'Aa':
        if material == 'DNA':
            return 'T'
        if material == 'RNA':
            return 'U'
        raise ValueError('unknown material: ' + repr(material))
    if base in 'UuTt':
        return 'A'
    if base in 'Gg':
        return 'C'
    if base in 'Cc':
        return 'G'
    raise ValueError('unknown base: ' + repr(base))
def reverse_complement(seq, material = 'DNA'):
    """Return the reverse complement of ``seq``.

    Bug fix: the ``material`` argument was previously accepted but never
    passed through to ``complement``, so requesting an RNA reverse
    complement silently produced DNA output. It is now forwarded.

    Args:
        seq: base string (any case).
        material: 'DNA' (default) or 'RNA'; forwarded to ``complement``.

    Returns:
        The reverse-complement sequence, upper-case.
    """
    rev_complement = ''
    # Walk the sequence back-to-front, complementing each base.
    for base in seq[::-1]:
        rev_complement += complement(base, material)
    return rev_complement
def display(seq):
    """Pretty-print ``seq`` above its reverse complement, with a row of
    vertical bars marking the base pairing between the two strands."""
    print(seq.upper())
    # One pairing bar per base, followed by a newline.
    print('|' * len(seq))
    print(reverse_complement(seq))
"glennli@mit.edu"
] | glennli@mit.edu |
8062794caa0e591317615274788b655fd21e5d0b | 002add10dd206a38482d8641dd0df3117316f5dd | /migrationlib/os/utils/restore/RestoreStateOpenStack.py | d72c9792252729c41179a3eb1de1a082d401be3a | [
"Apache-2.0"
] | permissive | asvechnikov/CloudFerry | 42ececf48b54f7c9c0779119de7214aaf5eef3b0 | 054fd818c5ac65c5b99c9d4cae6793f67aff0d65 | refs/heads/master | 2021-01-18T09:24:43.011471 | 2014-11-07T11:48:39 | 2014-11-07T11:48:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from RestoreState import RestoreState
from RestoreInstances import RestoreInstances
from RestoreImages import RestoreImages
from RestoreVolumes import RestoreVolumes
from Report import Report
__author__ = 'mirrorcoder'
class RestoreStateOpenStack(RestoreState):
    """Aggregate restore driver for OpenStack: runs each registered restore
    handler (instances, images, volumes by default) against a diff snapshot
    and merges their reports."""

    #: Handlers used when the caller does not supply ``list_subclass``.
    DEFAULT_SUBCLASSES = (RestoreInstances, RestoreImages, RestoreVolumes)

    def __init__(self, cloud, list_subclass=None):
        # Bug fix: the default was previously a mutable list literal shared
        # across every instance; use a None sentinel and build a fresh list.
        if list_subclass is None:
            list_subclass = list(self.DEFAULT_SUBCLASSES)
        super(RestoreStateOpenStack, self).__init__(cloud, list_subclass)

    def restore(self, diff_snapshot):
        """Run every handler's restore over ``diff_snapshot`` and return the
        union of their reports.

        NOTE(review): ``restore`` is invoked on the handler *classes*
        themselves (not instances), mirroring the original code — confirm the
        handlers expose a class-level ``restore``.
        """
        report = Report()
        for report_class in self.list_subclass:
            report.union(report_class.restore(diff_snapshot))
        return report
"dsozinov@mirantis.com"
] | dsozinov@mirantis.com |
e79e06cdb3c892d0335ad8066cd1086df0be5530 | 72dbfd37a43199c4a74cb7696ea2b28dbc3c4702 | /tutorial/tutorial/items.py | 26592086ae71388b4b096975cf9c3fe07f1c9ba9 | [] | no_license | denmouse/smtcrawel | da0ef41627e745ac8d48c5a6172f79f7fbd31a9e | d220aaa89f5a450f51cf6d951d68bbb4ad3e60a2 | refs/heads/master | 2021-01-12T01:02:42.053518 | 2017-08-18T23:08:11 | 2017-08-18T23:08:11 | 78,336,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item, Field
class TutorialItem(scrapy.Item):
    """Scraped movie record: title, synopsis/info line, star rating and the
    featured quote for a single film entry."""
    title = scrapy.Field()
    movieInfo = scrapy.Field()
    star = scrapy.Field()
    quote = scrapy.Field()
| [
"root@li82-254.members.linode.com"
] | root@li82-254.members.linode.com |
72d0ee4f03607613f6c8d8670c52490c128c5169 | d4165cff4f3f009420749b85197431bfc1df5922 | /frontend/frontendfactory.py | 9fda688f23638e3473da419242b730859d5077d7 | [] | no_license | smuniz/point-source | 16e39a3aed4e3b2570177e7c21397d56e45b2675 | a35a25cdb810b7e86457cb74d0641df23bdfdac2 | refs/heads/master | 2021-03-27T16:37:46.102970 | 2017-03-15T14:10:29 | 2017-03-15T14:10:29 | 24,759,005 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | #
# Copyright (c) 2017 Sebastian Muniz
#
# This code is part of point source decompiler
#
from misc.factory import Factory, FactoryException
import frontend_x86
reload(frontend_x86)
from frontend_x86 import FrontEndX86, FrontEndX86Exception
import frontend_x86_64
reload(frontend_x86_64)
from frontend_x86_64 import FrontEndX86_64, FrontEndX86_64Exception
import frontend_arm
reload(frontend_arm)
from frontend_arm import FrontEndArm, FrontEndArmException
import frontend_aarch64
reload(frontend_aarch64)
from frontend_aarch64 import FrontEndAArch64, FrontEndAArch64Exception
import frontend_powerpc
reload(frontend_powerpc)
from frontend_powerpc import FrontEndPowerPc, FrontEndPowerPcException
import frontend_mips
reload(frontend_mips)
from frontend_mips import FrontEndMips, FrontEndMipsException
__all__ = ["FrontEndFactory", "FrontEndFactoryException"]
class FrontEndFactoryException(FactoryException):
    """Base exception type raised by the front-end factory."""
class FrontEndFactory(Factory):
    """
    Factory for different front-ends to support multiple decompilable
    architectures.
    """

    def __init__(self):
        """Register every known architecture front-end for later creation.

        To support a new architecture, add an entry to the table below
        (in addition to adding debugger-side support).
        """
        supported_front_ends = (
            ("x86", FrontEndX86),
            ("x86_64", FrontEndX86_64),
            ("ARM", FrontEndArm),
            ("AArch64", FrontEndAArch64),
            ("MIPS", FrontEndMips),
            ("PowerPC", FrontEndPowerPc),
        )
        for arch_name, front_end_cls in supported_front_ends:
            self.register("create_%s" % arch_name, front_end_cls)
| [
"sebastianmuniz@gmail.com"
] | sebastianmuniz@gmail.com |
ed1ac5ec2a73929c3a74c239f69b4b514bdcdec1 | 3e594aedf182ccaa7958314fd8d7407b86526731 | /data.py | 1427da596745877804917eea37f6ad4234577bbc | [] | no_license | agrimwood/multi_task | b83c8d8a016ba7e593a9115891ad9b1ec55c23dd | c2b97eaebfe8380f332bd567aa83b32c0038c3e2 | refs/heads/master | 2023-02-20T23:16:57.369863 | 2021-01-25T18:20:46 | 2021-01-25T18:20:46 | 305,517,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,362 | py | """ Data loader for LTSM-net """
import datetime
import pandas as pd
from sklearn.utils import class_weight
import os
from tensorflow.keras.preprocessing import image as krs
from tensorflow.keras import applications as mn
from PIL import Image , ImageOps
import random
import numpy as np
# write user-defined parameters to file
#with open(os.path.join(log_dir, 'params.txt'), 'w') as f:
# print(args, file=f)
#with open(os.path.join(log_dir, 'params.txt'), 'a') as f:
# print(os.path.basename(sys.argv[0]), file=f)
def us_generator(dataframe, img_path, msk_path, batch_size, imdimensions=(512,512)):
    """Infinite batch generator yielding 5-frame ultrasound sequences, probe
    rotation features, classification labels, segmentation masks and
    per-sample class weights for a multi-task LSTM model.

    NOTE(review): PIL's ``rotate``/``transform`` return *new* images rather
    than mutating in place; the results below are discarded, so the rotation
    and shift augmentations are silently no-ops — confirm and reassign.
    NOTE(review): ``random.randint(-100, -100)`` always yields -100; by
    symmetry with ``rnd_y`` the intended range was probably (-100, 100).
    """
    # for each sample, list image name and calculate class weights
    images_list = dataframe.index.tolist()
    #pths_list = dataframe.pths.tolist()
    pos_list = dataframe.position.tolist()
    dir_list = dataframe.direction.tolist()
    # Balanced per-sample weights to compensate for class imbalance.
    pos_weight = class_weight.compute_sample_weight('balanced', pos_list)
    dir_weight = class_weight.compute_sample_weight('balanced', dir_list)
    # perform initial shuffle; identical seeds keep samples/weights aligned
    random.Random(30).shuffle(images_list)
    random.Random(30).shuffle(pos_weight)
    random.Random(30).shuffle(dir_weight)
    i = 0
    while True:
        # preallocate output for this batch
        batch_x = {'imagesS': [], 'rotsS': []}
        batch_y = {'prostate_out': [], 'direction_out': [], 'segment_out': []}
        batch_w = {'prostate_out': [], 'direction_out': []}
        # loop through each sample (b) assigned to this batch
        b = 0
        while b < batch_size:
            # refresh shuffle once the whole dataset has been consumed
            if i == len(images_list):
                i = 0
                random.Random(42).shuffle(images_list)
                random.Random(42).shuffle(pos_weight)
                random.Random(42).shuffle(dir_weight)
            # Only frames with index > 9 have 9 predecessors available for a
            # 5-frame, stride-2 history window (filename ends in a 3-digit
            # frame counter).
            if int(images_list[i][-7:-4]) > 9:
                filelist = [os.path.join(img_path, images_list[i][:-7]+str(int(images_list[i][-7:-4])-9+n).zfill(3) + images_list[i][-4:]) for n in range(0,10,2)]
                masklist = [os.path.join(msk_path, 'msk_'+images_list[i][:-7]+str(int(images_list[i][-7:-4])-9+n).zfill(3) + images_list[i][-4:]) for n in range(0,10,2)]
                ixlist = [images_list[i][:-7]+str(int(images_list[i][-7:-4])-9+n).zfill(3) + images_list[i][-4:] for n in range(0,10,2)]
                # augmentation parameters (shared across the whole sequence)
                rnd_angle = random.randint(-25, 25)
                #rnd_lr = random.randint(0, 1)
                #rnd_ud = random.randint(0, 1)
                # NOTE(review): degenerate range — always -100 (see docstring)
                rnd_x = random.randint(-100, -100)
                rnd_y = random.randint(-100, 100)
                # preassign probe angle vector list
                rotfeatures=[]
                # load 5 images in sequence
                for n in range(5):
                    # augment images
                    # NOTE(review): return values of rotate/transform are
                    # discarded — augmentation has no effect (see docstring)
                    image1 = krs.load_img(filelist[n],color_mode='rgb', target_size=imdimensions)
                    image1.rotate(rnd_angle)
                    image1.transform(image1.size, Image.AFFINE,(1,0,rnd_x,0,1,rnd_y))
                    # augment masks (only the final frame's mask is used)
                    if n==4:
                        mask1 = krs.load_img(masklist[n],color_mode='grayscale', target_size=imdimensions)
                        mask1.rotate(rnd_angle)
                        mask1.transform(mask1.size, Image.AFFINE,(1,0,rnd_x,0,1,rnd_y))
                        # ensure correct range and scaling: binarise to {0,1},
                        # then shift to {1,2} label values
                        mask1 = krs.img_to_array(mask1)
                        mask1 = np.clip(mask1,0,1)
                        mask=mask1+1
                    image1 = krs.img_to_array(image1)
                    image1 = mn.mobilenet_v2.preprocess_input(image1) # ensure scaling is appropriate to model
                    # stack the 5 frames along a new leading (time) axis
                    if n == 0:
                        image = np.expand_dims(image1, axis=0)
                    else:
                        image1 = np.expand_dims(image1, axis=0)
                        image = np.concatenate((image, image1))
                    # assign probe angle vector to sequence
                    csv_row = dataframe.loc[ixlist[n], :]
                    rotfeatures.append([csv_row['rot_si'], csv_row['rot_ap'], csv_row['rot_lr']])
                # embed probe vectors into array of equal size to images (necessary for TimeDistributed model wrapper)
                csv_features = np.array(rotfeatures)
                rsz_features = np.concatenate((csv_features, abs(csv_features)), axis=-1)
                # record sequence-level labels from csv (keyed by final frame)
                csv_row = dataframe.loc[images_list[i], :]
                labelPos = np.array([csv_row['outside'], csv_row['periphery'], csv_row['centre']])
                labelDir = np.array([csv_row['left'], csv_row['stop'], csv_row['right']])
                # record class weights to balance class sizes
                wt_pos = pos_weight[i]
                wt_dir = dir_weight[i]
                # append each record to the batch
                batch_x['imagesS'].append(image)
                batch_x['rotsS'].append(rsz_features)
                batch_y['prostate_out'].append(labelPos)
                batch_y['direction_out'].append(labelDir)
                batch_y['segment_out'].append(mask)
                batch_w['prostate_out'].append(wt_pos)
                batch_w['direction_out'].append(wt_dir)
                i += 1
                b += 1
            else:
                # not enough history for this frame: skip it
                i += 1
        batch_x['imagesS'] = np.array(batch_x['imagesS'])
        batch_x['rotsS'] = np.array(batch_x['rotsS'])
        batch_y['prostate_out'] = np.array(batch_y['prostate_out'])
        batch_y['direction_out'] = np.array(batch_y['direction_out'])
        batch_y['segment_out'] = np.array(batch_y['segment_out'])
        batch_w['prostate_out'] = np.array(batch_w['prostate_out'])
        batch_w['direction_out'] = np.array(batch_w['direction_out'])
        yield(batch_x, batch_y, batch_w)
##### non-sequential generator
def us_single(dataframe, img_path, msk_path, batch_size, imdimensions=(640,480)):
    """Infinite batch generator yielding single (non-sequential) ultrasound
    frames, probe rotation features, classification labels, segmentation
    masks and per-sample class weights.

    Fixes over the original:
    - PIL's ``rotate``/``transform`` return new images rather than mutating
      in place; the results were discarded, so the augmentations were
      silently no-ops. They are now reassigned (for both image and mask,
      keeping the two registered).
    - ``rnd_x = random.randint(-100, -100)`` always produced -100; it now
      mirrors the symmetric y-shift range (-100, 100).
    - An orphaned ``else: i += 1`` (with no matching ``if``) was removed.

    Args:
        dataframe: pandas DataFrame indexed by image filename, with rotation,
            position and direction columns read below.
        img_path, msk_path: directories holding images and 'msk_'-prefixed masks.
        batch_size: samples per yielded batch.
        imdimensions: target (width, height) for loaded images.

    Yields:
        (batch_x, batch_y, batch_w) dicts matching the multi-task model heads.
    """
    # For each sample, list image names and compute balanced class weights.
    images_list = dataframe.index.tolist()
    pos_list = dataframe.position.tolist()
    dir_list = dataframe.direction.tolist()
    pos_weight = class_weight.compute_sample_weight('balanced', pos_list)
    dir_weight = class_weight.compute_sample_weight('balanced', dir_list)
    # Initial deterministic shuffle; identical seeds keep samples/weights aligned.
    random.Random(30).shuffle(images_list)
    random.Random(30).shuffle(pos_weight)
    random.Random(30).shuffle(dir_weight)
    i = 0
    while True:
        # Preallocate output containers for this batch.
        batch_x = {'images': [], 'rots': []}
        batch_y = {'prostate_out': [], 'direction_out': [], 'segment_out': []}
        batch_w = {'prostate_out': [], 'direction_out': []}
        b = 0
        while b < batch_size:
            # Reshuffle once the whole dataset has been consumed.
            if i == len(images_list):
                i = 0
                random.Random(42).shuffle(images_list)
                random.Random(42).shuffle(pos_weight)
                random.Random(42).shuffle(dir_weight)
            filelist = os.path.join(img_path, images_list[i])
            masklist = os.path.join(msk_path, 'msk_'+images_list[i])
            ixlist = images_list[i]
            # Augmentation parameters.
            rnd_angle = random.randint(-25, 25)
            # Fix: was randint(-100, -100) (always -100); mirror the y range.
            rnd_x = random.randint(-100, 100)
            rnd_y = random.randint(-100, 100)
            # Augment the image. Fix: rotate/transform return new images, so
            # the results must be reassigned or the augmentation is lost.
            image1 = krs.load_img(filelist, color_mode='rgb', target_size=imdimensions)
            image1 = image1.rotate(rnd_angle)
            image1 = image1.transform(image1.size, Image.AFFINE, (1, 0, rnd_x, 0, 1, rnd_y))
            # Augment the mask with the same parameters so it stays registered.
            mask1 = krs.load_img(masklist, color_mode='grayscale', target_size=imdimensions)
            mask1 = mask1.rotate(rnd_angle)
            mask1 = mask1.transform(mask1.size, Image.AFFINE, (1, 0, rnd_x, 0, 1, rnd_y))
            # Binarise the mask to {0,1}, then shift labels to {1,2}.
            mask1 = krs.img_to_array(mask1)
            mask1 = np.clip(mask1, 0, 1)
            mask = mask1 + 1
            image1 = krs.img_to_array(image1)
            image = mn.mobilenet_v2.preprocess_input(image1)  # scale for MobileNetV2
            # Probe rotation features; append absolute values as extra channels.
            csv_row = dataframe.loc[ixlist, :]
            csv_features = np.array([csv_row['rot_si'], csv_row['rot_ap'], csv_row['rot_lr']])
            rsz_features = np.concatenate((csv_features, abs(csv_features)), axis=-1)
            # Frame-level classification labels from the csv.
            labelPos = np.array([csv_row['outside'], csv_row['periphery'], csv_row['centre']])
            labelDir = np.array([csv_row['left'], csv_row['stop'], csv_row['right']])
            # Class weights balancing class sizes.
            wt_pos = pos_weight[i]
            wt_dir = dir_weight[i]
            # Append this sample to the batch.
            batch_x['images'].append(image)
            batch_x['rots'].append(rsz_features)
            batch_y['prostate_out'].append(labelPos)
            batch_y['direction_out'].append(labelDir)
            batch_y['segment_out'].append(mask)
            batch_w['prostate_out'].append(wt_pos)
            batch_w['direction_out'].append(wt_dir)
            i += 1
            b += 1
        batch_x['images'] = np.array(batch_x['images'])
        batch_x['rots'] = np.array(batch_x['rots'])
        batch_y['prostate_out'] = np.array(batch_y['prostate_out'])
        batch_y['direction_out'] = np.array(batch_y['direction_out'])
        batch_y['segment_out'] = np.array(batch_y['segment_out'])
        batch_w['prostate_out'] = np.array(batch_w['prostate_out'])
        batch_w['direction_out'] = np.array(batch_w['direction_out'])
        yield(batch_x, batch_y, batch_w)
| [
"alexander.grimwood@ucl.ac.uk"
] | alexander.grimwood@ucl.ac.uk |
3db39a881966fe91978f0466727d388dbb672d3a | 7d006c958af758436343d6650d1047f3e6ef5240 | /crowmountain/crowmountain/settings.py | aa56377d446ec3d85d9b138c8588008c611f19c7 | [] | no_license | SarahBethCox/CrowMntFire | 012186ae94b5ec118acae68d800a42c086d7a350 | a147a23a2b096897a469173da99db8cd350f2fb8 | refs/heads/master | 2021-03-09T20:07:55.340303 | 2020-05-04T13:48:48 | 2020-05-04T13:48:48 | 246,375,369 | 0 | 7 | null | 2020-05-04T13:32:17 | 2020-03-10T18:17:07 | Python | UTF-8 | Python | false | false | 3,223 | py | """
Django settings for crowmountain project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed in source; move to an environment variable
# before deploying.
SECRET_KEY = '&1&j+60*2kcu$r2ba%w=_(vcyvhb+4e$8)y3(oqzarx+04-0f4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project app
    'firedepartment',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'crowmountain.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # project-level templates directory in addition to per-app templates
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'crowmountain.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Default development database: file-backed SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS =[
    os.path.join(BASE_DIR, 'static/')
]
"scox20@atu.edu"
] | scox20@atu.edu |
be5db0eeae5cf26bdc23b243a94011090ecce1d9 | 7a851e802f5021bd4c7dd5440e429d60c26649b1 | /pyworks/mp2.py | f5af2222a64533be4ae53fcb6eef07cd2b736899 | [] | no_license | kumak127/Python-introducing | 3c692d629402dc72417d12dae4068b85bd2dd15a | 73cd887f950cd91160b5e1dae9f61cca3c593b10 | refs/heads/master | 2022-12-28T12:27:46.379814 | 2020-09-29T10:00:03 | 2020-09-29T10:00:03 | 299,574,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # python3
# mp2.py - multiprocess.terminate関数のテスト用
import multiprocessing, time, os
def whoami(name):
    """Print the caller's label together with the current process id."""
    print("i'm {}, in process {}".format(name, os.getpid()))
def loopy(name):
    """Announce this process, then print a honk once per second for
    iterations 1 through 99 (used as a long-running child-process body)."""
    whoami(name)
    first, last = 1, 100
    for num in range(first, last):
        print("\tNumber {} of {}. Honk!".format(num, last))
        time.sleep(1)
if __name__ == "__main__":
whoami("main")
p = multiprocessing.Process(target=loopy, args=("loopy",))
p.start()
time.sleep(5)
p.terminate() | [
"noreply@github.com"
] | kumak127.noreply@github.com |
9b43ee53672fb7b8aa059524c4d04d2b92fd2289 | 689a557b32161faafeb0b68076bca96b65c320ce | /restourant/migrations/0003_auto_20170726_1525.py | 3156e7bad15655147d6acc6853903542146c11b9 | [] | no_license | FNSalimov/new | 5d957a5e2543bcecece2fa88e4ff61030eb58203 | e2b15e5e83dbc22d776112fc5859219d7f625e4f | refs/heads/master | 2021-01-01T18:36:54.171096 | 2017-07-27T06:27:24 | 2017-07-27T06:27:24 | 98,386,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-07-26 12:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: replaces the explicit OrderDish
    # through-model with a plain many-to-many relation from Order to Dish.
    dependencies = [
        ('restourant', '0002_order_orderdish'),
    ]
    operations = [
        # Drop both foreign keys of the through-model before deleting it.
        migrations.RemoveField(
            model_name='orderdish',
            name='dish',
        ),
        migrations.RemoveField(
            model_name='orderdish',
            name='order',
        ),
        # Add the direct M2M field that supersedes OrderDish.
        migrations.AddField(
            model_name='order',
            name='dishes',
            field=models.ManyToManyField(to='restourant.Dish'),
        ),
        migrations.DeleteModel(
            name='OrderDish',
        ),
    ]
| [
"you@example.com"
] | you@example.com |
1fa7e9fa75d8b459cfa080a0d6e94a8b934b495e | 25f4fb575faa264f39f4044c18012a375e7bafc0 | /intent_handlers/get_intents.py | 3292a40d2b1607bce2481f5b7750fda3da4ac204 | [] | no_license | Kabilesh93/Chatbot | f3d4a400e15dfb544a93815774c5497089bd7335 | 8656f0073bb091bd2c5ce2dc619be8aa52042542 | refs/heads/master | 2021-07-16T19:20:39.472436 | 2019-05-02T06:22:13 | 2019-05-02T06:22:13 | 179,050,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,831 | py | import fasttext
def get_intent_en(sentence):
    """Classify an English sentence with a two-stage fastText pipeline.

    Stage 1 predicts a root category; stage 2 runs the category-specific
    model to obtain the fine-grained intent and its probability.

    Bug fix: the original elif chain contained two ``root_intent == 'pln'``
    branches, and the first (the only reachable one) dispatched to the
    *mobile* model, so 'pln' sentences were classified with the wrong model.
    The chain is replaced by a dispatch table that maps 'pln' to the plan
    model; it also avoids loading all twelve sub-models on every call —
    only the root model plus the single sub-model needed are loaded.

    Args:
        sentence: English input sentence to classify.

    Returns:
        (intent, probability) from the matching sub-model, or ('', '') when
        the root label is unrecognized (preserving the original fallback).
    """
    model_dir = './intent_models/english_intents/'
    root_english = fasttext.load_model(model_dir + 'root_english.bin', label_prefix='__label__')
    # Root label -> second-stage model file. 'mbb' and 'mob' intentionally
    # share the mobile model, matching the original branch chain.
    sub_model_files = {
        'all': 'english_all.bin',
        'cpl': 'english_cpl.bin',
        'dtv': 'english_dtv.bin',
        'hbb': 'english_hbb.bin',
        'mbb': 'english_mob.bin',
        'mob': 'english_mob.bin',
        'pln': 'english_pln.bin',  # fixed: previously dispatched to english_mob
        'pst': 'english_pst.bin',
        'rmg': 'english_rmg.bin',
        'udf': 'english_udf.bin',
        'user': 'english_user.bin',
        'other': 'english_other.bin',
    }
    root_label = root_english.predict_proba([sentence], k=1)
    root_intent = root_label[0][0][0]
    model_file = sub_model_files.get(root_intent)
    if model_file is None:
        # Unknown root label: keep the original empty-string fallback.
        return '', ''
    sub_model = fasttext.load_model(model_dir + model_file, label_prefix='__label__')
    lable = sub_model.predict_proba([sentence], k=1)
    return lable[0][0][0], lable[0][0][1]
def get_intent_si(sentence):
    """Classify *sentence* into a Sinhala sub-intent.

    A root fasttext model picks the broad category first; the matching
    category-specific model then predicts the final intent.

    Returns:
        (intent, probability) from the sub-model, or ('', '') when the
        root prediction has no registered sub-model.
    """
    base = './intent_models/sinhala_intents/'
    root_sinhala = fasttext.load_model(base + 'root_sinhala.bin', label_prefix='__label__')
    # One sub-model per root category; 'mbb' is served by the 'mob' model,
    # matching the original branch table.
    categories = ['all', 'cpl', 'dtv', 'hbb', 'mob', 'pln', 'pst', 'rmg', 'udf', 'user', 'other']
    sub_models = {name: fasttext.load_model(base + 'sinhala_{}.bin'.format(name), label_prefix='__label__')
                  for name in categories}
    sub_models['mbb'] = sub_models['mob']

    def _report(model, test_path):
        # Diagnostic precision/recall printout kept from the original code.
        test_result = model.test(test_path)
        print('P@1:', test_result.precision)
        print('R@1:', test_result.recall)
        print('Number of examples:', test_result.nexamples)

    _report(root_sinhala, './data/test/test_sinhala/test_root.txt')
    root_label = root_sinhala.predict_proba([sentence], k=1)
    root_intent = root_label[0][0][0]
    # The original also printed test metrics for the 'all' and 'dtv' branches.
    if root_intent == 'all':
        _report(sub_models['all'], './data/test/test_sinhala/test_all.txt')
    elif root_intent == 'dtv':
        _report(sub_models['dtv'], './data/test/test_sinhala/test_dtv.txt')
    model = sub_models.get(root_intent)
    if model is None:
        # Unknown root intent: preserve the original fall-through result.
        return '', ''
    # Bug fix: the original had two ``elif root_intent == 'pln'`` branches; the
    # first (reachable) one mistakenly used the 'mob' model, so 'pln'
    # sentences were classified by the wrong sub-model. The table above routes
    # 'pln' to the pln model.
    label = model.predict_proba([sentence], k=1)
    return label[0][0][0], label[0][0][1]
def get_intent_ta(sentence):
    """Classify *sentence* into a Tamil sub-intent.

    A root fasttext model picks the broad category first; the matching
    category-specific model then predicts the final intent.

    Returns:
        (intent, probability) from the sub-model, or ('', '') when the
        root prediction has no registered sub-model.
    """
    base = './intent_models/tamil_intents/'
    root_tamil = fasttext.load_model(base + 'root_tamil.bin', label_prefix='__label__')
    # One sub-model per root category; 'mbb' is served by the 'mob' model,
    # matching the original branch table.
    categories = ['all', 'cpl', 'dtv', 'hbb', 'mob', 'pln', 'pst', 'rmg', 'udf', 'user', 'other']
    sub_models = {name: fasttext.load_model(base + 'tamil_{}.bin'.format(name), label_prefix='__label__')
                  for name in categories}
    sub_models['mbb'] = sub_models['mob']
    root_intent = root_tamil.predict_proba([sentence], k=1)[0][0][0]
    model = sub_models.get(root_intent)
    if model is None:
        # Unknown root intent: preserve the original fall-through result.
        return '', ''
    # Bug fix: the original had two ``elif root_intent == 'pln'`` branches; the
    # first (reachable) one mistakenly used the 'mob' model, so 'pln'
    # sentences were classified by the wrong sub-model.
    label = model.predict_proba([sentence], k=1)
    return label[0][0][0], label[0][0][1]
def get_intent_te(sentence, te_intent_model):
    """Predict the intent of *sentence* with the supplied Telugu model.

    Returns the top (intent, probability) pair from the model's k=1
    prediction.
    """
    prediction = te_intent_model.predict_proba([sentence], k=1)
    top = prediction[0][0]
    return top[0], top[1]
| [
"kumar.kabilesh93@gmail.com"
] | kumar.kabilesh93@gmail.com |
8519508e4603dd2e130c752354be03bd1e5116b5 | 6cb32cc2ee3ced7ea1a710283633d2cd76c42232 | /commercialoperator/components/organisations/emails.py | cc8911455d416c0462204f47e0455ba75fcf812b | [
"Apache-2.0"
] | permissive | dbca-wa/commercialoperator | 913889973066a5e8bd399835cfbaf948af4ea596 | e29306b1c6213f0f37a6a190e439745965ee3e32 | refs/heads/master | 2023-08-31T01:19:06.803451 | 2023-07-28T01:40:38 | 2023-07-28T01:40:38 | 239,469,350 | 0 | 10 | NOASSERTION | 2023-09-07T07:25:58 | 2020-02-10T09:07:54 | Python | UTF-8 | Python | false | false | 16,300 | py | import logging
from django.core.mail import EmailMultiAlternatives, EmailMessage
from django.utils.encoding import smart_text
from django.core.urlresolvers import reverse
from django.conf import settings
from commercialoperator.components.emails.emails import TemplateEmailBase
# Module-level logger and the display name used as the fallback "from"
# identity when an email is logged without an explicit sender.
logger = logging.getLogger(__name__)
SYSTEM_NAME = settings.SYSTEM_NAME_SHORT + ' Automated Message'
class OrganisationRequestAcceptNotificationEmail(TemplateEmailBase):
    """Sent to the requester when their organisation request is accepted."""
    subject = 'Your organisation request has been accepted.'
    html_template = 'commercialoperator/emails/organisation_request_accept_notification.html'
    txt_template = 'commercialoperator/emails/organisation_request_accept_notification.txt'
class OrganisationAccessGroupRequestAcceptNotificationEmail(TemplateEmailBase):
    """Sent to the access group when a new organisation request is submitted."""
    subject = 'New organisation request has been submitted.'
    html_template = 'commercialoperator/emails/org_access_group_request_accept_notification.html'
    txt_template = 'commercialoperator/emails/org_access_group_request_accept_notification.txt'
class OrganisationRequestNotificationEmail(TemplateEmailBase):
    """Sent to internal staff when an organisation request needs approval."""
    subject = 'An organisation request has been submitted for approval'
    html_template = 'commercialoperator/emails/organisation_request_notification.html'
    txt_template = 'commercialoperator/emails/organisation_request_notification.txt'
class OrganisationRequestDeclineNotificationEmail(TemplateEmailBase):
    """Sent to the requester when their organisation request is declined."""
    subject = 'Your organisation request has been declined.'
    html_template = 'commercialoperator/emails/organisation_request_decline_notification.html'
    txt_template = 'commercialoperator/emails/organisation_request_decline_notification.txt'
class OrganisationLinkNotificationEmail(TemplateEmailBase):
    """Confirms to a user that their account was linked to an organisation."""
    subject = '{} - Confirmation - Account linked.'.format(settings.DEP_NAME)
    html_template = 'commercialoperator/emails/organisation_link_notification.html'
    txt_template = 'commercialoperator/emails/organisation_link_notification.txt'
class OrganisationUnlinkNotificationEmail(TemplateEmailBase):
    """Tells a user they have been unlinked from an organisation."""
    subject = 'You have been unlinked from an organisation.'
    html_template = 'commercialoperator/emails/organisation_unlink_notification.html'
    txt_template = 'commercialoperator/emails/organisation_unlink_notification.txt'
class OrganisationContactAdminUserNotificationEmail(TemplateEmailBase):
    """Tells a contact they were granted the Company Admin role."""
    subject = 'You have been linked as Company Admin Role.'
    html_template = 'commercialoperator/emails/organisation_contact_admin_notification.html'
    txt_template = 'commercialoperator/emails/organisation_contact_admin_notification.txt'
class OrganisationContactUserNotificationEmail(TemplateEmailBase):
    """Tells a contact they were granted the Company User role."""
    subject = 'You have been linked as Company User Role.'
    html_template = 'commercialoperator/emails/organisation_contact_user_notification.html'
    txt_template = 'commercialoperator/emails/organisation_contact_user_notification.txt'
class OrganisationContactSuspendNotificationEmail(TemplateEmailBase):
    """Tells a contact they have been suspended as a company user."""
    subject = 'You have been suspended as Company User.'
    html_template = 'commercialoperator/emails/organisation_contact_suspend_notification.html'
    txt_template = 'commercialoperator/emails/organisation_contact_suspend_notification.txt'
class OrganisationContactReinstateNotificationEmail(TemplateEmailBase):
    """Tells a contact they have been reinstated as a company user."""
    subject = 'You have been Reinstated as Company User.'
    html_template = 'commercialoperator/emails/organisation_contact_reinstate_notification.html'
    txt_template = 'commercialoperator/emails/organisation_contact_reinstate_notification.txt'
class OrganisationContactDeclineNotificationEmail(TemplateEmailBase):
    """Tells a contact their organisation link request was declined."""
    subject = 'Your organisation link request has been declined.'
    html_template = 'commercialoperator/emails/organisation_contact_decline_notification.html'
    txt_template = 'commercialoperator/emails/organisation_contact_decline_notification.txt'
class OrganisationAddressUpdatedNotificationEmail(TemplateEmailBase):
    """Tells organisation admins that the organisation's address changed."""
    # Bug fix: the original wrote 'An organisation''s ...' — two adjacent
    # single-quoted strings that concatenate to "organisations" with no
    # apostrophe in the delivered subject line.
    subject = "An organisation's address has been updated"
    html_template = 'commercialoperator/emails/organisation_address_updated_notification.html'
    txt_template = 'commercialoperator/emails/organisation_address_updated_notification.txt'
class OrganisationIdUploadNotificationEmail(TemplateEmailBase):
    """Tells recipients that identification was uploaded for an organisation."""
    # Bug fix: the original wrote 'An organisation''s ...' — two adjacent
    # single-quoted strings that concatenate to "organisations" with no
    # apostrophe in the delivered subject line.
    subject = "An organisation's identification has been uploaded"
    html_template = 'commercialoperator/emails/organisation_id_upload_notification.html'
    txt_template = 'commercialoperator/emails/organisation_id_upload_notification.txt'
class OrganisationRequestLinkNotificationEmail(TemplateEmailBase):
    """Asks a contact to approve an organisation link request."""
    subject = 'An organisation request to be linked has been sent for approval'
    html_template = 'commercialoperator/emails/organisation_request_link_notification.html'
    txt_template = 'commercialoperator/emails/organisation_request_link_notification.txt'
def send_organisation_id_upload_email_notification(emails, organisation, org_contact, request):
    """Email *emails* that identification was uploaded for *organisation*,
    and log the message against the organisation with *org_contact* as the
    customer."""
    email = OrganisationIdUploadNotificationEmail()
    context = {
        'organisation': organisation
    }
    msg = email.send(emails, context=context)
    # Fall back to the configured default sender when no request user exists.
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_email(msg, organisation, org_contact, sender=sender)
def send_organisation_request_link_email_notification(
        org_request, request, contact):
    """Ask *contact* to approve linking a user to the requested organisation,
    sending them the external manage URL for the request."""
    email = OrganisationRequestLinkNotificationEmail()
    url = request.build_absolute_uri(
        '/external/organisations/manage/{}'.format(org_request.id))
    context = {
        'request': org_request,
        'url': url,
    }
    msg = email.send(contact, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    # NOTE(review): this logs via _log_org_email with an OrganisationRequest as
    # the ``organisation`` argument, unlike the sibling request notifications
    # which use _log_org_request_email — confirm whether that is intended.
    _log_org_email(msg, org_request, request.user, sender=sender)
def send_organisation_reinstate_email_notification(linked_user, linked_by, organisation, request):
    """Email *linked_user* that *linked_by* reinstated them as a company user."""
    notification = OrganisationContactReinstateNotificationEmail()
    context = {
        'user': linked_user,
        'linked_by': linked_by,
        'organisation': organisation,
    }
    # CC the organisation's own mailbox when one is recorded.
    all_ccs = [organisation.email] if organisation.email else []
    msg = notification.send(linked_user.email, cc=all_ccs, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_email(msg, organisation, linked_user, sender=sender)
def send_organisation_contact_suspend_email_notification(linked_user, linked_by, organisation, request):
    """Email *linked_user* that *linked_by* suspended them as a company user."""
    email = OrganisationContactSuspendNotificationEmail()
    context = {
        'user': linked_user,
        'linked_by': linked_by,
        'organisation': organisation
    }
    # CC the organisation's own mailbox when one is recorded.
    all_ccs = []
    if organisation.email:
        cc_list = organisation.email
        if cc_list:
            all_ccs = [cc_list]
    msg = email.send(linked_user.email, cc=all_ccs, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_email(msg, organisation, linked_user, sender=sender)
def send_organisation_contact_decline_email_notification(user_contact, deleted_by, organisation, request):
    """Email *user_contact* that *deleted_by* declined their link request."""
    email = OrganisationContactDeclineNotificationEmail()
    context = {
        'user': user_contact,
        'linked_by': deleted_by,
        'organisation': organisation
    }
    # CC the organisation's own mailbox when one is recorded.
    all_ccs = []
    if organisation.email:
        cc_list = organisation.email
        if cc_list:
            all_ccs = [cc_list]
    msg = email.send(user_contact.email, cc=all_ccs, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_email(msg, organisation, user_contact, sender=sender)
def send_organisation_contact_user_email_notification(linked_user, linked_by, organisation, request):
    """Email *linked_user* that *linked_by* gave them the Company User role."""
    notification = OrganisationContactUserNotificationEmail()
    context = {
        'user': linked_user,
        'linked_by': linked_by,
        'organisation': organisation,
    }
    # CC the organisation's own mailbox when one is recorded.
    all_ccs = [organisation.email] if organisation.email else []
    msg = notification.send(linked_user.email, cc=all_ccs, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_email(msg, organisation, linked_user, sender=sender)
def send_organisation_contact_adminuser_email_notification(linked_user, linked_by, organisation, request):
    """Email *linked_user* that *linked_by* gave them the Company Admin role."""
    email = OrganisationContactAdminUserNotificationEmail()
    context = {
        'user': linked_user,
        'linked_by': linked_by,
        'organisation': organisation
    }
    # CC the organisation's own mailbox when one is recorded.
    all_ccs = []
    if organisation.email:
        cc_list = organisation.email
        if cc_list:
            all_ccs = [cc_list]
    msg = email.send(linked_user.email, cc=all_ccs, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_email(msg, organisation, linked_user, sender=sender)
def send_organisation_link_email_notification(linked_user, linked_by, organisation, request):
    """Confirm to *linked_user* that their account was linked to *organisation*."""
    email = OrganisationLinkNotificationEmail()
    context = {
        'user': linked_user,
        'linked_by': linked_by,
        'organisation': organisation
    }
    # CC the organisation's own mailbox when one is recorded.
    all_ccs = []
    if organisation.email:
        cc_list = organisation.email
        if cc_list:
            all_ccs = [cc_list]
    msg = email.send(linked_user.email, cc=all_ccs, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_email(msg, organisation, linked_user, sender=sender)
def send_organisation_request_email_notification(org_request, request, contact):
    """Notify *contact* that an organisation request awaits internal approval."""
    email = OrganisationRequestNotificationEmail()
    url = request.build_absolute_uri('/internal/organisations/access/{}'.format(org_request.id))
    if "-internal" not in url:
        # Rewrite the link onto the internal host when the request came in on
        # the external site.
        url = "{0}://{1}{2}.{3}{4}".format(request.scheme, settings.SITE_PREFIX, '-internal', settings.SITE_DOMAIN,
                                           url.split(request.get_host())[1])
    context = {
        'request': request.data,
        'url': url,
    }
    msg = email.send(contact, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_request_email(msg, org_request, sender=sender)
def send_organisation_unlink_email_notification(unlinked_user, unlinked_by, organisation, request):
    """Email *unlinked_user* that *unlinked_by* removed them from *organisation*."""
    notification = OrganisationUnlinkNotificationEmail()
    context = {
        'user': unlinked_user,
        'unlinked_by': unlinked_by,
        'organisation': organisation,
    }
    # CC the organisation's own mailbox when one is recorded.
    all_ccs = [organisation.email] if organisation.email else []
    msg = notification.send(unlinked_user.email, cc=all_ccs, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_email(msg, organisation, unlinked_user, sender=sender)
def send_organisation_request_accept_email_notification(org_request, organisation, request):
    """Tell the requester their organisation request was approved.

    The message is recorded in both the request log and the organisation log.
    """
    email = OrganisationRequestAcceptNotificationEmail()
    context = {
        'request': org_request
    }
    msg = email.send(org_request.requester.email, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_request_email(msg, org_request, sender=sender)
    _log_org_email(msg, organisation, org_request.requester, sender=sender)
def send_org_access_group_request_accept_email_notification(org_request, request, recipient_list):
    """Notify the organisation-access group that a new request was submitted."""
    email = OrganisationAccessGroupRequestAcceptNotificationEmail()
    url = request.build_absolute_uri('/internal/organisations/access/{}'.format(org_request.id))
    if "-internal" not in url:
        # Point the link at the internal host when built from the external site.
        url = '-internal.{}'.format(settings.SITE_DOMAIN).join(url.split('.' + settings.SITE_DOMAIN))
    context = {
        'name': request.data.get('name'),
        'abn': request.data.get('abn'),
        'url': url,
    }
    msg = email.send(recipient_list, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_request_email(msg, org_request, sender=sender)
    # commenting out because Organisation does not yet exist - only OrganisationRequest exists
    #_log_org_email(msg, organisation, org_request.requester, sender=sender)
def send_organisation_request_decline_email_notification(org_request, request):
    """Tell the requester their organisation request was declined."""
    email = OrganisationRequestDeclineNotificationEmail()
    context = {
        'request': org_request
    }
    msg = email.send(org_request.requester.email, context=context)
    sender = request.user if request else settings.DEFAULT_FROM_EMAIL
    _log_org_request_email(msg, org_request, sender=sender)
    #_log_org_email(msg, organisation, org_request.requester, sender=sender)
def send_organisation_address_updated_email_notification(address_updated_by, ledger_organisation, wc_organisation, request):
    """Email every 'organisation_admin' contact of *wc_organisation* that the
    organisation's address was changed by *address_updated_by*."""
    from commercialoperator.components.organisations.models import OrganisationContact
    email = OrganisationAddressUpdatedNotificationEmail()
    context = {
        'address_updated_by': address_updated_by,
        'organisation': ledger_organisation
    }
    for org_contact in OrganisationContact.objects.filter(user_role='organisation_admin', organisation=wc_organisation):
        msg = email.send(org_contact.email, context=context)
        sender = request.user if request else settings.DEFAULT_FROM_EMAIL
        # NOTE(review): ``sender`` is computed but never used, and unlike the
        # other senders in this module no _log_org_email entry is created —
        # confirm whether a logging call was intended here.
def _log_org_request_email(email_message, request, sender=None):
    """Record an outgoing organisation-request email as an
    OrganisationRequestLogEntry.

    ``email_message`` may be a Django EmailMessage/EmailMultiAlternatives,
    whose subject/body/recipients are copied into the log entry; any other
    object is stringified and logged with the request's requester as the
    recipient. Returns the created log entry.
    """
    from commercialoperator.components.organisations.models import OrganisationRequestLogEntry
    if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
        # TODO this will log the plain text body, should we log the html instead
        text = email_message.body
        subject = email_message.subject
        fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
        # the to email is normally a list
        if isinstance(email_message.to, list):
            to = ','.join(email_message.to)
        else:
            to = smart_text(email_message.to)
        # we log the cc and bcc in the same cc field of the log entry as a ',' comma separated string
        all_ccs = []
        if email_message.cc:
            all_ccs += list(email_message.cc)
        if email_message.bcc:
            all_ccs += list(email_message.bcc)
        all_ccs = ','.join(all_ccs)
    else:
        # Fallback: treat the argument as plain message text.
        text = smart_text(email_message)
        subject = ''
        to = request.requester.email
        fromm = smart_text(sender) if sender else SYSTEM_NAME
        all_ccs = ''
    customer = request.requester
    staff = sender
    kwargs = {
        'subject': subject,
        'text': text,
        'request': request,
        'customer': customer,
        'staff': staff,
        'to': to,
        'fromm': fromm,
        'cc': all_ccs
    }
    email_entry = OrganisationRequestLogEntry.objects.create(**kwargs)
    return email_entry
def _log_org_email(email_message, organisation, customer, sender=None):
    """Record an outgoing organisation email as an OrganisationLogEntry.

    Args:
        email_message: Either a Django EmailMessage/EmailMultiAlternatives,
            whose subject/body/recipients are copied into the log entry, or
            any other object whose str() is logged as the message text.
        organisation: Organisation the entry is logged against.
        customer: The user the message was addressed to.
        sender: Optional staff user (or address) that triggered the email.

    Returns:
        The created OrganisationLogEntry.
    """
    from commercialoperator.components.organisations.models import OrganisationLogEntry
    if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
        # TODO this will log the plain text body, should we log the html instead
        text = email_message.body
        subject = email_message.subject
        fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
        # the to email is normally a list
        if isinstance(email_message.to, list):
            to = ','.join(email_message.to)
        else:
            to = smart_text(email_message.to)
        # we log the cc and bcc in the same cc field of the log entry as a ',' comma separated string
        all_ccs = []
        if email_message.cc:
            all_ccs += list(email_message.cc)
        if email_message.bcc:
            all_ccs += list(email_message.bcc)
        all_ccs = ','.join(all_ccs)
    else:
        text = smart_text(email_message)
        subject = ''
        # Bug fix: the original read ``request.requester.email`` here, but no
        # ``request`` name exists in this function's scope (it was copied from
        # _log_org_request_email), raising NameError whenever a plain string
        # message was logged. The recipient here is the customer.
        to = customer.email
        fromm = smart_text(sender) if sender else SYSTEM_NAME
        all_ccs = ''
    staff = sender
    kwargs = {
        'subject': subject,
        'text': text,
        'organisation': organisation,
        'customer': customer,
        'staff': staff,
        'to': to,
        'fromm': fromm,
        'cc': all_ccs
    }
    email_entry = OrganisationLogEntry.objects.create(**kwargs)
    return email_entry
| [
"asi@dpaw.wa.gov.au"
] | asi@dpaw.wa.gov.au |
6bf9cdfd3d12f9b6b70c53f90d4cc51c17ecdebb | b83bd5b9b0403223e6a1fbd370dbe74091835534 | /annealing_simulations/FlexWilliams7TL_DF2003_2019ffb_1024xC16_anneal_340-240-340K_100ns/HPC_input_gen_annealing.py | 7a723731800776ee3063b05f5f5596ef4687738d | [] | no_license | eboek/AlkaneStudy.Gromacs | e7403c2de9a16c7df3f7927952c773af8748346c | 88ac8d2248a8638f34ec5106bef0a549d99a3620 | refs/heads/master | 2020-09-03T16:49:12.200265 | 2019-11-01T14:40:29 | 2019-11-01T14:40:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,480 | py |
import sys, os
# Tell script directiory of HPC input gen module
gmxModDir = os.path.join(os.pardir, 'gromacs_hpc_input_gen')
sys.path.append(gmxModDir)
# Load default mdp dictionaries, simulation class
import default_mdp_dicts as mdp
from sim_class import SimGromacs, finalize_simulation
# Simulation inputs and output locations.
outputDir = os.getcwd()
currentCoords = '1024xC16-AA_6nsEq-FWL7-2019.gro'  # starting coordinates; updated after each run below
hpcHeader = os.path.join(gmxModDir, 'MMM_header_2016-3.sh')
mdrunCmd = 'gerun mdrun_mpi'
# Strings to replace in shell header
pbsVars = {'ncpus': '96', 'walltime': '48:00:00', 'budgetname': 'QMUL_SMOUKOV'}
# Set force field parameters
mdpFF = mdp.FlexWilliamsLincs
mdpFF['vdwtype'] = 'User' # Use tabulated potential
mdpFF['energygrps'] = 'C H'
mdpFF['energygrp-table'] = 'C H H H'
# Params for annealing (start from NPT)
mdp_anneal = dict(mdp.NPT)
mdp_anneal['annealing'] = 'single'
mdp_anneal['annealing-npoints'] = '2'
mdp_anneal['annealing-time'] = '0 50000' # ps
mdp_anneal['nsteps'] = '50000000' # 50ns - 1fs time step!
mdp_anneal['nstxout-compressed'] = '50000' # 1 frame = 0.1K, 1000 frames each half
# Simulated annealing: a cooling run (T_m+50 -> T_m-50) followed by a heating
# run (T_m-50 -> T_m+50). The two sections were previously copy-pasted; they
# are deduplicated into one loop with identical side-effect order. Each run
# gets its own HPC shell script, and the output coordinates of one run seed
# the next via currentCoords.
T_m = 290
anneal_schedules = [
    ('run_gromacs_cooling.sh', [T_m + 50, T_m - 50]),  # decreasing T
    ('run_gromacs_heating.sh', [T_m - 50, T_m + 50]),  # increasing T
]
for script_name, anneal_temps in anneal_schedules:
    shellFile = open(os.path.join(outputDir, script_name), 'w', newline='\n')
    # Write HPC header file (with job description), substituting PBS variables
    for line in open(hpcHeader):
        for var, rep in pbsVars.items():
            line = line.replace(var, rep)
        shellFile.write(line)
    mdp_anneal['annealing-temp'] = ' '.join(map(str, anneal_temps))
    newSim = SimGromacs([mdpFF, mdp_anneal], shellFile,
                        mdrun=mdrunCmd,
                        suffix='NPT_anneal_' + '-'.join(map(str, anneal_temps)) + 'K',
                        table='table.xvg',
                        indexFile='index.ndx',
                        coords=currentCoords)
    currentCoords = newSim.coordsOut
    finalize_simulation(newSim, shellFile, outputDir)
    shellFile.close()
| [
"stephen.burrows@hotmail.com"
] | stephen.burrows@hotmail.com |
11b133f2953ec9b1dd48a1bc46505c9513aa2dd5 | 4f399f904c1c751299cc60470ff675aba08d66c1 | /elice-algorithm/행성 조사.py | 183d9cce58f347ec45d18cbee5fff3eb9b98b331 | [] | no_license | rheehot/codingtest-2 | 677bf0ead976e1d9b66b36017a29057946316119 | cf9308f8445e5277dc91d06a93fa67d89074d6b1 | refs/heads/master | 2023-04-24T05:10:46.487001 | 2021-05-18T14:56:42 | 2021-05-18T14:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,065 | py | def solution(m,n,maps,a,b,c,d):
def solution(m, n, maps, a, b, c, d):
    """Count 'S' (land), 'O' (ocean) and 'R' (river) cells inside the
    1-indexed rectangle from top-left (a, b) to bottom-right (c, d),
    inclusive.

    Returns [land, ocean, river] counts in that fixed order.
    """
    counts = {'S': 0, 'O': 0, 'R': 0}
    for row in maps[a - 1:c]:
        for terrain in row[b - 1:d]:
            counts[terrain] += 1
    return [counts['S'], counts['O'], counts['R']]
if __name__ == '__main__':
    # Read the map size, the query count and the map rows from stdin, then
    # answer each query with space-separated S/O/R counts on its own line.
    m,n = map(int,input().split())
    k = int(input())
    maps = [input() for _ in range(m)]
    for i in range(k):
        a,b,c,d = list(map(int,input().split()))
        results = solution(m,n,maps,a,b,c,d)
        [print(result, end=' ') for result in results]
        print()
# [구현] 행성 조사
# [실행시간] O((c-a+1)(d-b+1))
# [해법]
# - 문제조건대로, 입력을 받는다.
# - 땅(S),바다(O),강(R)을 키로 하는 딕셔너리를 만들고, 주어진 (a,b) ~ (c,d) 좌표를 돌면서
# - 딕셔너리 키에 해당하는 값을 카운팅하는 방식
# 서기 21xx년. 황폐해진 지구에서 더 살 수 없게 된 코더랜드 주민들은 새로 거주할 행성을 찾기 위해 우주선을 보냈습니다.
# 이 막중한 책임을 지고 떠난 엘리스 토끼 호의 함장 엘리스 토끼는 마침내 지구인들이 거주할 수 있는 행성을 찾았습니다.
# 이 행성은 육지, 바다, 강으로 이루어져 있습니다. 엘리스 토끼 호의 대원들은 행성을 조사하여 지도로 만든 뒤 지구로 데이터를 보냈습니다.
# 엘리스 토끼 호가 보내온 지도는 가로 N cm, 세로 M cm 직사각형 모양입니다. 지도는 1cm 크기의 정사각형으로 나누어져 있고, 각 구역의 지형이 알파벳으로 표시되어 있습니다.
# 지형은 육지, 바다, 강 중 하나이며, 육지는 S, 바다는 O, 강은 R로 표시되어 있습니다.
# 지구에 있는 과학자들은 행성을 좀 더 자세히 조사할 조사 영역을 K개 만들었습니다.
# 예를 들어, 아래와 같이 주어졌을 경우
# 4 4
# 1
# OOOO
# OSRO
# ORSO
# OOOO
# 1 1 3 3
# 조사 영역에 육지 2개, 바다 5개, 강 2개가 포함됩니다.
# 조사 영역이 주어졌을 때, 각 영역에 육지, 바다, 강이 각각 몇 개씩 있는지 구하는 프로그램을 작성하세요.
# [입력]
# 첫째 줄에 지도의 크기 M과 N을 입력합니다.
# (2 ≤ M, N ≤ 100)
# 둘째 줄에 과학자들이 만든 조사 대상 영역의 개수 K를 입력합니다.
# (1 ≤ K ≤ 500)
# 셋째 줄부터 M개 줄에는 엘리스 토끼 호가 보낸 지도의 내용을 입력합니다.
# 다음 K개 줄에는 조사 대상 영역의 정보를 입력합니다. 정보는 네 정수 a b c d로 이루어져 있습니다.
# 구역은 직사각형 모양이며, 왼쪽 위 모서리의 좌표가 (a, b) 오른쪽 아래 모서리의 좌표가 (c, d)입니다.
# [출력]
# 각 조사 대상 영역에 포함되어 있는 육지, 바다, 강의 수를 공백으로 구분해 한 줄에 한 정보씩 출력합니다.
# [입력 예시]
# 4 4
# 1
# OOOO
# OSRO
# ORSO
# OOOO
# 1 1 3 3
# [출력 예시]
# 2 5 2
# [문제출처] 엘리스 AI 트랙
| [
"youngminieo1005@gmail.com"
] | youngminieo1005@gmail.com |
7263a68d87f21b4ea91d391b6d4f9ed8b297e855 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Clutter/TextPrivate.py | b4fbac1eab470075b4b2e86eeac89087a5a7d5ff | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,373 | py | # encoding: utf-8
# module gi.repository.Clutter
# from /usr/lib64/girepository-1.0/Clutter-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Atk as __gi_repository_Atk
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
class TextPrivate(__gi.Struct):
    """Auto-generated stub for Clutter's opaque TextPrivate struct.

    All methods below are introspection placeholders with unknown real
    signatures; they exist only so IDEs can resolve attributes.
    """
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.
        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass
    # Static attribute stubs; the generator records the real runtime values in
    # the trailing "(!)" comments.
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(TextPrivate), '__module__': 'gi.repository.Clutter', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'TextPrivate' objects>, '__weakref__': <attribute '__weakref__' of 'TextPrivate' objects>, '__doc__': None})"
    __gtype__ = None # (!) real value is '<GType void (4)>'
    __info__ = StructInfo(TextPrivate)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
e4379a99b14799daf5bc0222f50f423a0529080b | f9be9d0ff9e92eace1f983b9d526e0a72760d1a4 | /vending_machine.py | 534c26bde7179c6a8ad64cd9946daf860ad43901 | [] | no_license | jtuck15/test-driven-development-python | 71fa171e7f30199967f3521922710f4f5a817fb0 | 784fc19504ec0e037162e9122fafcd43fc4b15fa | refs/heads/master | 2020-04-23T16:48:49.798156 | 2019-02-19T22:24:56 | 2019-02-19T22:24:56 | 171,310,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | from byotest import *
usd_coins = [100, 50, 25, 10, 5, 1]
euro_coins = [100, 50, 20, 10, 5, 2, 1]
def get_change(amount, coins=euro_coins):
    """Make change greedily: take each denomination (largest first) as many
    times as it still fits in the remaining amount.

    Returns the list of coins used, in descending denomination order.
    """
    change = []
    remaining = amount
    for denomination in coins:
        while remaining >= denomination:
            change.append(denomination)
            remaining -= denomination
    return change
# Hand-rolled smoke tests (test_are_equal comes from the local byotest module).
# Single-coin amounts: each denomination should come back as exactly one coin.
test_are_equal(get_change(0), [])
test_are_equal(get_change(1), [1])
test_are_equal(get_change(2), [2])
test_are_equal(get_change(5), [5])
test_are_equal(get_change(10), [10])
test_are_equal(get_change(20), [20])
test_are_equal(get_change(50), [50])
test_are_equal(get_change(100), [100])
# Multi-coin amounts: greedy choice, largest denominations first.
test_are_equal(get_change(3), [2,1])
test_are_equal(get_change(7), [5,2])
test_are_equal(get_change(9), [5,2,2])
# Non-default coin set (US denominations).
test_are_equal(get_change(35, usd_coins), [25,10])
print("All tests pass!")
"jim.tuck15@gmail.com"
] | jim.tuck15@gmail.com |
d494097bbf187d0379fca24dc86a72af79decb28 | 7fa71198fcd80d78c55090616a3929f88bd749f7 | /Spider/you-get_download_video.py | 4836ee2d2b07bfb6b2685d48857832091510333f | [] | no_license | py503/win10_pycharm_project | 97a7ee99dae7189e0e14ade232bcf18ebda1ba10 | 5a12e56bb72b22231c72c3b968278d28977f94fe | refs/heads/master | 2020-08-18T19:50:46.866409 | 2019-10-25T07:36:25 | 2019-10-25T07:36:25 | 215,827,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import os
path = r"C:\\Users\\Administrator\\Desktop\\youtube\\"
# 使用you-get 下载网络视频
def download(url):
cmd = 'you-get -o {} {}'.format(path, url)
print(cmd)
os.system(cmd)
print("下载完成")
if __name__ == '__main__':
# url = input("请输入你要下截视频的url: ")
url = "https://v.youku.com/v_show/id_XNDI0NjcxMzYwOA==.html?spm=a2h0k.11417342.soresults.dposter"
download(url) | [
"172409222@qq.com"
] | 172409222@qq.com |
f44499d267dd8e234c6c753a888ab64ee817e509 | d63c4b9e05638d6abb68333edf43936134b97570 | /tests/core/models/test_template.py | 981933fcd9a8e20f4d54ca7c320469541ac33f2a | [
"Apache-2.0",
"Python-2.0"
] | permissive | SwissDataScienceCenter/renku-python | 316dc83646e9014803dff268438d34e844ba0b54 | e0ff587f507d049eeeb873e8488ba8bb10ac1a15 | refs/heads/develop | 2023-08-31T20:33:09.342385 | 2023-08-24T08:15:46 | 2023-08-24T08:15:46 | 100,947,017 | 30 | 25 | Apache-2.0 | 2023-09-12T21:52:34 | 2017-08-21T11:49:21 | Python | UTF-8 | Python | false | false | 9,532 | py | #
# Copyright 2019-2023 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Template tests."""
import textwrap
import pytest
from renku.core import errors
from renku.core.util.metadata import read_renku_version_from_dockerfile
from renku.domain_model.template import TemplateMetadata, TemplateParameter, TemplatesManifest
TEMPLATE_METADATA = {"__name__": "my-project", "__project_description__": "My Project", "__renku_version__": "42.0.0"}
def test_template_get_files(source_template):
    """Test get files of a template."""
    expected = {
        ".gitignore",
        ".renku/renku.ini",
        "Dockerfile",
        "README.md",
        "{{ __name__ }}.dummy",
        "requirements.txt",
        "immutable.file",
    }
    assert expected == set(source_template.get_files())
def test_template_render(source_template):
    """Test rendering a template."""
    metadata = TemplateMetadata.from_dict(TEMPLATE_METADATA)
    rendered = source_template.render(metadata=metadata)
    readme = (rendered.path / "README.md").read_text()
    assert readme == "A Renku project: My Project\n"
    version = read_renku_version_from_dockerfile(rendered.path / "Dockerfile")
    assert str(version) == "42.0.0"
@pytest.mark.parametrize("name", ["", "a-renku-project"])
def test_template_render_with_templated_filename(source_template, name):
    """Test rendering a template with templated filenames."""
    rendered = source_template.render(metadata=TemplateMetadata.from_dict({"__name__": name}))
    target = rendered.path / f"{name}.dummy"
    assert target.exists()
def test_template_get_rendered_files(source_template):
    """Test get files of a rendered template."""
    rendered = source_template.render(metadata=TemplateMetadata.from_dict(TEMPLATE_METADATA))
    expected = {
        ".gitignore",
        ".renku/renku.ini",
        "Dockerfile",
        "README.md",
        "my-project.dummy",
        "requirements.txt",
        "immutable.file",
    }
    assert expected == set(rendered.get_files())
def test_templates_manifest():
    """Test creating a template manifest."""
    # The manifest deliberately mixes the legacy 'folder' key (python entry)
    # with the current 'id' key (R entry) so both spellings are exercised.
    manifest = TemplatesManifest.from_string(
        textwrap.dedent(
            """
            - folder: python
              name: Python Project
              description: A Python-based Renku project
              variables: {}
              icon: python.png
            - id: R
              aliases: ["R-minimal", "R-base"]
              name: R Project
              description: An R-based Renku project
              variables:
                rate:
                  type: number
                  description: sample rate
              icon: R.png
            """
        )
    )
    assert 2 == len(manifest.templates)
    # Template declared with 'folder': no aliases, no variables.
    template = next(t for t in manifest.templates if t.id == "python")
    assert [] == template.aliases
    assert "Python Project" == template.name
    assert "A Python-based Renku project" == template.description
    assert "python.png" == template.icon
    assert [] == template.parameters
    # Template declared with 'id': aliases and one typed variable.
    template = next(t for t in manifest.templates if t.id == "R")
    assert ["R-minimal", "R-base"] == template.aliases
    assert "R Project" == template.name
    assert "An R-based Renku project" == template.description
    assert "R.png" == template.icon
    assert 1 == len(template.parameters)
    assert "rate" == template.parameters[0].name
    assert "number" == template.parameters[0].type
    assert "sample rate" == template.parameters[0].description
def test_templates_manifest_non_existing_file():
    """Loading a manifest from a missing path raises InvalidTemplateError."""
    expected_error = "There is no manifest file 'non-existing-path'"
    with pytest.raises(errors.InvalidTemplateError, match=expected_error):
        TemplatesManifest.from_path("non-existing-path")
def test_templates_manifest_binary_content(tmp_path):
    """Loading a manifest that is not valid text raises InvalidTemplateError."""
    manifest_path = tmp_path / "manifest.yaml"
    # A lone 0x80 byte is an invalid UTF-8 sequence, so decoding must fail.
    manifest_path.write_bytes(b"\x80")
    with pytest.raises(errors.InvalidTemplateError, match="Cannot read manifest file.*manifest.yaml"):
        TemplatesManifest.from_path(manifest_path)
def test_templates_manifest_invalid_yaml():
    """Test creating a template manifest from invalid YAML content.

    FIX: removed the ``tmp_path`` fixture parameter -- it was requested but
    never used, which needlessly creates a temporary directory per run.
    """
    with pytest.raises(errors.InvalidTemplateError, match="Cannot parse manifest file"):
        TemplatesManifest.from_string("- id: python\nid")
# Each case pairs an invalid manifest body with the expected error pattern.
# NOTE(review): the ``tmp_path`` fixture below is requested but unused --
# consider dropping it.
@pytest.mark.parametrize(
    "content, message",
    [
        ("", "Cannot find any valid template in manifest file"),
        ("id: python", "Invalid manifest content type: 'dict'"),
        ("-\n  - id: python", "Invalid template type: 'list'"),
        ("- no-id: python", "Template doesn't have an id:"),
        ("- id: python\n  variables: p1", "Invalid template variable type on template 'python': 'str'"),
        ("- id: python\n  variables:\n    p1: 42", "Invalid parameter type 'int' for 'p1'"),
        ("- id: python\n  name: Python\n  aliases: [R]\n- id: R\n  name: R\n", "Found duplicate IDs or aliases: 'R'"),
    ],
)
def test_templates_manifest_invalid_content(tmp_path, content, message):
    """Test creating a template manifest from invalid content."""
    with pytest.raises(errors.InvalidTemplateError, match=message):
        TemplatesManifest.from_string(content)
def test_templates_manifest_warnings():
    """Test that legacy manifest syntax produces validation warnings.

    FIX: removed the unused ``tmp_path`` fixture parameter and corrected the
    copy-pasted docstring (this test checks warnings, not invalid content).
    """
    # 'folder' instead of 'id' and a bare-string variable are deprecated forms.
    content = "- folder: python\n  name: python\n  variables:\n    p1: My parameter"
    manifest = TemplatesManifest.from_string(content, skip_validation=True)
    warnings = manifest.validate()
    assert "Template 'python' should use 'id' attribute instead of 'folder'." in warnings
    assert (
        "Template 'python' variable 'p1' uses old string format in manifest and should be replaced"
        " with the nested dictionary format."
    ) in warnings
@pytest.mark.parametrize("default, has_default", [(None, False), (42, True), ("", True), (False, True)])
def test_template_parameter_default_value(default, has_default):
    """A parameter reports a default exactly when its default is not None.

    Falsy-but-set defaults ("" and False) still count as having a default.
    """
    parameter = TemplateParameter(name="parameter", description="", type="", default=default, possible_values=None)
    parameter.validate()
    assert parameter.default == default
    assert has_default is parameter.has_default
# Each case is an invalid parameter spec paired with the expected error
# pattern: unsupported type, malformed possible_values, enum without a list,
# and a default that does not fit the declared type.
@pytest.mark.parametrize(
    "value, message",
    [
        ({"type": "int"}, "Template contains variable .* of type 'int' which is not supported"),
        ({"possible_values": "42"}, "Invalid type for possible values of template variable"),
        ({"type": "enum"}, "Template variable 'parameter' of type enum does not provide a corresponding enum list"),
        ({"type": "number", "default_value": "true"}, "Invalid default value for 'parameter':"),
    ],
)
def test_template_parameter_validation(value, message):
    """Test TemplateVariable validations."""
    with pytest.raises(errors.InvalidTemplateError, match=message):
        parameter = TemplateParameter.from_dict(name="parameter", value=value)
        parameter.validate()
# Conversion matrix: an untyped parameter (type=None) passes values through
# unchanged; typed parameters coerce strings to bool/int/float; enum values
# stay strings when they are in the allowed list.
@pytest.mark.parametrize(
    "type, possible_values, value, expected_value, expected_type",
    [
        (None, None, "truE", "truE", str),
        (None, None, True, True, bool),
        (None, None, 42, 42, int),
        (None, None, None, None, type(None)),
        ("boolean", None, "true", True, bool),
        ("boolean", None, True, True, bool),
        ("boolean", None, "False", False, bool),
        ("number", None, 42, 42, int),
        ("number", None, "42", 42, int),
        ("number", None, "42.0", 42, float),
        ("string", None, "", "", str),
        ("string", None, "some value", "some value", str),
        ("enum", ["1", "2", "3"], "2", "2", str),
    ],
)
def test_template_parameter_value_conversion(type, possible_values, value, expected_value, expected_type):
    """Test TemplateVariable conversion."""
    parameter = TemplateParameter.from_dict(name="parameter", value={"type": type, "possible_values": possible_values})
    converted_value = parameter.convert(value)
    assert expected_value == converted_value
    assert expected_type == converted_value.__class__
# Each case is a value that cannot be coerced to the declared type (bad
# boolean spellings, malformed number, non-string for string, enum value
# outside the allowed list); conversion must raise ValueError.
@pytest.mark.parametrize(
    "type, possible_values, value",
    [
        ("boolean", None, "TRUE"),
        ("boolean", None, 42),
        ("boolean", None, ""),
        ("number", None, "42.0f"),
        ("string", None, 42),
        ("enum", ["1", "2", "3"], "42"),
    ],
)
def test_template_parameter_value_conversion_error(type, possible_values, value):
    """Test TemplateVariable conversion with invalid values."""
    parameter = TemplateParameter.from_dict(name="parameter", value={"type": type, "possible_values": possible_values})
    with pytest.raises(ValueError, match=f"Invalid value '{value}.*' for template variable 'parameter.*'"):
        parameter.convert(value)
| [
"noreply@github.com"
] | SwissDataScienceCenter.noreply@github.com |
1e3af897399b49e9505ad1e9fd475ae609f63e29 | 29fd1454d19dc5682f751cff58e85023fe1a311d | /py/problems/datachallenge/tests/util.py | c2e8866bc24e6521d16742f8bc05977ce6be5f2d | [] | no_license | rags/playground | 5513c5fac66900e54a6d9f319defde7f6b34be5e | 826ad4806cacffe1b3eed4b43a2541e273e8c96f | refs/heads/master | 2023-02-03T04:30:46.392606 | 2023-01-25T13:45:42 | 2023-01-25T13:45:42 | 812,057 | 0 | 6 | null | 2022-11-24T04:54:25 | 2010-08-02T10:39:19 | Python | UTF-8 | Python | false | false | 859 | py | import sys
import contextlib
#comment this like uncomment next for python 2.7.x
from io import StringIO as SOut #3.x
#from StringIO import StringIO as SOut #2.7.x
from collections import Iterable
import re
@contextlib.contextmanager
def mock_console_io(input_str):
    """Temporarily replace sys.stdout/sys.stdin with in-memory streams.

    Yields a two-element list ``[stdout_stream, stdin_stream]``; after the
    block exits the real streams are restored and both list slots are
    replaced with the captured output text and the full input text.
    """
    saved_stdout, saved_stdin = sys.stdout, sys.stdin
    try:
        streams = [SOut(), SOut(input_str)]
        sys.stdout, sys.stdin = streams
        yield streams
    finally:
        sys.stdout, sys.stdin = saved_stdout, saved_stdin
        streams[0] = streams[0].getvalue()
        streams[1] = streams[1].getvalue()
def replace(txt, frm, to):
    """Apply the regex substitution ``frm`` -> ``to`` to *txt*.

    Returns a new string for str input, a list of substituted strings for
    iterable input, and None (implicitly) for anything else -- the original
    contract.
    """
    # FIX: the module-level "from collections import Iterable" breaks on
    # Python 3.10+, where the ABC aliases were removed from collections;
    # import from collections.abc locally instead.
    from collections.abc import Iterable

    def rep(s):
        return re.sub(frm, to, s)

    if isinstance(txt, str):
        return rep(txt)
    elif isinstance(txt, Iterable):
        return list(map(rep, txt))
def tabify(txt):
    """Collapse each run of spaces into a single tab (str or iterable of str)."""
    return replace(txt, ' +', '\t')
def untabify(txt):
    """Expand each tab into four spaces (str or iterable of str)."""
    return replace(txt, '\t', '    ')
| [
"r.raghunandan@gmail.com"
] | r.raghunandan@gmail.com |
96d5c25adfdc1afb3dd294bdc4c627890827a2eb | fea771620aa3e6702ff7f2ca7d39c00faeeec6dd | /Python/5-starter-files/solution.py | 519055f1806c57ce233aa6b0a5324034f4aa8367 | [] | no_license | edmond-chu/OldStuff | c863e16333da8188e489bf3ff84c850aed2840e8 | d8ae7a74fdbebe47e22ee3fd16902c55302aeac9 | refs/heads/main | 2023-05-29T07:18:40.221968 | 2021-06-10T22:16:41 | 2021-06-10T22:16:41 | 375,829,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # Given a sorted array A of size n (with n at least 1) where A is uniquely
# filled with integers in the range 1 to n + 1 such that there is one “missing
# integer” (the “missing integer” is defined as the integer in the
# range 1 to n + 1 that is not in A), return that “missing integer”.
# Return -1 if the input is None or an empty list.
# Notes:
# * `nums` is a list of integers (e.g. [1,2,4])
# * findMissing will initially be called with
# `start = 0` and `end = len(nums) - 1`
# e.g. findMissing([1,2,3,5], 0, 3)
def findMissing(nums, start, end) -> int:
| [
"echu12@umd.edu"
] | echu12@umd.edu |
180427ed2a72ccf7a27005623a6bfcd708d778aa | 7e31d4d5d273c3812c5a9670e53d9bcc94730175 | /EBS/ouforms/models.py | 88ac60c4b9742d5fb15b93fe9c05f5606deea240 | [] | no_license | JChen6592/SE_ElectronicBusinessSystem | c654a35048317d95cb20e4ef5fc4b0517e61d196 | ad2a0f06f1028a326dc1542a7c22bb851277023d | refs/heads/master | 2020-05-23T12:43:51.555580 | 2019-05-16T01:18:59 | 2019-05-16T01:18:59 | 186,763,571 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | from django.db import models
from django.utils import timezone
# Create your models here.
# Choices for the subject field: (stored value, human-readable label).
applying = (
    ('OU', 'Apply for Ordinary User'),
)
class OUForm(models.Model):
    """Application form requesting "Ordinary User" status."""
    subject = models.CharField("Subject", max_length = 100, choices = applying)
    first_name = models.CharField(default='', max_length=20)
    last_name = models.CharField(default='', max_length=20)
    address = models.CharField(default='', max_length=50)
    email = models.CharField(default='', max_length=50)
    # Timestamp when the application was submitted (defaults to now).
    date_submitted = models.DateTimeField(default=timezone.now)
    # NOTE(review): integer status code; 0 appears to be the initial state
    # (presumably "pending") -- confirm the full state set with the views
    # that update it.
    status = models.IntegerField("Status",default = 0)
    # unique=True: at most one application per username.
    author = models.CharField("Author username", max_length = 100, unique = True)
"PJ@Johns-MacBook-Pro.local"
] | PJ@Johns-MacBook-Pro.local |
7c078ebae1b3d841abcf5d8b430fd8f19475b58d | 5e46a4506e002db6d62e4a6ab38c7af1fa5c0eb9 | /hw1/code/5/test.numpyExtend.py | 51382fb73d66e336a1bcc43096a03fa21b6dd1be | [] | no_license | king1224/Cryptography-and-Network-Security | 050ff51033290674f301d87a8ec0d8db18261f4a | f51b01b65a48f3a7e0d743d2d3cf2a68c1c18f52 | refs/heads/master | 2020-05-05T10:14:01.216079 | 2019-04-07T13:24:18 | 2019-04-07T13:24:18 | 178,863,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | #!/usr/bin/python
from numpyExtend import getNonZeroInRow;
from numpyExtend import getNonZeroInColumn;
from numpyExtend import changeColumn;
from numpyExtend import changeRow;
import numpy as np;
def returnDefaultList():
    # 6x4 edge-incidence matrix of K4: one row per unordered column pair
    # (i, j), with ones exactly in columns i and j, pairs in lexicographic
    # order -- identical to the original hard-coded literal.
    rows = []
    for i in range(4):
        for j in range(i + 1, 4):
            row = [0] * 4
            row[i] = 1
            row[j] = 1
            rows.append(row)
    return rows
def returnDefaultMatrix():
    # Same data as returnDefaultList(), inlined and wrapped in a
    # (deprecated but behavior-preserving) numpy.matrix.
    return np.matrix([
        [1, 1, 0, 0],
        [1, 0, 1, 0],
        [1, 0, 0, 1],
        [0, 1, 1, 0],
        [0, 1, 0, 1],
        [0, 0, 1, 1],
    ])
# Python 2 smoke tests for the local numpyExtend helpers: each line prints
# True when the helper's result matches the expected value for the 6x4
# incidence matrix built above.
print 'get NonZero in Column: ';
matrix = returnDefaultMatrix();
print '\t', getNonZeroInColumn(matrix, 0) == [0, 1, 2];
print '\t', getNonZeroInColumn(matrix, 1) == [0, 3, 4];
print '\t', getNonZeroInColumn(matrix, 2) == [1, 3, 5];
print '\t', getNonZeroInColumn(matrix, 3) == [2, 4, 5];
print 'get NonZero in Row: ';
matrix = returnDefaultMatrix();
print '\t', getNonZeroInRow(matrix, 0) == [0, 1];
print '\t', getNonZeroInRow(matrix, 1) == [0, 2];
print '\t', getNonZeroInRow(matrix, 2) == [0, 3];
print '\t', getNonZeroInRow(matrix, 3) == [1, 2];
print '\t', getNonZeroInRow(matrix, 4) == [1, 3];
print '\t', getNonZeroInRow(matrix, 5) == [2, 3];
# Swapping columns 1 and 2 should exchange those entries in every row.
print 'change two Columns: ';
matrix = returnDefaultMatrix();
matrix = changeColumn(matrix, 1, 2);
print '\t', matrix[0, 1] == 0 and matrix[0, 2] == 1;
print '\t', matrix[1, 1] == 1 and matrix[1, 2] == 0;
print '\t', matrix[2, 1] == 0 and matrix[2, 2] == 0;
print '\t', matrix[3, 1] == 1 and matrix[3, 2] == 1;
print '\t', matrix[4, 1] == 0 and matrix[4, 2] == 1;
print '\t', matrix[5, 1] == 1 and matrix[5, 2] == 0;
# Swapping rows 1 and 2 should exchange those two rows entirely.
print 'change two rows: ';
matrix = returnDefaultMatrix();
matrix = changeRow(matrix, 1, 2);
print '\t', matrix[1, 0] == 1 and matrix[2, 0] == 1;
print '\t', matrix[1, 1] == 0 and matrix[2, 1] == 0;
print '\t', matrix[1, 2] == 0 and matrix[2, 2] == 1;
print '\t', matrix[1, 3] == 1 and matrix[2, 3] == 0;
# Deleting one row via numpy should shrink the matrix to 5x4.
print 'remove one row: ';
matrix = returnDefaultMatrix();
matrix = np.delete(matrix, 2,0);
print '\t', matrix.shape == (5,4);
"a0987856762@gmail.com"
] | a0987856762@gmail.com |
87db130e21a172d48ce24cd1480dd27f518ba1f0 | 8313b823a755694cfd71e57ad63760ba1c7009d4 | /Classification/kernal_SVM.py | adcd73f8c99e84b0ddc56f69991b888dba8e9c20 | [] | no_license | KRBhavaniSankar/Machine-Learning | 49063374a8b243563212cf52a933da03b41bb576 | 339f146362aa5960794d8ddcef50d502955c24c4 | refs/heads/master | 2021-06-07T17:09:57.259971 | 2020-02-18T13:40:03 | 2020-02-18T13:40:03 | 143,809,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | # Kernal-SVM Classification
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting classifier to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel="rbf",random_state=0)
classifier.fit(X_train,y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernal-SVM Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernal-SVM Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() | [
"krbhavanisankar@gmail.com"
] | krbhavanisankar@gmail.com |
97b6760b3c49af66653a8bbdce4d15bc99b5d52c | cd28af2edfe92b20a8d438eccd1be4dca82926fb | /Servo.py | 86de271770ca242cacd6cf15dc60f93b09f9f702 | [] | no_license | DatPham123/MyApplication | ccdb30203c434d0bde87568775009fe5291eeeb8 | f5b7aa8548e26a85dc882e29d3978af1b0207968 | refs/heads/master | 2020-04-23T18:07:17.505310 | 2019-04-23T01:12:47 | 2019-04-23T01:12:47 | 171,355,425 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | import RPi.GPIO as GPIO
import time
control = [5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10]
servo = 11
GPIO.setmode(GPIO.BOARD)
GPIO.setup(servo, GPIO.OUT)
p= GPIO.PWM(servo, 50) #50hz frequency = 20 ms
p.start(2.5) #starting duty at 0 degree
try:
while True:
for x in range(11):
p.ChangeDutyCycle(control[x])
time.sleep(0.03)
print(x)
for x in range(9,0,-1):
p.ChangeDutyCycle(control[x])
time.sleep(0.03)
print (x)
except KeyboardInterrupt:
GPIO.cleanup()
| [
"noreply@github.com"
] | DatPham123.noreply@github.com |
5feaa7de4cb28d27aa5cf50cc0daa4d89a2fed56 | a590cb0c9b232ad98d17a9917a36930c6a2c03f8 | /8kyu/8kyu interpreters HQ9.py | b0236dc360274759dbaabb6e495a1fd40a998e01 | [] | no_license | AbbyGeek/CodeWars | 6e10c10cbdb11f2df17a657d11ff5ffa79a5fb0b | 64dddda9f2a14a0592cc946b35302c4bd9bc569e | refs/heads/master | 2020-12-21T00:14:53.665879 | 2020-01-26T01:16:41 | 2020-01-26T01:16:41 | 236,252,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,148 | py | def HQ9(code):
if "9" in code:
return "99 bottles of beer on the wall, 99 bottles of beer.\nTake one down and pass it around, 98 bottles of beer on the wall.\n98 bottles of beer on the wall, 98 bottles of beer.\nTake one down and pass it around, 97 bottles of beer on the wall.\n97 bottles of beer on the wall, 97 bottles of beer.\nTake one down and pass it around, 96 bottles of beer on the wall.\n96 bottles of beer on the wall, 96 bottles of beer.\nTake one down and pass it around, 95 bottles of beer on the wall.\n95 bottles of beer on the wall, 95 bottles of beer.\nTake one down and pass it around, 94 bottles of beer on the wall.\n94 bottles of beer on the wall, 94 bottles of beer.\nTake one down and pass it around, 93 bottles of beer on the wall.\n93 bottles of beer on the wall, 93 bottles of beer.\nTake one down and pass it around, 92 bottles of beer on the wall.\n92 bottles of beer on the wall, 92 bottles of beer.\nTake one down and pass it around, 91 bottles of beer on the wall.\n91 bottles of beer on the wall, 91 bottles of beer.\nTake one down and pass it around, 90 bottles of beer on the wall.\n90 bottles of beer on the wall, 90 bottles of beer.\nTake one down and pass it around, 89 bottles of beer on the wall.\n89 bottles of beer on the wall, 89 bottles of beer.\nTake one down and pass it around, 88 bottles of beer on the wall.\n88 bottles of beer on the wall, 88 bottles of beer.\nTake one down and pass it around, 87 bottles of beer on the wall.\n87 bottles of beer on the wall, 87 bottles of beer.\nTake one down and pass it around, 86 bottles of beer on the wall.\n86 bottles of beer on the wall, 86 bottles of beer.\nTake one down and pass it around, 85 bottles of beer on the wall.\n85 bottles of beer on the wall, 85 bottles of beer.\nTake one down and pass it around, 84 bottles of beer on the wall.\n84 bottles of beer on the wall, 84 bottles of beer.\nTake one down and pass it around, 83 bottles of beer on the wall.\n83 bottles of beer on the wall, 83 bottles of beer.\nTake one down and 
pass it around, 82 bottles of beer on the wall.\n82 bottles of beer on the wall, 82 bottles of beer.\nTake one down and pass it around, 81 bottles of beer on the wall.\n81 bottles of beer on the wall, 81 bottles of beer.\nTake one down and pass it around, 80 bottles of beer on the wall.\n80 bottles of beer on the wall, 80 bottles of beer.\nTake one down and pass it around, 79 bottles of beer on the wall.\n79 bottles of beer on the wall, 79 bottles of beer.\nTake one down and pass it around, 78 bottles of beer on the wall.\n78 bottles of beer on the wall, 78 bottles of beer.\nTake one down and pass it around, 77 bottles of beer on the wall.\n77 bottles of beer on the wall, 77 bottles of beer.\nTake one down and pass it around, 76 bottles of beer on the wall.\n76 bottles of beer on the wall, 76 bottles of beer.\nTake one down and pass it around, 75 bottles of beer on the wall.\n75 bottles of beer on the wall, 75 bottles of beer.\nTake one down and pass it around, 74 bottles of beer on the wall.\n74 bottles of beer on the wall, 74 bottles of beer.\nTake one down and pass it around, 73 bottles of beer on the wall.\n73 bottles of beer on the wall, 73 bottles of beer.\nTake one down and pass it around, 72 bottles of beer on the wall.\n72 bottles of beer on the wall, 72 bottles of beer.\nTake one down and pass it around, 71 bottles of beer on the wall.\n71 bottles of beer on the wall, 71 bottles of beer.\nTake one down and pass it around, 70 bottles of beer on the wall.\n70 bottles of beer on the wall, 70 bottles of beer.\nTake one down and pass it around, 69 bottles of beer on the wall.\n69 bottles of beer on the wall, 69 bottles of beer.\nTake one down and pass it around, 68 bottles of beer on the wall.\n68 bottles of beer on the wall, 68 bottles of beer.\nTake one down and pass it around, 67 bottles of beer on the wall.\n67 bottles of beer on the wall, 67 bottles of beer.\nTake one down and pass it around, 66 bottles of beer on the wall.\n66 bottles of beer on the 
wall, 66 bottles of beer.\nTake one down and pass it around, 65 bottles of beer on the wall.\n65 bottles of beer on the wall, 65 bottles of beer.\nTake one down and pass it around, 64 bottles of beer on the wall.\n64 bottles of beer on the wall, 64 bottles of beer.\nTake one down and pass it around, 63 bottles of beer on the wall.\n63 bottles of beer on the wall, 63 bottles of beer.\nTake one down and pass it around, 62 bottles of beer on the wall.\n62 bottles of beer on the wall, 62 bottles of beer.\nTake one down and pass it around, 61 bottles of beer on the wall.\n61 bottles of beer on the wall, 61 bottles of beer.\nTake one down and pass it around, 60 bottles of beer on the wall.\n60 bottles of beer on the wall, 60 bottles of beer.\nTake one down and pass it around, 59 bottles of beer on the wall.\n59 bottles of beer on the wall, 59 bottles of beer.\nTake one down and pass it around, 58 bottles of beer on the wall.\n58 bottles of beer on the wall, 58 bottles of beer.\nTake one down and pass it around, 57 bottles of beer on the wall.\n57 bottles of beer on the wall, 57 bottles of beer.\nTake one down and pass it around, 56 bottles of beer on the wall.\n56 bottles of beer on the wall, 56 bottles of beer.\nTake one down and pass it around, 55 bottles of beer on the wall.\n55 bottles of beer on the wall, 55 bottles of beer.\nTake one down and pass it around, 54 bottles of beer on the wall.\n54 bottles of beer on the wall, 54 bottles of beer.\nTake one down and pass it around, 53 bottles of beer on the wall.\n53 bottles of beer on the wall, 53 bottles of beer.\nTake one down and pass it around, 52 bottles of beer on the wall.\n52 bottles of beer on the wall, 52 bottles of beer.\nTake one down and pass it around, 51 bottles of beer on the wall.\n51 bottles of beer on the wall, 51 bottles of beer.\nTake one down and pass it around, 50 bottles of beer on the wall.\n50 bottles of beer on the wall, 50 bottles of beer.\nTake one down and pass it around, 49 bottles of beer 
on the wall.\n49 bottles of beer on the wall, 49 bottles of beer.\nTake one down and pass it around, 48 bottles of beer on the wall.\n48 bottles of beer on the wall, 48 bottles of beer.\nTake one down and pass it around, 47 bottles of beer on the wall.\n47 bottles of beer on the wall, 47 bottles of beer.\nTake one down and pass it around, 46 bottles of beer on the wall.\n46 bottles of beer on the wall, 46 bottles of beer.\nTake one down and pass it around, 45 bottles of beer on the wall.\n45 bottles of beer on the wall, 45 bottles of beer.\nTake one down and pass it around, 44 bottles of beer on the wall.\n44 bottles of beer on the wall, 44 bottles of beer.\nTake one down and pass it around, 43 bottles of beer on the wall.\n43 bottles of beer on the wall, 43 bottles of beer.\nTake one down and pass it around, 42 bottles of beer on the wall.\n42 bottles of beer on the wall, 42 bottles of beer.\nTake one down and pass it around, 41 bottles of beer on the wall.\n41 bottles of beer on the wall, 41 bottles of beer.\nTake one down and pass it around, 40 bottles of beer on the wall.\n40 bottles of beer on the wall, 40 bottles of beer.\nTake one down and pass it around, 39 bottles of beer on the wall.\n39 bottles of beer on the wall, 39 bottles of beer.\nTake one down and pass it around, 38 bottles of beer on the wall.\n38 bottles of beer on the wall, 38 bottles of beer.\nTake one down and pass it around, 37 bottles of beer on the wall.\n37 bottles of beer on the wall, 37 bottles of beer.\nTake one down and pass it around, 36 bottles of beer on the wall.\n36 bottles of beer on the wall, 36 bottles of beer.\nTake one down and pass it around, 35 bottles of beer on the wall.\n35 bottles of beer on the wall, 35 bottles of beer.\nTake one down and pass it around, 34 bottles of beer on the wall.\n34 bottles of beer on the wall, 34 bottles of beer.\nTake one down and pass it around, 33 bottles of beer on the wall.\n33 bottles of beer on the wall, 33 bottles of beer.\nTake one 
down and pass it around, 32 bottles of beer on the wall.\n32 bottles of beer on the wall, 32 bottles of beer.\nTake one down and pass it around, 31 bottles of beer on the wall.\n31 bottles of beer on the wall, 31 bottles of beer.\nTake one down and pass it around, 30 bottles of beer on the wall.\n30 bottles of beer on the wall, 30 bottles of beer.\nTake one down and pass it around, 29 bottles of beer on the wall.\n29 bottles of beer on the wall, 29 bottles of beer.\nTake one down and pass it around, 28 bottles of beer on the wall.\n28 bottles of beer on the wall, 28 bottles of beer.\nTake one down and pass it around, 27 bottles of beer on the wall.\n27 bottles of beer on the wall, 27 bottles of beer.\nTake one down and pass it around, 26 bottles of beer on the wall.\n26 bottles of beer on the wall, 26 bottles of beer.\nTake one down and pass it around, 25 bottles of beer on the wall.\n25 bottles of beer on the wall, 25 bottles of beer.\nTake one down and pass it around, 24 bottles of beer on the wall.\n24 bottles of beer on the wall, 24 bottles of beer.\nTake one down and pass it around, 23 bottles of beer on the wall.\n23 bottles of beer on the wall, 23 bottles of beer.\nTake one down and pass it around, 22 bottles of beer on the wall.\n22 bottles of beer on the wall, 22 bottles of beer.\nTake one down and pass it around, 21 bottles of beer on the wall.\n21 bottles of beer on the wall, 21 bottles of beer.\nTake one down and pass it around, 20 bottles of beer on the wall.\n20 bottles of beer on the wall, 20 bottles of beer.\nTake one down and pass it around, 19 bottles of beer on the wall.\n19 bottles of beer on the wall, 19 bottles of beer.\nTake one down and pass it around, 18 bottles of beer on the wall.\n18 bottles of beer on the wall, 18 bottles of beer.\nTake one down and pass it around, 17 bottles of beer on the wall.\n17 bottles of beer on the wall, 17 bottles of beer.\nTake one down and pass it around, 16 bottles of beer on the wall.\n16 bottles of beer on 
the wall, 16 bottles of beer.\nTake one down and pass it around, 15 bottles of beer on the wall.\n15 bottles of beer on the wall, 15 bottles of beer.\nTake one down and pass it around, 14 bottles of beer on the wall.\n14 bottles of beer on the wall, 14 bottles of beer.\nTake one down and pass it around, 13 bottles of beer on the wall.\n13 bottles of beer on the wall, 13 bottles of beer.\nTake one down and pass it around, 12 bottles of beer on the wall.\n12 bottles of beer on the wall, 12 bottles of beer.\nTake one down and pass it around, 11 bottles of beer on the wall.\n11 bottles of beer on the wall, 11 bottles of beer.\nTake one down and pass it around, 10 bottles of beer on the wall.\n10 bottles of beer on the wall, 10 bottles of beer.\nTake one down and pass it around, 9 bottles of beer on the wall.\n9 bottles of beer on the wall, 9 bottles of beer.\nTake one down and pass it around, 8 bottles of beer on the wall.\n8 bottles of beer on the wall, 8 bottles of beer.\nTake one down and pass it around, 7 bottles of beer on the wall.\n7 bottles of beer on the wall, 7 bottles of beer.\nTake one down and pass it around, 6 bottles of beer on the wall.\n6 bottles of beer on the wall, 6 bottles of beer.\nTake one down and pass it around, 5 bottles of beer on the wall.\n5 bottles of beer on the wall, 5 bottles of beer.\nTake one down and pass it around, 4 bottles of beer on the wall.\n4 bottles of beer on the wall, 4 bottles of beer.\nTake one down and pass it around, 3 bottles of beer on the wall.\n3 bottles of beer on the wall, 3 bottles of beer.\nTake one down and pass it around, 2 bottles of beer on the wall.\n2 bottles of beer on the wall, 2 bottles of beer.\nTake one down and pass it around, 1 bottle of beer on the wall.\n1 bottle of beer on the wall, 1 bottle of beer.\nTake one down and pass it around, no more bottles of beer on the wall.\nNo more bottles of beer on the wall, no more bottles of beer.\nGo to the store and buy some more, 99 bottles of beer on the 
wall."
if "H" in code:
return "Hello World!"
if "Q" in code:
return code
else: return None | [
"abbyrosewest@gmail.com"
] | abbyrosewest@gmail.com |
f2713f64b045adf1069395bfe4d9c24d90f7c5e2 | 93da9b43ae9b0ec648cdaf448a705821fb8f91b1 | /LostMobileReporter/lostupdater/migrations/0001_initial.py | 1db6b966da90c1092fd12afcbd24405ddeb4f223 | [] | no_license | JigarJoshi04/LostMobileReporter | f926e27eac39f8207fb78c3c4873a64ad12fffdc | 4506970ec4de9a1da8425301dffb388a62e531d0 | refs/heads/master | 2022-11-26T05:12:39.369837 | 2020-07-16T07:23:17 | 2020-07-16T07:23:17 | 280,083,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | # Generated by Django 2.2 on 2020-07-16 05:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration for the lostupdater app.

    Auto-generated by Django 2.2 (``makemigrations``); creates the
    ``ReportModel`` table used to report lost mobile phones.
    """

    # First migration for this app: no prior migration state exists.
    initial = True

    # No dependencies on other apps' migrations.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ReportModel',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Device and contact details captured from the report form.
                # NOTE(review): fields look unvalidated at the DB level
                # (e.g. no unique constraint on imei_number) — confirm intent.
                ('imei_number', models.CharField(max_length=150)),
                ('mobile_number', models.CharField(max_length=150)),
                ('mobile_company', models.CharField(max_length=100)),
                ('mobile_model', models.CharField(max_length=100)),
                ('email_id', models.CharField(max_length=100)),
            ],
        ),
    ]
| [
"jigar.pj@somaiya.edu"
] | jigar.pj@somaiya.edu |
adcc99a09b7467c4aa785baaf844f7c46206fc94 | d6b262ba14567b97a114c75f8aa358deb17740e4 | /chaos/provider.py | 9fb2d4c3abffc37f0d2bf6780ef91568dfe15255 | [] | no_license | WilixLead/script.elementum.chaos | 2d8f521a0122c5e1024bbf19798389ab91b112d5 | cb451cd4f666a07ddaa33f167051747e15af3ac5 | refs/heads/master | 2021-04-26T23:18:18.065411 | 2018-03-05T19:19:00 | 2018-03-05T19:19:00 | 123,966,797 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,393 | py | # -*- coding: utf-8 -*-
"""
Provider thread methods
"""
import os
import re
import json
import urllib
import xbmc
import xbmcaddon
from client import Client
from elementum.provider import log, get_setting, set_setting
from providers.definitions import definitions, longest
from utils import ADDON_PATH, get_int, clean_size, get_alias
def generate_payload(provider, generator, filtering, verify_name=True, verify_size=True):
    """ Payload formatter to format results the way Elementum expects them

    Args:
        provider (str): Provider ID
        generator (function): Generator method, can be either ``extract_torrents`` or ``extract_from_api``
        filtering (Filtering): Filtering class instance
        verify_name (bool): Whether to double-check the results' names match the query or not
        verify_size (bool): Whether to check the results' file sizes

    Returns:
        list: Formatted results
    """
    filtering.information(provider)

    # Resolve the provider definition, honouring any user-configured alias.
    definition = get_alias(definitions[provider], get_setting("%s_alias" % provider))

    formatted = []
    for name, info_hash, uri, size, seeds, peers in generator:
        size = clean_size(size)
        # When verification is disabled, substitute values that always pass.
        checked_name = name if verify_name else filtering.title
        checked_size = size if verify_size else None
        if not filtering.verify(provider, checked_name, checked_size):
            log.debug(filtering.reason.encode('utf-8'))
            continue
        formatted.append({
            "name": name,
            "uri": uri,
            "info_hash": info_hash,
            "size": size,
            "seeds": get_int(seeds),
            "peers": get_int(peers),
            "language": definition["language"] if 'language' in definition else 'en',
            "provider": '[COLOR %s]%s[/COLOR]' % (definition['color'], definition['name']),
            "icon": os.path.join(ADDON_PATH, 'chaos', 'providers', 'icons', '%s.png' % provider),
        })

    log.debug('>>>>>> %s would send %d torrents to Elementum <<<<<<<' % (provider, len(formatted)))
    return formatted
def process(provider, generator, filtering, has_special, verify_name=True, verify_size=True):
""" Method for processing provider results using its generator and Filtering class instance
Args:
provider (str): Provider ID
generator (function): Generator method, can be either ``extract_torrents`` or ``extract_from_api``
filtering (Filtering): Filtering class instance
has_special (bool): Whether title contains special chars
verify_name (bool): Whether to double-check the results' names match the query or not
verify_size (bool): Whether to check the results' file sizes
"""
log.debug("execute_process for %s with %s" % (provider, repr(generator)))
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
client = Client()
token = None
logged_in = False
token_auth = False
if get_setting("use_cloudhole", bool):
client.clearance = get_setting('clearance')
client.user_agent = get_setting('user_agent')
if get_setting('kodi_language', bool):
kodi_language = xbmc.getLanguage(xbmc.ISO_639_1)
if kodi_language:
filtering.kodi_language = kodi_language
language_exceptions = get_setting('language_exceptions')
if language_exceptions.strip().lower():
filtering.language_exceptions = re.split(r',\s?', language_exceptions)
log.debug("[%s] Queries: %s" % (provider, filtering.queries))
log.debug("[%s] Extras: %s" % (provider, filtering.extras))
for query, extra in zip(filtering.queries, filtering.extras):
log.debug("[%s] Before keywords - Query: %s - Extra: %s" % (provider, repr(query), repr(extra)))
if has_special:
# Removing quotes, surrounding {title*} keywords, when title contains special chars
query = re.sub("[\"']({title.*?})[\"']", '\\1', query)
query = filtering.process_keywords(provider, query)
extra = filtering.process_keywords(provider, extra)
if 'charset' in definition and 'utf' not in definition['charset'].lower():
try:
query = urllib.quote(query.encode(definition['charset']))
extra = urllib.quote(extra.encode(definition['charset']))
except:
pass
log.debug("[%s] After keywords - Query: %s - Extra: %s" % (provider, repr(query), repr(extra)))
if not query:
return filtering.results
url_search = filtering.url.replace('QUERY', query)
if extra:
url_search = url_search.replace('EXTRA', extra)
else:
url_search = url_search.replace('EXTRA', '')
url_search = url_search.replace(' ', definition['separator'])
# MagnetDL fix...
url_search = url_search.replace('FIRSTLETTER', query[:1])
# Creating the payload for POST method
payload = dict()
for key, value in filtering.post_data.iteritems():
if 'QUERY' in value:
payload[key] = filtering.post_data[key].replace('QUERY', query)
else:
payload[key] = filtering.post_data[key]
# Creating the payload for GET method
data = None
if filtering.get_data:
data = dict()
for key, value in filtering.get_data.iteritems():
if 'QUERY' in value:
data[key] = filtering.get_data[key].replace('QUERY', query)
else:
data[key] = filtering.get_data[key]
log.debug("- %s query: %s" % (provider, repr(query)))
log.debug("-- %s url_search before token: %s" % (provider, repr(url_search)))
log.debug("--- %s using POST payload: %s" % (provider, repr(payload)))
log.debug("----%s filtering with post_data: %s" % (provider, repr(filtering.post_data)))
# Set search's "title" in filtering to double-check results' names
if 'filter_title' in definition and definition['filter_title']:
filtering.filter_title = True
filtering.title = query
if token:
log.info('[%s] Reusing existing token' % provider)
url_search = url_search.replace('TOKEN', token)
elif 'token' in definition:
token_url = definition['base_url'] + definition['token']
log.debug("Getting token for %s at %s" % (provider, repr(token_url)))
client.open(token_url.encode('utf-8'))
try:
token_data = json.loads(client.content)
except:
log.error('%s: Failed to get token for %s' % (provider, repr(url_search)))
return filtering.results
log.debug("Token response for %s: %s" % (provider, repr(token_data)))
if 'token' in token_data:
token = token_data['token']
log.debug("Got token for %s: %s" % (provider, repr(token)))
url_search = url_search.replace('TOKEN', token)
else:
log.warning('%s: Unable to get token for %s' % (provider, repr(url_search)))
if logged_in:
log.info("[%s] Reusing previous login" % provider)
elif token_auth:
log.info("[%s] Reusing previous token authorization" % provider)
elif 'private' in definition and definition['private']:
username = get_setting('%s_username' % provider)
password = get_setting('%s_password' % provider)
passkey = get_setting('%s_passkey' % provider)
if not username and not password and not passkey:
for addon_name in ('script.magnetic.%s' % provider, 'script.magnetic.%s-mc' % provider):
for setting in ('username', 'password'):
try:
value = xbmcaddon.Addon(addon_name).getSetting(setting)
set_setting('%s_%s' % (provider, setting), value)
if setting == 'username':
username = value
if setting == 'password':
password = value
except:
pass
if passkey:
logged_in = True
client.passkey = passkey
url_search = url_search.replace('PASSKEY', passkey)
elif 'login_object' in definition and definition['login_object']:
logged_in = False
login_object = definition['login_object'].replace('USERNAME', '"%s"' % username).replace('PASSWORD', '"%s"' % password)
# TODO generic flags in definitions for those...
if provider == 'hd-torrents':
client.open(definition['root_url'] + definition['login_path'])
if client.content:
csrf_token = re.search(r'name="csrfToken" value="(.*?)"', client.content)
if csrf_token:
login_object = login_object.replace('CSRF_TOKEN', '"%s"' % csrf_token.group(1))
else:
logged_in = True
if provider == 'lostfilm':
client.open(definition['root_url'] + '/v_search.php?c=111&s=1&e=1')
if client.content is not 'log in first':
logged_in = True
if 'token_auth' in definition:
# log.debug("[%s] logging in with: %s" % (provider, login_object))
if client.open(definition['root_url'] + definition['token_auth'], post_data=eval(login_object)):
try:
token_data = json.loads(client.content)
except:
log.error('%s: Failed to get token from %s' % (provider, definition['token_auth']))
return filtering.results
log.debug("Token response for %s: %s" % (provider, repr(token_data)))
if 'token' in token_data:
client.token = token_data['token']
log.debug("Auth token for %s: %s" % (provider, repr(client.token)))
else:
log.error('%s: Unable to get auth token for %s' % (provider, repr(url_search)))
return filtering.results
log.info('[%s] Token auth successful' % provider)
token_auth = True
else:
log.error("[%s] Token auth failed with response: %s" % (provider, repr(client.content)))
return filtering.results
elif not logged_in and client.login(definition['root_url'] + definition['login_path'],
eval(login_object), definition['login_failed']):
log.info('[%s] Login successful' % provider)
logged_in = True
elif not logged_in:
log.error("[%s] Login failed: %s", provider, client.status)
log.debug("[%s] Failed login content: %s", provider, repr(client.content))
return filtering.results
if logged_in:
if provider == 'hd-torrents':
client.open(definition['root_url'] + '/torrents.php')
csrf_token = re.search(r'name="csrfToken" value="(.*?)"', client.content)
url_search = url_search.replace("CSRF_TOKEN", csrf_token.group(1))
if provider == 'lostfilm':
log.info('[%s] Need open page before search', provider)
client.open(url_search.encode('utf-8'), post_data=payload, get_data=data)
search_info = re.search(r'PlayEpisode\((.*?)\)">', client.content)
if search_info:
series_details = re.search('\'(\d+)\',\'(\d+)\',\'(\d+)\'', search_info.group(1))
client.open(definition['root_url'] + '/v_search.php?c=%s&s=%s&e=%s' % (series_details.group(1), series_details.group(2), series_details.group(3)))
redirect_url = re.search(ur'url=(.*?)">', client.content)
if redirect_url is not None:
url_search = redirect_url.group(1)
else:
return filtering.results
log.info("> %s search URL: %s" % (definition['name'].rjust(longest), url_search))
client.open(url_search.encode('utf-8'), post_data=payload, get_data=data)
filtering.results.extend(
generate_payload(provider,
generator(provider, client),
filtering,
verify_name,
verify_size))
return filtering.results
| [
"lead@w.wilix.ru"
] | lead@w.wilix.ru |
8a3e322773fda54fc1bccdaf01f3d1691fc37bc6 | fc5c86802bf1e3a4cbe7d9bf933e644a60f4ef6c | /apps/quotes/views.py | a47de24ed55058d26ab8be5dae53343ad97f5ef8 | [] | no_license | ryandecoster/pythonexam | ec4cc62d27f4b113e8548cd18c7333edc4aeba6c | 333fe8048d92b1254a4080585b79a9e1baba0071 | refs/heads/master | 2020-03-21T15:54:28.533848 | 2018-06-26T18:31:52 | 2018-06-26T18:31:52 | 138,739,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,680 | py | from django.shortcuts import render, HttpResponse, redirect
from .models import User, Quote, UserManager, QuoteManager
from django.contrib import messages
from django.db.models import Count
import bcrypt
def index(request):
    """Render the landing page with the login and registration forms."""
    template_name = 'quotes/index.html'
    return render(request, template_name)
def login(request):
    """Authenticate a user from the posted email/password.

    On success, stores id and name in the session and redirects to the
    quotes dashboard; on any failure redirects back to the landing page
    with a generic error (no hint whether the email or password was wrong).
    """
    # Single query instead of the original filter()-then-get() double hit;
    # first() returns None when no user matches.
    user = User.objects.filter(email=request.POST['email']).first()
    if user and bcrypt.checkpw(request.POST['password'].encode(), user.password.encode()):
        request.session['user_id'] = user.id
        request.session['first_name'] = user.first_name
        request.session['last_name'] = user.last_name
        messages.success(request, "Successfully logged in!")
        return redirect('/quotes')
    # Same message for unknown email and bad password (avoids user enumeration).
    messages.error(request, "Invalid email or password.")
    return redirect('/')
def register(request):
    """Validate registration input, create the user, and start a session."""
    errors = User.objects.validator(request.POST)
    if errors:
        for message in errors.values():
            messages.error(request, message)
        return redirect('/')

    # Hash the password with a fresh salt before persisting.
    hashed_pw = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())
    new_user = User.objects.create(
        first_name=request.POST['first_name'],
        last_name=request.POST['last_name'],
        email=request.POST['email'],
        password=hashed_pw,
    )

    # Log the new user in immediately.
    request.session['user_id'] = new_user.id
    request.session['first_name'] = new_user.first_name
    request.session['last_name'] = new_user.last_name
    messages.success(request, "Successfully registered!")
    return redirect('/quotes')
def quotes(request):
    """Render the quotes dashboard; requires an active session."""
    if 'user_id' not in request.session:
        messages.error(request, "Must be logged in to view this page!")
        return redirect('/')

    session = request.session
    current_user = User.objects.get(id=session['user_id'])
    # Annotate each quote with its like count for the template.
    annotated_quotes = Quote.objects.annotate(count_likes=Count('liked_users'))

    context = {
        'first_name': session['first_name'],
        'last_name': session['last_name'],
        'user': current_user,
        'Quotes': annotated_quotes,
        'Users': User.objects.all(),
    }
    return render(request, 'quotes/quotes.html', context)
def like(request):
    """Record a like for a quote (POST only) and return to the dashboard.

    Bug fix: the original returned None for non-POST requests, which makes
    Django raise a ValueError (views must return an HttpResponse); now every
    path redirects to /quotes.
    """
    if request.method == "POST":
        # process_like toggles/records the like; its return value is unused.
        Quote.objects.process_like(request.POST)
    return redirect('/quotes')
def delete(request):
    """Delete the quote named in the POST data, then return to the dashboard.

    Bug fix: the original used Quote.objects.get(), which raises DoesNotExist
    (a 500 error) for a stale or forged quote_id; filter().first() degrades
    gracefully to a no-op redirect instead.
    """
    quote = Quote.objects.filter(id=request.POST['quote_id']).first()
    if quote is not None:
        quote.delete()
    # NOTE(review): no ownership/login check here — any POSTer can delete
    # any quote; confirm whether that is intended.
    return redirect('/quotes')
def add(request):
    """Validate and create a new quote uploaded by the logged-in user."""
    errors = Quote.objects.quoteValidator(request.POST)
    if errors:
        for message in errors.values():
            messages.error(request, message)
        return redirect('/quotes')

    uploader = User.objects.get(id=request.session['user_id'])
    Quote.objects.create(
        quote=request.POST['quote'],
        author=request.POST['author'],
        uploader=uploader,
    )
    return redirect('/quotes')
def logout(request):
    """Clear the session and send the visitor back to the landing page.

    Fix: the confirmation was emitted via messages.error even though it is a
    success notice; use messages.success for consistency with login/register.
    """
    request.session.clear()
    messages.success(request, "Successfully logged out!")
    return redirect('/')
def show(request, id):
    """Show the profile page for user *id* with the quotes they uploaded.

    Bug fix: the original filtered quotes with ``uploader__id__contains=id``,
    a substring match — e.g. id=1 also matched uploaders 11, 21, 101.
    Replaced with an exact lookup. Also fetches the profile user once
    instead of three identical queries.
    """
    if 'user_id' not in request.session:
        messages.error(request, "Must be logged in to view this page!")
        return redirect('/')

    profile_user = User.objects.get(id=id)
    context = {
        'id': id,
        'first_name': profile_user.first_name,
        'last_name': profile_user.last_name,
        'Users': User.objects.all(),
        'Quotes': Quote.objects.filter(uploader__id=id),
    }
    return render(request, 'quotes/show.html', context)
def edit(request, id):
    """Render the account-edit form pre-filled with user *id*'s details."""
    if 'user_id' not in request.session:
        messages.error(request, "Must be logged in to view this page!")
        return redirect('/')

    account = User.objects.get(id=id)
    context = {
        'id': request.session['user_id'],
        'first_name': account.first_name,
        'last_name': account.last_name,
        'email': account.email,
    }
    return render(request, 'quotes/edit.html', context)
def update(request, id):
    """Validate and persist edited account info, then return to the account page.

    Fixes: the user row was fetched twice (once before validation and again
    after); and the success confirmation was emitted with messages.error —
    now messages.success, matching the other views.
    """
    user = User.objects.get(id=id)
    errors = User.objects.infoValidator(request.POST)
    if errors:
        for message in errors.values():
            messages.error(request, message)
        return redirect('/myaccount/' + str(user.id))

    user.first_name = request.POST['first_name']
    user.last_name = request.POST['last_name']
    user.email = request.POST['email']
    user.save()
    messages.success(request, "Updated successfully!")
    return redirect('/myaccount/' + str(user.id))
| [
"ryandecoster26@yahoo.com"
] | ryandecoster26@yahoo.com |
0dad5e1d305a873fa56187c074313e2abafcd989 | a57a79bd2cb2397c6d879751e7041e9142390acc | /apps/tags/management/commands/migrate_tags.py | ba82af97368ac66dbcffd52844782f5c57617454 | [] | no_license | cephey/country | b41e85bfd5df20caec5d6f54b409ffe4f1b11ac3 | 774800e79417122876119246bb5b6e9b2e186891 | refs/heads/master | 2021-01-22T23:15:46.934125 | 2017-09-10T21:53:16 | 2017-09-10T21:53:16 | 85,618,298 | 0 | 0 | null | 2017-05-11T11:34:16 | 2017-03-20T19:36:45 | Python | UTF-8 | Python | false | false | 911 | py | import csv
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from apps.tags.models import Tag
class Command(BaseCommand):
help = 'Migrate tags from csv'
def add_arguments(self, parser):
parser.add_argument('--path', help='/path/to/file.csv')
def handle(self, *args, **kwargs):
self.stdout.write('Start...')
path = kwargs.get('path')
if not path:
raise CommandError('Path is required')
with open(path, 'r', encoding=settings.MIGRATE_FILE_ENCODING) as csvfile:
reader = csv.reader(csvfile)
tags = []
for row in reader:
tags.append(
Tag(
name=row[7], ext_id=row[0]
)
)
Tag.objects.bulk_create(tags, batch_size=100)
self.stdout.write('End...')
| [
"andrey.ptitsyn86@gmail.com"
] | andrey.ptitsyn86@gmail.com |
04f24b285f9fbb2a43edf32265cd0119c8b0abd0 | 6b46d1ed3eade452255016ea31452494b21ad879 | /model.py | 38a287831d76ba7725b54870051e2262ae1b60e5 | [
"MIT"
] | permissive | yang-jiawen/snphot | c994357bd25f00d041323ab1f1f437ca6a7adcb6 | b7d5d5e33bb2d4f1c8902f55043b1899d3eeca55 | refs/heads/main | 2023-08-26T16:17:27.398725 | 2021-11-05T02:36:18 | 2021-11-05T02:36:18 | 424,099,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,418 | py | from scipy.interpolate import interp1d
import numpy as np
import pycmpfit
# phase, mean, _, _, _, _, = np.loadtxt(
# '/Users/jiawenyang/Packages/LCfitter/LCfPCA_He/bandSpecific_B.txt',
# skiprows=1, unpack=True) # FIXME
def stretch_model_val(t, template_phase, template_mean, theta,
                      fit_range=[-10, 50]):
    """Calculate stretch model value given time t.

    See https://arxiv.org/pdf/1107.2404.pdf

    Args:
        t (float or array_like): time to calculate model value.
        template_phase (array_like): phases of template used to fit
            the light curve.
        template_mean (array_like): magnitudes of template used to
            fit the light curve.
        theta (array_like): array_like of length 4,
            [max mag, max time, s1, s2].
        fit_range (array_like, optional): array_like of length 2,
            defines the range of phase needs to be fit.
            Defaults to [-10, 50].

    Returns:
        float or array_like: magnitudes of model at given t
            (NaN outside the stretched fit range).
    """
    peak_mag, peak_time, rise_stretch, fall_stretch = theta
    # Template interpolator shifted to the fitted peak magnitude;
    # extrapolates linearly beyond the template's phase coverage.
    template = interp1d(template_phase, template_mean + peak_mag,
                        fill_value='extrapolate')
    lo, hi = fit_range[0], fit_range[1]

    # Phases under the two independent stretches (pre- and post-maximum).
    rise_phase = (t - peak_time) / rise_stretch
    fall_phase = (t - peak_time) / fall_stretch

    rise_val = np.where(rise_phase < lo, np.nan, template(rise_phase))
    fall_val = np.where(fall_phase > hi, np.nan, template(fall_phase))

    # Before maximum use the rise stretch, at/after maximum the fall stretch.
    return np.where(t < peak_time, rise_val, fall_val)
def stretch_userfunc(m, n, theta, private_data):
    """User function to be optimized (mpfit callback).

    Fix: removed the dead ``devs = np.zeros(...)`` allocation that was
    immediately overwritten by the np.where result.

    Args:
        m (int): Length of data (required by the mpfit callback signature).
        n (int): Length of parameters (theta); unused here but part of the
            callback signature expected by pycmpfit.
        theta (array-like): Parameters [max mag, max time, s1, s2].
        private_data (dict): Data to be fit, with keys 'x', 'y', 'ey',
            'template_days', 'template_magnitudes' and 'fit_range'.

    Returns:
        dict: {"deviates": array of weighted residuals}; points where the
            model is NaN (outside the fit range) contribute zero.
    """
    tt, mm, emm = [private_data[ii] for ii in ['x', 'y', 'ey']]
    template_days, template_magnitudes = [
        private_data[ii] for ii in ['template_days', 'template_magnitudes']]
    fit_range = private_data['fit_range']
    y_model = stretch_model_val(
        tt, template_days, template_magnitudes, theta, fit_range=fit_range)
    # Zero (not NaN) deviates outside the fit range so mpfit ignores them.
    devs = np.where(np.isnan(y_model), 0, (mm - y_model) / emm)
    return {"deviates": devs}
def stretch_fit(days, magnitudes, emagnitudes,
                template_days, template_magnitudes,
                theta=None, fit_range=[-10, 50], bounds=None, fixed=None):
    """Fit light curve use two stretches.

    Fix: the initial guess (brightest point and its date) is now only
    computed when the caller did not supply ``theta``; previously it was
    evaluated unconditionally, wasting work and failing on inputs that are
    otherwise valid when an explicit ``theta`` is given.

    Args:
        days (array-like): days of light curve to be fit.
        magnitudes (array-like): magnitudes of light curve to be fit.
        emagnitudes (array-like): errors of magnitudes.
        template_days (array-like): days of template light curve.
        template_magnitudes (array-like): magnitudes of template light curve.
        theta (array-like, optional): Initial parameters value
            [max mag, max time, s1, s2]. Defaults to None (derived from
            the brightest data point).
        fit_range (array_like, optional): array_like of length 2,
            defines the range of phase needs to be fit.
            Defaults to [-10, 50].
        bounds (2 tuple of array-like, optional): gives the lower and upper
            boundary of parameters. Defaults to None.
        fixed (array-like of boolean value): if True, fix that
            parameter to initial value. Defaults to None.

    Returns:
        tuple: (theta, result) where theta holds the best-fit parameters
            (updated in place by mpfit) and result is the pycmpfit result
            structure.
    """
    if theta is None:
        # Seed the fit at the brightest (minimum magnitude) observation.
        max_mag = min(magnitudes)
        max_date = days[np.argmin(magnitudes)]
        theta = np.array([max_mag, max_date, 1, 1])

    py_mp_par = list(pycmpfit.MpPar() for i in range(len(theta)))
    if bounds is not None:
        for ii in range(len(theta)):
            py_mp_par[ii].limited = [1, 1]
            py_mp_par[ii].limits = [bounds[0][ii], bounds[1][ii]]
    if fixed is not None:
        for ii in range(len(fixed)):
            py_mp_par[ii].fixed = fixed[ii]

    user_data = {'x': days, 'y': magnitudes, 'ey': emagnitudes,
                 'template_days': template_days,
                 'template_magnitudes': template_magnitudes,
                 'fit_range': fit_range}
    fit = pycmpfit.Mpfit(stretch_userfunc, len(days), theta,
                         private_data=user_data, py_mp_par=py_mp_par)
    fit.mpfit()
    return theta, fit.result
| [
"37666860+jiaweny@users.noreply.github.com"
] | 37666860+jiaweny@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.