blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4fbbeabbd46e4a86d74f7198a8f67b6784e9927b | bffc0b61e630f3710be2b1dff5d6685ffc91a2c6 | /src/ulazni_paketi/models.py | 743d93d7c276069d87bf0da341912ee72e29512c | [] | no_license | VjekoRezic/Django_evidencija_PiU | 1457728f126f1d6b81e554e96e440c72f072cd39 | c166b89c250edb68432197a7f9d7355a2d3beec3 | refs/heads/main | 2023-08-26T18:20:16.858185 | 2021-10-29T14:44:20 | 2021-10-29T14:44:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from django.db import models
class UlazniPaket(models.Model):
    """Incoming-package record ("ulazni paket" is Croatian for "incoming package")."""
    paket=models.CharField(max_length=50)  # package name / label
    kvantiteta=models.IntegerField()  # quantity of items in the package
| [
"vjekogmf@gmail.com"
] | vjekogmf@gmail.com |
edfd6e489f61d204d3c8ed859133fe1cad841109 | 90d9dbd9b797463129b8c812e3fbcd07cc0034a0 | /6.shell.py | 9c0857f20c24442b2242930d1cf71d26858c0c99 | [] | no_license | iconigto/pp2021 | 7851f5fc966b72a3683a24b31a08924b497ea2d8 | f9e332464ba73bcb0cf24408e5858164fb62bf03 | refs/heads/master | 2023-04-21T07:26:01.940585 | 2021-05-10T06:17:03 | 2021-05-10T06:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | import subprocess
# Tiny interactive shell: keep reading commands until the user types 'exit'.
# iter() with a sentinel stops the loop exactly when input() returns 'exit'.
for cmd in iter(lambda: input('shell> '), 'exit'):
    # getoutput runs the command through the system shell and captures
    # stdout and stderr as a single string.
    print(subprocess.getoutput(cmd))
| [
"dqhieu.2016@gmail.com"
] | dqhieu.2016@gmail.com |
37d8bc549df4636bad8bb5f07474e436ca70a23a | dc839f40260b83de429f7e0aed3b9fbb5a37e8ac | /product/views.py | cfc4617a1bb44558e4f27453c4c37afdcd98db4c | [] | no_license | pantipanth/DjangoEcommerce | b779d030ea2e5dfa856c40ffbeedb62bc755f8c2 | 8037b0872bc22d8a892ebdfae89da55aee804305 | refs/heads/master | 2023-04-30T02:28:05.338992 | 2019-07-11T02:44:03 | 2019-07-11T02:44:03 | 196,309,575 | 0 | 0 | null | 2023-04-21T20:33:33 | 2019-07-11T02:57:46 | JavaScript | UTF-8 | Python | false | false | 336 | py | from django.shortcuts import render
from django.http import HttpResponse
from .models import Product
# Create your views here.
# def index(request):
# return HttpResponse("Product page here")
def index(request):
    """Render the product listing page with every Product in the database."""
    context = {'products': Product.objects.all()}
    return render(request, 'index.html', context)
| [
"Pantira.Pa@BDMS.CO.TH"
] | Pantira.Pa@BDMS.CO.TH |
536b01c341cc34f6df3f9ff4e98ec00bea9c79d7 | 49d6f5f50407b6adbb79e5a4124fbe99934a4e32 | /myapp/availability.py | 002c88b9dd59879febfea6670e3f8ac6dc273fb8 | [] | no_license | KritiDiyaliSunar/Hotelbooking | 8c6e2da6af6d71b7ea33a2e3e061774e65c3d899 | e00e65567b535648aac9678017d2a340160b380d | refs/heads/main | 2023-04-29T22:47:27.701169 | 2021-05-15T18:31:00 | 2021-05-15T18:31:00 | 355,771,545 | 0 | 2 | null | 2021-05-03T12:20:20 | 2021-04-08T05:13:34 | HTML | UTF-8 | Python | false | false | 394 | py | import datetime
from .models import Room, Booking
def check_availability(room, checkin, checkout):
    """Return True when `room` has no booking overlapping [checkin, checkout].

    A booking does not conflict when it starts after our check-out or ends
    before our check-in; the room is available if that holds for every
    existing booking (vacuously true when there are none).
    """
    existing = Booking.objects.filter(room=room)
    return all(
        booking.checkin > checkout or booking.checkout < checkin
        for booking in existing
    )
| [
"poohk2250@gmail.com"
] | poohk2250@gmail.com |
6a2d97297852933c5f4a971f9e2e750365d16931 | 3a88ccc2835e4a88c93d3401926b4ce991b61168 | /run_parallax.py | a18d2ebeef0fec16048d09cbfb46a9b439cd3bfa | [] | no_license | Shiner11/bd18f-Noori2 | adf779f441fac2e1b22e8df535bb7f5983121ea1 | df4221e406cc714c85cac54ac448276c6f8847f1 | refs/heads/master | 2020-04-10T11:45:02.645773 | 2018-12-09T08:37:04 | 2018-12-09T08:37:04 | 161,001,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,555 | py | """
Example command for running this script:
python run_parallax.py --max_steps=10
Example command for examining the checkpoint file:
python <PARALLAX_HOME>/tensorflow/tensorflow/python/tools/inspect_checkpoint.py --file_name=parallax_ckpt/model.ckpt-0 --tensor_name=conv1/kernel
"""
import os
import time
import tensorflow as tf
import parallax
from model import rnn
from tensorflow.examples.tutorials.mnist import input_data
# Command-line flags (TF1-style tf.app.flags).
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('resource_info_file', os.path.abspath(os.path.join(os.path.dirname(__file__), '.', 'resource_info')),
                           'Filename containing cluster information')
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Number of iterations to run for each workers.""")
tf.app.flags.DEFINE_integer('log_frequency', 50,
                            """How many steps between two runop logs.""")
tf.app.flags.DEFINE_integer('batch_size', 64,
                            """Batch size""")
tf.app.flags.DEFINE_boolean('sync', True, '')
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Build single-GPU rnn model
# `rnn()` returns a dict of graph ops/placeholders; Parallax replicates this
# single-GPU graph across the cluster described in resource_info_file.
single_gpu_graph = tf.Graph()
with single_gpu_graph.as_default():
    ops = rnn()
    train_op = ops['train_op']
    loss = ops['loss']
    acc = ops['acc']
    x = ops['images']
    y = ops['labels']
    is_training = ops['is_training']
# Checkpoint every step into parallax_ckpt/ (see module docstring above).
parallax_config = parallax.Config()
ckpt_config = parallax.CheckPointConfig(ckpt_dir='parallax_ckpt',
                                        save_ckpt_steps=1)
parallax_config.ckpt_config = ckpt_config
sess, num_workers, worker_id, num_replicas_per_worker = parallax.parallel_run(
    single_gpu_graph,
    FLAGS.resource_info_file,
    sync=FLAGS.sync,
    parallax_config=parallax_config)
# Training loop: every log_frequency steps report throughput and test accuracy.
start = time.time()
for i in range(FLAGS.max_steps):
    batch = mnist.train.next_batch(FLAGS.batch_size, shuffle=False)
    _, loss_ = sess.run([train_op, loss], feed_dict={x: [batch[0]],
                                                     y: [batch[1]],
                                                     is_training: [True]})
    if i % FLAGS.log_frequency == 0:
        end = time.time()
        throughput = float(FLAGS.log_frequency) / float(end - start)
        # Evaluate on the full test set; feeds are wrapped in a list because
        # Parallax expects one value per local replica.
        acc_ = sess.run(acc, feed_dict={x: [mnist.test.images],
                                        y: [mnist.test.labels],
                                        is_training: [False]})[0]
        parallax.log.info("step: %d, test accuracy: %lf, throughput: %f steps/sec"
                          % (i, acc_, throughput))
        start = time.time()
"noreply@github.com"
] | Shiner11.noreply@github.com |
03651903aaf208efde97db1ad6a0aa745cc4d917 | 270dd0fdc2bd8f2842cfc525ee016ced787ce916 | /main.py | 98dc9675d1ec390da169a55aa4ba7a9287d5a897 | [] | no_license | ekanshpurwar/SpaceInvader-game | 29d002f2e202c024e7b2168448fc37bfc7d5f50c | c90859270b873858a9a7dc8b0ba895ed965ac622 | refs/heads/main | 2023-02-14T20:26:00.305719 | 2021-01-12T06:30:08 | 2021-01-12T06:30:08 | 328,892,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,618 | py | import pygame
import random
import math
from pygame import mixer
# Press control + alt+ l to format python code properly
'''Anything happening inside the game window is known as event'''
# Initialize game
pygame.init()
# Create the screen
screen = pygame.display.set_mode((800, 600))
# Loading image for background
background = pygame.image.load("background.png")
# Game title and icon on game window
pygame.display.set_caption("SpaceInvader")
icon = pygame.image.load("ufo.png")
pygame.display.set_icon(icon)
# Background music; -1 loops the track forever
mixer.music.load("background.wav")
mixer.music.play(-1)
# Player sprite and starting position (bottom centre of the 800x600 window)
playerImg = pygame.image.load("player.png")
playerX = 370
playerY = 480
playerX_change = 0
# Enemies: parallel lists indexed by enemy number (position + per-axis speed)
enemyImg = []
enemyX = []
enemyY = []
enemyX_change = []
enemyY_change = []
num_enemies = 6
for i in range(num_enemies):
    enemyImg.append(pygame.image.load("enemy.png"))
    enemyX.append(random.randint(0, 735))
    enemyY.append(random.randint(50, 150))
    enemyX_change.append(4)
    enemyY_change.append(20)
# Bullet; bullet_state is "ready" (hidden) or "fire" (moving up the screen)
bulletImg = pygame.image.load("bullet.png")
bulletX = 0
bulletY = 480
bulletX_change = 0
bulletY_change = 10
bullet_state = "ready"
# Score display (top-left corner)
''''''
score_value = 0
font = pygame.font.Font("freesansbold.ttf", 32)
textX = 10
textY = 10
# Game over banner font and position
game_over_font = pygame.font.Font("freesansbold.ttf", 64)
game_overX = 250
game_overY = 250
# Functions
def game_over(x, y):
    """Draw the 'Game Over !' banner at screen position (x, y).

    Bug fix: render with the dedicated 64pt `game_over_font` — previously the
    32pt score `font` was used, leaving `game_over_font` defined but unused.
    """
    game_over_text = game_over_font.render("Game Over !", True, (255, 255, 255))
    screen.blit(game_over_text, (x, y))
def show_score(x, y):
    """Render the current score counter at screen position (x, y)."""
    rendered = font.render("Score : " + str(score_value), True, (255, 255, 255))
    screen.blit(rendered, (x, y))
def player(x, y):
    """Draw the player's ship sprite at (x, y)."""
    screen.blit(playerImg, (x, y)) # blit method is used to draw an image to the screen
def enemy(x, y, i):
    """Draw enemy number `i` at (x, y)."""
    screen.blit(enemyImg[i], (x, y)) # blit method is used to draw an image to the screen
def fire_bullet(x, y):
    """Mark the bullet as in flight and draw it just above the ship at (x, y)."""
    global bullet_state
    bullet_state = "fire"
    # +16/+10 offsets centre the bullet sprite relative to the ship sprite
    screen.blit(bulletImg, (x + 16, y + 10)) # blit method is used to draw an image to the screen
def isCollision(enemyX, enemyY, bulletX, bulletY):
    """Return True when the bullet is within 27 pixels of the enemy.

    Bug fix: the original computed
    sqrt((enemyX - enemyY)^2 + (bulletX - bulletY)^2), mixing the x and y
    coordinates of different objects.  The Euclidean distance between the two
    sprites is sqrt((enemyX - bulletX)^2 + (enemyY - bulletY)^2).
    """
    distance = math.sqrt(math.pow(enemyX - bulletX, 2) + math.pow(enemyY - bulletY, 2))
    return distance < 27
# Main game loop: handle input events, move sprites, detect collisions, redraw.
running = True
while running:
    # Change background color
    screen.fill((0, 255, 0))
    # Change background image
    screen.blit(background, (0, 0)) # Here(0,0) are coordinate from where background image to be appeared
    for event in pygame.event.get(): # To loop through all the event happening inside the game
        # Quit Event
        if event.type == pygame.QUIT: # To check if event type is quit happend which is done by pressing the cross button on window
            running = False
        # Events
        # To check is the left arrow or right arrow key event is happend
        if event.type == pygame.KEYDOWN: # keydown means if key is presses
            if event.key == pygame.K_LEFT:
                # print("Left key is pressed")
                playerX_change = -5
            if event.key == pygame.K_RIGHT:
                # print("Right key is pressed")
                playerX_change = 5
            if event.key == pygame.K_SPACE:
                # Only one bullet may be in flight at a time
                if bullet_state == "ready":
                    # play bullet sound
                    bullet_sound = mixer.Sound("laser.wav")
                    bullet_sound.play()
                    bulletX = playerX
                    fire_bullet(bulletX, bulletY)
        if event.type == pygame.KEYUP: # keyup means if key is released
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                playerX_change = 0
    # Player Movement
    playerX += playerX_change
    '''Check if player reaches to boundary then it should not cross the bounds'''
    if playerX <= 0:
        playerX = 0
    elif playerX >= 736: # 800-64=736 because width of spaceship is 64px
        playerX = 736
    for i in range(num_enemies):
        # Game over
        if enemyY[i] > 440:
            for j in range(num_enemies):
                enemyY[j] = 2000 # if enemy is closer to spaceship then send enemy out of window in y-axis
            game_over(game_overX, game_overY)
            break
        # Enemy Movement: bounce horizontally, stepping down one row at each wall
        enemyX[i] += enemyX_change[i]
        if enemyX[i] <= 0:
            enemyX_change[i] = 4
            enemyY[i] += enemyY_change[i]
        elif enemyX[i] >= 736: # 800-64=736 because width of spaceship is 64px
            enemyX_change[i] = -4
            enemyY[i] += enemyY_change[i]
        # Collision: on hit, reset the bullet, bump the score, respawn the enemy
        collision = isCollision(enemyX[i], enemyY[i], bulletX, bulletY)
        if collision:
            # Adding collision sound
            collision_sound = mixer.Sound("explosion.wav")
            collision_sound.play()
            bulletY = 480
            bullet_state = "ready"
            score_value += 1
            enemyX[i] = random.randint(0, 735)
            enemyY[i] = random.randint(50, 150)
        enemy(enemyX[i], enemyY[i], i)
    # Bullet Movement: reset once it leaves the top of the screen
    if bulletY <= 0:
        bulletY = 480
        bullet_state = "ready"
    if bullet_state == "fire":
        fire_bullet(bulletX, bulletY)
        bulletY -= bulletY_change
    # Function call
    show_score(textX, textY)
    player(playerX, playerY)
    pygame.display.update() # so that the display always get updating whenever any event happens
| [
"noreply@github.com"
] | ekanshpurwar.noreply@github.com |
7cf276926f662a3e7edbcdfcd8677e9e17844f59 | c7933b505b9d23b9ed5326aeed52a1a80de12208 | /account/models.py | 43c6f84178014a019c81088668e750960f42e3e8 | [] | no_license | Bektur756/pyshop_cloud | 0a5c376cf52f052d59624c61c68b08e847481087 | 2275bb7b729df97eec52363a582fe635251f0d67 | refs/heads/master | 2023-07-11T04:52:50.908010 | 2021-08-25T13:50:49 | 2021-08-25T13:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.core.mail import send_mail
from django.db import models
# Create your models here.
from django.utils.crypto import get_random_string
class UserManager(BaseUserManager):
    """Manager that creates accounts keyed by e-mail instead of username."""

    def _create(self, email, password, **extra_fields):
        # Shared implementation for regular users and superusers.
        if not email:
            raise ValueError('Email не может быть пустым')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save()
        return user

    def create_user(self, email, password, **extra_fields):
        # Regular accounts are not staff unless the caller says otherwise.
        extra_fields.setdefault('is_staff', False)
        return self._create(email, password, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        # Superusers are staff and active immediately.
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_active', True)
        return self._create(email, password, **extra_fields)
class User(AbstractBaseUser):
    """Custom user model authenticated by e-mail address."""
    email = models.EmailField(primary_key=True)  # e-mail doubles as the primary key
    name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50, blank=True)
    is_active = models.BooleanField(default=False)  # activated via e-mailed code
    is_staff = models.BooleanField(default=False)
    activation_code = models.CharField(max_length=20, blank=True)
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']
    def __str__(self):
        return self.email
    def has_module_perms(self, app_label):
        # Only staff may see admin modules.
        return self.is_staff
    def has_perm(self, perm, obj=None):
        # Staff get every permission; regular users get none.
        return self.is_staff
    def create_activation_code(self):
        """Generate and persist a random 10-character activation code."""
        code = get_random_string(10)
        self.activation_code = code
        self.save()
    def send_activation_email(self):
        """E-mail the activation code to the user (message text is Russian)."""
        message = f'''
        Благодарим Вас за регистрацию на нашем сайте
        Ваш код активации: {self.activation_code}
        '''
        send_mail('Активация аккаунта',
                  message,
                  'test@gmail.com',
                  [self.email],
                  )
| [
"bektur756@gmail.com"
] | bektur756@gmail.com |
8728e365e7d7eb7024f6524d63406cd1993322f7 | 4b1d977acfde9354685157e02459c016c041421d | /tests/test_molecules.py | 88a58a7272d50cddca1b5caf2d88a8175dd2b880 | [] | no_license | fujiisoup/pyspectra | f6c50d929e992ab6064ef978a4de0c0647ff3d4b | 152bf37dee7e9eeabf42d24496566022d00d31ec | refs/heads/master | 2023-07-25T08:23:13.637233 | 2023-07-05T16:32:30 | 2023-07-05T16:32:30 | 246,492,492 | 3 | 0 | null | 2023-07-05T16:32:32 | 2020-03-11T06:28:08 | Python | UTF-8 | Python | false | false | 2,363 | py | import numpy as np
from pyspectra import molecules, units, refractive_index, data
def test_level():
# fulcher
constants = data.diatomic_molecules("H2").sel(state="X 1Σg 1sσ2")
for key in constants:
print(key, constants[key].item())
upper = molecules.level("H2", "d 3Πu 3pπ", 0, 1)
lower = molecules.level("H2", "a 3Σg+ 2sσ", 0, 1)
wavelength = refractive_index.vacuum_to_air(units.eV_to_nm(upper - lower))
print(wavelength, units.eV_to_nm(upper - lower))
assert np.allclose(601.8299, wavelength, atol=2e-3, rtol=0)
def test_OH_X2():
    """
    Check OH X2 rovibrational levels against values from Table 27.
    Quantum numbers are randomly chosen rows: (v, J, parity, F1/F2).
    """
    qnums = []
    levels = []
    # v, J, parity, 3/2 or 1/2
    # qnums.append([0, 0.5, +1, 1])  # F1e
    # levels.append(0.0000)
    qnums.append([0, 0.5, +1, 2])  # F2e
    levels.append(88.1066)
    # qnums.append([0, 0.5, -1, 1])  # F1f
    # levels.append(0.0000)
    qnums.append([0, 0.5, -1, 2])  # F2f
    levels.append(88.2642)
    qnums.append([0, 1.5, +1, 1])  # F1e
    levels.append(-38.2480)
    qnums.append([0, 1.5, +1, 2])  # F2e
    levels.append(149.3063)
    qnums.append([0, 1.5, -1, 1])  # F1f
    levels.append(-38.1926)
    qnums.append([0, 1.5, -1, 2])  # F2f
    levels.append(149.5662)
    qnums.append([0, 10.5, +1, 1])  # F1e
    levels.append(1976.8000)
    qnums.append([0, 10.5, +1, 2])  # F2e
    levels.append(2414.9290)
    qnums.append([0, 10.5, -1, 1])  # F1f
    levels.append(1981.4015)
    qnums.append([0, 10.5, -1, 2])  # F2f
    levels.append(2412.0731)
    v, J, parity, spin = np.array(qnums).T
    energies = molecules.level_OH_X2(v, J, parity, spin)
    # for lev, en in zip(levels, energies):
    #     print('{} : {}'.format(lev, en))
    assert np.allclose(energies, levels, atol=0.1)
    # Second batch: higher vibrational level (v=4) rows from the same table.
    qnums = []
    levels = []
    qnums.append([4, 13.5, +1, 1])  # F1e
    levels.append(16062.2776)
    qnums.append([4, 13.5, +1, 2])  # F2e
    levels.append(16522.0293)
    qnums.append([4, 13.5, -1, 1])  # F1f
    levels.append(16068.1260)
    qnums.append([4, 13.5, -1, 2])  # F2f
    levels.append(16517.9751)
    v, J, parity, spin = np.array(qnums).T
    energies = molecules.level_OH_X2(v, J, parity, spin)
    # for lev, en in zip(levels, energies):
    #     print('{} : {}'.format(lev, en))
    assert np.allclose(energies, levels, atol=0.1)
| [
"fujiisoup@gmail.com"
] | fujiisoup@gmail.com |
df20cf4dd924be42b06b437892eee15ece3e7b32 | 31de8a25f0390c20dbf6a6843c55d21db891e3b3 | /TripAdvisor/MongoDB_Helper.py | 2592af9dea6259775832a17f8400802a3be3fb13 | [] | no_license | stevenshichn/TripAdvisor_Scraping_Analysis | 9259a7b30d11bb9f07f3d6c4de15928e02424f5e | 65578042ea83c1af66b72b5d60b8f912d6a7d6d4 | refs/heads/master | 2021-01-20T10:50:17.341956 | 2017-11-05T18:25:24 | 2017-11-05T18:25:24 | 101,651,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,105 | py | import csv
import re
from datetime import datetime
import TripAdvisor.StringUtils as Utils
import TripAdvisor.Convert_Helper as ConHelper
import TripAdvisor.DateUtils as DateUtils
def Drop_DataBase(databaseName, client):
    """Drop the entire MongoDB database `databaseName` using `client`."""
    client.drop_database(databaseName)
def Clear_AllRecords_In_Table(databaseName, tableName, client):
    """Delete every document from collection `tableName` in `databaseName`.

    The collection itself is kept (only its documents are removed).
    """
    db = client[databaseName]
    collection = db[tableName]
    # delete_many({}) replaces the deprecated Collection.remove(), which was
    # removed in pymongo 4; the rest of this module already uses the
    # pymongo 3+ CRUD API (insert_one / update_one / find_one).
    collection.delete_many({})
def Insert_Data_IntoMongoDB(databBaseName, tableName, client, valueDic, updateColumn = '', updateKey = ''):
    """Insert `valueDic` into the collection, or upsert-style update it.

    When `updateColumn` is empty the document is simply inserted.  Otherwise
    the document matching updateColumn == updateKey is updated with $set,
    falling back to a plain insert when no such document exists.
    """
    collection = client[databBaseName][tableName]
    if updateColumn == '':
        collection.insert_one(valueDic)
        return
    selector = {updateColumn: updateKey}
    if collection.find_one(selector) is not None:
        collection.update_one(selector, {'$set': valueDic})
    else:
        collection.insert_one(valueDic)
def Insert_CSV_Reviews_Into_MongoDB(fileName, client):
    """Load review rows from a UTF-8 CSV file into the reviews collection.

    The first CSV row supplies the field names (spaces become underscores in
    the stored document keys); the review-date column is converted to a date
    object via ConHelper.  Rows that fail to insert are reported by their
    first column and skipped.
    """
    db = client[Utils.TripAdvisor_DB]
    collection = db[Utils.Reviews_Table]
    with open(fileName, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        # Pre-compute the sanitized document keys once instead of per row.
        fields = [name.replace(' ', '_') for name in header]
        for row in reader:
            record = {}  # renamed from `dict`, which shadowed the builtin
            for index, name in enumerate(header):
                if name == Utils.DicReviewDateKey:
                    record[fields[index]] = ConHelper.Convert_To_DateObject(row[index])
                else:
                    record[fields[index]] = row[index]
            try:
                collection.insert_one(record)
            except Exception:
                # Report the offending row id and keep loading the rest.
                # (Was a bare `except:`, which also swallows KeyboardInterrupt.)
                print(row[0])
    # No explicit f.close() needed: the `with` block closes the file.
def Insert_CSV_Users_Into_MongoDB(fileName, client):
    """Load user rows from a UTF-8 CSV file into the users collection.

    Rows are keyed by user name: new users are inserted; existing users are
    updated in place (their name is echoed to stdout, matching the original
    behaviour).  Rows that fail are reported by their first column.
    """
    db = client[Utils.TripAdvisor_DB]
    userCollection = db[Utils.Users_Table]
    with open(fileName, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        for row in reader:
            record = {}  # renamed from `dict`, which shadowed the builtin
            userName = ''
            for index in range(len(header)):
                if header[index] == Utils.DicUserNameKey:
                    userName = row[index]
                record[header[index].replace(' ', '_')] = row[index]
            try:
                queryFilter = {Utils.DicUserNameKey.replace(' ', '_'): userName}
                existing = userCollection.find_one(queryFilter)
                if existing is None:
                    userCollection.insert_one(record)
                else:
                    print(userName)
                    # Bug fix: update_one requires an update-operator document;
                    # passing the raw dict raises ValueError in pymongo.  The
                    # sibling Insert_Data_IntoMongoDB already uses {'$set': ...}.
                    userCollection.update_one(queryFilter, {'$set': record})
            except Exception:
                print(row[0])
    # No explicit f.close() needed: the `with` block closes the file.
# accept yyyy-mm-dd
def getDateRangeData(biggerDate, smallerDate, dbCollection):
    """Return documents whose review date lies strictly between the two dates.

    Both arguments are 'yyyy-mm-dd' strings; the bounds are exclusive
    ($lt / $gt).
    """
    biggerD = DateUtils.parserDate(biggerDate)
    smallerD = DateUtils.parserDate(smallerDate)
    return dbCollection.find({Utils.DicReviewDateKey : {"$lt" : biggerD, "$gt" : smallerD}})
def getDateRangeDataAcceptDateObject(big, small, col, functionName):
    """Return only the sentiment field of reviews for `functionName` dated in [small, big].

    Unlike getDateRangeData the bounds are date objects and inclusive ($lte / $gte).
    """
    return col.find({'$and': [{Utils.DicReviewDateKey : {"$lte" : big, "$gte" : small}}, {Utils.DicTouristFunctionKey : functionName}]},{Utils.DicSentimentKey : 1,'_id' :0})
def getRegexObject(pattern):
    """Compile `pattern` into a case-insensitive regular-expression object."""
    return re.compile(pattern, flags=re.IGNORECASE)
def getDataWithKeyword(dbCollection, column, keywordPattern):
    """Return documents whose `column` matches `keywordPattern` (case-insensitive regex)."""
    return dbCollection.find({column : {"$regex" : getRegexObject(keywordPattern)}})
def aggregateSentiment(col, startDate, endDate, functionName, keyword = ''):
    """Count reviews per sentiment value for `functionName` in the date range.

    `keyword` (case-insensitive regex, empty matches everything) is searched
    in either the comment or the title field.  Returns the aggregation cursor
    of {_id: sentiment, total: count} documents.
    """
    pipe = [{
        '$match': {'$and': [{Utils.DicTouristFunctionKey : functionName},
                   {Utils.DicReviewDateKey : {'$gte': startDate, '$lte': endDate}},
                   {'$or':[{'$and' : [{Utils.DicCoomentKey : {"$regex" : getRegexObject(keyword)}}]},{Utils.DicTitleKey:{'$regex' : getRegexObject(keyword)}}]}]}
        },
        {'$group': {'_id': '$' + Utils.Sentiment_Col, 'total': {'$sum': 1}}}]
    return col.aggregate(pipeline=pipe)
def getSortedDateRecord(col, functionName):
    """Return reviews for `functionName` sorted by review date, newest first."""
    pipe = [{'$match': {Utils.DicTouristFunctionKey : functionName}},{'$sort': {Utils.DicReviewDateKey : -1}}]
    return col.aggregate(pipeline = pipe)
def getLatesetDateRecordDateString(col, functionName):
    """Return the most recent review date for `functionName` as a 'yyyy-mm-dd' string.

    (Name keeps the historical 'Lateset' typo for caller compatibility.)
    """
    df = getSortedDateRecord(col,functionName)
    # First document of the newest-first cursor; drop the time portion.
    return str(df.next()[Utils.DicReviewDateKey]).split( )[0]
def getLatestDate(col, functionName):
    """Return the most recent review date for `functionName` as a datetime."""
    year, month, day = getLatesetDateRecordDateString(col, functionName).split('-')
    return datetime(int(year), int(month), int(day))
def averageReviewRating_DateRange(col, startDate, endDate, functionName, keyword =''):
    """Average the review ratings for `functionName` in [startDate, endDate].

    `keyword` (case-insensitive regex, empty matches everything) is searched
    in either the comment or the title field.  Returns the aggregation cursor
    with a single {_id: functionName, avgRating: ...} document.
    """
    pipe = [{'$match': {'$and':
                [ {Utils.DicReviewDateKey:
                    {'$gte': startDate,
                     '$lte': endDate
                     }},{'$or':[{'$and' : [{Utils.DicCoomentKey : {"$regex" : getRegexObject(keyword)}}]},{ Utils.DicTitleKey :{'$regex' : getRegexObject(keyword)}}]},
                  {Utils.DicTouristFunctionKey : functionName}
                  ]}},
            {'$group': {'_id':'$' + Utils.DicTouristFunctionKey, 'avgRating':{'$avg' : '$' + Utils.DicReviewRatingKey}}}]
    return col.aggregate(pipeline = pipe)
"stevenshichn@gmail.com"
] | stevenshichn@gmail.com |
5602376417650cf495e65153cbdf4344f714dd86 | ede4e0a53b382ba08cbb2a905fc3ee5d9013c0c5 | /ZuriCapWeb/backend/database/inject_queries.py | d879921f3305e731696fc38951c0953b7307b189 | [] | no_license | riasatullah/ZuriCapWeb | dd0daf6b8f37b9af9a01ec2dac254cbbc98d0710 | 668f3a623caf1823d3b859127acb2481664f4cf9 | refs/heads/master | 2021-08-31T02:30:55.392525 | 2017-12-20T07:02:29 | 2017-12-20T07:02:29 | 114,853,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,370 | py | # By: Riasat Ullah
from ZuriCapWeb.backend.database.connector import DBConn
from ZuriCapWeb.utils import times, handyman
from ZuriCapWeb.variables import messages, params
import psycopg2
conn = DBConn()
def add_new_invoice(data):
    """Insert a new invoice row built from the `params`-keyed dict `data`.

    NOTE(review): values are spliced into the SQL with str.format — a
    SQL-injection risk; consider parameterized queries.
    """
    try:
        query = '''
        insert into invoices
        (reference_id, buyer_id, buyer_name, supplier_id, supplier_name,
        financing_product_id, financing_product, submission_timestamp,
        submission_date, invoice_date, item_description,
        currency, invoice_total, submittedby, submission_overridden,
        approval_date, approved_by, status)
        values
        ('{0}', {1}, '{2}', {3}, '{4}',
        {5}, '{6}', '{7}',
        '{8}', '{9}', '{10}',
        '{11}', {12}, '{13}', '{14}',
        '{15}', '{16}', '{17}')
        '''.format(data[params.invoice_ref_id],
                   data[params.buyer_id],
                   data[params.buyer_name],
                   data[params.supplier_id],
                   data[params.supplier_name],
                   data[params.financing_product_id],
                   data[params.financing_product],
                   data[params.submission_timestamp],
                   data[params.submitted_on],
                   data[params.invoice_date],
                   data[params.description],
                   data[params.currency],
                   data[params.invoice_total],
                   data[params.submitted_by],
                   data[params.submission_overridden],
                   data[params.approved_on],
                   data[params.approved_by],
                   data[params.invoice_status])
        conn.execute(query)
        return 0
    except Exception as e:
        raise Exception(e)
def close_invoice(data, cancelled=False):
    """Build the SQL transaction that books an invoice closure.

    `data` is a dict keyed by the `params` constants.  When `cancelled` is
    true an extra UPDATE marking the invoice cancelled is appended inside the
    same transaction.

    Bug fix: the first VALUES placeholder was written "{0)" (mismatched
    brace), which made str.format raise ValueError before any query was
    produced.

    NOTE(review): like update_password/update_client_profile this function
    only prints the query instead of executing it — presumably a dry-run
    stub; confirm before relying on it.  Values are spliced in with
    str.format (SQL-injection risk).
    """
    cancel_query = '''
        update invoices set cancelled = 't',
        cancelled_timestamp = '{0}', cancelled_by = '{1}'
        where invoice_id = {2};
        '''.format(data[params.updated_timestamp],
                   data[params.updated_by],
                   data[params.invoice_id])
    query = '''
        begin;
        insert into closed_invoices
        (invoice_id, closing_date, status, currency,
        financed, transaction_costs, principal_repaid, total_repayments,
        discount_fees, unrealized_pnl, updated_by, updated_timestamp, notes)
        values
        ({0}, '{1}', '{2}', '{3}', {4}, {5}, {6},
        {7}, {8}, {9}, '{10}', '{11}', '{12}');
        update invoices set status = '{2}' where invoice_id = {0};
        {13}
        end;
        '''.format(data[params.invoice_id],
                   data[params.completion_date],
                   data[params.invoice_status],
                   data[params.currency],
                   data[params.total_financed],
                   data[params.transaction_cost],
                   data[params.principal_repaid],
                   data[params.total_repayments],
                   data[params.discount_fees],
                   data[params.unrealized_pnl],
                   data[params.updated_by],
                   data[params.updated_timestamp],
                   data[params.notes],
                   cancel_query if cancelled else '')
    try:
        print(query)
    except Exception as e:
        raise Exception(e)
def add_client(data):
    """Insert a new client row from the `params`-keyed dict `data`.

    Bug fix: the original INSERT listed the columns but had no VALUES clause
    and no format placeholders, so .format() discarded every argument and the
    statement was invalid SQL.

    NOTE(review): values are spliced in with str.format — SQL-injection risk;
    consider parameterized queries.
    """
    query = '''
        insert into clients
        (startdate, enddate, buyer, supplier, client_name,
        address, city, zip, country, phone,
        registration_no, tax_pin, description, industry, size)
        values
        ('{0}', '{1}', {2}, {3}, '{4}',
        '{5}', '{6}', '{7}', '{8}', '{9}',
        '{10}', '{11}', '{12}', '{13}', '{14}')
        '''.format(data[params.start_date],
                   data[params.end_date],
                   data[params.buyer],
                   data[params.supplier],
                   data[params.client_name],
                   data[params.address],
                   data[params.city],
                   data[params.client_zip],
                   data[params.country],
                   data[params.phone],
                   data[params.registration],
                   data[params.tax_pin],
                   data[params.description],
                   data[params.industry],
                   data[params.client_size])
    try:
        conn.execute(query)
    except Exception as e:
        raise Exception(e)
def add_user(data):
    """Insert an authorized-signatory (user) row from the `params`-keyed dict `data`.

    NOTE(review): values are spliced into the SQL with str.format —
    SQL-injection risk; consider parameterized queries.
    """
    query = '''
        insert into authorized_signatories
        (username, pwd_salt, pwd_hash, id_type, id_no,
        access_start, access_end, clientid, first_name, last_name,
        birth_date, title, email, office_phone, cell_phone,
        question_1, answer_1, question_2, answer_2, notes)
        values
        ('{0}', '{1}', '{2}', '{3}', '{4}',
        '{5}', '{6}', {7}, '{8}', '{9}',
        '{10}', '{11}', '{12}', '{13}', '{14}',
        '{15}', '{16}', '{17}', '{18}', '{19}')
        '''.format(data[params.username],
                   data[params.salt],
                   data[params.hash_password],
                   data[params.id_type],
                   data[params.id],
                   data[params.start_date],
                   data[params.end_date],
                   data[params.client_id],
                   data[params.first_name],
                   data[params.last_name],
                   data[params.birth_date],
                   data[params.title],
                   data[params.email],
                   data[params.office_phone],
                   data[params.cell_phone],
                   data[params.question_1],
                   data[params.answer_1],
                   data[params.question_2],
                   data[params.answer_2],
                   data[params.notes])
    try:
        conn.execute(query)
    except Exception as e:
        raise Exception(e)
def update_password(username, salt, new_hash_pwd):
    """Build the SQL that stores a new salt and password hash for `username`.

    NOTE(review): the query is only printed, never executed — looks like a
    dry-run stub (several siblings do the same); confirm intent before
    relying on it.  Values are spliced in with str.format (injection risk).
    """
    query = '''
        update authorized_signatories
        set pwd_salt = '{0}', pwd_hash = '{1}'
        where username = '{2}'
        '''.format(salt,
                   new_hash_pwd,
                   username)
    try:
        print(query)
    except Exception as e:
        raise Exception(e)
def add_forwarded_payment(payment_date, currency, amount, fees, payment_type, paid_to,
                          updated_by, notes, invoice_id, invoice_percentage):
    """Record an outgoing payment by calling the make_payment() DB function.

    NOTE(review): arguments are spliced into the SQL with str.format —
    SQL-injection risk; consider parameterized queries.
    """
    query = '''
        select make_payment('{0}', '{1}', {2}, {3},
        '{4}', 0, {5}, '{6}',
        '{7}', '{8}', {9}, {10})
        '''.format(payment_date, currency, str(amount), str(fees),
                   payment_type, str(paid_to), updated_by,
                   times.current_timestamp(), notes,
                   str(invoice_id), str(invoice_percentage))
    try:
        conn.execute(query)
    except Exception as e:
        raise Exception(e)
def add_received_payment(payment_date, currency, amount, fees, payment_type,
                         paid_by, updated_by, notes, invoices, closures):
    """Record an incoming payment via accept_payment() and close the given invoices."""
    # Encode the per-invoice allocations as a Postgres 2-D array literal,
    # e.g. {{1,50},{2,50}}; empty string when no invoice details were supplied.
    groups = ['{' + ','.join(str(element) for element in item) + '}'
              for item in invoices]
    details_str = '{' + ','.join(groups) + '}' if groups else ''
    query = '''
        begin;
        select accept_payment('{0}', '{1}', {2}, {3},
        '{4}', {5}, 0, '{6}',
        '{7}', '{8}', {9});
        update invoices set status = 'CLOSED' where invoice_id in ({10});
        end;
        '''.format(payment_date, currency, str(amount), str(fees),
                   payment_type, str(paid_by), updated_by,
                   times.current_timestamp(), notes, details_str,
                   handyman.convert_int_to_string(closures))
    try:
        conn.execute(query)
    except Exception as e:
        raise Exception(e)
def add_new_client(data):
    '''
    Inserts a new client and seeds its six default (zero) limit rows.
    The new clientid is max(clientid)+1, computed inside the transaction.
    :param data: a dictionary of data to add
    '''
    query = '''
        begin;
        with t1 as(
            select max(clientid) + 1 as id from clients
        )
        , t2 as(
            insert into clients
            (select id, '{0}', '99990101', {1}, {2},
            '{3}', '{4}', '{5}', '{6}', '{7}',
            '{8}', '{9}', '{10}', '{11}', '{12}', '{13}' from t1)
        )
        , t3 as(
            insert into client_limits
            (select '{0}', '99990101', id, 1, 'discounting_max_giv', 'KES', 0 from t1)
        ), t4 as(
            insert into client_limits
            (select '{0}', '99990101', id, 2, 'discounting_max_single_iv', 'KES', 0 from t1)
        ), t5 as(
            insert into client_limits
            (select '{0}', '99990101', id, 3, 'discounting_max_invoice_count', 'KES', 0 from t1)
        ), t6 as(
            insert into client_limits
            (select '{0}', '99990101', id, 4, 'invoicing_max_giv', 'KES', 0 from t1)
        ), t7 as(
            insert into client_limits
            (select '{0}', '99990101', id, 5, 'invoicing_max_single_iv', 'KES', 0 from t1)
        )
        insert into client_limits
        (select '{0}', '99990101', id, 6, 'invoicing_max_invoice_count', 'KES', 0 from t1);
        end;
        '''.format(data[params.start_date],
                   True if data[params.client_type] in ['BUYER', 'BOTH'] else False,
                   True if data[params.client_type] in ['SUPPLIER', 'BOTH'] else False,
                   data[params.client_name],
                   data[params.address],
                   data[params.city],
                   data[params.client_zip],
                   data[params.country],
                   data[params.office_phone],
                   data[params.registration],
                   data[params.tax_pin],
                   data[params.description],
                   data[params.industry],
                   data[params.client_size])
    try:
        conn.execute(query)
    except psycopg2.DatabaseError as e:
        raise psycopg2.DatabaseError(messages.error_db_query) from e
    except Exception as e:
        raise Exception(e)
def update_client_profile(data):
    '''
    Update's a client's details

    Bug fix: the SET list was missing the comma between country = '{5}' and
    phone = '{6}', producing invalid SQL.

    NOTE(review): the query is only printed, never executed — presumably a
    dry-run stub; confirm intent before relying on it.  Values are spliced
    in with str.format (SQL-injection risk).
    :param data: dictionary of data to update with
    '''
    query = '''
        update clients
        set buyer = {0},
        supplier = {1},
        address = '{2}',
        city = '{3}',
        zip = '{4}',
        country = '{5}',
        phone = '{6}',
        registration_no = '{7}',
        tax_pin = '{8}',
        description = '{9}',
        industry = '{10}',
        size = '{11}'
        where clientid = {12}
        '''.format(True if data[params.client_type] in ['BUYER', 'BOTH'] else False,
                   True if data[params.client_type] in ['SUPPLIER', 'BOTH'] else False,
                   data[params.address],
                   data[params.city],
                   data[params.client_zip],
                   data[params.country],
                   data[params.phone],
                   data[params.registration],
                   data[params.tax_pin],
                   data[params.description],
                   data[params.industry],
                   data[params.client_size],
                   data[params.client_id])
    try:
        print(query)
    except psycopg2.DatabaseError as e:
        raise psycopg2.DatabaseError(messages.error_db_query) from e
def add_relation(data):
    '''
    Adds a new buyer/supplier relation and seeds its three default (zero)
    relation limits, all in one transaction.
    :param data: data for the relation
    '''
    query = '''
        begin;
        insert into relations values
        ('{0}', '99990101', {1}, '{2}',
        {3}, '{4}', {5}, {6},
        {7}, {8}, '{9}');
        insert into relation_limits values
        ('{0}', '99990101', {1}, {3}, 7, 'relation_max_giv', 'KES', 0),
        ('{0}', '99990101', {1}, {3}, 8, 'relation_max_single_iv', 'KES', 0),
        ('{0}', '99990101', {1}, {3}, 9, 'relation_max_invoice_count', 'KES', 0);
        end;
        '''.format(data[params.start_date],
                   str(data[params.buyer_id]),
                   data[params.buyer_name],
                   str(data[params.supplier_id]),
                   data[params.supplier_name],
                   data[params.buyer_fraction],
                   data[params.supplier_fraction],
                   # approval flags arrive as 0 = approval required -> True
                   True if data[params.buyer_approval] == 0 else False,
                   True if data[params.supplier_approval] == 0 else False,
                   data[params.rm_name])
    try:
        print(query)
        conn.execute(query)
    except psycopg2.DatabaseError as e:
        raise psycopg2.DatabaseError(messages.error_db_query) from e
def edit_relation(buyer_id, supplier_id, data, date=None):
    '''
    Updates a relation by end-dating the current row and inserting a new one
    effective from `date` (defaults to today) — a bitemporal-style update.
    :param buyer_id: buyer's client id
    :param supplier_id: supplier's client id
    :param data: data to update
    '''
    today = times.current_date()
    if date is not None:
        today = date
    query = '''
        begin;
        update relations set enddate = '{0}'
        where buyer_clientid = {1} and
        supplier_clientid = {2} and
        startdate <= '{0}' and
        enddate > '{0}';
        with t1 as(
            select client_name as buyer_name from clients where clientid = {1}
        )
        , t2 as(
            select client_name as supplier_name from clients where clientid = {2}
        )
        , t3 as(
            select t1.buyer_name, t2.supplier_name from t1, t2
        )
        insert into relations
        (select '{0}', '99990101', {1}, buyer_name, {2}, supplier_name,
        {3}, {4}, {5}, {6}, '{7}' from t3);
        end;
        '''.format(today,
                   str(buyer_id),
                   str(supplier_id),
                   str(data[params.buyer_fraction]),
                   str(data[params.supplier_fraction]),
                   # approval flags arrive as 0 = approval required -> True
                   True if int(data[params.buyer_approval]) == 0 else False,
                   True if int(data[params.supplier_approval]) == 0 else False,
                   data[params.rm_name])
    try:
        conn.execute(query)
    except psycopg2.DatabaseError as e:
        raise psycopg2.DatabaseError(messages.error_db_query) from e
def save_client_limits(client_id, data, date=None, currency='KES'):
    '''
    Saves a client's limits in SCD type-2 style: the currently active
    limit rows are end-dated and six new open-ended rows (one per limit
    type, ids 1-6) are inserted, all inside a single transaction.

    :param client_id: client id
    :param data: data to update with (the six limit values)
    :param date: date to set the changes for; defaults to today
    :param currency: currency of the limits (default 'KES')
    :raises psycopg2.DatabaseError: if the transaction fails
    '''
    today = times.current_date()
    if date is not None:
        today = date
    # NOTE(review): limit values are interpolated with str.format, not
    # bound parameters -- safe only for trusted internal input.
    query = '''
    begin;
    update client_limits set enddate = '{0}'
        where startdate <= '{0}' and
              enddate > '{0}' and
              clientid = {1};
    insert into client_limits values
        ('{0}', '99990101', {1}, 1, 'discounting_max_giv', '{2}', {3}),
        ('{0}', '99990101', {1}, 2, 'discounting_max_single_iv', '{2}', {4}),
        ('{0}', '99990101', {1}, 3, 'discounting_max_invoice_count', '{2}', {5}),
        ('{0}', '99990101', {1}, 4, 'invoicing_max_giv', '{2}', {6}),
        ('{0}', '99990101', {1}, 5, 'invoicing_max_single_iv', '{2}', {7}),
        ('{0}', '99990101', {1}, 6, 'invoicing_max_invoice_count', '{2}', {8});
    end;
    '''.format(today,
               str(client_id),
               currency,
               data[params.discounting_max_giv],
               data[params.discounting_max_single_iv],
               data[params.discounting_max_invoice_count],
               data[params.invoicing_max_giv],
               data[params.invoicing_max_single_iv],
               data[params.invoicing_max_invoice_count])
    try:
        conn.execute(query)
    except psycopg2.DatabaseError as e:
        raise psycopg2.DatabaseError(messages.error_db_query) from e
def save_relation_limits(buyer_id, supplier_id, data, date=None, currency='KES'):
    '''
    Saves relation limits in the database in SCD type-2 style: active
    limit rows for the buyer/supplier pair are end-dated and three new
    open-ended rows (limit type ids 7-9) are inserted, all inside a
    single transaction.

    :param buyer_id: buyer's client id
    :param supplier_id: supplier's client id
    :param data: data to update with (the three relation limit values)
    :param date: date to set the changes for; defaults to today
    :param currency: currency of the limits (default 'KES')
    :raises psycopg2.DatabaseError: if the transaction fails
    '''
    today = times.current_date()
    if date is not None:
        today = date
    # NOTE(review): limit values are interpolated with str.format, not
    # bound parameters -- safe only for trusted internal input.
    query = '''
    begin;
    update relation_limits set enddate = '{0}'
        where startdate <= '{0}' and
              enddate > '{0}' and
              buyer_clientid = {1} and
              supplier_clientid = {2};
    insert into relation_limits values
        ('{0}', '99990101', {1}, {2}, 7, 'relation_max_giv', '{3}', {4}),
        ('{0}', '99990101', {1}, {2}, 8, 'relation_max_single_iv', '{3}', {5}),
        ('{0}', '99990101', {1}, {2}, 9, 'relation_max_invoice_count', '{3}', {6});
    end;
    '''.format(today,
               str(buyer_id),
               str(supplier_id),
               currency,
               data[params.relation_max_giv],
               data[params.relation_max_single_iv],
               data[params.relation_max_invoice_count])
    try:
        conn.execute(query)
    except psycopg2.DatabaseError as e:
        # NOTE(review): unlike the sibling functions, no message
        # (messages.error_db_query) is attached here -- likely an oversight.
        raise psycopg2.DatabaseError from e
def close_connection():
    '''Closes the module-level database connection held by ``conn``.'''
    conn.disconnect()
| [
"riasat.ullah@gmail.com"
] | riasat.ullah@gmail.com |
412a9d56f8c1abbd64aaec008f01f4127a964dd7 | 724028fd5ea6bdec7fad7558a8a7f890eacaaa0c | /appengine_config.py | b7fc022d2b2d42c2a0d989261e7410a820e87720 | [
"MIT"
] | permissive | Gorgoroth/steamplaytime | d6be3d65e8d019eeb81e755c798ee6ba8dc72539 | 8c0f18150050c9125c4fdbdbde8ac21de9a49805 | refs/heads/master | 2021-01-15T19:58:43.960786 | 2013-10-24T04:27:20 | 2013-10-24T04:27:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | def webapp_add_wsgi_middleware(app):
appstats_CALC_RPC_COSTS = True
from google.appengine.ext.appstats import recording
app = recording.appstats_wsgi_middleware(app)
return app | [
"nate@natecollings.com"
] | nate@natecollings.com |
2bdc663042e1e1aefc99f900694814b55def8c35 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-arms/aliyunsdkarms/request/v20190808/SearchEventsRequest.py | 3fae22f10c2789e7944a1a6c990d8133f134697a | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,864 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkarms.endpoint import endpoint_data
class SearchEventsRequest(RpcRequest):
    """RPC request for the ARMS ``SearchEvents`` action (API version 2019-08-08).

    Generated-SDK style: each supported query parameter gets a get_/set_
    accessor pair that proxies the underlying query-parameter map.  The
    trailing ``# Integer`` / ``# String`` / ``# Long`` comments record the
    wire type each parameter is expected to carry.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'ARMS', '2019-08-08', 'SearchEvents', 'arms')
        self.set_method('POST')
        # Newer SDK cores resolve regional endpoints through these maps;
        # the hasattr guards keep the class compatible with older cores.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_IsTrigger(self):  # Integer
        return self.get_query_params().get('IsTrigger')

    def set_IsTrigger(self, IsTrigger):  # Integer
        self.add_query_param('IsTrigger', IsTrigger)

    def get_AppType(self):  # String
        return self.get_query_params().get('AppType')

    def set_AppType(self, AppType):  # String
        self.add_query_param('AppType', AppType)

    def get_EndTime(self):  # Long
        return self.get_query_params().get('EndTime')

    def set_EndTime(self, EndTime):  # Long
        self.add_query_param('EndTime', EndTime)

    def get_Pid(self):  # String
        return self.get_query_params().get('Pid')

    def set_Pid(self, Pid):  # String
        self.add_query_param('Pid', Pid)

    def get_CurrentPage(self):  # Integer
        return self.get_query_params().get('CurrentPage')

    def set_CurrentPage(self, CurrentPage):  # Integer
        self.add_query_param('CurrentPage', CurrentPage)

    def get_StartTime(self):  # Long
        return self.get_query_params().get('StartTime')

    def set_StartTime(self, StartTime):  # Long
        self.add_query_param('StartTime', StartTime)

    def get_AlertType(self):  # Integer
        return self.get_query_params().get('AlertType')

    def set_AlertType(self, AlertType):  # Integer
        self.add_query_param('AlertType', AlertType)

    def get_PageSize(self):  # Integer
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):  # Integer
        self.add_query_param('PageSize', PageSize)

    def get_AlertId(self):  # Long
        return self.get_query_params().get('AlertId')

    def set_AlertId(self, AlertId):  # Long
        self.add_query_param('AlertId', AlertId)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
7d55518fab513f39689e34d6074566697309dd8a | 571d1ec6972b75f0d0515195e8062d9b4fc624bd | /_tests/annoy/imagehash_using_annoy.py | e3618b0bc5fcdd4a0a5738f7291429abccf39083 | [] | no_license | timpowellgit/allweveeverhad | 2b1a36ffc6e328caf5e340698a87d9e03b1959a6 | c1cf0e16a9e06a96a26ae37d1feebd3094635202 | refs/heads/master | 2022-12-01T20:16:32.655243 | 2019-02-24T21:50:49 | 2019-02-24T21:50:49 | 170,889,209 | 0 | 0 | null | 2022-11-22T02:22:56 | 2019-02-15T15:50:29 | Python | UTF-8 | Python | false | false | 1,775 | py | import random
import os
from PIL import Image
import imagehash
from tqdm import tqdm
import numpy as np
from annoy import AnnoyIndex
# Directory of haystack images and on-disk location for the Annoy index.
IMG_DIR = '/Users/greg/Desktop/ART-freeriots/adam-basanta-all-weve-ever-had-is-one-another/color_histogram_hashing/1000-artsy-images'
ANNOY_INDEX_PATH = '/Users/greg/Desktop/ART-freeriots/adam-basanta-all-weve-ever-had-is-one-another/repo/_annoy_tests/imghashes.ann'
NMB_ANNOY_TREES = 1
# An average hash is an 8x8 bit grid, i.e. a 64-dimensional 0/1 vector.
NMB_HASH_VECTORS = 64

# Materialise the .jpg names as a list.  The original used filter(), which
# on Python 3 is a one-shot iterator and breaks random.choice() (needs
# len()/indexing), the later len(img_filepaths) call, and the
# img_filepaths[k] indexing when building annoy_results.
img_filepaths = [name for name in os.listdir(IMG_DIR) if name.endswith('.jpg')]
img_needle_filepath = random.choice(img_filepaths)

# TODO start with one hash algo, then check for all 4
# Hamming distance from every image's average-hash to the needle's hash,
# computed directly with imagehash as the ground truth to compare against.
dist_to_needle = {}
needle_ahash = imagehash.average_hash(Image.open(os.path.join(IMG_DIR, img_needle_filepath)))
needle_ahash_hash = needle_ahash.hash.astype(int)
needle_ahash_hash = needle_ahash_hash.reshape(1, NMB_HASH_VECTORS)[0]

annoy_index = AnnoyIndex(NMB_HASH_VECTORS, metric='hamming')
for img_idx, img_filepath in enumerate(tqdm(img_filepaths)):
    ahash = imagehash.average_hash(Image.open(os.path.join(IMG_DIR, img_filepath)))
    dist_to_needle[img_filepath] = needle_ahash - ahash
    hash_int_array = ahash.hash.astype(int).reshape(1, NMB_HASH_VECTORS)[0]
    annoy_index.add_item(img_idx, hash_int_array)
annoy_index.build(NMB_ANNOY_TREES)
annoy_index.save(ANNOY_INDEX_PATH)

# retrieve/compare test: reload the index from disk and query with the
# needle's hash vector, asking for all items plus their distances.
annoy_index = AnnoyIndex(NMB_HASH_VECTORS, metric='hamming')
annoy_index.load(ANNOY_INDEX_PATH)
results = annoy_index.get_nns_by_vector(needle_ahash_hash,
                                        len(img_filepaths),
                                        include_distances=True)
annoy_results = dict((img_filepaths[k], v) for k, v in zip(*results))
# print('annoy_results', annoy_results)
# print('dist_to_needle', dist_to_needle)
# Sanity check: Annoy's reported hamming distances should match imagehash's.
print('annoy_results == dist_to_needle', annoy_results == dist_to_needle)
""
] | |
0f55c548cff809bf9f5ac8d241c37fad2a725162 | 8ccdd01f0c2e843ac7e3827d2b690c4235baa430 | /parseval/exceptions.py | 73f3070537b1e2c9df74c9bcffab5539b9c1ce3e | [
"MIT"
] | permissive | saumalya75/parseval | f309e0374109e42eeec8dac58524dfa0cb7f154d | d5e8c0c6cab2b8c236d1f728314eeb0a804401a7 | refs/heads/master | 2022-12-20T02:09:03.065042 | 2020-09-13T15:38:11 | 2020-09-13T15:38:11 | 282,387,832 | 3 | 0 | MIT | 2020-09-13T15:38:12 | 2020-07-25T06:44:01 | Python | UTF-8 | Python | false | false | 3,934 | py | class UnexpectedSystemException(Exception):
def __init__(self, msg="Unexpected error occurred."):
self.msg = msg
def __str__(self):
return self.msg
def __repr__(self):
return f"<UnexpectedSystemException({self.msg})>"
class _ParsevalError(Exception):
    """Common base for parseval exceptions.

    Each subclass only overrides ``_default_msg``; construction,
    ``__str__`` and ``__repr__`` (rendered as ``<ClassName(message)>``)
    are shared here instead of being copy-pasted into every class.
    External behaviour is unchanged: every exception is still a plain
    ``Exception`` subclass constructed as ``Exc()`` or ``Exc(msg)``.
    """

    # Message used when the caller does not supply one.
    _default_msg = "Unexpected error occurred."

    def __init__(self, msg=None):
        # Mirror the originals: the message lives on the instance and
        # super().__init__ is deliberately not called, so ``args`` stays
        # exactly as BaseException populates it.
        self.msg = self._default_msg if msg is None else msg

    def __str__(self):
        return self.msg

    def __repr__(self):
        return f"<{type(self).__name__}({self.msg})>"


class UnexpectedParsingException(_ParsevalError):
    """Raised for unclassified failures while parsing data."""
    _default_msg = "Unexpected error occurred while parsing the data."


class UnsupportedDatatypeException(_ParsevalError):
    """Raised when a column declares a datatype parseval does not support."""
    _default_msg = "Unsupported datatype for column."


class SchemaBuildException(_ParsevalError):
    """Raised when the user-declared schema cannot be built."""
    _default_msg = ("Unexpected error occurred while building the schema. "
                    "Please declare the schema properly.")


class NullValueInNotNullFieldException(_ParsevalError):
    """Raised when a NOT NULL field contains a NULL value."""
    _default_msg = "NULL value detected in Not NULL field."


class ValidValueCheckException(_ParsevalError):
    """Raised when a value is outside the column's valid-value list."""
    _default_msg = "Provided value is not part of valid value list."


class MaximumValueConstraintException(_ParsevalError):
    """Raised when a value exceeds the column's maximum allowed value."""
    _default_msg = "Provided value is higher than maximum allowed value for the column."


class MinimumValueConstraintException(_ParsevalError):
    """Raised when a value is below the column's minimum allowed value."""
    # Fixed copy-paste wording: the original said "lower than maximum".
    _default_msg = "Provided value is lower than minimum allowed value for the column."


class RegexMatchException(_ParsevalError):
    """Raised when a value does not match the expected pattern."""
    _default_msg = "Provided value does not match with expected pattern."


class StringParsingException(_ParsevalError):
    """Raised when a column value cannot be cast to String."""
    _default_msg = "Column value could not be casted to String."


class IntegerParsingException(_ParsevalError):
    """Raised when a column value cannot be cast to Integer."""
    _default_msg = "Column value could not be casted to Integer."


class FloatParsingException(_ParsevalError):
    """Raised when a column value cannot be cast to Float."""
    _default_msg = "Column value could not be casted to Float."


class BooleanParsingException(_ParsevalError):
    """Raised when a column value cannot be cast to Boolean."""
    _default_msg = "Column value could not be casted to Boolean."


class DateTimeParsingException(_ParsevalError):
    """Raised when a column value matches none of the provided datetime formats."""
    _default_msg = "Column value is not aligned to the provided formats."
| [
"saumalya75@gmail.com"
] | saumalya75@gmail.com |
9b1a9ff5d3d3ad9d0086cc8d179cdb717f6b6bde | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/yadayada_acd_cli/acd_cli-master/acdcli/utils/conf.py | 9ebf249680620acffb9a172fb3c9591bb51f646c | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 584 | py | import configparser
import logging
import os
logger = logging.getLogger(__name__)
def get_conf(path, filename, default_conf: configparser.ConfigParser) \
-> configparser.ConfigParser:
conf = configparser.ConfigParser()
conf.read_dict(default_conf)
conffn = os.path.join(path, filename)
try:
with open(conffn) as cf:
conf.read_file(cf)
except OSError:
pass
logger.debug('configuration resulting from merging default and %s: %s' % (filename,
{section: dict(conf[section]) for section in conf}))
return conf
| [
"659338505@qq.com"
] | 659338505@qq.com |
426305176e2f3e0ce143e440802f2c1559b287fa | 015ff12eca7a8fdb464d0b52215b44813536a65b | /news/views.py | 838aaf2d46f6094750ed280bbff20d94f9fd098d | [] | no_license | bartekbrak/acnaid | 9886d151f8cf761f246889ab401b0150fe302ac2 | 7d23c2d70066214bb225dd34825c88138f94a918 | refs/heads/master | 2020-04-27T11:51:09.844089 | 2012-11-25T16:06:00 | 2012-11-25T16:06:00 | 174,311,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from models import *
def news(request, year=None, month=None, slug=None):
    """Render the detail page for a single published news item.

    Looks the item up by publication year/month and translated slug;
    responds with 404 when no published item matches.
    """
    article = get_object_or_404(
        News,
        published=True,
        date__year=year,
        date__month=month,
        translations__slug=slug,
    )
    context = {'news': article}
    return render_to_response('news/news.html', context,
                              context_instance=RequestContext(request))
def index(request):
    """Render a paginated listing of published news, newest first.

    The page number comes from the ``page`` query parameter; a missing or
    non-numeric value falls back to page 1, and an out-of-range value
    falls back to the last page.
    """
    published = News.objects.filter(published=True).order_by('-date')
    paginator = Paginator(published, 10)

    raw_page = request.GET.get('page', '1')
    try:
        page_number = int(raw_page)
    except ValueError:
        page_number = 1

    try:
        current_page = paginator.page(page_number)
    except (EmptyPage, InvalidPage):
        current_page = paginator.page(paginator.num_pages)

    return render_to_response('news/index.html', {
        'news_paginator': paginator,
        'news_page': current_page,
    }, context_instance=RequestContext(request))
| [
"bartek.rychlicki@gmail.com"
] | bartek.rychlicki@gmail.com |
be001ed69adc73c109e0d3bf488cbdfc856cd582 | 5251d0d81debb24d0532ed78a68258f95af1e889 | /hw9.py | fbb250b3edb5b135fe84f4804281030220dada20 | [] | no_license | jhink7/IS602 | b87564eed342b9260c1fa12b91200052d9dc13a6 | 56fbaf439897f4cff8026ae3594eccdf1e012231 | refs/heads/master | 2021-01-23T08:56:44.158883 | 2015-12-17T04:26:51 | 2015-12-17T04:26:51 | 23,470,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | #########
## Solution for Homework 9
## CUNY IS602
#########
## Justin Hink
if __name__ == "__main__":
    # NOTE: this is Python 2 code (print statements) written against a
    # legacy pandas API: DataFrame.sort() (now sort_values) and the
    # error_bad_lines read_csv flag (removed in pandas 2.0).
    import pandas as pd

    #Q1#
    # Load the EPA HTTP access log; tab-separated, no header row.
    epa = pd.read_csv('epa-http.txt', header=None, sep="\t", error_bad_lines=False)
    epa.columns = ['host', 'date', 'request', 'reply', 'bytes']
    # Count log lines per host and keep the host with the most requests.
    hostcounts = epa.groupby('host').count().sort('bytes').tail(1).date
    # answer : sandy.rtptok1.epa.gov 294 requests
    print "Q1"
    print "Most requests:"
    print str(hostcounts)
    print ""

    #Q2#
    # create shallow copy as we're going to slice the data frame and we may
    # need the original later
    epa2 = epa.copy()
    # '-' marks a missing byte count in the log; drop those before casting.
    epa2 = epa2[epa2['bytes'] != '-']
    epa2['bytes'] = epa2['bytes'].astype('int')
    # Total bytes per host; keep the host with the highest total.
    maxBytes = epa2.groupby('host').sum().sort('bytes').tail(1).bytes
    #answer: piankhi.cs.hamptonu.edu, 7267751 bytes
    print "Q2"
    print "Max bytes received:"
    print maxBytes
    print ""

    #Q3#
    # format the date column into a unique day-hour string:
    # strip the surrounding brackets, then drop ":MM:SS" (last 6 chars).
    epa.date = epa.date.astype('string')
    epa.date = epa.date.str.replace('[', '')
    epa.date = epa.date.str.replace(']', '')
    epa.date = epa.date.str[:-6]
    busiestHour = epa.groupby('date').count().sort('bytes').tail(1).bytes
    #answer 30:14 4716
    print "Q3"
    print "Hour With Most Requests:"
    print busiestHour
    print""

    #Q4#
    # create shallow copy as we're going to slice the data frame and we may
    # need the original later
    epa3 = epa.copy()
    # NOTE(review): '.gif' is a regex here, so the dot matches any char
    # (e.g. "Xgif" would also match); str.contains('.gif', regex=False)
    # would be stricter.
    epa3 = epa3[epa3.request.str.contains('.gif')]
    maxGIF = epa3.groupby('request').count().sort('bytes').tail(1).bytes
    #answer circle_logo_small.gif, 3189 GET requests
    print "Q4"
    print "Most Downloaded GIF:"
    print maxGIF
    print""

    #Q5#
    # Keep only non-200 responses and list the distinct status codes.
    epa = epa[epa['reply'] != 200]
    codes = epa.reply.unique()
    print "Q5"
    print"Response codes other than 200:"
    print codes
    print""
| [
"jhink7@gmail.com"
] | jhink7@gmail.com |
f7f94f78ba8f6b378e794ffd5537245e08ebf39b | bf4fe74cf9d8cf9e95a3ebed88fce82447d9011c | /learn-reg3.py | a6e0d27124c28ade78a4fd6667e2110041b13576 | [] | no_license | huzhengen/python | b6c73da55962ce402fb20c8d887ea68ea334df46 | 6bf928ed400fe7d7cd601cf7c9bda79a4dd3b4f9 | refs/heads/master | 2021-09-03T21:43:59.079745 | 2018-01-12T07:56:18 | 2018-01-12T07:56:18 | 114,317,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
content = 'extra things hello 123455 world_this is a Re Extra things'
result = re.search('hello.*?(\d+).*?Re', content)
# print(result)
# print(result.group(1))
html = '''<div id="songs-list">
<h2 class="title">经典老歌</h2>
<p class="introduction">
经典老歌列表
</p>
<ul id="list" class="list-group">
<li data-view="2">一路上有你</li>
<li data-view="7">
<a href="/2.mp3" singer="任贤齐">沧海一声笑</a>
</li>
<li data-view="4" class="active">
<a href="/3.mp3" singer="齐秦">往事随风</a>
</li>
<li data-view="6"><a href="/4.mp3" singer="beyond">光辉岁月</a></li>
<li data-view="5"><a href="/5.mp3" singer="陈慧琳">记事本</a></li>
<li data-view="5">
<a href="/6.mp3" singer="邓丽君">但愿人长久</a>
</li>
</ul>
</div>'''
# result2 = re.search('<li.*?active.*?singer="(.*?)">(.*?)</a>', html, re.S)
# print(result2)
# print(result2.group(1))
# print(result2.group(2))
result3 = re.findall('<li.*?href="(.*?)".*?singer="(.*?)">(.*?)</a>', html, re.S)
# print(result3)
# print(type(result3))
# for result in result3:
# print(result)
result4 = re.findall('<li.*?>\s*?(<a.*?>)?(\w+)(</a>)?\s*?</li>',html,re.S)
# print(result4)
# for result in result4:
# print(result[1])
content5 = """hello 12345 world_this
123 fan
"""
pattern5 = re.compile("hello.*fan",re.S)
result5 = re.match(pattern5 ,content5)
print(pattern5)
print(result5)
print(result5.group()) | [
"huzhengen@163.com"
] | huzhengen@163.com |
a9ece926d5ef08c0f4b132f641d23b78a79d3b03 | b6d46fac859a8ae9b80cc935dddb1d1648e813e4 | /initial_train.py | c86445dca39b593f7fae384dfc95ffa47c043aa3 | [] | no_license | amitavaaditya/online-learning-microservices | cc401d470907d22b8bb20184311502723429b346 | e4a7d10e9d1777afb58c683b1cad3e5643311a75 | refs/heads/master | 2022-12-11T07:48:30.854089 | 2018-10-29T20:19:51 | 2018-10-29T20:19:51 | 155,123,300 | 0 | 0 | null | 2022-11-21T21:18:20 | 2018-10-28T22:48:09 | Jupyter Notebook | UTF-8 | Python | false | false | 5,361 | py | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import tensorflow as tf
# Constants needed
LABEL_COL = 'classLabel'
MODEL_DIR = 'model'
def read_data(filename):
    """
    Method to read the dataset as a pandas DataFrame.

    The file is semicolon-separated, uses the European comma as the
    decimal mark, and marks missing values with the literal string "NA".

    :param filename: filename of the dataset
    :return: DataFrame of the dataset
    """
    csv_options = {
        'sep': ';',           # columns are separated by semicolons
        'decimal': ',',       # decimals use the comma convention
        'na_values': ['NA'],  # literal "NA" strings mark missing values
    }
    return pd.read_csv(filename, **csv_options)
def preprocess_data(df):
    """
    Apply the fixed training-time preprocessing steps to a raw DataFrame.

    Steps (order preserved from the original pipeline):
      1. cast ``v19`` to string so it can be treated as categorical
      2. impute missing numeric values with precomputed medians
      3. robust-scale each numeric column: (value - median) / IQR
      4. fill missing categoricals with ``'missing'`` and pin the
         category levels so encodings are stable across datasets
      5. encode ``classLabel`` as 0 (``no.``) / 1 (``yes.``)

    :param df: unprocessed DataFrame
    :return: DataFrame after preprocessing
    """
    # Precomputed medians of the numeric columns (from the training data).
    numeric_center = {
        'v2': 28.67,
        'v3': 0.000425,
        'v8': 1.75,
        'v11': 2.0,
        'v14': 120.0,
        'v15': 113.0,
        'v17': 1200000.0,
    }
    # Precomputed inter-quartile ranges used as the scaling denominator.
    numeric_spread = {
        'v2': 17.83,
        'v3': 0.0008125000000000001,
        'v8': 4.5,
        'v11': 6.0,
        'v14': 280.0,
        'v15': 1059.75,
        'v17': 2800000.0,
    }
    # Fixed category levels per categorical column ('missing' marks NaNs).
    category_levels = {
        'v1': ['a', 'missing', 'b'],
        'v4': ['y', 'u', 'missing', 'l'],
        'v5': ['p', 'g', 'gg', 'missing'],
        'v6': ['k', 'x', 'ff', 'cc', 'r', 'j', 'm', 'W', 'aa',
               'missing', 'q', 'd', 'e', 'i', 'c'],
        'v7': ['ff', 'j', 'h', 'n', 'missing', 'dd', 'v', 'bb', 'z', 'o'],
        'v9': ['f', 'missing', 't'],
        'v10': ['f', 'missing', 't'],
        'v12': ['f', 'missing', 't'],
        'v13': ['p', 'g', 'missing', 's'],
        'v18': ['missing', 'f', 't'],
        'v19': ['missing', '0', '1'],
        'classLabel': ['no.', 'missing', 'yes.'],
    }

    df['v19'] = df['v19'].apply(str)
    df = df.fillna(numeric_center)
    for column, center in numeric_center.items():
        df[column] = (df[column] - center) / numeric_spread[column]
    for column, levels in category_levels.items():
        df[column].fillna('missing', inplace=True)
        df[column] = pd.Categorical(df[column], ordered=False,
                                    categories=levels)
    df['classLabel'] = df['classLabel'].map({
        'no.': 0,
        'yes.': 1
    })
    return df
def split_data(df):
    """
    Method to split the DataFrame into training and validation sets.

    Uses a fixed random_state for reproducibility and stratifies on the
    label column so both splits keep the original class balance.

    :param df: preprocessed DataFrame
    :return: tuple of (training DataFrame, validation DataFrame)
    """
    return train_test_split(df, random_state=0, stratify=df[LABEL_COL])
def input_fn(df):
    """
    Input function generator required by Tensorflow Estimator API.

    Builds a tf.data pipeline of (features-dict, label) pairs batched in
    groups of 32.  The label column is also present inside the features
    dict (dict(df) keeps every column); the feature columns never
    reference it, so presumably it is ignored by the model -- confirm.

    :param df: DataFrame to be used for training/evaluation/prediction
    :return: batched tf.data.Dataset of (features, labels)
    """
    return tf.data.Dataset.from_tensor_slices((dict(df), df[LABEL_COL])) \
        .batch(32)
def get_feature_columns(df):
    """
    Method to generate the list of Feature Columns required by Tensorflow
    Estimator API.

    Numeric columns pass through as-is; categorical columns are one-hot
    encoded (indicator columns) over the category levels already pinned
    on the DataFrame by preprocess_data().

    :param df: Dataset to fetch columns from
    :return: List of Tensorflow Feature Columns
    """
    # Continuous inputs (already robust-scaled during preprocessing).
    numeric_columns = [tf.feature_column.numeric_column(column) for column
                       in ('v2', 'v3', 'v8', 'v11', 'v14', 'v15', 'v17')]
    # One-hot encode each categorical column over its known vocabulary.
    categorical_columns = [
        tf.feature_column.indicator_column(
            tf.feature_column.categorical_column_with_vocabulary_list(
                column, vocabulary_list=df[column].cat.categories))
        for column in ('v1', 'v4', 'v5', 'v6', 'v7', 'v9', 'v10', 'v12',
                       'v13', 'v18', 'v19')
    ]
    return numeric_columns + categorical_columns
def build_model(feature_columns):
    """
    Method to build the classifier model.

    A small DNN (two hidden layers of 4 units each) for binary
    classification, optimized with Adam (lr=0.001); checkpoints and
    summaries are written under MODEL_DIR, which also lets training
    resume from the latest checkpoint.

    :param feature_columns: List of Tensorflow Feature Columns
    :return: Tensorflow Estimator model
    """
    return tf.estimator.DNNClassifier(
        hidden_units=[4, 4],
        feature_columns=feature_columns,
        model_dir=MODEL_DIR,
        n_classes=2,
        optimizer=tf.train.AdamOptimizer(learning_rate=0.001))
def train(model, train_input_fn, epochs):
    """
    Run *epochs* passes of estimator training, logging a 1-based epoch
    counter before each pass.

    :param model: Tensorflow Estimator model
    :param train_input_fn: Input function for Tensorflow Estimator API
    :param epochs: number of passes over the data
    :return: None
    """
    for epoch in range(1, epochs + 1):
        print('Epoch {}'.format(epoch))
        model.train(train_input_fn)
def evaluate(model, val_input_fn, val_df):
    """
    Method to evaluate the trained model on the validation set and print
    a precision/recall/F1 classification report.

    :param model: Tensorflow Estimator model
    :param val_input_fn: Input function for Tensorflow Estimator API
    :param val_df: DataFrame for validation (supplies the true labels)
    :return: None
    """
    # Each prediction dict carries a 'class_ids' array; take its single
    # entry as the predicted class for that example.
    y_pred = [pred['class_ids'][0] for pred in list(model.predict(
        val_input_fn))]
    y_true = val_df[LABEL_COL]
    print(classification_report(y_true, y_pred))
if __name__ == '__main__':
    # Load, preprocess and stratified-split the raw training data.
    train_df, val_df = split_data(preprocess_data(read_data('train.csv')))
    # Estimator input functions must be zero-argument callables.
    train_input_fn = lambda: input_fn(train_df)
    val_input_fn = lambda: input_fn(val_df)
    # Feature columns are derived from the (preprocessed) training frame.
    feature_columns = get_feature_columns(train_df)
    model = build_model(feature_columns)
    train(model, train_input_fn, epochs=10)
    evaluate(model, val_input_fn, val_df)
| [
"amitavaaditya2009@gmail.com"
] | amitavaaditya2009@gmail.com |
9961f6135479d5127d4412c48dae71f8cdaf81cc | e45bac42c3375dd84bf6a56f628eaa07f244cd9f | /mysite/settings.py | a24264ef5786bf6debd693ff7ae62ece810db368 | [] | no_license | Justiffy/drf_starts | 1c8459f6382b26e91a0aefeb44369ad3194894a0 | 78402460e520c1f152da9fe5cf68bfd2127a481f | refs/heads/master | 2023-04-02T06:38:50.277104 | 2021-04-12T21:51:49 | 2021-04-12T21:51:49 | 354,711,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,447 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.19.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f#%1e5=11!z7i&s(p&n!4=kvq1ev-eve!_m2nz5!=i%fg7fpm6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': '***',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
| [
"shnap2010@yandex.ru"
] | shnap2010@yandex.ru |
24c1f145fb8771680cd3bc3dafa1f4db36c625b3 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/dbgac/rsfromepg.py | 857baf4940ff69594982fe829e028d01e3e1d557 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,594 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsFromEpg(Mo):
    """
    A source relation to the set of requirements for an application-level endpoint group instance.

    NOTE: this class is auto-generated from the ACI model metadata (see the
    file header: "written by zen warriors, do not modify!").  The class body
    only declares managed-object metadata; no behavior lives here.
    """

    # Relation metadata: an N-to-M source relation from dbgac atomic-counter
    # policies to fvAEPg endpoint groups.
    meta = SourceRelationMeta("cobra.model.dbgac.RsFromEpg", "cobra.model.fv.AEPg")
    meta.cardinality = SourceRelationMeta.N_TO_M
    meta.moClassName = "dbgacRsFromEpg"
    meta.rnFormat = "rsfromEpg-[%(tDn)s]"
    meta.category = MoCategory.RELATIONSHIP_TO_GLOBAL
    meta.label = "Source EPG Relation for Atomic Counter Policy"
    meta.writeAccessMask = 0xc001
    meta.readAccessMask = 0xc001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False
    # Classes allowed as children of this MO, and their RN prefixes.
    meta.childClasses.add("cobra.model.tag.Tag")
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childClasses.add("cobra.model.fault.Inst")
    meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
    meta.childClasses.add("cobra.model.tag.Annotation")
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
    # Classes this relation may be parented under.
    meta.parentClasses.add("cobra.model.dbgac.EpgToEp")
    meta.parentClasses.add("cobra.model.dbgac.EpgToIp")
    meta.parentClasses.add("cobra.model.dbgac.EpgToEpg")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.reln.To")
    meta.rnPrefixes = [
        ('rsfromEpg-', True),
    ]
    # ---- Property metadata -------------------------------------------------
    prop = PropMeta("str", "annotation", "annotation", 37761, PropCategory.REGULAR)
    prop.label = "Annotation. Suggested format orchestrator:value"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("annotation", prop)
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "extMngdBy", "extMngdBy", 39900, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "undefined"
    prop._addConstant("msc", "msc", 1)
    prop._addConstant("undefined", "undefined", 0)
    meta.props.add("extMngdBy", prop)
    prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = True
    prop.defaultValueStr = "yes"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("forceResolve", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "monPolDn", "monPolDn", 14593, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)
    prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("local", "local", 3)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("service", "service", 2)
    meta.props.add("rType", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
    prop.label = "State"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unformed"
    prop._addConstant("cardinality-violation", "cardinality-violation", 5)
    prop._addConstant("formed", "formed", 1)
    prop._addConstant("invalid-target", "invalid-target", 4)
    prop._addConstant("missing-target", "missing-target", 2)
    prop._addConstant("unformed", "unformed", 0)
    meta.props.add("state", prop)
    prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
    prop.label = "State Qualifier"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("default-target", "default-target", 2)
    prop._addConstant("mismatch-target", "mismatch-target", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("stateQual", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "tCl", "tCl", 12875, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1981
    prop.defaultValueStr = "fvAEPg"
    prop._addConstant("fvAEPg", None, 1981)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)
    prop = PropMeta("str", "tDn", "tDn", 12874, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("tDn", prop)
    prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("all", "all", 2)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("name", "name", 0)
    meta.props.add("tType", prop)
    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)
    # tDn is the naming property: it appears in the RN ("rsfromEpg-[<tDn>]"),
    # hence the delimiter flag.
    meta.namingProps.append(getattr(meta.props, "tDn"))
    getattr(meta.props, "tDn").needDelimiter = True
    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"
    meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgIpPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgIpPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgIpPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpgPolToLocale", "Fabric Nodes", "cobra.model.nw.If"))

    def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
        """Create the relation under *parentMoOrDn*, named by the target DN *tDn*."""
        namingVals = [tDn]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
886f758e5f28b2fde3f832bf141793466ac770e5 | 712234f68bdc532756e2f577b03444249f1ce52e | /Classification Based Machine Learning for Algorithmic Trading/default_predictions/GPC.py | f311c5e073c6dd1f32740fb09696efb7f1abe5a7 | [
"MIT"
] | permissive | gohjunyi/Machine-Learning-For-Finance | 55f02e0d55fb4d78a8c89902bdfad5da963ff4f4 | e73c3df3c68d71f671cdde3153988c6c617eba53 | refs/heads/master | 2021-09-26T03:13:01.118588 | 2018-10-27T05:48:23 | 2018-10-27T05:48:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 22:02:07 2017
@author: Anthony
"""
import numpy as np
import pandas as pd
df = pd.read_csv("dataset_2.csv")
df['default'].describe()
sum(df['default'] == 0)
sum(df['default'] == 1)
X = df.iloc[:, 1:6].values
y = df['default'].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=0)
shuffle_index = np.random.permutation(len(X_train))
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
from sklearn import gaussian_process
clf = gaussian_process.GaussianProcessClassifier(random_state=0)
clf.fit(X_train, y_train)
# Cross Validation
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
cross_val_score(clf, X_train, y_train, cv=3, scoring='accuracy')
y_train_pred = cross_val_predict(clf, X_train, y_train, cv=3)
cm = confusion_matrix(y_train, y_train_pred)
print(cm)
from sklearn.metrics import precision_score, recall_score
print("precision score = {0:.4f}".format(precision_score(y_train, y_train_pred)))
print("recall score = {0:.4f}".format(recall_score(y_train, y_train_pred)))
| [
"gjunyi90@gmail.com"
] | gjunyi90@gmail.com |
4b52b6b9730607564cb4bb97e081a34ed237d59b | 871dddb5c8059d96b767a323b0f87d3fbb62e786 | /test/unit/vint/ast/plugin/scope_plugin/test_scope_detector.py | 9b57c816b88abb523c7f5ccfecb0d56e9ce5d76b | [
"MIT"
] | permissive | msabramo/vint | 6ef12ed61d54d0d2b2a9d1da1ce90c0e2c734ab2 | f13569f2a62ff13ff8ad913e7d6fb2c57953af20 | refs/heads/master | 2023-08-24T01:20:14.699485 | 2014-12-31T18:28:59 | 2014-12-31T18:28:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,134 | py | import pytest
from vint.ast.node_type import NodeType
from vint.ast.plugin.scope_plugin.scope_detector import ScopeDetector, ScopeVisibility as Vis
from vint.ast.plugin.scope_plugin.identifier_classifier import (
IDENTIFIER_ATTRIBUTE,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG,
IDENTIFIER_ATTRIBUTE_DEFINITION_FLAG,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG,
)
def create_scope(visibility):
    """Build a minimal scope record carrying only its visibility."""
    return {'scope_visibility': visibility}
def create_scope_visibility_hint(visibility, is_implicit=False):
    """Build the visibility-hint record the detector is expected to return."""
    return dict(scope_visibility=visibility, is_implicit=is_implicit)
def create_id(id_value, is_declarative=True, is_function=False, is_autoload=False):
    """Build an identifier node annotated the way the identifier classifier would."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DEFINITION_FLAG: is_declarative,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: is_function,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: is_autoload,
    }
    return {
        'type': NodeType.IDENTIFIER.value,
        'value': id_value,
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_env(env_value):
    """Build an environment-variable node (e.g. ``$ENV``) with default attributes."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DEFINITION_FLAG: True,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
    }
    return {
        'type': NodeType.ENV.value,
        'value': env_value,
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_option(opt_value):
    """Build a Vim option node (e.g. ``&OPT``) with default attributes."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DEFINITION_FLAG: True,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
    }
    return {
        'type': NodeType.OPTION.value,
        'value': opt_value,
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_reg(reg_value):
    """Build a register node (e.g. ``@"``) with default attributes."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DEFINITION_FLAG: True,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
    }
    return {
        'type': NodeType.REG.value,
        'value': reg_value,
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_curlyname(is_declarative=True):
    """Create a node as a `my_{'var'}` (curly-braces-interpolated name)."""
    name_parts = [
        {
            'type': NodeType.CURLYNAMEPART.value,
            'value': 'my_',
        },
        {
            'type': NodeType.CURLYNAMEEXPR.value,
            'value': {
                'type': NodeType.CURLYNAMEEXPR.value,
                'value': 'var',
            },
        },
    ]
    # Dynamic flag is set: the final name is only known at runtime.
    attributes = {
        IDENTIFIER_ATTRIBUTE_DEFINITION_FLAG: is_declarative,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: True,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
    }
    return {
        'type': NodeType.CURLYNAME.value,
        'value': name_parts,
        IDENTIFIER_ATTRIBUTE: attributes,
    }
def create_subscript_member(is_declarative=True):
    """Build an identifier node flagged as a subscript member (``dict.member``)."""
    attributes = {
        IDENTIFIER_ATTRIBUTE_DEFINITION_FLAG: is_declarative,
        IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
        IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: True,
        IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
        IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
    }
    return {
        'type': NodeType.IDENTIFIER.value,
        'value': 'member',
        IDENTIFIER_ATTRIBUTE: attributes,
    }
@pytest.mark.parametrize(
    'context_scope_visibility, id_node, expected_scope_visibility, expected_implicity', [
        # Declarative variable test
        (Vis.SCRIPT_LOCAL, create_id('g:explicit_global'), Vis.GLOBAL_LIKE, False),
        (Vis.SCRIPT_LOCAL, create_id('implicit_global'), Vis.GLOBAL_LIKE, True),
        (Vis.FUNCTION_LOCAL, create_id('g:explicit_global'), Vis.GLOBAL_LIKE, False),
        (Vis.SCRIPT_LOCAL, create_id('b:buffer_local'), Vis.GLOBAL_LIKE, False),
        (Vis.FUNCTION_LOCAL, create_id('b:buffer_local'), Vis.GLOBAL_LIKE, False),
        (Vis.SCRIPT_LOCAL, create_id('w:window_local'), Vis.GLOBAL_LIKE, False),
        (Vis.FUNCTION_LOCAL, create_id('w:window_local'), Vis.GLOBAL_LIKE, False),
        (Vis.SCRIPT_LOCAL, create_id('s:script_local'), Vis.SCRIPT_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('s:script_local'), Vis.SCRIPT_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('l:explicit_function_local'), Vis.FUNCTION_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('implicit_function_local'), Vis.FUNCTION_LOCAL, True),
        (Vis.FUNCTION_LOCAL, create_id('a:param'), Vis.FUNCTION_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('a:000'), Vis.FUNCTION_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('a:1'), Vis.FUNCTION_LOCAL, False),
        (Vis.SCRIPT_LOCAL, create_id('v:count'), Vis.BUILTIN, False),
        (Vis.FUNCTION_LOCAL, create_id('v:count'), Vis.BUILTIN, False),
        (Vis.FUNCTION_LOCAL, create_id('count'), Vis.BUILTIN, True),
        (Vis.SCRIPT_LOCAL, create_curlyname(), Vis.UNANALYZABLE, False),
        (Vis.FUNCTION_LOCAL, create_curlyname(), Vis.UNANALYZABLE, False),
        (Vis.SCRIPT_LOCAL, create_subscript_member(), Vis.UNANALYZABLE, False),
        (Vis.FUNCTION_LOCAL, create_subscript_member(), Vis.UNANALYZABLE, False),
        # Referencing variable test
        (Vis.SCRIPT_LOCAL, create_id('g:explicit_global', is_declarative=False), Vis.GLOBAL_LIKE, False),
        (Vis.SCRIPT_LOCAL, create_id('implicit_global', is_declarative=False), Vis.GLOBAL_LIKE, True),
        (Vis.FUNCTION_LOCAL, create_id('g:explicit_global', is_declarative=False), Vis.GLOBAL_LIKE, False),
        (Vis.SCRIPT_LOCAL, create_id('b:buffer_local', is_declarative=False), Vis.GLOBAL_LIKE, False),
        (Vis.FUNCTION_LOCAL, create_id('b:buffer_local', is_declarative=False), Vis.GLOBAL_LIKE, False),
        (Vis.SCRIPT_LOCAL, create_id('w:window_local', is_declarative=False), Vis.GLOBAL_LIKE, False),
        (Vis.FUNCTION_LOCAL, create_id('w:window_local', is_declarative=False), Vis.GLOBAL_LIKE, False),
        (Vis.SCRIPT_LOCAL, create_id('s:script_local', is_declarative=False), Vis.SCRIPT_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('s:script_local', is_declarative=False), Vis.SCRIPT_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('l:explicit_function_local', is_declarative=False), Vis.FUNCTION_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('implicit_function_local', is_declarative=False), Vis.FUNCTION_LOCAL, True),
        (Vis.FUNCTION_LOCAL, create_id('a:param', is_declarative=False), Vis.FUNCTION_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('a:000', is_declarative=False), Vis.FUNCTION_LOCAL, False),
        (Vis.FUNCTION_LOCAL, create_id('a:1', is_declarative=False), Vis.FUNCTION_LOCAL, False),
        (Vis.SCRIPT_LOCAL, create_id('v:count', is_declarative=False), Vis.BUILTIN, False),
        (Vis.FUNCTION_LOCAL, create_id('v:count', is_declarative=False), Vis.BUILTIN, False),
        (Vis.FUNCTION_LOCAL, create_id('count', is_declarative=False), Vis.BUILTIN, True),
        (Vis.SCRIPT_LOCAL, create_curlyname(is_declarative=False), Vis.UNANALYZABLE, False),
        (Vis.FUNCTION_LOCAL, create_curlyname(is_declarative=False), Vis.UNANALYZABLE, False),
        (Vis.SCRIPT_LOCAL, create_subscript_member(is_declarative=False), Vis.UNANALYZABLE, False),
        (Vis.FUNCTION_LOCAL, create_subscript_member(is_declarative=False), Vis.UNANALYZABLE, False),
        # Autoload function references are treated as implicit globals.
        (Vis.FUNCTION_LOCAL, create_id('file#func', is_autoload=True, is_function=True, is_declarative=False), Vis.GLOBAL_LIKE, True),
    ]
)
def test_detect_scope_visibility(context_scope_visibility, id_node, expected_scope_visibility, expected_implicity):
    """Each identifier node should be classified into the expected visibility,
    with the implicit flag set when the prefix (g:, l:, v:, ...) was omitted."""
    scope = create_scope(context_scope_visibility)
    scope_visibility_hint = ScopeDetector.detect_scope_visibility(id_node, scope)
    expected_scope_visibility_hint = create_scope_visibility_hint(expected_scope_visibility,
                                                                  is_implicit=expected_implicity)
    assert expected_scope_visibility_hint == scope_visibility_hint
@pytest.mark.parametrize(
    'context_scope_visibility, node, expected_variable_name', [
        (Vis.SCRIPT_LOCAL, create_id('g:explicit_global'), 'g:explicit_global'),
        (Vis.SCRIPT_LOCAL, create_id('implicit_global'), 'g:implicit_global'),
        (Vis.SCRIPT_LOCAL, create_id('implicit_global', is_declarative=False), 'g:implicit_global'),
        (Vis.FUNCTION_LOCAL, create_id('l:explicit_function_local'), 'l:explicit_function_local'),
        (Vis.FUNCTION_LOCAL, create_id('implicit_function_local'), 'l:implicit_function_local'),
        (Vis.FUNCTION_LOCAL, create_id('implicit_function_local', is_declarative=False), 'l:implicit_function_local'),
        (Vis.SCRIPT_LOCAL, create_id('v:count'), 'v:count'),
        (Vis.FUNCTION_LOCAL, create_id('v:count'), 'v:count'),
        (Vis.FUNCTION_LOCAL, create_id('count'), 'v:count'),
        (Vis.SCRIPT_LOCAL, create_env('$ENV'), '$ENV'),
        (Vis.SCRIPT_LOCAL, create_option('&OPT'), '&OPT'),
        (Vis.SCRIPT_LOCAL, create_reg('@"'), '@"'),
    ]
)
def test_normalize_variable_name(context_scope_visibility, node, expected_variable_name):
    """Normalization should prepend the scope prefix implied by the context
    (g: at script level, l: inside functions, v: for builtins) when omitted."""
    scope = create_scope(context_scope_visibility)
    normalize_variable_name = ScopeDetector.normalize_variable_name(node, scope)
    assert expected_variable_name == normalize_variable_name
@pytest.mark.parametrize(
    'id_value, is_function, expected_result', [
        ('my_var', False, False),
        ('count', False, True),
        ('v:count', False, True),
        ('MyFunc', True, False),
        ('localtime', True, True),
    ]
)
def test_is_builtin_variable(id_value, is_function, expected_result):
    """Builtin detection should recognize v: variables (with or without the
    prefix) and builtin function names such as localtime()."""
    id_node = create_id(id_value, is_function=is_function)
    result = ScopeDetector.is_builtin_variable(id_node)
    assert expected_result == result
@pytest.mark.parametrize(
    'id_value, context_scope_visibility, expected_result', [
        ('g:my_var', Vis.SCRIPT_LOCAL, True),
        ('g:my_var', Vis.FUNCTION_LOCAL, True),
        # An unprefixed name is global at script level but local inside a function.
        ('my_var', Vis.SCRIPT_LOCAL, True),
        ('my_var', Vis.FUNCTION_LOCAL, False),
        ('s:my_var', Vis.SCRIPT_LOCAL, False),
        ('s:my_var', Vis.FUNCTION_LOCAL, False),
        ('count', Vis.SCRIPT_LOCAL, True),
        ('v:count', Vis.SCRIPT_LOCAL, True),
        ('count', Vis.FUNCTION_LOCAL, True),
        ('v:count', Vis.FUNCTION_LOCAL, True),
    ]
)
def test_is_global_variable(id_value, context_scope_visibility, expected_result):
    """Global-ness depends on both the identifier prefix and the enclosing scope."""
    id_node = create_id(id_value)
    context_scope = create_scope(context_scope_visibility)
    result = ScopeDetector.is_global_variable(id_node, context_scope)
    assert expected_result == result
| [
"yuki.kokubun@mixi.co.jp"
] | yuki.kokubun@mixi.co.jp |
6a667c0a754635c782145b12de82b60bafd92189 | b03db989aa48826f146beff0762553aea752137d | /map_sea.py | 0a7a94460c802d02e44e3adebc52433757d49a3d | [
"BSD-2-Clause"
] | permissive | dwiddows/pilmaps | 63f5a8724b3dde28d3386bef4a12c279acbf86b8 | 4ad85571b92c0155d5d871907f6a1fc4116657b7 | refs/heads/main | 2023-08-03T09:05:20.186298 | 2021-09-13T01:02:10 | 2021-09-13T01:02:10 | 363,476,138 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,729 | py | """Script that draws an image of Southeast Asia and some surrounding parts.
Much of this could be factored out into more general utility functions and interfaces.
"""
import logging
import math
import numpy as np
from PIL import Image, ImageDraw, ImageFont, ImageOps
import shapefile
BORDER = 20  # margin around the map, in pixels
PIXELS_PER_DEGREE = 40  # map scale: pixels per degree of latitude/longitude
FONT_TYPE = "Times.ttc"  # label font; a macOS font file — TODO confirm availability elsewhere
# Natural Earth 1:50m country polygons, loaded once at import time.
ALL_COUNTRY_RECORDS = shapefile.Reader("./data/ne_50m_admin_0_countries.shp")
""" These names match with those in ALL_COUNTRY_RECORDS.
Change to a different set of countries and near countries to get a different map."""
SEA_COUNTRIES = ['Indonesia', 'Philippines', 'Vietnam', 'Thailand', 'Myanmar', 'Malaysia', 'Singapore', 'Cambodia',
                 'Laos', 'Brunei', 'East Timor']
CORE_COUNTRY_SHAPES = [record for record in ALL_COUNTRY_RECORDS.shapeRecords()
                       if record.record['NAME_EN'] in SEA_COUNTRIES]
# Manual (x, y) pixel nudges so country labels don't overlap coastlines/borders.
NAME_OFFSETS = {
    "Myanmar": (-10, -40),
    "Thailand": (0, -50),
    "Laos": (-30, -40),
    "Vietnam": (115, 30),
    "Singapore": (25, 10),
    "Brunei": (-22, -12),
    "East Timor": (40, 16),
    "Cambodia": (0, -8),
    "Indonesia": (-60, 50),
    "Philippines": (125, -10),
    "Malaysia": (-40, 20)
}
# Neighbouring countries drawn in a paler fill for geographic context.
NEAR_COUNTRIES = ["People's Republic of China", 'India', 'Bangladesh',
                  'Taiwan', 'Australia', 'Papua New Guinea', 'Bhutan']
NEAR_COUNTRY_SHAPES = [record for record in ALL_COUNTRY_RECORDS.shapeRecords()
                       if record.record['NAME_EN'] in NEAR_COUNTRIES]
class FlatFrame:
    """Like map_frame.MapFrame but just uses lat lon as cartesian coordinates. Only use near the equator."""

    def __init__(self, polygons):
        """Compute the bounding box of *polygons* and allocate the drawing canvas."""
        lons = [point[0] for polygon in polygons for point in polygon]
        lats = [point[1] for polygon in polygons for point in polygon]
        self.maxlon, self.minlon = max(lons), min(lons)
        self.maxlat, self.minlat = max(lats), min(lats)
        width = BORDER * 2 + int(PIXELS_PER_DEGREE * (self.maxlon - self.minlon))
        height = BORDER * 2 + int(PIXELS_PER_DEGREE * (self.maxlat - self.minlat))
        self.size = [width, height]
        self.img = Image.new("RGB", self.size, "#f9f9f9")
        self.img_draw = ImageDraw.Draw(self.img)
        logging.debug(f"Made flat frame with \nminlat {self.minlat}\nmaxlat {self.maxlat}"
                      f"\nminlon {self.minlon}\nmaxlon {self.maxlon}\nsize {self.size}")

    def intersects(self, polygon):
        """Return True if any vertex of *polygon* lies inside this frame's bounding box."""
        return any(self.minlon < point[0] < self.maxlon and
                   self.minlat < point[1] < self.maxlat
                   for point in polygon)
def get_name(record):
    """Return the display name of a shapefile record.

    Natural Earth layers disagree on the attribute case: some expose ``NAME``,
    others ``name``.  Try the upper-case field first and fall back to the
    lower-case one.  The original bare ``except:`` also swallowed unrelated
    errors (including KeyboardInterrupt); only a missing attribute should
    trigger the fallback.
    """
    try:
        return record.record.NAME
    except AttributeError:
        return record.record.name
def point_to_coords(lon_lat_point, frame: FlatFrame):
    """Project a (lon, lat) pair onto pixel coordinates within *frame*."""
    lon = lon_lat_point[0]
    if lon < -180:
        # Unwrap points that crossed the antimeridian westwards.
        lon += 360
    x = int(BORDER + PIXELS_PER_DEGREE * (lon - frame.minlon))
    y = int(BORDER + PIXELS_PER_DEGREE * (frame.maxlat - lon_lat_point[1]))
    return (x, y)
def record_to_coords(record, frame: FlatFrame):
    """Convert a shapefile record into a list of pixel-coordinate polygons.

    Returns one list of (x, y) tuples per shape part that survives filtering,
    or an empty list if the record does not intersect the frame at all.

    Bug fix: ``shape.parts`` holds the *start* index of each part, so the
    last part runs to the end of ``points``.  The original code used ``-1``
    as the slice end for the last part, silently dropping its final point
    (leaving polygons unclosed).
    """
    points = record.shape.points
    parts = record.shape.parts
    all_coords = []
    if not frame.intersects(points):
        return []
    else:
        logging.debug(f"Including intersecting shape {get_name(record)}")
    for i in range(len(parts)):
        start = parts[i]
        end = parts[i + 1] if i + 1 < len(parts) else len(points)
        polygon = points[start:end]
        # Skip parts lying between 90°W and 10°E (e.g. far-side territories);
        # presumably avoids antimeridian/western-hemisphere artefacts — TODO confirm.
        if any(10 > point[0] > -90 for point in polygon):
            continue
        coords = [point_to_coords(point, frame) for point in polygon]
        all_coords.append(coords)
    return all_coords
def draw_countries(frame, shape_records, fill="#dddddd", outline="grey"):
    """Fill and outline every polygon of every record onto *frame*."""
    for shape_record in shape_records:
        for polygon_coords in record_to_coords(shape_record, frame):
            frame.img_draw.polygon(polygon_coords, fill=fill, outline=outline)
def draw_name(frame: FlatFrame, record, name_on_line=False):
    """Draw the record's name label centred on its shape.

    Font size scales with the shape's bounding-box size.  If *name_on_line*
    is true the label sits on the polyline's midpoint instead of the bbox
    centre.  Manual pixel nudges from NAME_OFFSETS are applied last.

    Fix: the warning message contained a stray ``f`` ("with name f{...}")
    left over from converting to an f-string.
    """
    if not hasattr(record.shape, 'bbox'):
        logging.warning(f"Record with name {get_name(record)} has no bounding box.")
        return
    bbox = record.shape.bbox
    # Crude size measure: width + height of the bounding box, in degrees.
    size = bbox[2] - bbox[0] + bbox[3] - bbox[1]
    font_size = max(math.ceil(2 + math.pow(size, 0.3) * 10), 16)
    font = ImageFont.truetype(FONT_TYPE, font_size)
    name = get_name(record)
    # NOTE(review): font.getsize() is deprecated in Pillow >= 9.2 and removed
    # in 10; migrate to ImageDraw.textbbox() when upgrading Pillow.
    w, h = font.getsize(name)
    lat_lon_spot = record.shape.points[len(record.shape.points) // 2] if name_on_line \
        else [(bbox[0] + bbox[2])/2, (bbox[1] + bbox[3])/2]
    center = point_to_coords(lat_lon_spot, frame)
    top_left = (center[0] - w/2, center[1] - h/2)
    if name in NAME_OFFSETS:
        top_left = np.add(top_left, NAME_OFFSETS[name])
    frame.img_draw.text(top_left, name, font=font, align="center", fill="#111111")
def get_sea_base_frame(polygons, outline="grey") -> FlatFrame:
    """Build the shared base map: SEA countries in solid grey, neighbours paler."""
    base_frame = FlatFrame(polygons)
    draw_countries(base_frame, CORE_COUNTRY_SHAPES, outline=outline)
    draw_countries(base_frame, NEAR_COUNTRY_SHAPES, fill="#eeeeee")
    return base_frame
def countries_and_names():
    """Render the SEA map with every core country labelled; show and save it."""
    outlines = [record.shape.points for record in CORE_COUNTRY_SHAPES]
    base_frame = get_sea_base_frame(outlines)
    for record in CORE_COUNTRY_SHAPES:
        draw_name(base_frame, record)
    framed = ImageOps.expand(base_frame.img, border=3)
    framed.show()
    framed.save("maps/sea_countries.png")
def lat_lon_lines():
    """Draws lines on equator and others and adds labels, including rotation.
    Actually quite a hassle to get working, might be easier to annotate manually in Mac Preview or similar."""
    frame = get_sea_base_frame([shape.shape.points for shape in CORE_COUNTRY_SHAPES])
    dash_length = 5  # pixels drawn per dash; gaps are the same length
    # Equator and tropic of cancer - horizontal
    for start, end in [((frame.minlon - 5, 0), (frame.maxlon + 5, 0)),
                       ((frame.minlon - 5, 23.5), (frame.maxlon + 5, 23.5))]:
        plot_start, plot_end = point_to_coords(start, frame), point_to_coords(end, frame)
        # Draw a dashed line by stepping 2*dash_length and stroking half of each step.
        while plot_start[0] < plot_end[0]:
            frame.img_draw.line((plot_start, (plot_start[0] + dash_length, plot_start[1])), fill="#333333")
            plot_start = (plot_start[0] + 2*dash_length, plot_start[1])
    # 141st meridian - vertical
    plot_start = point_to_coords((141, frame.maxlat + 5), frame)
    plot_end = point_to_coords((141, frame.minlat - 5), frame)
    while plot_start[1] < plot_end[1]:
        frame.img_draw.line((plot_start, (plot_start[0], plot_start[1] + dash_length)), fill="#333333")
        plot_start = (plot_start[0], plot_start[1] + 2 * dash_length)
    font = ImageFont.truetype(FONT_TYPE, 24)
    # Label positions are hand-tuned lat/lon spots next to each line.
    frame.img_draw.text(point_to_coords((133, 1.5), frame), "Equator", font=font, fill="#111111")
    frame.img_draw.text(point_to_coords((121.8, 22.5), frame),
                        "Tropic of Cancer (23.5° North)", font=font, fill="#111111")
    # ImageDraw cannot draw rotated text directly: render the label onto a
    # transparent scratch image, rotate that, then paste it with itself as mask.
    tmp_txt = Image.new("RGBA", (400, 50))
    tmp_draw = ImageDraw.Draw(tmp_txt)
    tmp_draw.text((0, 0), "141° East", font=font, fill="#111111")
    tmp_window = tmp_txt.rotate(270, expand=1)
    frame.img.paste(tmp_window, point_to_coords((138, 20), frame), tmp_window)
    img = ImageOps.expand(frame.img, border=3)
    img.show()
    img.save("maps/sea_lat_lon.png")
def draw_widening_river(coords, frame):
    """Draw a polyline whose stroke width grows along its length.

    The line is drawn in 50-point chunks; chunk *i* is drawn with width *i*,
    so the river appears to widen downstream.  Consecutive chunks overlap by
    one point so the segments join seamlessly.

    Bug fix: the original floor-division chunking dropped the trailing
    ``len(coords) % 50`` points entirely, and drew nothing at all for rivers
    shorter than 50 points.  The widths of the full chunks are unchanged.
    """
    chunk_size = 50
    chunks = [coords[start:start + chunk_size + 1]
              for start in range(0, len(coords), chunk_size)]
    for width, chunk in enumerate(chunks):
        if len(chunk) < 2:  # a single point cannot form a line segment
            continue
        frame.img_draw.line(chunk, fill="#555", width=width)
def draw_rivers():
    """Render the SEA base map with the major rivers and lakes overlaid."""
    # Frame the map on the mainland countries only, then extend the bounding
    # box north-west by appending a synthetic point at (88E, 30N).
    land_border_shapes = [record for record in ALL_COUNTRY_RECORDS.shapeRecords()
                          if record.record['NAME_EN'] in ["Myanmar", "Vietnam", "Cambodia"]]
    polygon_with_bounds = [shape.shape.points for shape in land_border_shapes]
    polygon_with_bounds.append([[88, 30]])
    frame = get_sea_base_frame(polygon_with_bounds, outline="#cccccc")
    rivers = shapefile.Reader("data/ne_10m_rivers_lake_centerlines.shp")
    # Whitelist of river names to draw; a river keeps its local name across
    # borders in Natural Earth, so each system lists several reach names.
    sea_rivers = ["Mekong", "Lancang",
                  "Tonlé Sap", "Tonle Sap",
                  "Salween",
                  "Irrawaddy Delta", "Ayeyarwady", "N'Mai",  # Irrawaddy
                  "Chao Phraya", "Yom", "Ping",
                  "Salween", "Nu",  # Salween
                  "Brahmaputra", "Ganges", "Yarlung", "Dihang",  # Bhamaputra Ganges
                  "Hong", "Da",  # Red river, Vietnam, without
                  "Jinsha", "Chang Jiang", "Yalong"  # Yangtze without Min, Dadu
                  ]
    for record in rivers.shapeRecords():
        # Only skip named-out rivers that would actually appear in the frame;
        # non-intersecting records are harmless (record_to_coords returns []).
        if frame.intersects(record.shape.points) and get_name(record) not in sea_rivers:
            continue
        for coords in record_to_coords(record, frame):
            frame.img_draw.line(coords, fill="#444", width=2)
        #draw_name(frame, record)
    # Hack to join Brahmaputra reaches
    # NOTE(review): iterating the Reader directly ("for x in rivers") relies on
    # pyshp >= 2 yielding ShapeRecords — confirm against the pinned pyshp version.
    yarlung = [x for x in rivers if get_name(x) == "Yarlung"][0]
    dihang = [x for x in rivers if get_name(x) == "Dihang"][0]
    coords = (
        point_to_coords(yarlung.shape.points[-2], frame),
        point_to_coords(yarlung.shape.points[-1], frame),
        point_to_coords(dihang.shape.points[0], frame),
        point_to_coords(dihang.shape.points[1], frame))
    print(f"Yarlung to Dihang coords: {coords}")
    frame.img_draw.line(coords, fill="#444", width=3)
    # Lakes from the whitelist (e.g. Tonlé Sap) are filled and outlined.
    lakes = shapefile.Reader("data/ne_10m_lakes.shp")
    for record in lakes.shapeRecords():
        if get_name(record) not in sea_rivers:
            continue
        for coords in record_to_coords(record, frame):
            frame.img_draw.polygon(coords, fill="#777")
            frame.img_draw.line(coords, fill="#444", width=4)
    img = ImageOps.expand(frame.img, border=3)
    img.show()
    img.save("maps/sea_rivers.png")
def main():
    # Entry point: enable INFO logging, then render the rivers map.  The other
    # renderings (countries_and_names, lat_lon_lines) are invoked manually.
    logging.basicConfig(level=logging.INFO)
    draw_rivers()
if __name__ == '__main__':
    main()
| [
"dwiddows@liveperson.com"
] | dwiddows@liveperson.com |
cf8cd812164f4022a58ebe2f98f32461359c3c54 | 830acb926cc5cf5a12f2045c8497d6f4aa1c2ef2 | /Hangman/Problems/Markdown heading/task.py | 0dade62780dd97612ca478a683d4cbd91f967a34 | [] | no_license | BuyankinM/JetBrainsAcademyProjects | ca2223875ea4aab3ee7fceedc8e293bdb6e1fdcf | d5f9fcde4298af714960b2755f762141de796694 | refs/heads/main | 2023-02-26T05:47:26.070972 | 2021-02-03T22:10:53 | 2021-02-03T22:10:53 | 335,762,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | def heading(sym, num=1):
num = max(num, 1)
num = min(num, 6)
return f"{'#' * num} {sym}" | [
"mbuyankin@gmail.com"
] | mbuyankin@gmail.com |
8053ee58f5dd6ffded2d437f49b8b8805f0c96b5 | 5acd016d0f8df87888e4ac383a11c18c6d9cedd6 | /psi4EDFT/LibDegen.py | d82da0c6f1de97db6b805595d59989ed57aa6068 | [] | no_license | gambort/psi4-EDFT | 549732b5c7e9dd6a8ebb216d3d8b0150f34b2dad | 0509a618cda0ff553288747d96045b5e13042d8e | refs/heads/main | 2023-03-28T17:30:43.487856 | 2021-04-16T21:01:49 | 2021-04-16T21:01:49 | 305,366,690 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,257 | py | import numpy as np
import scipy.linalg as la
class DegenHelper:
def __init__(self, C=None, H=None, epsilon=1e-5):
if not(C is None):
self.NTot = C.shape[1]
kIndx = []
for k in range(self.NTot):
kIndx += [np.argwhere(np.abs(C[:,k])>epsilon).reshape((-1,))]
else:
F = H
if F is None:
quit()
dF = np.abs(np.diag(F))
x = np.abs(F)/np.sqrt(np.outer(dF,dF))
self.NTot = x.shape[1]
kIndx = []
for k in range(self.NTot):
kIndx += [np.argwhere(x[:,k]>epsilon).reshape((-1,))]
######################################
# Block things
self.kAll = {}
for I in kIndx:
k0 = I.min()
if not(k0 in self.kAll):
self.kAll[k0] = I
elif len(I)>len(self.kAll[k0]):
for k in I:
if not(k in self.kAll[k0]):
self.kAll[k0] = np.hstack((self.kAll[k0],k))
for k0 in self.kAll:
self.kAll[k0] = np.sort(self.kAll[k0])
######################################
# Remove duplicates
Master = np.zeros((self.NTot,),dtype=int)
for k0 in sorted(list(self.kAll)):
I = self.kAll[k0]
kUnique = []
for i in I:
if Master[i]==0:
kUnique += [i]
Master[i]=1
if len(kUnique)>0:
self.kAll[k0] = np.array(kUnique, dtype=int)
else: self.kAll.pop(k0)
# Test
kCheck = np.zeros((self.NTot,), dtype=int)
for k in self.kAll:
kCheck[self.kAll[k]]=1
NMissing = np.sum(kCheck) - self.NTot
if not(NMissing==0):
print("Number missing elements = %d"%(NMissing))
quit()
if True and len(self.kAll)>1:
print("Symmetry mappings:")
for k0 in self.kAll:
print("%3d :"%(k0) + \
", ".join(["%3d"%(x) for x in self.kAll[k0]]))
self.EigIndx = None
def Solve(self, F, Sort="Pre"):
# Solves the eigenvalue equaiton with the degeneracies
if not(F.shape==(self.NTot,self.NTot)):
print("Does not have the right shape")
return None
w = np.zeros(F.shape[0])
v = np.zeros(F.shape)
for k0 in self.kAll:
kp = self.kAll[k0]
Fp = F[kp[:,None],kp]
wp, vp = la.eigh(Fp)
w[kp] = wp
v[kp[:,None],kp] = vp
if Sort is None or Sort[0] in ("N", "n"):
return w, v # Not recommended
if Sort[0] in ("A", "a") or self.EigIndx is None:
ii = np.argsort(w)
self.EigIndx = ii
w = w[self.EigIndx]
v = v[:,self.EigIndx]
return w,v
if __name__=="__main__":
F1 = np.ones((2,2)) + np.eye(2)*10.
F2 = np.ones((3,3)) + np.eye(3)*5.
F3 = np.ones((2,2))*3.
F = la.block_diag(F1,F2,F3)
DH = DegenHelper(F)
w,v=DH.Solve(F)
print(w)
print(v)
print(np.dot(v,np.diag(w)).dot(v.T))
| [
"tgould.lego@gmail.com"
] | tgould.lego@gmail.com |
f09e09b066b83eb93839703b12f7fe62adf4b05a | 8be3fbe41873b5682eed4da3aab93be657a893bc | /nested_admin/tests/three_deep/tests.py | a25c53f636b475f837708e24585fcaee22e597d2 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | theKomix/django-nested-admin | 0b5f10b88928dc3167a720cf9a36f2ffe428cba7 | 2bfed729ba17bc69e4fe98d4a672b6b34186ae0f | refs/heads/master | 2020-03-30T12:03:51.430420 | 2018-12-26T05:24:04 | 2018-12-26T05:24:04 | 151,206,354 | 0 | 1 | NOASSERTION | 2018-12-07T15:11:31 | 2018-10-02T05:49:00 | Python | UTF-8 | Python | false | false | 4,893 | py | from nested_admin.tests.base import BaseNestedAdminTestCase
from .models import TopLevel, LevelOne, LevelTwo, LevelThree
class TestDeepNesting(BaseNestedAdminTestCase):
root_model = TopLevel
nested_models = (LevelOne, LevelTwo, LevelThree)
@classmethod
def setUpClass(cls):
super(TestDeepNesting, cls).setUpClass()
cls.l1_model, cls.l2_model, cls.l3_model = cls.nested_models
def test_validationerror_on_empty_extra_parent_form(self):
toplevel = TopLevel.objects.create(name='a')
self.load_admin(toplevel)
self.set_field('name', 'c', indexes=[0, 0])
self.set_field('name', 'd', indexes=[0, 0, 0])
self.save_form()
field_id_with_error = self.selenium.execute_script(
"return $('ul.errorlist li').closest('.form-row').find('input').attr('id')")
self.assertEqual(field_id_with_error, "id_children-0-name")
def test_create_new(self):
self.load_admin()
self.set_field('name', 'a')
self.set_field('name', 'b', [0])
self.set_field('name', 'c', [0, 0])
self.set_field('name', 'd', [0, 0, 0])
self.save_form()
root_instances = self.root_model.objects.all()
self.assertNotEqual(len(root_instances), 0, "%s did not save" % self.root_model.__name__)
self.assertEqual(len(root_instances), 1, "Too many %s found" % self.root_model.__name__)
root = root_instances[0]
self.assertEqual(root.name, 'a', "%s.name has wrong value" % self.root_model.__name__)
l1_instances = root.children.all()
self.assertNotEqual(len(l1_instances), 0, "%s did not save" % self.l1_model.__name__)
self.assertEqual(len(l1_instances), 1, "Too many %s found" % self.l1_model.__name__)
l1_instance = l1_instances[0]
self.assertEqual(l1_instance.name, 'b', "%s.name has wrong value" % self.l1_model.__name__)
l2_instances = l1_instance.children.all()
self.assertNotEqual(len(l2_instances), 0, "%s did not save" % self.l2_model.__name__)
self.assertEqual(len(l2_instances), 1, "Too many %s found" % self.l2_model.__name__)
l2_instance = l2_instances[0]
self.assertEqual(l2_instance.name, 'c', "%s.name has wrong value" % self.l2_model.__name__)
l3_instances = l2_instance.children.all()
self.assertNotEqual(len(l3_instances), 0, "%s did not save" % self.l3_model.__name__)
self.assertEqual(len(l3_instances), 1, "Too many %s found" % self.l3_model.__name__)
l3_instance = l3_instances[0]
self.assertEqual(l3_instance.name, 'd', "%s.name has wrong value" % self.l3_model.__name__)
def test_create_new_no_extras(self):
self.load_admin()
self.set_field('name', 'a')
self.remove_inline([0])
self.add_inline(name='b')
self.remove_inline([0, 0])
self.add_inline([0], name='c')
self.remove_inline([0, 0, 0])
self.add_inline([0, 0], name='d')
self.save_form()
root_instances = self.root_model.objects.all()
self.assertNotEqual(len(root_instances), 0, "%s did not save" % self.root_model.__name__)
self.assertEqual(len(root_instances), 1, "Too many %s found" % self.root_model.__name__)
root = root_instances[0]
self.assertEqual(root.name, 'a', "%s.name has wrong value" % self.root_model.__name__)
l1_instances = root.children.all()
self.assertNotEqual(len(l1_instances), 0, "%s did not save" % self.l1_model.__name__)
self.assertEqual(len(l1_instances), 1, "Too many %s found" % self.l1_model.__name__)
l1_instance = l1_instances[0]
self.assertEqual(l1_instance.name, 'b', "%s.name has wrong value" % self.l1_model.__name__)
l2_instances = l1_instance.children.all()
self.assertNotEqual(len(l2_instances), 0, "%s did not save" % self.l2_model.__name__)
self.assertEqual(len(l2_instances), 1, "Too many %s found" % self.l2_model.__name__)
l2_instance = l2_instances[0]
self.assertEqual(l2_instance.name, 'c', "%s.name has wrong value" % self.l2_model.__name__)
l3_instances = l2_instance.children.all()
self.assertNotEqual(len(l3_instances), 0, "%s did not save" % self.l3_model.__name__)
self.assertEqual(len(l3_instances), 1, "Too many %s found" % self.l3_model.__name__)
l3_instance = l3_instances[0]
self.assertEqual(l3_instance.name, 'd', "%s.name has wrong value" % self.l3_model.__name__)
def test_save_missing_intermediate_inline(self):
self.load_admin()
self.set_field('name', 'a')
self.set_field('name', 'b', [0])
self.set_field('name', 'd', [0, 0, 0])
self.save_form()
root_instances = self.root_model.objects.all()
self.assertNotEqual(len(root_instances), 0, "%s did not save" % self.root_model.__name__)
| [
"fdintino@theatlantic.com"
] | fdintino@theatlantic.com |
3f2d87c5d9adac2bae03cac7b2debb3b2d0a0f28 | ff7fbb8f70a9373149e703805a04ec9955ff1471 | /shopify/urls.py | 473d1e64458633a162e5c71d8171fd4dcfc9484d | [] | no_license | osga1291/shopify_project | f7b1bd20a8a0a92a0914f7214820a7815aaa8a37 | 38b5b5cf5cd61da790a04a44ee8bc9dd2cc0e50d | refs/heads/main | 2023-08-17T01:44:46.536864 | 2021-09-21T17:59:05 | 2021-09-21T17:59:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | """shopify URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
from pic import views as pic_views
from users import views as user_views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from pic.views import(
PictureDetailView
)
urlpatterns = [
path('admin/', admin.site.urls),
path('register/',user_views.register, name = 'register'),
path('login/', auth_views.LoginView.as_view(template_name = "users/login.html"), name = 'login'),
path('', user_views.profile, name = 'profile'),
path('pic/',include('pic.urls')),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"osga1291@colorado.edu"
] | osga1291@colorado.edu |
d88639bfe99fd9a8b8cdc84c368d8ae9a4f18a9e | 54bece16de866430eb105b805de82f0c5e11f93f | /icecat/management/commands/import_products.py | bb747c200cd548a537bf7408174609b98ad3442b | [] | no_license | boostsup/django-icecat | 2c4e9313e60c6d03d46c8ddaf7ef6d9b27f3d53b | 6118cd3bbd329bcdf3c0c1ef9f973784a8f50ad2 | refs/heads/master | 2020-05-18T15:10:46.622650 | 2012-12-19T09:32:16 | 2012-12-19T09:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | from django.core.management.base import BaseCommand
from icecat.models import *
from icecat import settings
from datetime import datetime
import requests
from optparse import make_option
class Command(BaseCommand):
help = 'Download products'
args = '<index.xml>'
def handle(self, *args, **options):
# TODO: make this configurable?
filename = args[0]
with open(filename, "r") as f:
# use cElement, it's faaaaaaaaast
from cElementTree import iterparse
context = iterparse(f, events=("start", "end"))
# turn it into an iterator
context = iter(context)
# # get the root element
event, root = context.next()
# loop through suppliers
for event, elem in context:
if event == "end" and elem.tag == "file":
values = dict(elem.items())
supplier, created = Supplier.objects.get_or_create(pk=values['Supplier_id'])
category, created = Category.objects.get_or_create(pk=values['Catid'])
product = Product()
product.pk = values['Product_ID']
product.supplier = supplier
product.category = category
product.model_name = values['Model_Name']
product.part = values['Prod_ID']
product.created_at = datetime.strptime(values['Date_Added'], '%Y%m%d%H%M%S')
product.updated_at = datetime.strptime(values['Updated'], '%Y%m%d%H%M%S')
product.thumbnail = values['HighPic']
if values['On_Market'] == '1':
product.on_market = True
product.save()
print product.model_name, product.part
root.clear()
| [
"mail@svdgraaf.nl"
] | mail@svdgraaf.nl |
fb543d62dabe1704fcad93ba0ffb4bb861451c6a | cc4a8dec643d226b3abf9ab69fd2bde71769d083 | /day_2/part_2.py | 218c33b1b2d8a286085f504a8a19c30f8bf02e96 | [] | no_license | JesseDesjardins/AdventOfCode2020 | 51c18711d6d46db9442419d43fd40d18a092ac83 | 191a258cd1a5893084839113fb1f28982117a379 | refs/heads/master | 2023-01-30T15:30:28.023113 | 2020-12-14T09:37:59 | 2020-12-14T09:37:59 | 320,192,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | lst = []
with open('input.txt', 'r') as infile:
lst = [line.rstrip('\n') for line in infile]
solution = 0
for entry in lst:
char_count = 0
policy_full, password = entry.split(':')
password = password.strip()
policy_pos, policy_char = policy_full.split(' ')
policy_pos_1, policy_pos_2 = policy_pos.split('-')
if (password[int(policy_pos_1)-1] == policy_char or password[int(policy_pos_2)-1] == policy_char):
if password[int(policy_pos_1)-1] != password[int(policy_pos_2)-1]: solution += 1
print(solution) | [
"jesse.m.desjardins@gmail.com"
] | jesse.m.desjardins@gmail.com |
fea4eff340c129f9c5054944eb459e463026262e | 17bb47df65453039db6dc25c2c07d149689c22d9 | /Florin2/PercentileRank.py | 1222b9902e194cfd1c6e49ce5836417012f9a054 | [] | no_license | ultraeric/switch_wecc_gis | 1f49275c119cccbcdaff78f2bd3759ea09ab7ecb | 389167395d6f7cccb327bbe4bbcc2400993435fc | refs/heads/master | 2021-01-20T11:00:43.940207 | 2017-09-20T06:26:32 | 2017-09-20T06:26:32 | 71,405,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | import csv
import os
filename = 'PollutantsSum.csv'
pollutant = 'CO2'
outputfile = 'PollutantPercentile' + pollutant + '.csv'
if os.path.isfile(outputfile):
# Remove old file since we are appending to a file later
os.remove(outputfile)
with open(filename, 'rb') as csvfile, open(outputfile, 'a') as csvout:
reader = csv.DictReader(csvfile)
fieldnames = ['Region'] + [pollutant + '_RUNEX'] + [pollutant + '_Perc'] # add new columns
writer = csv.DictWriter(csvout, fieldnames)
writer.writeheader()
tuples = []
for row in reader:
region = row['Region']
pol = float(row[pollutant + '_RUNEX'])
tuples.append((region, pol))
sort_pol = sorted(tuples, key=lambda pollutant: pollutant[1])
length = len(sort_pol)
for i in range(0, length):
pr = (i/float(length))*100
writer.writerow({'Region': sort_pol[i][0], pollutant + '_RUNEX': sort_pol[i][1], pollutant + '_Perc': pr})
| [
"f@Florins-MacBook-Pro.local"
] | f@Florins-MacBook-Pro.local |
9dfe4b52d8ceaf76afe7af513c2335e055ac271c | 6078520160f17b7e57bd232bd09d9c7cfb7324b1 | /nagios/libexec/check_multiportcheck.py | ac0634aa58965cb6596302fd111aba0dfa454d52 | [] | no_license | juniorkalvin7/nagios-2 | 12bee5ff4c35313b42b61b073e8e710ea47acf4f | 223a796ed073bfd97da36fca046ae5e05b0a8c87 | refs/heads/master | 2021-01-17T14:48:36.650222 | 2017-03-06T16:49:24 | 2017-03-06T16:49:24 | 84,097,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,561 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
* Copyright (c) 2005 Gemayel Alves de Lira (gemayellira@gmail.com.br)
* All rights reserved.
* Intechne Information Technologies
* version 0.1 -
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE INTECHNE INFORMATION TECNOLOGIES, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
import commands
import time
import math
import os
import sys
import string
args = sys.argv[1:]
if len(args)<2:
print "Centreon Plugin\nChecagem Multipla de portas de email por GemayelLira"
print "Argumentos:host community"
print "Ex.\n%s 192.166.254.133 u3fr0I9b5" % sys.argv[0]
#print
sys.exit(1)
else:
#print args
host=args[0]
community=args[1]
#opcao=args[2]
mib='.1.3.6.1.4.1.2022.29'
#if opcao=='fila':
#mib='.1.3.6.1.4.1.2024.07'
#elif mib=='':
#print 'Defina uma opcao valida'
try:
status=(commands.getoutput("/usr/bin/snmpwalk -t 100000 -v 1 -c %s %s %s.101.1" %(community,str(host),mib))).split("\"")[1]
except Exception,e:
print "CRITICAL - nao consegui capturar valores"
print e
sys.exit(2)
if status.count('CRITICAL'):
print "".join(status)
sys.exit(2)
#print status
data = "\n".join(status.split(':::'))
print data
sys.exit(0)
| [
"juniorkalvin7@gmail.com"
] | juniorkalvin7@gmail.com |
113533ae02e5f503e25caea078a34de22fed76b9 | 4c4611e2a1353cfba1e40bd480bd32c947c13718 | /locallibrary/ll_env/bin/django-admin.py | 372353b12387bcf15d381c8f1ec3b5a31e1e8421 | [] | no_license | zchuhui/python-learning | 74320d3c0a510a28875122afd91ae49bd647d7c8 | 13b96443dfa79ad3b2cc866027babf231a0f58b4 | refs/heads/master | 2023-01-05T22:39:49.297880 | 2020-11-08T15:06:33 | 2020-11-08T15:06:33 | 272,928,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | #!/Users/mac/codes/python/python-learning/locallibrary/ll_env/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"oncwnuNxWDdBi2j9l5fXB6LuSqhg@git.weixin.qq.com"
] | oncwnuNxWDdBi2j9l5fXB6LuSqhg@git.weixin.qq.com |
a24ded89245e027192b8ac7fcf78bd28af461ba8 | 25033dec5e621744884058c7fb2f3ffb27b6d368 | /testDB/sqlh.py | d1aa33400bccc3b8dc9776e6a0145874a1fa0ada | [] | no_license | springwater101/Real-Python-Test- | e6e08750aad331b6d346dc4d24724e430b0ab934 | 129effb5e8eacfcf7695bfd986af6d9c5f93367e | refs/heads/master | 2020-05-20T16:18:14.382549 | 2015-03-11T10:13:59 | 2015-03-11T10:13:59 | 31,297,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #UPDATE and DELETE statements
import sqlite3
with sqlite3.connect("new.db") as connection:
c = connection.cursor()
# update data
c.execute("UPDATE population SET population = 9000000 WHERE city='New York City'")
# delete data
c.execute("DELETE FROM population WHERE city='Boston'")
print "\nNEW DATA:\n"
c.execute("SELECT * FROM population")
rows = c.fetchall()
for r in rows:
print r[0], r[1], r[2]
| [
"ancientkiten@gmail.com"
] | ancientkiten@gmail.com |
63c809a87efab7ded47c7ec9a43ff2325500a22e | 8581191f5b3f002b3a656bd2e5161e1bcc9f053b | /models/denseRNNExponentialWithWConcat.py | 2d9b02e30713efaf55c9f98d6bfc2777abd2092a | [] | no_license | Meikaijie/Dense-RNN-Project | b6b238962d3d9ce2d73e7296688fe3fb140a000c | dff2797a8e188c63c5df6c8a463deb1e8f18aea5 | refs/heads/master | 2021-08-24T15:42:38.592783 | 2017-12-10T06:01:44 | 2017-12-10T06:01:44 | 110,617,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,677 | py | from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
num_epochs = 100
total_series_length = 50000 * 1000
truncated_backprop_length = 15
state_size = 2
num_classes = 2
echo_step = 3
batch_size = 5
num_batches = total_series_length//batch_size//truncated_backprop_length
skip_connections = [1, 2, 4, 8, 16, 32, 64]
num_skip_connections = 2
def generateData():
x = np.array(np.random.choice(2, total_series_length, p=[0.9, 0.1]))
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [skip_connections[num_skip_connections-1], batch_size, state_size])
W = tf.Variable(np.random.rand(state_size*num_skip_connections+1, state_size), dtype=tf.float32)
b = tf.Variable(np.zeros((1,state_size)), dtype=tf.float32)
W2 = tf.Variable(np.random.rand(state_size, num_classes),dtype=tf.float32)
b2 = tf.Variable(np.zeros((1,num_classes)), dtype=tf.float32)
# Unpack columns
inputs_series = tf.unstack(batchX_placeholder, axis=1)
labels_series = tf.unstack(batchY_placeholder, axis=1)
# Forward pass
current_state = init_state
states_series = []
for current_input in inputs_series:
current_input = tf.reshape(current_input, [batch_size, 1])
to_concat = [current_input]
for i in range(num_skip_connections):
to_concat.append(tf.squeeze(tf.slice(current_state, [skip_connections[num_skip_connections-1] - skip_connections[i], 0, 0], [1, batch_size, state_size]), axis=[0]))
input_and_state_concatenated = tf.concat(to_concat, 1) # Increasing number of columns
next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b) # Broadcasted addition
states_series.append(next_state)
current_state = tf.concat([tf.slice(current_state, [1, 0, 0], [skip_connections[num_skip_connections-1]-1, batch_size, state_size]), tf.expand_dims(next_state,0)], 0)
logits_series = [tf.matmul(state, W2) + b2 for state in states_series] #Broadcasted addition
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) for logits, labels in zip(logits_series,labels_series)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
def plot(loss_list, predictions_series, batchX, batchY):
plt.subplot(2, 3, 1)
plt.cla()
plt.plot(loss_list)
for batch_series_idx in range(5):
one_hot_output_series = np.array(predictions_series)[:, batch_series_idx, :]
single_output_series = np.array([(1 if out[0] < 0.5 else 0) for out in one_hot_output_series])
plt.subplot(2, 3, batch_series_idx + 2)
plt.cla()
plt.axis([0, truncated_backprop_length, 0, 2])
left_offset = range(truncated_backprop_length)
plt.bar(left_offset, batchX[batch_series_idx, :], width=1, color="blue")
plt.bar(left_offset, batchY[batch_series_idx, :] * 0.5, width=1, color="red")
plt.bar(left_offset, single_output_series * 0.3, width=1, color="green")
plt.draw()
plt.pause(0.0001)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
plt.ion()
plt.figure()
plt.show()
loss_list = []
for epoch_idx in range(num_epochs):
x,y = generateData()
_current_state = np.zeros((skip_connections[num_skip_connections-1], batch_size, state_size))
print("New data, epoch", epoch_idx)
for batch_idx in range(num_batches):
start_idx = batch_idx * truncated_backprop_length
end_idx = start_idx + truncated_backprop_length
batchX = x[:,start_idx:end_idx]
batchY = y[:,start_idx:end_idx]
_total_loss, _train_step, _current_state, _predictions_series = sess.run(
[total_loss, train_step, current_state, predictions_series],
feed_dict={
batchX_placeholder:batchX,
batchY_placeholder:batchY,
init_state:_current_state,
})
#print(_current_state)
loss_list.append(_total_loss)
if batch_idx%100 == 0:
print("Step",batch_idx, "Loss", _total_loss)
#plot(loss_list, _predictions_series, batchX, batchY)
plt.ioff()
plt.show() | [
"sahilkanjiyani@Sahils-MacBook-Pro.local"
] | sahilkanjiyani@Sahils-MacBook-Pro.local |
05a175071847e83ae425fe49db215ec21df87888 | 9e345c784747cac37eee98e2b04167c3e70183f1 | /trying_rnn.py | b2d22a33ac4358507bb9abae5756c7c4bab49a3e | [] | no_license | leo-gan/PyTorch-SungKim | e1fae02a4603a8405fa31e35d3349c6377bf9ecc | 8024691721978c8fa27278ea875081b3f4bdb21c | refs/heads/master | 2021-05-08T23:36:39.922580 | 2018-02-01T17:06:37 | 2018-02-01T17:06:37 | 119,717,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,900 | py | import torch
from torch import nn
from torch.autograd import Variable
torch.manual_seed(123)
x = 'ihello'
y = 'helioo'
id2x = ['i', 'h', 'e', 'l', 'o']
# x_hot = {
# 'i':[1, 0, 0, 0, 0], # 'i'
# 'h':[0, 1, 0, 0, 0], # 'h'
# 'e':[0, 0, 1, 0, 0], # 'e'
# 'l':[0, 0, 0, 1, 0], # 'l'
# 'o':[0, 0, 0, 0, 1], # 'o'
# }
x2id = {c:i for i,c in enumerate(id2x)}
# input = [[x_hot[c] for c in x]]
input = [[x2id[id] for id in x]]
target = [x2id[id] for id in y]
print(len(input), input, '\n', len(target), target)
input = Variable(torch.LongTensor(input))
target = Variable(torch.LongTensor(target))
input_size = 5
num_classes = input_size
embedding_dim = 10
hidden_size = input_size
batch_size = 1
seq_len = 6
num_layers = 1
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.emb = nn.Embedding(input_size, embedding_dim=embedding_dim)
self.rnn = nn.RNN(input_size=embedding_dim, hidden_size=hidden_size, batch_first=True)
self.lin = nn.Linear(hidden_size, num_classes)
def forward(self, x):
# h_0 size: batch_size,
h_0 = Variable(torch.zeros(batch_size, num_layers, hidden_size))
x = self.emb(x)
x = x.view(batch_size, seq_len, -1)
out, h_0 = self.rnn(x, h_0)
return self.lin(out.view(-1, num_classes))
model = Net()
print(model)
criterion = nn.CrossEntropyLoss()
opt = torch.optim.Adam(model.parameters(), lr=0.1, weight_decay=0.001)
for epoch in range(100):
opt.zero_grad()
output = model(input)
loss = criterion(output, target)
loss.backward()
opt.step()
_, out_id = output.max(1)
idx = out_id.data.numpy()
res_str = [id2x[id] for id in idx]
print('{:2} loss: {:.4} Result: {}'.format(epoch, loss.data[0], ''.join(res_str)))
#print('{:2} loss: {:.4} Result: {}'.format(epoch, loss.data[0]), res_str) | [
"leo.gan.57@gmail.com"
] | leo.gan.57@gmail.com |
cb3b6dee35a6278db9f968b94e96589d790b669c | 699a43917ce75b2026a450f67d85731a0f719e01 | /comonprefix/venv/Scripts/pip3.7-script.py | e913bfc5f695c5d68e2ba38d50ad1f005852ef42 | [] | no_license | wusanshou2017/Leetcode | 96ab81ae38d6e04739c071acfc0a5f46a1c9620b | c4b85ca0e23700b84e4a8a3a426ab634dba0fa88 | refs/heads/master | 2021-11-16T01:18:27.886085 | 2021-10-14T09:54:47 | 2021-10-14T09:54:47 | 107,402,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | #!E:\lc\comonprefix\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"252652905@qq.com"
] | 252652905@qq.com |
20acc266a70d5447f23a333ff82231fd7cc9eac7 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/version_type_enumeration.py | 0e80ef2e2f270f90f4bcec2a491a896e7d6de716 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 155 | py | from enum import Enum
__NAMESPACE__ = "http://www.netex.org.uk/netex"
class VersionTypeEnumeration(Enum):
POINT = "point"
BASELINE = "baseline"
| [
"chris@komposta.net"
] | chris@komposta.net |
bfb19e5899d96f4700ef1718370addf13912b499 | 45eb2451c0cab69d38d485d407a5fabffa3b848f | /2014/round_1a/full_binary_tree.py | 6a3a5b0f0f7c987ba2c8b941c3d1eacf61aae94d | [
"Apache-2.0"
] | permissive | laichunpongben/CodeJam | 4b04f254182fe996e44e2365b1f637f25ab0fbcf | a048229bce1bc680dc85c8a69ef395a2f049732a | refs/heads/master | 2021-01-10T08:03:16.700622 | 2017-08-08T06:49:50 | 2017-08-08T06:49:50 | 43,106,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py |
import sys
from graph import Graph
def calc_min_deletions(n, edges):
graph = Graph()
for i in range(1, n + 1):
graph.add_vertex(i)
for edge in edges:
graph.add_edge(edge)
min_deletions = sys.maxint
for root in range(1, n + 1):
min_deletions = min(min_deletions, N - max_subtree_nodes(root, 0, graph))
return min_deletions
def max_subtree_nodes(current_node, parent, graph):
max_two = []
for x in neighbors of current_node:
if x == parent:
continue
max_two
update max_two with max_subtree_nodes(x, current_node)
if len(max_two) == 2:
return 1 + sum(max_two)
return 1
| [
"laichunpongben@gmail.com"
] | laichunpongben@gmail.com |
336c4ceef935ca67574f23848288f7334f4204ed | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/ZYXEL-RIP-MIB.py | a6c50ee23acbafeec25fe60539a149260b145846 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 4,835 | py | #
# PySNMP MIB module ZYXEL-RIP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZYXEL-RIP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:51:35 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Gauge32, ObjectIdentity, iso, Counter32, Unsigned32, IpAddress, Counter64, MibIdentifier, Integer32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Gauge32", "ObjectIdentity", "iso", "Counter32", "Unsigned32", "IpAddress", "Counter64", "MibIdentifier", "Integer32", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
esMgmt, = mibBuilder.importSymbols("ZYXEL-ES-SMI", "esMgmt")
zyRouteDomainIpAddress, zyRouteDomainIpMaskBits = mibBuilder.importSymbols("ZYXEL-IP-FORWARD-MIB", "zyRouteDomainIpAddress", "zyRouteDomainIpMaskBits")
zyxelRip = ModuleIdentity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 74))
if mibBuilder.loadTexts: zyxelRip.setLastUpdated('201207010000Z')
if mibBuilder.loadTexts: zyxelRip.setOrganization('Enterprise Solution ZyXEL')
if mibBuilder.loadTexts: zyxelRip.setContactInfo('')
if mibBuilder.loadTexts: zyxelRip.setDescription('The subtree for Routing Information Protocol (RIP)')
zyxelRipSetup = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 74, 1))
zyRipState = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 74, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyRipState.setStatus('current')
if mibBuilder.loadTexts: zyRipState.setDescription('Enabled/Disabled RIP on the Switch.')
zyRipDistance = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 74, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyRipDistance.setStatus('current')
if mibBuilder.loadTexts: zyRipDistance.setDescription('The administrative distance of RIP routes.')
zyxelRipRouteDomainTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 74, 1, 3), )
if mibBuilder.loadTexts: zyxelRipRouteDomainTable.setStatus('current')
if mibBuilder.loadTexts: zyxelRipRouteDomainTable.setDescription('The table contains RIP route domain configuration.')
zyxelRipRouteDomainEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 74, 1, 3, 1), ).setIndexNames((0, "ZYXEL-IP-FORWARD-MIB", "zyRouteDomainIpAddress"), (0, "ZYXEL-IP-FORWARD-MIB", "zyRouteDomainIpMaskBits"))
if mibBuilder.loadTexts: zyxelRipRouteDomainEntry.setStatus('current')
if mibBuilder.loadTexts: zyxelRipRouteDomainEntry.setDescription('An entry contains RIP route domain configuration.')
zyRipRouteDomainDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 74, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("none", 0), ("outgoing", 1), ("incoming", 2), ("both", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyRipRouteDomainDirection.setStatus('current')
if mibBuilder.loadTexts: zyRipRouteDomainDirection.setDescription('RIP direction which controls the sending and receiving of RIP packet.')
zyRipRouteDomainVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 74, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("v1", 0), ("v2b", 1), ("v2m", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyRipRouteDomainVersion.setStatus('current')
if mibBuilder.loadTexts: zyRipRouteDomainVersion.setDescription('RIP version which controls the format and the broadcasting method of the RIP packets that the switch sends.')
mibBuilder.exportSymbols("ZYXEL-RIP-MIB", zyxelRipRouteDomainTable=zyxelRipRouteDomainTable, zyRipRouteDomainVersion=zyRipRouteDomainVersion, zyxelRipRouteDomainEntry=zyxelRipRouteDomainEntry, zyxelRip=zyxelRip, zyRipDistance=zyRipDistance, zyRipRouteDomainDirection=zyRipRouteDomainDirection, zyxelRipSetup=zyxelRipSetup, PYSNMP_MODULE_ID=zyxelRip, zyRipState=zyRipState)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
8013ecb39e94b556952c18a9464769eb9dc15f69 | 81094b3034b7781166218ff1a0dff186b9fa6a45 | /main.py | 1ac2638f2d1e838efa80b81feab4cae6d15a37bf | [] | no_license | dennis-199/forloop3 | 01b25d610f56f95a970953c732d5a7ec453c3d95 | 9afc2d6ba64db2dd4f913362e3e6cc2f64205b8f | refs/heads/master | 2023-08-16T18:02:36.256131 | 2021-09-20T05:29:49 | 2021-09-20T05:29:49 | 408,320,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
    """Print a greeting that includes *name*."""
    print(f'Hi, {name}')  # PyCharm template: press Ctrl+F8 to toggle a breakpoint here.
# Script entry point (PyCharm "hello world" template plus a loop exercise).
if __name__ == '__main__':
    print_hi('PyCharm')
# Loop exercise: echo the prompt string one character per line.
for ch in "what is your name ":
    print(ch)
| [
"otieno.dennis@strathmore.edu"
] | otieno.dennis@strathmore.edu |
7b4bb588bf1cf9c6d114bd85d3027e99acdfd100 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CreateClusterTemplateRequest.py | bc26ad850899542168d338eb9f7452070bb222f1 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 14,454 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class CreateClusterTemplateRequest(RpcRequest):
    """Request wrapper for the EMR ``CreateClusterTemplate`` API (2016-04-08).

    Each query parameter of the API is exposed through a ``get_``/``set_``
    pair. List-valued parameters (bootstrap actions, tags, host groups,
    configs, optional software) are flattened into 1-based indexed query
    keys such as ``HostGroup.1.Period`` — see :meth:`_add_indexed_params`.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Emr', '2016-04-08', 'CreateClusterTemplate')
        self.set_method('POST')
        # Endpoint routing tables, when the installed core SDK supports them.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def _add_indexed_params(self, prefix, items, keys):
        # Flatten a list of dicts into 1-based indexed query parameters
        # ('<prefix>.<n>.<key>'), skipping keys whose value is None. This
        # replaces the previous hand-rolled range(len(...)) loops and
        # preserves both the key order and the None-skipping behavior.
        for index, item in enumerate(items, start=1):
            for key in keys:
                value = item.get(key)
                if value is not None:
                    self.add_query_param('%s.%s.%s' % (prefix, index, key), value)

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_LogPath(self):
        return self.get_query_params().get('LogPath')

    def set_LogPath(self, LogPath):
        self.add_query_param('LogPath', LogPath)

    def get_MasterPwd(self):
        return self.get_query_params().get('MasterPwd')

    def set_MasterPwd(self, MasterPwd):
        self.add_query_param('MasterPwd', MasterPwd)

    def get_Configurations(self):
        return self.get_query_params().get('Configurations')

    def set_Configurations(self, Configurations):
        self.add_query_param('Configurations', Configurations)

    def get_SshEnable(self):
        return self.get_query_params().get('SshEnable')

    def set_SshEnable(self, SshEnable):
        self.add_query_param('SshEnable', SshEnable)

    def get_KeyPairName(self):
        return self.get_query_params().get('KeyPairName')

    def set_KeyPairName(self, KeyPairName):
        self.add_query_param('KeyPairName', KeyPairName)

    def get_MetaStoreType(self):
        return self.get_query_params().get('MetaStoreType')

    def set_MetaStoreType(self, MetaStoreType):
        self.add_query_param('MetaStoreType', MetaStoreType)

    def get_SecurityGroupName(self):
        return self.get_query_params().get('SecurityGroupName')

    def set_SecurityGroupName(self, SecurityGroupName):
        self.add_query_param('SecurityGroupName', SecurityGroupName)

    def get_MachineType(self):
        return self.get_query_params().get('MachineType')

    def set_MachineType(self, MachineType):
        self.add_query_param('MachineType', MachineType)

    def get_ResourceGroupId(self):
        return self.get_query_params().get('ResourceGroupId')

    def set_ResourceGroupId(self, ResourceGroupId):
        self.add_query_param('ResourceGroupId', ResourceGroupId)

    def get_BootstrapActions(self):
        return self.get_query_params().get('BootstrapAction')

    def set_BootstrapActions(self, BootstrapActions):
        self._add_indexed_params('BootstrapAction', BootstrapActions, (
            'Path', 'ExecutionTarget', 'ExecutionMoment', 'Arg', 'Name',
            'ExecutionFailStrategy'))

    def get_MetaStoreConf(self):
        return self.get_query_params().get('MetaStoreConf')

    def set_MetaStoreConf(self, MetaStoreConf):
        self.add_query_param('MetaStoreConf', MetaStoreConf)

    def get_EmrVer(self):
        return self.get_query_params().get('EmrVer')

    def set_EmrVer(self, EmrVer):
        self.add_query_param('EmrVer', EmrVer)

    def get_Tags(self):
        return self.get_query_params().get('Tag')

    def set_Tags(self, Tags):
        self._add_indexed_params('Tag', Tags, ('Value', 'Key'))

    def get_IsOpenPublicIp(self):
        return self.get_query_params().get('IsOpenPublicIp')

    def set_IsOpenPublicIp(self, IsOpenPublicIp):
        self.add_query_param('IsOpenPublicIp', IsOpenPublicIp)

    def get_Period(self):
        return self.get_query_params().get('Period')

    def set_Period(self, Period):
        self.add_query_param('Period', Period)

    def get_InstanceGeneration(self):
        return self.get_query_params().get('InstanceGeneration')

    def set_InstanceGeneration(self, InstanceGeneration):
        self.add_query_param('InstanceGeneration', InstanceGeneration)

    def get_VSwitchId(self):
        return self.get_query_params().get('VSwitchId')

    def set_VSwitchId(self, VSwitchId):
        self.add_query_param('VSwitchId', VSwitchId)

    def get_ClusterType(self):
        return self.get_query_params().get('ClusterType')

    def set_ClusterType(self, ClusterType):
        self.add_query_param('ClusterType', ClusterType)

    def get_AutoRenew(self):
        return self.get_query_params().get('AutoRenew')

    def set_AutoRenew(self, AutoRenew):
        self.add_query_param('AutoRenew', AutoRenew)

    def get_OptionSoftWareLists(self):
        return self.get_query_params().get('OptionSoftWareList')

    def set_OptionSoftWareLists(self, OptionSoftWareLists):
        # Scalar list: flattened as 'OptionSoftWareList.1', 'OptionSoftWareList.2', ...
        for index, software in enumerate(OptionSoftWareLists, start=1):
            if software is not None:
                self.add_query_param('OptionSoftWareList.' + str(index), software)

    def get_NetType(self):
        return self.get_query_params().get('NetType')

    def set_NetType(self, NetType):
        self.add_query_param('NetType', NetType)

    def get_ZoneId(self):
        return self.get_query_params().get('ZoneId')

    def set_ZoneId(self, ZoneId):
        self.add_query_param('ZoneId', ZoneId)

    def get_UseCustomHiveMetaDb(self):
        return self.get_query_params().get('UseCustomHiveMetaDb')

    def set_UseCustomHiveMetaDb(self, UseCustomHiveMetaDb):
        self.add_query_param('UseCustomHiveMetaDb', UseCustomHiveMetaDb)

    def get_InitCustomHiveMetaDb(self):
        return self.get_query_params().get('InitCustomHiveMetaDb')

    def set_InitCustomHiveMetaDb(self, InitCustomHiveMetaDb):
        self.add_query_param('InitCustomHiveMetaDb', InitCustomHiveMetaDb)

    def get_ClientToken(self):
        return self.get_query_params().get('ClientToken')

    def set_ClientToken(self, ClientToken):
        self.add_query_param('ClientToken', ClientToken)

    def get_IoOptimized(self):
        return self.get_query_params().get('IoOptimized')

    def set_IoOptimized(self, IoOptimized):
        self.add_query_param('IoOptimized', IoOptimized)

    def get_SecurityGroupId(self):
        return self.get_query_params().get('SecurityGroupId')

    def set_SecurityGroupId(self, SecurityGroupId):
        self.add_query_param('SecurityGroupId', SecurityGroupId)

    def get_EasEnable(self):
        return self.get_query_params().get('EasEnable')

    def set_EasEnable(self, EasEnable):
        self.add_query_param('EasEnable', EasEnable)

    def get_DepositType(self):
        return self.get_query_params().get('DepositType')

    def set_DepositType(self, DepositType):
        self.add_query_param('DepositType', DepositType)

    def get_DataDiskKMSKeyId(self):
        return self.get_query_params().get('DataDiskKMSKeyId')

    def set_DataDiskKMSKeyId(self, DataDiskKMSKeyId):
        self.add_query_param('DataDiskKMSKeyId', DataDiskKMSKeyId)

    def get_UseLocalMetaDb(self):
        return self.get_query_params().get('UseLocalMetaDb')

    def set_UseLocalMetaDb(self, UseLocalMetaDb):
        self.add_query_param('UseLocalMetaDb', UseLocalMetaDb)

    def get_TemplateName(self):
        return self.get_query_params().get('TemplateName')

    def set_TemplateName(self, TemplateName):
        self.add_query_param('TemplateName', TemplateName)

    def get_UserDefinedEmrEcsRole(self):
        return self.get_query_params().get('UserDefinedEmrEcsRole')

    def set_UserDefinedEmrEcsRole(self, UserDefinedEmrEcsRole):
        self.add_query_param('UserDefinedEmrEcsRole', UserDefinedEmrEcsRole)

    def get_DataDiskEncrypted(self):
        return self.get_query_params().get('DataDiskEncrypted')

    def set_DataDiskEncrypted(self, DataDiskEncrypted):
        self.add_query_param('DataDiskEncrypted', DataDiskEncrypted)

    def get_VpcId(self):
        return self.get_query_params().get('VpcId')

    def set_VpcId(self, VpcId):
        self.add_query_param('VpcId', VpcId)

    def get_HostGroups(self):
        return self.get_query_params().get('HostGroup')

    def set_HostGroups(self, HostGroups):
        self._add_indexed_params('HostGroup', HostGroups, (
            'Period', 'SysDiskCapacity', 'PrivatePoolOptionsId', 'DiskCapacity',
            'SysDiskType', 'ClusterId', 'DiskType', 'HostGroupName', 'VSwitchId',
            'DiskCount', 'AutoRenew', 'HostGroupId', 'NodeCount', 'InstanceType',
            'Comment', 'ChargeType', 'MultiInstanceTypes', 'CreateType',
            'HostGroupType', 'PrivatePoolOptionsMatchCriteria'))

    def get_Configs(self):
        return self.get_query_params().get('Config')

    def set_Configs(self, Configs):
        self._add_indexed_params('Config', Configs, (
            'ConfigKey', 'FileName', 'Encrypt', 'Replace', 'ConfigValue',
            'ServiceName'))

    def get_HighAvailabilityEnable(self):
        return self.get_query_params().get('HighAvailabilityEnable')

    def set_HighAvailabilityEnable(self, HighAvailabilityEnable):
        self.add_query_param('HighAvailabilityEnable', HighAvailabilityEnable)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
615db5443502b2ec242b740a6d00f144e229fbfe | 2429a0bc941485a7cf6e348e09198905e5af9af7 | /Python/蓝桥 Python/入门/class student_teacher.py | c190dbdcb4011e62104e6a365c6306c21ce193b1 | [] | no_license | DanDaydy/blacktea | f9fc9ebb0ad1939e400257c6edf15dc8ca76f4c6 | cf1f52a8ee928cf29f64306f4112136333743fcf | refs/heads/master | 2022-12-30T00:11:28.098461 | 2020-10-23T08:12:27 | 2020-10-23T08:12:27 | 302,578,509 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | #!/usr/bin/env python3
class Person(object):
    """A named person; base class for ``Student`` and ``Teacher``."""

    def __init__(self, name):
        self.name = name

    def get_details(self):
        """Return the display string for this person (just the name)."""
        return self.name
class Student(Person):
    """A ``Person`` enrolled in a branch during a given year."""

    def __init__(self, name, branch, year):
        Person.__init__(self, name)
        self.branch = branch
        self.year = year

    def get_details(self):
        """Return a one-line description of the student."""
        template = "{} studies {} and is in {} year."
        return template.format(self.name, self.branch, self.year)
class Teacher(Person):
    """A ``Person`` who teaches one or more papers."""

    def __init__(self, name, papers):
        Person.__init__(self, name)
        self.papers = papers

    def get_details(self):
        """Return the teacher's name and a comma-separated list of papers."""
        taught = ','.join(self.papers)
        return "{} teaches {}".format(self.name, taught)
# Demo: each subclass overrides get_details(), so the same call site
# prints different formats (simple polymorphism example).
person1 = Person('Sachin')
student1 = Student('Kushal', 'CSE', 2005)
teacher1 = Teacher('Prashad', ['C', 'C++'])
print(person1.get_details())
print(student1.get_details())
print(teacher1.get_details())
"gerway@foxmail.com"
] | gerway@foxmail.com |
a3ca293a13db83da524d77e1379a87706ff88b52 | cb69279d7ca6bdec741cb6d78582cad10108142d | /manage.py | 38e735d0f112d46a63cb2233cc27c95bd14a9fcf | [] | no_license | Shubham-2007/hostapi_horeku | 199596b83ccec9deecacf802b4fd1d9d36264f3e | 7576a83985ca15f2df862f26a01a795cb9e58675 | refs/heads/main | 2023-03-09T04:32:19.812501 | 2021-02-24T20:11:41 | 2021-02-24T20:11:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hostapi.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"58872826+Shubham-2441@users.noreply.github.com"
] | 58872826+Shubham-2441@users.noreply.github.com |
d7f29e6921ab75b1b29412c81af927e13e66678e | 34077c4b546c9a98ba2cedc58061508c91315227 | /carreira-python/design-patterns-python/pedido.py | 69ece308c419fd6d5a83bd5f08f780a887fb495b | [] | no_license | CodexZombie/EstudosWeb | c068aa3e7f00fa234f46198078b7cc386fd5beac | 6206ddb453a4713dd114e20709e7349639258e74 | refs/heads/master | 2021-07-05T08:41:25.667408 | 2019-03-20T03:14:05 | 2019-03-20T03:14:05 | 144,349,746 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py | # -*- coding: UTF-8 -*-
# Command
#../design-patterns-python/pedido.py
from datetime import date
class Pedido(object):
    """An order with a customer, a value and a lifecycle status.

    ``paga()`` sets the status to 'PAGO'; ``finaliza()`` sets it to
    'ENTREGUE' and records today's date. New orders start as 'NOVO'.
    """
    def __init__(self, cliente, valor):
        self.__cliente = cliente
        self.__valor = valor
        self.__status = 'NOVO'
        self.__data_de_finalizacao = None
    @property
    def cliente(self):
        # Customer that placed the order (read-only).
        return self.__cliente
    @property
    def valor(self):
        # Monetary value of the order (read-only).
        return self.__valor
    @property
    def status(self):
        # Current status: 'NOVO', 'PAGO' or 'ENTREGUE'.
        return self.__status
    @property
    def data_de_finalizacao(self):
        # Delivery date, or None while the order is not finished.
        return self.__data_de_finalizacao
    def paga(self):
        # Mark the order as paid.
        self.__status = 'PAGO'
    def finaliza(self):
        # Mark the order as delivered, stamping today's date.
        self.__status = 'ENTREGUE'
        self.__data_de_finalizacao = date.today()
from abc import ABCMeta, abstractmethod
class Comando(object):
    """Abstract command (Command pattern): subclasses implement executa().

    NOTE(review): ``__metaclass__`` is the Python 2 way of setting a
    metaclass; under Python 3 it is ignored, so @abstractmethod would not
    be enforced there. Confirm the target interpreter before changing
    this to ``class Comando(metaclass=ABCMeta)``.
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def executa(self): pass
class Fila_de_trabalho(object):
    """Invoker in the Command pattern: queues commands, then runs them in order."""

    def __init__(self):
        self.__fila = []

    def adiciona(self, comando):
        """Enqueue a command object exposing an ``executa()`` method."""
        self.__fila.append(comando)

    def processa(self):
        """Execute every queued command, in insertion order."""
        for item in self.__fila:
            item.executa()
class Paga_pedido(Comando):
    """Concrete command: marks the wrapped order as paid when executed."""

    def __init__(self, pedido):
        self.__alvo = pedido

    def executa(self):
        self.__alvo.paga()
class Finaliza_pedido(Comando):
    """Concrete command: marks the wrapped order as delivered when executed."""

    def __init__(self, pedido):
        self.__alvo = pedido

    def executa(self):
        self.__alvo.finaliza()
if __name__ == '__main__':
    # Demo: queue payment commands for two orders plus delivery of the
    # first, then run the whole queue through the invoker.
    pedido1 = Pedido('Flávio', 150)
    pedido2 = Pedido('Almeida', 250)
    fila_de_trabalho = Fila_de_trabalho()
    fila_de_trabalho.adiciona(Paga_pedido(pedido1))
    fila_de_trabalho.adiciona(Paga_pedido(pedido2))
    fila_de_trabalho.adiciona(Finaliza_pedido(pedido1))
    fila_de_trabalho.processa()
"walvesdg@gmail.com"
] | walvesdg@gmail.com |
87b0cbd13cfd268b27a2c2fdc00de595ec99aad3 | fe6bef85f9d25ae1c37fd335093d4195b8d68336 | /main.py | d3e694a6c28eeaa496adc52358a75ea11c4462e0 | [] | no_license | Vektis/py_godwin | 8975807433bbdceb73068fb7c7cccfa8116893e8 | c48bef3b1c71049c99ca17c01feb03eac1b36316 | refs/heads/master | 2021-01-21T07:14:34.268183 | 2017-05-17T18:30:37 | 2017-05-17T18:30:37 | 91,608,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15 | py | print("yeah")
| [
"pbgaming2225@gmail.com"
] | pbgaming2225@gmail.com |
ec9afabfdd6a3fb5b54dcd3df3f3f3a0b67ae01e | a76790fa5f4eb96a8b731f891ca1aa4c16d21256 | /azext_iot/dps/providers/discovery.py | 433c7fe8409c6128ccc6fbaf4f22840408eae3da | [
"MIT"
] | permissive | digimaun/azure-iot-cli-extension | 414fb1c7c22b0f0d0891cd30c28d13366b9f7207 | 9999c536bbf67251d863d365c190866e1d5cc1ad | refs/heads/dev | 2023-06-24T09:42:51.069627 | 2022-12-14T23:29:58 | 2022-12-14T23:29:58 | 579,177,610 | 1 | 0 | NOASSERTION | 2022-12-16T21:25:31 | 2022-12-16T21:25:31 | null | UTF-8 | Python | false | false | 3,899 | py | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
from azure.cli.core.commands.client_factory import get_subscription_id
from azext_iot.common._azure import IOT_SERVICE_CS_TEMPLATE
from azext_iot.common.base_discovery import BaseDiscovery
from azext_iot.common.shared import DiscoveryResourceType
from azext_iot.common.utility import ensure_iotdps_sdk_min_version
from azext_iot.constants import IOTDPS_TRACK_2_SDK_MIN_VERSION
from azext_iot.dps.models.dps_target import DPSTarget
from azext_iot._factory import iot_service_provisioning_factory
from typing import Any, Dict
logger = get_logger(__name__)
# Access rights a shared access policy must grant for privileged DPS
# operations; passed to BaseDiscovery as necessary_rights_set below.
# (Existing "PRIVILEDGED" spelling kept — renaming would touch callers.)
PRIVILEDGED_ACCESS_RIGHTS_SET = set(
    ["ServiceConfig", "EnrollmentWrite"]
)
class DPSDiscovery(BaseDiscovery):
    """Discovery helper for Azure IoT Device Provisioning Service (DPS).

    Builds on BaseDiscovery to locate DPS resources and shared access
    policies, and to produce the connection-string "target" dict used by
    the rest of the extension.
    """
    def __init__(self, cmd):
        super().__init__(
            cmd=cmd,
            necessary_rights_set=PRIVILEDGED_ACCESS_RIGHTS_SET,
            resource_type=DiscoveryResourceType.DPS.value
        )
    def _initialize_client(self):
        # Lazily create the management client exactly once.
        if not self.client:
            # Track 2 could be supported, depending on the installed SDK version.
            self.track2 = ensure_iotdps_sdk_min_version(IOTDPS_TRACK_2_SDK_MIN_VERSION)
            if getattr(self.cmd, "cli_ctx", None):
                # The client we want to use is an attribute of the client returned
                # from the factory. This will have to be revisited if the DPS sdk changes.
                self.client = iot_service_provisioning_factory(self.cmd.cli_ctx).iot_dps_resource
                self.sub_id = get_subscription_id(self.cmd.cli_ctx)
            else:
                # Caller passed a client directly instead of a cmd object.
                self.client = self.cmd
                # Method get_keys_for_key_name needed for policy discovery (see
                # BaseDiscovery.find_policy for usage) and is defined as
                # list_keys_for_key_name in the DPS Sdk.
                self.client.get_keys_for_key_name = self.client.list_keys_for_key_name
    def _make_kwargs(self, **kwargs) -> Dict[str, Any]:
        # The DPS client needs the provisioning_service_name argument
        # instead of the generic resource_name used by BaseDiscovery.
        kwargs["provisioning_service_name"] = kwargs.pop("resource_name")
        return kwargs
    @classmethod
    def get_target_by_cstring(cls, connection_string: str) -> DPSTarget:
        """Build a target dict directly from a DPS connection string."""
        return DPSTarget.from_connection_string(cstring=connection_string).as_dict()
    def _build_target(
        self, resource, policy, key_type: str = None, **kwargs
    ) -> Dict[str, str]:
        """Assemble the connection-string target dict for *resource*/*policy*."""
        # This is more or less a compatibility function which produces the
        # same result as _azure.get_iot_dps_connection_string()
        # In future iteration we will return a 'Target' object rather than dict
        # but that will be better served aligning with vNext pattern for DPS
        result = {}
        result["cs"] = IOT_SERVICE_CS_TEMPLATE.format(
            resource.properties.service_operations_host_name,
            policy.key_name,
            policy.primary_key if key_type == "primary" else policy.secondary_key,
        )
        result["entity"] = resource.properties.service_operations_host_name
        result["policy"] = policy.key_name
        result["primarykey"] = policy.primary_key
        result["secondarykey"] = policy.secondary_key
        result["subscription"] = self.sub_id
        result["cmd"] = self.cmd
        result["idscope"] = resource.properties.id_scope
        return result
    def get_id_scope(self, resource_name: str, rg: str = None) -> str:
        """Get the ID scope. Only needed for certain DPS operations."""
        return self.find_resource(
            resource_name=resource_name, rg=rg
        ).properties.id_scope
| [
"noreply@github.com"
] | digimaun.noreply@github.com |
faae9cb000964651106d75bbce939dd72351ace8 | 6efd23ba15723f3a3a6cdf09904c43cd541dd833 | /htcp/p2pnetwork/testtcp/sniff.py | 25c2fe0582a792e02fe58086bde8fdedb736dee3 | [] | no_license | myrual/python-stun | d108f80255c1799ff9519e4f797775e01d86a867 | 6d1477a42397c6c57ebfeddc79b223170448f998 | refs/heads/master | 2020-06-03T03:51:41.445978 | 2012-04-07T12:32:09 | 2012-04-07T12:32:09 | 3,957,628 | 15 | 11 | null | null | null | null | UTF-8 | Python | false | false | 4,052 | py | #!/usr/bin/env python2
import sys
import pcap
import string
import time
import socket
import struct
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
import p2pNetwork.testTCP.spoof as spoof
# Map IP protocol numbers to short display names used by print_packet().
protocols={socket.IPPROTO_TCP:'tcp',
           socket.IPPROTO_UDP:'udp',
           socket.IPPROTO_ICMP:'icmp'}
def decode_ip_packet(s):
    """Decode an IPv4 header (plus TCP seq/ack) from raw bytes into a dict.

    NOTE(review): Python 2 code — ``ord(s[0])`` assumes *s* is a ``str``;
    under Python 3 indexing bytes yields an int and this raises TypeError.
    """
    d={}
    d['version']=(ord(s[0]) & 0xf0) >> 4
    d['header_len']=ord(s[0]) & 0x0f  # header length in 32-bit words
    d['tos']=ord(s[1])
    d['total_len']=socket.ntohs(struct.unpack('H',s[2:4])[0])
    d['id']=socket.ntohs(struct.unpack('H',s[4:6])[0])
    d['flags']=(ord(s[6]) & 0xe0) >> 5
    # NOTE(review): the 0x1f mask is applied before ntohs — byte-order
    # handling of the 13-bit fragment offset looks suspect; verify.
    d['fragment_offset']=socket.ntohs(struct.unpack('H',s[6:8])[0] & 0x1f)
    d['ttl']=ord(s[8])
    d['protocol']=ord(s[9])
    d['checksum']=socket.ntohs(struct.unpack('H',s[10:12])[0])
    #d['source_address']=pcap.ntoa(struct.unpack('i',s[12:16])[0])
    #d['destination_address']=pcap.ntoa(struct.unpack('i',s[16:20])[0])
    # Address decoding via pcap.ntoa is disabled above; placeholders used.
    d['source_address']='?'
    d['destination_address']='?'
    if d['header_len']>5:
        # NOTE(review): slice end looks wrong — options span bytes
        # 20..4*header_len, but this uses 4*(header_len-5); verify.
        d['options']=s[20:4*(d['header_len']-5)]
    else:
        d['options']=None
    d['data']=s[4*d['header_len']:]
    # Also extract the TCP sequence/ack numbers from the payload.
    decode_tcp_packet(d['data'], d)
    return d
def decode_tcp_packet(s, d):
    """Store the TCP sequence ('synno') and acknowledgement ('ackno')
    numbers from header bytes 4-7 and 8-11 of *s* into dict *d*."""
    seq_no = struct.unpack('!L', s[4:8])[0]
    d['synno'] = seq_no
    ack_no = struct.unpack('!L', s[8:12])[0]
    d['ackno'] = ack_no
def dumphex(s):
    """Print *s* as rows of 16 space-separated hex byte values (Python 2).

    NOTE(review): relies on py2 semantics (``map`` returning a list,
    ``xrange``, integer ``/``). Also the final ``print`` after the loop
    reuses the leaked loop variable ``i``, which misprints the tail when
    ``len(s)`` is a multiple of 16 and raises NameError for len < 16 —
    verify before reuse.
    """
    bytes = map(lambda x: '%.2x' % x, map(ord, s))
    for i in xrange(0,len(bytes)/16):
        print ' %s' % string.join(bytes[i*16:(i+1)*16],' ')
    print ' %s' % string.join(bytes[(i+1)*16:],' ')
def print_packet(timestamp, data, arg=''):
    """pcap callback: pretty-print one captured frame (Python 2).

    Only frames with EtherType 0x0800 (IPv4) are decoded; others ignored.
    """
    if not data:
        return
    if data[12:14]=='\x08\x00':
        # Strip the 14-byte Ethernet header before IP decoding.
        decoded=decode_ip_packet(data[14:])
        print '\n%s.%f %s > %s' % (time.strftime('%H:%M',
                                                 time.localtime(timestamp)),
                                   timestamp % 60,
                                   decoded['source_address'],
                                   decoded['destination_address'])
        for key in ['version', 'header_len', 'tos', 'total_len', 'id',
                    'flags', 'fragment_offset', 'ttl']:
            print ' %s: %d' % (key, decoded[key])
        print ' protocol: %s' % protocols[decoded['protocol']]
        print ' header checksum: %d' % decoded['checksum']
        print ' data:'
        dumphex(decoded['data'])
        print ' SYNno:', decoded['synno']
        print ' ACKno:', decoded['ackno']
#if __name__=='__main__':
def sniff(argv, udp_obj):
    """Sniff packets using the pcap library
    and call the method to send the SYN number to the peer
    or to the Connection Broker"""
    # argv: [prog, interface, bpf-filter-expr...]; udp_obj supplies
    # punchHole() and the send_SYN_to_ConnectionBroker pcap callback.
    sys.argv = argv
    if len(sys.argv) < 3:
        print 'usage: sniff.py <interface> <expr>'
        sys.exit(0)
    dev = sys.argv[1]
    #p = pcap.pcap(dev)
    p = pcap.pcapObject()
    #dev = pcap.lookupdev()
    net, mask = pcap.lookupnet(dev)
    # note: to_ms does nothing on linux
    p.open_live(dev, 1600, 0, 100)
    #p.dump_open('dumpfile')
    # Remaining argv words form the BPF filter expression.
    p.setfilter(string.join(sys.argv[2:],' '), 0, 0)
    # try-except block to catch keyboard interrupt. Failure to shut
    # down cleanly can result in the interface not being taken out of promisc.
    # mode
    #p.setnonblock(1)
    #udp_obj = UDP_factory()
    #reactor.run()
    # Open the NAT hole before capturing so the outgoing SYN is seen.
    udp_obj.punchHole()
    try:
        while 1:
            #for ts, pkt in p:
            #for i in range(1,9):
            #print i
            #print_packet(ts, pkt)
            #p.dispatch(print_packet, -1)
            # Capture a single packet and hand it to the callback.
            p.dispatch(1, udp_obj.send_SYN_to_ConnectionBroker)
            #p.loop(1, udp_obj.send_SYN_to_ConnectionBroker)
            #udp_obj.send_SYN_to_ConnectionBroker(ts, pkt)
            #break
            print 'break'
            break
        # specify 'None' to dump to dumpfile, assuming you have called
        # the dump_open method
        # p.dispatch(0, None)
        # the loop method is another way of doing things
        # p.loop(1, print_packet)
        # as is the next() method
        # p.next() returns a (pktlen, data, timestamp) tuple
        # apply(print_packet,p.next())
    except KeyboardInterrupt:
        print '%s' % sys.exc_type
        print 'shutting down'
        print '%d packets received, %d packets dropped, %d packets dropped by interface' % p.stats()
| [
"lilin@jiuan.com"
] | lilin@jiuan.com |
066c2cb64f494164c914df163bcf4478f1eb98b4 | 63818414989aadb81f362311899177038dde9190 | /services/migrations/0006_auto_20200809_2255.py | b61c506bb7788d27ede2b297b03913a420c4ed05 | [] | no_license | rahimifh/Personal_p_pro2 | 0d89ccd529c8aa06c294009dcdcc693c38719771 | 7393b4be7470dbc398b73386e8f770f5a2f5afa0 | refs/heads/master | 2022-12-05T12:09:01.189118 | 2020-08-18T05:24:48 | 2020-08-18T05:24:48 | 288,352,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | # Generated by Django 3.0.8 on 2020-08-09 18:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: moves the lesson body fields (text2-4,
    like, pub_date) off ``lesson`` into a new ``lesson_detail`` model
    linked back to the course via a ForeignKey. NOTE: the removed data is
    dropped, not copied — edit only via a new migration."""
    dependencies = [
        ('services', '0005_libfile_libpartition'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='lesson',
            name='like',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='pub_date',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='text2',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='text3',
        ),
        migrations.RemoveField(
            model_name='lesson',
            name='text4',
        ),
        migrations.CreateModel(
            name='lesson_detail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=250)),
                ('textimg', models.ImageField(blank=True, upload_to='')),
                ('text1', models.TextField()),
                ('text2', models.TextField(blank=True)),
                ('text3', models.TextField(blank=True)),
                ('text4', models.TextField(blank=True)),
                ('like', models.IntegerField(default=0)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='services.lesson')),
            ],
        ),
    ]
| [
"noreply@github.com"
] | rahimifh.noreply@github.com |
b2e416b830f8a762c57a51d0493a629a1344ef3f | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/7c04cc141bd3fc5f0ce1/snippet.py | 4a77c7cc3e4f198f1906ec927652c704233af5b1 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 2,740 | py | def _download(host, creds, fp):
chunk_size = 512 * 1024
headers = {
'Content-Type': 'application/octet-stream'
}
filename = os.path.basename(fp)
uri = 'https://%s/mgmt/cm/autodeploy/software-image-downloads/%s' % (host, filename)
requests.packages.urllib3.disable_warnings()
with open(fp, 'wb') as f:
start = 0
end = chunk_size - 1
size = 0
current_bytes = 0
while True:
content_range = "%s-%s/%s" % (start, end, size)
headers['Content-Range'] = content_range
#print headers
resp = requests.get(uri,
auth=creds,
headers=headers,
verify=False,
stream=True)
if resp.status_code == 200:
# If the size is zero, then this is the first time through the
# loop and we don't want to write data because we haven't yet
# figured out the total size of the file.
if size > 0:
current_bytes += chunk_size
for chunk in resp.iter_content(chunk_size):
f.write(chunk)
# Once we've downloaded the entire file, we can break out of
# the loop
if end == size:
break
crange = resp.headers['Content-Range']
# Determine the total number of bytes to read
if size == 0:
size = int(crange.split('/')[-1]) - 1
# If the file is smaller than the chunk size, BIG-IP will
# return an HTTP 400. So adjust the chunk_size down to the
# total file size...
if chunk_size > size:
end = size
# ...and pass on the rest of the code
continue
start += chunk_size
if (current_bytes + chunk_size) > size:
end = size
else:
end = start + chunk_size - 1
if __name__ == "__main__":
    # CLI entry point (Python 2 — note the print statement below):
    # parse host/user/path, prompt for the password, then download.
    import os, requests, argparse, getpass
    parser = argparse.ArgumentParser(description='Download File from BIG-IP')
    parser.add_argument("host", help='BIG-IP IP or Hostname', )
    parser.add_argument("username", help='BIG-IP Username')
    parser.add_argument("filepath", help='Destination Filename & Path')
    args = vars(parser.parse_args())
    hostname = args['host']
    username = args['username']
    filepath = args['filepath']
    print "%s, enter your password: " % args['username'],
    password = getpass.getpass()
    _download(hostname, (username, password), filepath)
"gistshub@gmail.com"
] | gistshub@gmail.com |
7fab1106e8d7ce276f3cfbbdf00034e458456586 | 37568c3e0e8ad4f342adb53c02d08283d553bc95 | /pyservos/protocol2.py | 547ee49f382cc5b533b9db6a5a670b55d190910b | [
"MIT"
] | permissive | MultipedRobotics/pyservos | c39bb3da3e57890fa68432b6f500b0a742cb896b | 26691ab8dd541dbbe4660c73b025ebe6b085e2fc | refs/heads/master | 2023-03-25T13:29:47.343650 | 2021-03-25T01:04:36 | 2021-03-25T01:04:36 | 121,900,021 | 6 | 4 | MIT | 2021-03-25T01:04:37 | 2018-02-17T23:02:25 | Python | UTF-8 | Python | false | false | 11,888 | py | from enum import IntFlag
from pyservos.utils import angle2int, le
from pyservos.common import ResetLevels
# ResetLevels = IntFlag('ResetLevels', 'all allButID allButIDDR')
# 256-entry CRC-16 lookup table consumed by Protocol2.check_sum() below
# (one entry per possible input byte, indexed by the table-driven CRC loop).
# Entry 1 being 0x8005 suggests the generator polynomial 0x8005
# (x^16 + x^15 + x^2 + 1), as used by Dynamixel Protocol 2.0 -- TODO confirm
# against the Robotis specification.
crc_table = [
    0x0000, 0x8005, 0x800F, 0x000A, 0x801B, 0x001E, 0x0014, 0x8011,
    0x8033, 0x0036, 0x003C, 0x8039, 0x0028, 0x802D, 0x8027, 0x0022,
    0x8063, 0x0066, 0x006C, 0x8069, 0x0078, 0x807D, 0x8077, 0x0072,
    0x0050, 0x8055, 0x805F, 0x005A, 0x804B, 0x004E, 0x0044, 0x8041,
    0x80C3, 0x00C6, 0x00CC, 0x80C9, 0x00D8, 0x80DD, 0x80D7, 0x00D2,
    0x00F0, 0x80F5, 0x80FF, 0x00FA, 0x80EB, 0x00EE, 0x00E4, 0x80E1,
    0x00A0, 0x80A5, 0x80AF, 0x00AA, 0x80BB, 0x00BE, 0x00B4, 0x80B1,
    0x8093, 0x0096, 0x009C, 0x8099, 0x0088, 0x808D, 0x8087, 0x0082,
    0x8183, 0x0186, 0x018C, 0x8189, 0x0198, 0x819D, 0x8197, 0x0192,
    0x01B0, 0x81B5, 0x81BF, 0x01BA, 0x81AB, 0x01AE, 0x01A4, 0x81A1,
    0x01E0, 0x81E5, 0x81EF, 0x01EA, 0x81FB, 0x01FE, 0x01F4, 0x81F1,
    0x81D3, 0x01D6, 0x01DC, 0x81D9, 0x01C8, 0x81CD, 0x81C7, 0x01C2,
    0x0140, 0x8145, 0x814F, 0x014A, 0x815B, 0x015E, 0x0154, 0x8151,
    0x8173, 0x0176, 0x017C, 0x8179, 0x0168, 0x816D, 0x8167, 0x0162,
    0x8123, 0x0126, 0x012C, 0x8129, 0x0138, 0x813D, 0x8137, 0x0132,
    0x0110, 0x8115, 0x811F, 0x011A, 0x810B, 0x010E, 0x0104, 0x8101,
    0x8303, 0x0306, 0x030C, 0x8309, 0x0318, 0x831D, 0x8317, 0x0312,
    0x0330, 0x8335, 0x833F, 0x033A, 0x832B, 0x032E, 0x0324, 0x8321,
    0x0360, 0x8365, 0x836F, 0x036A, 0x837B, 0x037E, 0x0374, 0x8371,
    0x8353, 0x0356, 0x035C, 0x8359, 0x0348, 0x834D, 0x8347, 0x0342,
    0x03C0, 0x83C5, 0x83CF, 0x03CA, 0x83DB, 0x03DE, 0x03D4, 0x83D1,
    0x83F3, 0x03F6, 0x03FC, 0x83F9, 0x03E8, 0x83ED, 0x83E7, 0x03E2,
    0x83A3, 0x03A6, 0x03AC, 0x83A9, 0x03B8, 0x83BD, 0x83B7, 0x03B2,
    0x0390, 0x8395, 0x839F, 0x039A, 0x838B, 0x038E, 0x0384, 0x8381,
    0x0280, 0x8285, 0x828F, 0x028A, 0x829B, 0x029E, 0x0294, 0x8291,
    0x82B3, 0x02B6, 0x02BC, 0x82B9, 0x02A8, 0x82AD, 0x82A7, 0x02A2,
    0x82E3, 0x02E6, 0x02EC, 0x82E9, 0x02F8, 0x82FD, 0x82F7, 0x02F2,
    0x02D0, 0x82D5, 0x82DF, 0x02DA, 0x82CB, 0x02CE, 0x02C4, 0x82C1,
    0x8243, 0x0246, 0x024C, 0x8249, 0x0258, 0x825D, 0x8257, 0x0252,
    0x0270, 0x8275, 0x827F, 0x027A, 0x826B, 0x026E, 0x0264, 0x8261,
    0x0220, 0x8225, 0x822F, 0x022A, 0x823B, 0x023E, 0x0234, 0x8231,
    0x8213, 0x0216, 0x021C, 0x8219, 0x0208, 0x820D, 0x8207, 0x0202
]
class Protocol2:
    """
    Builder/parser for Robotis Dynamixel Protocol 2.0 packets (e.g. for the
    XL-320 servo).

    Wire format of every packet:

        [0xFF, 0xFF, 0xFD, 0x00, ID, LEN_L, LEN_H, INSTR, PARAM 1..N, CRC_L, CRC_H]

    NOTE(review): servo-model constants referenced by these methods
    (BROADCAST_ADDR, GOAL_POSITION, GOAL_VELOCITY, LED, MAX_RPM and the
    RESET_ALL* levels) are not defined in this class; they are expected to
    be provided by a servo-specific subclass -- confirm against the servo
    definitions elsewhere in the package.
    """

    # --------- INSTRUCTIONS -----
    PING = 0x01
    READ = 0x02
    WRITE = 0x03
    REG_WRITE = 0x04
    ACTION = 0x05
    RESET = 0x06
    REBOOT = 0x08
    CLEAR = 0X10
    STATUS = 0x55
    SYNC_READ = 0x82
    SYNC_WRITE = 0x83
    BULK_READ = 0x92
    BULK_WRITE = 0x93

    def makePingPacket(self, ID=None):
        """
        Build a PING packet for servo `ID`; broadcast when ID is None.

        BUGFIX: the old check `if not ID` also mapped the valid servo id 0
        to the broadcast address; only None selects broadcast now.
        """
        if ID is None:
            ID = self.BROADCAST_ADDR
        return self.makePacket(ID, self.PING)

    def makeWritePacket(self, ID, reg, values=None):
        """
        Build a WRITE packet: write `values` (a flat list of bytes, already
        little endian -- use utils.le() for 16-bit words) starting at
        register address `reg` of servo `ID`.
        """
        if values:
            params = le(reg) + values
        else:
            params = le(reg)
        return self.makePacket(ID, self.WRITE, params)

    def makeReadPacket(self, ID, reg, values=None):
        """
        Build a READ packet: read `values` bytes starting at register
        address `reg` of servo `ID`.

        BUGFIX: the parameters were previously packed as the nested list
        [reg, values], which is not a flat byte sequence (check_sum() would
        raise on the inner list). Protocol 2.0 READ takes four parameter
        bytes: address (2 bytes, little endian) then length (2 bytes,
        little endian), matching how makeWritePacket() packs `reg`.
        """
        params = le(reg) + le(values)
        return self.makePacket(ID, self.READ, params)

    def makeResetPacket(self, ID, level):
        """
        Build a RESET packet; `level` (a common.ResetLevels value) selects
        how much of the control table is restored.

        Raises Exception on an unknown level.
        """
        if ResetLevels.all == level:
            params = [self.RESET_ALL]
        elif ResetLevels.allButID == level:
            params = [self.RESET_ALL_BUT_ID]
        elif ResetLevels.allButIDDR == level:
            params = [self.RESET_ALL_BUT_ID_BAUD_RATE]
        else:
            raise Exception("Invalid reset level")
        return self.makePacket(ID, self.RESET, params)

    def makeRebootPacket(self, ID):
        """Build a REBOOT packet for servo `ID`."""
        return self.makePacket(ID, self.REBOOT)

    def makeServoMovePacket(self, ID, angle, degrees=True):
        """
        Build a packet commanding servo `ID` to `angle` (degrees by
        default; radians when degrees=False).
        """
        val = angle2int(angle, degrees)  # [LSB, MSB] servo counts
        return self.makeWritePacket(ID, self.GOAL_POSITION, val)

    def makeSyncMovePacket(self, info, degrees=True):
        """
        Build one SYNC WRITE packet that moves many servos at once.

        info = [[ID, angle], [ID, angle], ...] with angles in degrees
        (or radians when degrees=False).

        A sync packet is used (rather than bulk) because every servo shares
        the same register (GOAL_POSITION) and data size (2 bytes); e.g. for
        the same commands a bulk packet is 94 bytes vs 50 for sync.

        BUGFIX: previously a flat [id, lsb, msb, id, ...] list was handed
        to makeSyncWritePacket(), which expects one [ID, data...] sub-list
        per servo and crashed on len(info[0]) (len() of an int).
        """
        data = []
        for cmd in info:
            angle = angle2int(cmd[1], degrees)  # [LSB, MSB]
            data.append([cmd[0], angle[0], angle[1]])
        return self.makeSyncWritePacket(self.GOAL_POSITION, data)

    def makeSyncWritePacket(self, reg, info):
        """
        Build a SYNC WRITE packet. No status packets are returned because
        the broadcast id (0xFE) is used.

        info = [[ID, data1, ...], [ID, data1, ...], ...]; every sub-list
        must have the same length.

        NOTE(review): the register address and per-servo length are packed
        as single bytes here; confirm against the Protocol 2.0 spec, which
        describes 16-bit address/length fields for SYNC WRITE.
        """
        data = [reg]                    # starting register address
        data.append(len(info[0]) - 1)   # data bytes per servo, not counting ID
        for cmd in info:
            data += cmd
        return self.makePacket(self.BROADCAST_ADDR, self.SYNC_WRITE, data)

    def makeBulkReadPacket(self, data):
        """
        Build a BULK READ packet.

        data = [[data len, ID, addr], [data len, ID, addr], ...]

        NOTE(review): makePacket()/check_sum() require a flat list of ints;
        a nested list as the format above suggests would raise inside
        check_sum -- confirm how callers actually shape `data`.
        """
        return self.makePacket(self.BROADCAST_ADDR, self.BULK_READ, data)

    def makeLEDPacket(self, ID, value):
        """Build a packet that writes `value` to the servo's LED register
        (on/off and, on servos that support it, color)."""
        return self.makeWritePacket(ID, self.LED, [value])

    def makeSpeedPacket(self, speed):
        """
        Build a broadcast packet setting the goal velocity for all servos.

        speed -- [0-1023] in units of 0.111 rpm, clamped to MAX_RPM.
        A speed of 0 means "use the motor's maximum speed".
        """
        speed = speed if (speed <= self.MAX_RPM) else self.MAX_RPM
        return self.makeWritePacket(
            self.BROADCAST_ADDR,
            self.GOAL_VELOCITY,
            le(speed)
        )

    def decodePacket(self, pkts):
        """Alias for find_packets()."""
        return self.find_packets(pkts)

    def processStatusPacket(self, pkt):
        """Alias for status_packet()."""
        return self.status_packet(pkt)

    def check_sum(self, data_blk):
        """
        Compute the table-driven CRC-16 over `data_blk` (the entire packet
        except the final two CRC bytes), using the module-level crc_table.

        Returns the CRC as a 16-bit int.
        """
        crc_accum = 0
        for byte in data_blk:
            i = ((crc_accum >> 8) ^ byte) & 0xFF
            # shift in the next byte and keep the accumulator to 16 bits
            crc_accum = ((crc_accum << 8) ^ crc_table[i]) & 0xFFFF
        return crc_accum

    def makePacket(self, ID, instr, params=None):
        """
        Assemble a complete Protocol 2.0 packet:

            [0xFF, 0xFF, 0xFD, 0x00, ID, LEN_L, LEN_H, INSTR, PARAM..., CRC_L, CRC_H]

        ID     -- servo id (or BROADCAST_ADDR)
        instr  -- instruction byte
        params -- flat list of parameter bytes (already little endian)
        """
        # header(3), reserved(1), id(1), length placeholder(2), instruction(1)
        pkt = [0xFF, 0xFF, 0xFD, 0x00, ID, 0x00, 0x00, instr]
        if params:
            pkt += params
        # LEN counts everything after the LEN field plus the 2 CRC bytes,
        # i.e. len(pkt) - header(3) - reserved(1) - id(1)
        length = le(len(pkt) - 5)
        pkt[5] = length[0]  # LEN_L
        pkt[6] = length[1]  # LEN_H
        pkt += le(self.check_sum(pkt))
        return pkt

    def find_packets(self, pkt):
        """
        Scan a list of received bytes for valid Protocol 2.0 packets.

        `pkt` is consumed in place: bytes are deleted as they are examined.
        Returns a list of valid packets (each itself a list of ints).
        """
        ret = []
        # 10 bytes is the smallest possible complete packet
        while len(pkt) - 10 >= 0:
            if pkt[0:4] != [0xFF, 0xFF, 0xFD, 0x00]:
                pkt.pop(0)  # not aligned on a header yet; slide forward
                continue
            length = (pkt[6] << 8) + pkt[5]
            crc_pos = 5 + length
            pkt_crc = pkt[crc_pos:crc_pos + 2]
            crc = le(self.check_sum(pkt[:crc_pos]))
            pkt_end = crc_pos + 2
            if pkt_crc == crc:
                ret.append(pkt[:pkt_end])
            # whether valid or corrupt, drop the bytes just examined
            del pkt[:pkt_end]
        return ret

    def status_packet(self, pkt):
        """
        Decode a status packet into a dict with keys:
        'id', 'error str', 'error num', 'params', 'raw'.

        NOTE(review): the error-bit names below look like the Protocol 1.0
        error flags, and 'params': pkt[5:-1] / 'id': pkt[2] do not match
        the Protocol 2.0 field offsets -- confirm against the servo spec.
        """
        def getError(err):
            errors = [
                'Input Voltage',  # bit 0
                'Angle Limit',
                'Overheating',
                'Range',
                'Checksum',
                'Overload',
                'Instruction',    # BUGFIX: was misspelled 'Instrunction'
                'None'            # bit 7
            ]
            if err != 128:
                # collect the name of every set bit (empty string if err == 0)
                err_str = [errors[i] for i in range(8) if (err >> i) & 1]
                return ','.join(err_str)
            return errors[7]

        return {
            'id': pkt[2],
            'error str': getError(pkt[4]),
            'error num': pkt[4],
            'params': pkt[5:-1],
            'raw': list(pkt)
        }
| [
"walchko@users.noreply.github.com"
] | walchko@users.noreply.github.com |
d0650ff6942b772d3ceb8e2f766ec26f000c88c9 | 0d15e76677f3bf97d21978e73292f0d5c535ebac | /spacy/en/language_data.py | 3c9db8fe2386b2f759130f0b5b16bdb093197839 | [
"MIT"
] | permissive | ExplodingCabbage/spaCy | 62c1adf7a588827f32c16bc990d70f1cfe2b56c2 | 001abe2b9d6690fcd7e7b96242bb1b9cee0f5784 | refs/heads/master | 2020-07-02T22:14:02.890909 | 2016-11-20T02:45:51 | 2016-11-20T02:45:51 | 74,277,675 | 1 | 0 | null | 2016-11-20T13:58:26 | 2016-11-20T13:58:24 | null | UTF-8 | Python | false | false | 63,126 | py | # encoding: utf8
from __future__ import unicode_literals
# English stop-word list based on the improved list from Stone, Denis &
# Kwantes (2010), stored as a set for O(1) membership tests. Note that some
# entries are apostrophe-stripped contraction stems ("didn", "doesn",
# "cant", "couldnt") and a few words appear twice in the text ("very",
# "rather") -- harmless inside a set.
STOP_WORDS = set("""
a about above across after afterwards again against all almost alone
along already also although always am among amongst amoungst amount
an and another any anyhow anyone anything anyway anywhere are around
as at back be became because become becomes becoming been before
beforehand behind being below beside besides between beyond bill
both bottom but by call can cannot cant co computer con could couldnt
cry de describe detail did didn do does doesn doing don done down due
during each eg eight either eleven else elsewhere empty enough etc
even ever every everyone everything everywhere except few fifteen
fify fill find fire first five for former formerly forty found four
from front full further get give go had has hasnt have he hence her
here hereafter hereby herein hereupon hers herself him himself his
how however hundred i ie if in inc indeed interest into is it its
itself keep last latter latterly least less ltd just kg km made make
many may me meanwhile might mill mine more moreover most mostly move
much must my myself name namely neither never nevertheless next nine
no nobody none noone nor not nothing now nowhere of off often on once
one only onto or other others otherwise our ours ourselves out over
own part per perhaps please put rather re quite rather really regarding
same say see seem seemed seeming seems serious several she should
show side since sincere six sixty so some somehow someone something
sometime sometimes somewhere still such system take ten than that the
their them themselves then thence there thereafter thereby therefore
therein thereupon these they thick thin third this those though three
through throughout thru thus to together too top toward towards twelve
twenty two un under until up unless upon us used using various very
very via was we well were what whatever when whence whenever where whereafter
whereas whereby wherein whereupon wherever whether which while whither
who whoever whole whom whose why will with within without would yet you
your yours yourself yourselves
""".split())
# Maps fine-grained (Penn Treebank-style) part-of-speech tags to a coarse
# POS plus morphological features.
#
# BUGFIX: "AFX" and "HYPH" each appeared twice in this dict literal; the
# later values silently shadowed the earlier ones at runtime. Only the
# winning (later) values are kept, placed at the keys' original first
# positions so the resulting dict -- including insertion order -- is
# identical to what the duplicated literal produced.
TAG_MAP = {
    ".": {"pos": "punct", "puncttype": "peri"},
    ",": {"pos": "punct", "puncttype": "comm"},
    "-LRB-": {"pos": "punct", "puncttype": "brck", "punctside": "ini"},
    "-RRB-": {"pos": "punct", "puncttype": "brck", "punctside": "fin"},
    "``": {"pos": "punct", "puncttype": "quot", "punctside": "ini"},
    "\"\"": {"pos": "punct", "puncttype": "quot", "punctside": "fin"},
    "''": {"pos": "punct", "puncttype": "quot", "punctside": "fin"},
    ":": {"pos": "punct"},
    "$": {"pos": "sym", "other": {"symtype": "currency"}},
    "#": {"pos": "sym", "other": {"symtype": "numbersign"}},
    "AFX": {"pos": "x"},
    "CC": {"pos": "conj", "conjtype": "coor"},
    "CD": {"pos": "num", "numtype": "card"},
    "DT": {"pos": "det"},
    "EX": {"pos": "adv", "advtype": "ex"},
    "FW": {"pos": "x", "foreign": "foreign"},
    "HYPH": {"pos": "punct"},
    "IN": {"pos": "adp"},
    "JJ": {"pos": "adj", "degree": "pos"},
    "JJR": {"pos": "adj", "degree": "comp"},
    "JJS": {"pos": "adj", "degree": "sup"},
    "LS": {"pos": "punct", "numtype": "ord"},
    "MD": {"pos": "verb", "verbtype": "mod"},
    "NIL": {"pos": ""},
    "NN": {"pos": "noun", "number": "sing"},
    "NNP": {"pos": "propn", "nountype": "prop", "number": "sing"},
    "NNPS": {"pos": "propn", "nountype": "prop", "number": "plur"},
    "NNS": {"pos": "noun", "number": "plur"},
    "PDT": {"pos": "adj", "adjtype": "pdt", "prontype": "prn"},
    "POS": {"pos": "part", "poss": "poss"},
    "PRP": {"pos": "pron", "prontype": "prs"},
    "PRP$": {"pos": "adj", "prontype": "prs", "poss": "poss"},
    "RB": {"pos": "adv", "degree": "pos"},
    "RBR": {"pos": "adv", "degree": "comp"},
    "RBS": {"pos": "adv", "degree": "sup"},
    "RP": {"pos": "part"},
    "SYM": {"pos": "sym"},
    "TO": {"pos": "part", "parttype": "inf", "verbform": "inf"},
    # NOTE(review): "intJ" capitalization looks like a typo for "intj" --
    # confirm downstream expectations before changing the value.
    "UH": {"pos": "intJ"},
    "VB": {"pos": "verb", "verbform": "inf"},
    "VBD": {"pos": "verb", "verbform": "fin", "tense": "past"},
    "VBG": {"pos": "verb", "verbform": "part", "tense": "pres", "aspect": "prog"},
    "VBN": {"pos": "verb", "verbform": "part", "tense": "past", "aspect": "perf"},
    "VBP": {"pos": "verb", "verbform": "fin", "tense": "pres"},
    "VBZ": {"pos": "verb", "verbform": "fin", "tense": "pres", "number": "sing", "person": 3},
    "WDT": {"pos": "adj", "prontype": "int|rel"},
    "WP": {"pos": "noun", "prontype": "int|rel"},
    "WP$": {"pos": "adj", "poss": "poss", "prontype": "int|rel"},
    "WRB": {"pos": "adv", "prontype": "int|rel"},
    "SP": {"pos": "space"},
    "ADD": {"pos": "x"},
    "NFP": {"pos": "punct"},
    "GW": {"pos": "x"},
    "XX": {"pos": "x"},
    "BES": {"pos": "verb"},
    "HVS": {"pos": "verb"}
}
# Tokenizer rule strings, split on whitespace into lists of patterns.
# Prefixes and suffixes are literal strings or regex fragments peeled off
# the edges of candidate tokens; infixes are regexes that split tokens
# internally (ellipses, intra-word hyphens/dashes, number ranges, commas
# between letters). Note several suffix/infix entries use look-behinds.
TOKENIZER_PREFIXES = r''', " ( [ { * < $ £ “ ' `` ` # US$ C$ A$ a- ‘ .... ...'''.split()
TOKENIZER_SUFFIXES = (r''', \" \) \] \} \* \! \? % \$ > : ; ' ” '' 's 'S ’s ’S ’'''
                      r'''\.\. \.\.\. \.\.\.\. (?<=[a-z0-9)\]”"'%\)])\. '''
                      r'''(?<=[0-9])km''').strip().split()
TOKENIZER_INFIXES = (r'''\.\.\.+ (?<=[a-z])\.(?=[A-Z]) (?<=[a-zA-Z])-(?=[a-zA-z]) '''
                     r'''(?<=[a-zA-Z])--(?=[a-zA-z]) (?<=[0-9])-(?=[0-9]) '''
                     r'''(?<=[A-Za-z]),(?=[A-Za-z])''').split()
TOKENIZER_EXCEPTIONS = {
"and/or": [
{
"F": "and/or",
"L": "and/or",
"pos": "CC"
}],
"Ph.D.": [
{
"F": "Ph.D."
}],
"d.": [
{
"F": "d."
}
],
"Theydve": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
":/": [
{
"F": ":/"
}
],
"shouldn't've": [
{
"F": "should"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"There'll": [
{
"F": "There"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"E.G.": [
{
"F": "E.G."
}
],
"howll": [
{
"F": "how"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"6a.m.": [
{
"F": "6"
},
{
"F": "a.m."
}
],
"Ore.": [
{
"F": "Ore."
}
],
"Hadn't've": [
{
"F": "Had",
"L": "have",
"pos": "VBD"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
":>": [
{
"F": ":>"
}
],
"3p.m.": [
{
"F": "3"
},
{
"F": "p.m."
}
],
"who'll": [
{
"F": "who"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"5a.m.": [
{
"F": "5"
},
{
"F": "a.m."
}
],
":(": [
{
"F": ":("
}
],
":0": [
{
"F": ":0"
}
],
"10a.m.": [
{
"F": "10"
},
{
"F": "a.m."
}
],
"aint": [
{
"F": "ai",
"pos": "VBP",
"number": 2,
"L": "be"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
" ": [
{
"pos": "SP",
"F": " "
}
],
"Dec.": [
{
"F": "Dec."
}
],
"Shouldnt": [
{
"F": "Should"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Ky.": [
{
"F": "Ky."
}
],
"when's": [
{
"F": "when"
},
{
"F": "'s"
}
],
"Didnt": [
{
"F": "Did",
"L": "do",
"pos": "VBD"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"itll": [
{
"L": "-PRON-",
"F": "it"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"Who're": [
{
"F": "Who"
},
{
"F": "'re"
}
],
"=D": [
{
"F": "=D"
}
],
"Ain't": [
{
"F": "Ai",
"pos": "VBP",
"number": 2,
"L": "be"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Can't": [
{
"F": "Ca",
"L": "can",
"pos": "MD"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Whyre": [
{
"F": "Why"
},
{
"F": "re"
}
],
"Aren't": [
{
"F": "Are",
"pos": "VBP",
"number": 2,
"L": "be"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Neednt": [
{
"F": "Need"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"should've": [
{
"F": "should"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"shouldn't": [
{
"F": "should"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Idve": [
{
"L": "-PRON-",
"F": "I"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"weve": [
{
"F": "we"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Va.": [
{
"F": "Va."
}
],
"D.C.": [
{
"F": "D.C."
}
],
"3am": [
{
"F": "3"
},
{
"L": "a.m.",
"F": "am"
}
],
"Ive": [
{
"L": "-PRON-",
"F": "I"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Md.": [
{
"F": "Md."
}
],
";D": [
{
"F": ";D"
}
],
"Mrs.": [
{
"F": "Mrs."
}
],
"Minn.": [
{
"F": "Minn."
}
],
"they'd": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"Youdve": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"theyve": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Weren't": [
{
"F": "Were"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"werent": [
{
"F": "were"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"whyre": [
{
"F": "why"
},
{
"F": "re"
}
],
"g.": [
{
"F": "g."
}
],
"I'm": [
{
"L": "-PRON-",
"F": "I"
},
{
"pos": "VBP",
"F": "'m",
"tenspect": 1,
"number": 1,
"L": "be"
}
],
":p": [
{
"F": ":p"
}
],
"She'd've": [
{
"L": "-PRON-",
"F": "She"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"not've": [
{
"F": "not",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"we'll": [
{
"F": "we"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
":O": [
{
"F": ":O"
}
],
"<33": [
{
"F": "<33"
}
],
"Don't": [
{
"L": "do",
"F": "Do"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Whyll": [
{
"F": "Why"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"''": [
{
"F": "''"
}
],
"they've": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"t.": [
{
"F": "t."
}
],
"wasn't": [
{
"F": "was"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"could've": [
{
"pos": "MD",
"F": "could"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"what've": [
{
"F": "what"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"havent": [
{
"pos": "VB",
"F": "have"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Who've": [
{
"F": "Who"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"11am": [
{
"F": "11"
},
{
"L": "a.m.",
"F": "am"
}
],
"Shan't": [
{
"F": "Sha"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"i'll": [
{
"L": "-PRON-",
"F": "i"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"i.e.": [
{
"F": "i.e."
}
],
"you'd": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"w.": [
{
"F": "w."
}
],
"whens": [
{
"F": "when"
},
{
"F": "s"
}
],
"whys": [
{
"F": "why"
},
{
"F": "s"
}
],
"6pm": [
{
"F": "6"
},
{
"L": "p.m.",
"F": "pm"
}
],
"4p.m.": [
{
"F": "4"
},
{
"F": "p.m."
}
],
"Whereve": [
{
"F": "Where"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"o_o": [
{
"F": "o_o"
}
],
"Mo.": [
{
"F": "Mo."
}
],
"Kan.": [
{
"F": "Kan."
}
],
"\u00a0": [
{
"pos": "SP",
"L": " ",
"F": "\u00a0"
}
],
"there'd": [
{
"F": "there"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"N.H.": [
{
"F": "N.H."
}
],
"(^_^)": [
{
"F": "(^_^)"
}
],
"Mont.": [
{
"F": "Mont."
}
],
"hadn't've": [
{
"F": "had",
"L": "have",
"pos": "VBD"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"whatll": [
{
"F": "what"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"wouldn't've": [
{
"F": "would"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"there's": [
{
"F": "there"
},
{
"F": "'s"
}
],
"2pm": [
{
"F": "2"
},
{
"L": "p.m.",
"F": "pm"
}
],
"Who'll": [
{
"F": "Who"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"o_O": [
{
"F": "o_O"
}
],
"Nev.": [
{
"F": "Nev."
}
],
"youll": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"wouldve": [
{
"F": "would"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Nov.": [
{
"F": "Nov."
}
],
"z.": [
{
"F": "z."
}
],
"xDD": [
{
"F": "xDD"
}
],
"Sen.": [
{
"F": "Sen."
}
],
"Wouldnt": [
{
"F": "Would"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Thered": [
{
"F": "There"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"Youre": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "re"
}
],
"Couldn't've": [
{
"pos": "MD",
"F": "Could"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"who're": [
{
"F": "who"
},
{
"F": "'re"
}
],
"Whys": [
{
"F": "Why"
},
{
"F": "s"
}
],
"mightn't've": [
{
"F": "might"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Wholl": [
{
"F": "Who"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"hadn't": [
{
"F": "had",
"L": "have",
"pos": "VBD"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Havent": [
{
"pos": "VB",
"F": "Have"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Whatve": [
{
"F": "What"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
":)": [
{
"F": ":)"
}
],
"o.O": [
{
"F": "o.O"
}
],
"Thats": [
{
"F": "That"
},
{
"F": "s"
}
],
":((": [
{
"F": ":(("
}
],
"Gov.": [
{
"F": "Gov."
}
],
"Howll": [
{
"F": "How"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"p.": [
{
"F": "p."
}
],
"wouldn't": [
{
"F": "would"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"9pm": [
{
"F": "9"
},
{
"L": "p.m.",
"F": "pm"
}
],
"You'll": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"Ala.": [
{
"F": "Ala."
}
],
"12am": [
{
"F": "12"
},
{
"L": "a.m.",
"F": "am"
}
],
"=]": [
{
"F": "=]"
}
],
"Cant": [
{
"F": "Ca",
"L": "can",
"pos": "MD"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"i'd": [
{
"L": "-PRON-",
"F": "i"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"a.m.": [
{
"F": "a.m."
}
],
"weren't": [
{
"F": "were"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"would've": [
{
"F": "would"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"i'm": [
{
"L": "-PRON-",
"F": "i"
},
{
"pos": "VBP",
"F": "'m",
"tenspect": 1,
"number": 1,
"L": "be"
}
],
"why'll": [
{
"F": "why"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"we'd've": [
{
"F": "we"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Shouldve": [
{
"F": "Should"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"can't": [
{
"F": "ca",
"L": "can",
"pos": "MD"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"thats": [
{
"F": "that"
},
{
"F": "s"
}
],
"1p.m.": [
{
"F": "1"
},
{
"F": "p.m."
}
],
"12a.m.": [
{
"F": "12"
},
{
"F": "a.m."
}
],
"Hes": [
{
"L": "-PRON-",
"F": "He"
},
{
"F": "s"
}
],
"Needn't": [
{
"F": "Need"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"It's": [
{
"L": "-PRON-",
"F": "It"
},
{
"F": "'s"
}
],
"St.": [
{
"F": "St."
}
],
"Why're": [
{
"F": "Why"
},
{
"F": "'re"
}
],
":(((": [
{
"F": ":((("
}
],
"Hed": [
{
"L": "-PRON-",
"F": "He"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"Mt.": [
{
"L": "Mount",
"F": "Mt."
}
],
"couldn't": [
{
"pos": "MD",
"F": "could"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"What've": [
{
"F": "What"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"4a.m.": [
{
"F": "4"
},
{
"F": "a.m."
}
],
"Ind.": [
{
"F": "Ind."
}
],
"It'd": [
{
"L": "-PRON-",
"F": "It"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"<3": [
{
"F": "<3"
}
],
"theydve": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"aren't": [
{
"F": "are",
"pos": "VBP",
"number": 2,
"L": "be"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Mightn't": [
{
"F": "Might"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"'S": [
{
"L": "'s",
"F": "'S"
}
],
"I've": [
{
"L": "-PRON-",
"F": "I"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Whered": [
{
"F": "Where"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"Itdve": [
{
"L": "-PRON-",
"F": "It"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"I'ma": [
{
"L": "-PRON-",
"F": "I"
},
{
"F": "'ma"
}
],
"whos": [
{
"F": "who"
},
{
"F": "s"
}
],
"They'd": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"What'll": [
{
"F": "What"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
":Y": [
{
"F": ":Y"
}
],
"You've": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Mustve": [
{
"F": "Must"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"whod": [
{
"F": "who"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"mightntve": [
{
"F": "might"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"I'd've": [
{
"L": "-PRON-",
"F": "I"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Must've": [
{
"F": "Must"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"it'd": [
{
"L": "-PRON-",
"F": "it"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"Ark.": [
{
"F": "Ark."
}
],
"Wis.": [
{
"F": "Wis."
}
],
"6p.m.": [
{
"F": "6"
},
{
"F": "p.m."
}
],
"what're": [
{
"F": "what"
},
{
"F": "'re"
}
],
"N.C.": [
{
"F": "N.C."
}
],
"Wasn't": [
{
"F": "Was"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"what's": [
{
"F": "what"
},
{
"F": "'s"
}
],
"he'd've": [
{
"L": "-PRON-",
"F": "he"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Jan.": [
{
"F": "Jan."
}
],
"She'd": [
{
"L": "-PRON-",
"F": "She"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"shedve": [
{
"L": "-PRON-",
"F": "she"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Tenn.": [
{
"F": "Tenn."
}
],
"ain't": [
{
"F": "ai",
"pos": "VBP",
"number": 2,
"L": "be"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Wash.": [
{
"F": "Wash."
}
],
"She's": [
{
"L": "-PRON-",
"F": "She"
},
{
"F": "'s"
}
],
"i'd've": [
{
"L": "-PRON-",
"F": "i"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"2a.m.": [
{
"F": "2"
},
{
"F": "a.m."
}
],
"We'd've": [
{
"F": "We"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"must've": [
{
"F": "must"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"That's": [
{
"F": "That"
},
{
"F": "'s"
}
],
"Sept.": [
{
"F": "Sept."
}
],
"whatre": [
{
"F": "what"
},
{
"F": "re"
}
],
"you'd've": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Dont": [
{
"L": "do",
"F": "Do"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"i.": [
{
"F": "i."
}
],
"Jun.": [
{
"F": "Jun."
}
],
"thered": [
{
"F": "there"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"Youd": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"couldn't've": [
{
"pos": "MD",
"F": "could"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Whens": [
{
"F": "When"
},
{
"F": "s"
}
],
"8a.m.": [
{
"F": "8"
},
{
"F": "a.m."
}
],
"Isnt": [
{
"F": "Is",
"L": "be",
"pos": "VBZ"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"mightve": [
{
"F": "might"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"'ol": [
{
"F": "'ol"
}
],
"2p.m.": [
{
"F": "2"
},
{
"F": "p.m."
}
],
"9a.m.": [
{
"F": "9"
},
{
"F": "a.m."
}
],
"q.": [
{
"F": "q."
}
],
"didnt": [
{
"F": "did",
"L": "do",
"pos": "VBD"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"ive": [
{
"L": "-PRON-",
"F": "i"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"It'd've": [
{
"L": "-PRON-",
"F": "It"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"e.g.": [
{
"F": "e.g."
}
],
"\t": [
{
"pos": "SP",
"F": "\t"
}
],
"Mich.": [
{
"F": "Mich."
}
],
"Itll": [
{
"L": "-PRON-",
"F": "It"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"didn't": [
{
"F": "did",
"L": "do",
"pos": "VBD"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"3pm": [
{
"F": "3"
},
{
"L": "p.m.",
"F": "pm"
}
],
"Jul.": [
{
"F": "Jul."
}
],
"7pm": [
{
"F": "7"
},
{
"L": "p.m.",
"F": "pm"
}
],
"cant": [
{
"F": "ca",
"L": "can",
"pos": "MD"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Miss.": [
{
"F": "Miss."
}
],
"im": [
{
"L": "-PRON-",
"F": "i"
},
{
"pos": "VBP",
"F": "m",
"tenspect": 1,
"number": 1,
"L": "be"
}
],
"Ariz.": [
{
"F": "Ariz."
}
],
"they'd've": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"f.": [
{
"F": "f."
}
],
"Co.": [
{
"F": "Co."
}
],
"Hadntve": [
{
"F": "Had",
"L": "have",
"pos": "VBD"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Weve": [
{
"F": "We"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"1a.m.": [
{
"F": "1"
},
{
"F": "a.m."
}
],
"=3": [
{
"F": "=3"
}
],
"Mightnt": [
{
"F": "Might"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"1pm": [
{
"F": "1"
},
{
"L": "p.m.",
"F": "pm"
}
],
"youdve": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Shedve": [
{
"L": "-PRON-",
"F": "She"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"theyd": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"Ill.": [
{
"F": "Ill."
}
],
"N.D.": [
{
"F": "N.D."
}
],
"Cannot": [
{
"F": "Can",
"L": "can",
"pos": "MD"
},
{
"F": "not",
"L": "not",
"pos": "RB"
}
],
"s.": [
{
"F": "s."
}
],
"Hadn't": [
{
"F": "Had",
"L": "have",
"pos": "VBD"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"What're": [
{
"F": "What"
},
{
"F": "'re"
}
],
"He'll": [
{
"L": "-PRON-",
"F": "He"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"wholl": [
{
"F": "who"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"They're": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "'re"
}
],
"Neb.": [
{
"F": "Neb."
}
],
"shouldnt": [
{
"F": "should"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"\n": [
{
"pos": "SP",
"F": "\n"
}
],
"whered": [
{
"F": "where"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"7a.m.": [
{
"F": "7"
},
{
"F": "a.m."
}
],
"youve": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"4am": [
{
"F": "4"
},
{
"L": "a.m.",
"F": "am"
}
],
"v.": [
{
"F": "v."
}
],
"notve": [
{
"F": "not",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"couldve": [
{
"pos": "MD",
"F": "could"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"mustve": [
{
"F": "must"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Youve": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"therell": [
{
"F": "there"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"might've": [
{
"F": "might"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Mustn't": [
{
"F": "Must"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"wheres": [
{
"F": "where"
},
{
"F": "s"
}
],
"they're": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "'re"
}
],
"idve": [
{
"L": "-PRON-",
"F": "i"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"hows": [
{
"F": "how"
},
{
"F": "s"
}
],
"Fla.": [
{
"F": "Fla."
}
],
"N.M.": [
{
"F": "N.M."
}
],
"youre": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "re"
}
],
"Didn't": [
{
"F": "Did",
"L": "do",
"pos": "VBD"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Couldve": [
{
"pos": "MD",
"F": "Could"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"10p.m.": [
{
"F": "10"
},
{
"F": "p.m."
}
],
"Del.": [
{
"F": "Del."
}
],
"Oct.": [
{
"F": "Oct."
}
],
"Rep.": [
{
"F": "Rep."
}
],
"cannot": [
{
"F": "can",
"L": "can",
"pos": "MD"
},
{
"F": "not",
"L": "not",
"pos": "RB"
}
],
"Im": [
{
"L": "-PRON-",
"F": "I"
},
{
"pos": "VBP",
"F": "m",
"tenspect": 1,
"number": 1,
"L": "be"
}
],
"howd": [
{
"F": "how"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"Okla.": [
{
"F": "Okla."
}
],
"Feb.": [
{
"F": "Feb."
}
],
"you've": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"You're": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "'re"
}
],
"she'll": [
{
"L": "-PRON-",
"F": "she"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"Theyll": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"don't": [
{
"L": "do",
"F": "do"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"itd": [
{
"L": "-PRON-",
"F": "it"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
":-)": [
{
"F": ":-)"
}
],
"Hedve": [
{
"L": "-PRON-",
"F": "He"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"isnt": [
{
"F": "is",
"L": "be",
"pos": "VBZ"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"won't": [
{
"F": "wo"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"We're": [
{
"F": "We"
},
{
"F": "'re"
}
],
"3a.m.": [
{
"F": "3"
},
{
"F": "a.m."
}
],
"^_^": [
{
"F": "^_^"
}
],
"\u2018S": [
{
"L": "'s",
"F": "\u2018S"
}
],
"9p.m.": [
{
"F": "9"
},
{
"F": "p.m."
}
],
"dont": [
{
"L": "do",
"F": "do"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"ima": [
{
"L": "-PRON-",
"F": "i"
},
{
"F": "ma"
}
],
"Let's": [
{
"F": "Let"
},
{
"L": "us",
"F": "'s"
}
],
"he's": [
{
"L": "-PRON-",
"F": "he"
},
{
"F": "'s"
}
],
"we've": [
{
"F": "we"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"What's": [
{
"F": "What"
},
{
"F": "'s"
}
],
"Who's": [
{
"F": "Who"
},
{
"F": "'s"
}
],
"-__-": [
{
"F": "-__-"
}
],
"hedve": [
{
"L": "-PRON-",
"F": "he"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"he'd": [
{
"L": "-PRON-",
"F": "he"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"When's": [
{
"F": "When"
},
{
"F": "'s"
}
],
"Mightn't've": [
{
"F": "Might"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"We've": [
{
"F": "We"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"\u2018s": [
{
"L": "'s",
"F": "\u2018s"
}
],
"Couldntve": [
{
"pos": "MD",
"F": "Could"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Who'd": [
{
"F": "Who"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
":-/": [
{
"F": ":-/"
}
],
"haven't": [
{
"pos": "VB",
"F": "have"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Gen.": [
{
"F": "Gen."
}
],
"(:": [
{
"F": "(:"
}
],
"arent": [
{
"F": "are",
"pos": "VBP",
"number": 2,
"L": "be"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"You'd've": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"c.": [
{
"F": "c."
}
],
"(=": [
{
"F": "(="
}
],
"Wouldn't": [
{
"F": "Would"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"who's": [
{
"F": "who"
},
{
"F": "'s"
}
],
"12p.m.": [
{
"F": "12"
},
{
"F": "p.m."
}
],
"5am": [
{
"F": "5"
},
{
"L": "a.m.",
"F": "am"
}
],
"Mightve": [
{
"F": "Might"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Theredve": [
{
"F": "There"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"theredve": [
{
"F": "there"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Messrs.": [
{
"F": "Messrs."
}
],
"who'd": [
{
"F": "who"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"Where's": [
{
"F": "Where"
},
{
"F": "'s"
}
],
"wont": [
{
"F": "wo"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"she'd've": [
{
"L": "-PRON-",
"F": "she"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"10pm": [
{
"F": "10"
},
{
"L": "p.m.",
"F": "pm"
}
],
"Corp.": [
{
"F": "Corp."
}
],
"Aug.": [
{
"F": "Aug."
}
],
"-_-": [
{
"F": "-_-"
}
],
"y.": [
{
"F": "y."
}
],
"Should've": [
{
"F": "Should"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"11pm": [
{
"F": "11"
},
{
"L": "p.m.",
"F": "pm"
}
],
"8am": [
{
"F": "8"
},
{
"L": "a.m.",
"F": "am"
}
],
"theyre": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "re"
}
],
"l.": [
{
"F": "l."
}
],
"Wouldntve": [
{
"F": "Would"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Ga.": [
{
"F": "Ga."
}
],
"1am": [
{
"F": "1"
},
{
"L": "a.m.",
"F": "am"
}
],
"Where've": [
{
"F": "Where"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"11a.m.": [
{
"F": "11"
},
{
"F": "a.m."
}
],
"mustn't": [
{
"F": "must"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"isn't": [
{
"F": "is",
"L": "be",
"pos": "VBZ"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Bros.": [
{
"F": "Bros."
}
],
"Aint": [
{
"F": "Ai",
"pos": "VBP",
"number": 2,
"L": "be"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"why's": [
{
"F": "why"
},
{
"F": "'s"
}
],
"V_V": [
{
"F": "V_V"
}
],
";p": [
{
"F": ";p"
}
],
"There'd": [
{
"F": "There"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"They'll": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"b.": [
{
"F": "b."
}
],
"how'll": [
{
"F": "how"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"Wedve": [
{
"F": "We"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"couldntve": [
{
"pos": "MD",
"F": "could"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"12pm": [
{
"F": "12"
},
{
"L": "p.m.",
"F": "pm"
}
],
"There's": [
{
"F": "There"
},
{
"F": "'s"
}
],
"we'd": [
{
"F": "we"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"Dr.": [
{
"F": "Dr."
}
],
"Whod": [
{
"F": "Who"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
":-P": [
{
"F": ":-P"
}
],
"whatve": [
{
"F": "what"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Wouldve": [
{
"F": "Would"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"o.": [
{
"F": "o."
}
],
"there'll": [
{
"F": "there"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
":]": [
{
"F": ":]"
}
],
"needn't": [
{
"F": "need"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"shouldntve": [
{
"F": "should"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"why're": [
{
"F": "why"
},
{
"F": "'re"
}
],
"p.m.": [
{
"F": "p.m."
}
],
"Doesnt": [
{
"F": "Does",
"L": "do",
"pos": "VBZ"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"whereve": [
{
"F": "where"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"they'll": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"I'd": [
{
"L": "-PRON-",
"F": "I"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"Might've": [
{
"F": "Might"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"mightnt": [
{
"F": "might"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Kans.": [
{
"F": "Kans."
}
],
"Not've": [
{
"F": "Not",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"e.": [
{
"F": "e."
}
],
"mightn't": [
{
"F": "might"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"you're": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "'re"
}
],
"Mar.": [
{
"F": "Mar."
}
],
"They've": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"\")": [
{
"F": "\")"
}
],
"what'll": [
{
"F": "what"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"Calif.": [
{
"F": "Calif."
}
],
"Could've": [
{
"pos": "MD",
"F": "Could"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Would've": [
{
"F": "Would"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
";)": [
{
"F": ";)"
}
],
";(": [
{
"F": ";("
}
],
"Isn't": [
{
"F": "Is",
"L": "be",
"pos": "VBZ"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"let's": [
{
"F": "let"
},
{
"L": "us",
"F": "'s"
}
],
"'em": [
{
"F": "'em"
}
],
"She'll": [
{
"L": "-PRON-",
"F": "She"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"I.E.": [
{
"F": "I.E."
}
],
"You'd": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"wouldnt": [
{
"F": "would"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"6am": [
{
"F": "6"
},
{
"L": "a.m.",
"F": "am"
}
],
":P": [
{
"F": ":P"
}
],
"Why'll": [
{
"F": "Why"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"Where'd": [
{
"F": "Where"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"Theyre": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "re"
}
],
"11p.m.": [
{
"F": "11"
},
{
"F": "p.m."
}
],
"Won't": [
{
"F": "Wo"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Couldn't": [
{
"pos": "MD",
"F": "Could"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"it's": [
{
"L": "-PRON-",
"F": "it"
},
{
"F": "'s"
}
],
"r.": [
{
"F": "r."
}
],
"it'll": [
{
"L": "-PRON-",
"F": "it"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"They'd've": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Ima": [
{
"L": "-PRON-",
"F": "I"
},
{
"F": "ma"
}
],
"5pm": [
{
"F": "5"
},
{
"L": "p.m.",
"F": "pm"
}
],
"10am": [
{
"F": "10"
},
{
"L": "a.m.",
"F": "am"
}
],
"m.": [
{
"F": "m."
}
],
"whats": [
{
"F": "what"
},
{
"F": "s"
}
],
"How's": [
{
"F": "How"
},
{
"F": "'s"
}
],
"Sep.": [
{
"F": "Sep."
}
],
"Shouldntve": [
{
"F": "Should"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"youd": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"Whatll": [
{
"F": "What"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"Wouldn't've": [
{
"F": "Would"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"How'd": [
{
"F": "How"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"doesnt": [
{
"F": "does",
"L": "do",
"pos": "VBZ"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"h.": [
{
"F": "h."
}
],
"Shouldn't": [
{
"F": "Should"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"He'd've": [
{
"L": "-PRON-",
"F": "He"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Mightntve": [
{
"F": "Might"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"couldnt": [
{
"pos": "MD",
"F": "could"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Haven't": [
{
"pos": "VB",
"F": "Have"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"<333": [
{
"F": "<333"
}
],
"doesn't": [
{
"F": "does",
"L": "do",
"pos": "VBZ"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Hasn't": [
{
"F": "Has"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"how's": [
{
"F": "how"
},
{
"F": "'s"
}
],
"hes": [
{
"L": "-PRON-",
"F": "he"
},
{
"F": "s"
}
],
"=[[": [
{
"F": "=[["
}
],
"xD": [
{
"F": "xD"
}
],
"he'll": [
{
"L": "-PRON-",
"F": "he"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"hed": [
{
"L": "-PRON-",
"F": "he"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"7p.m.": [
{
"F": "7"
},
{
"F": "p.m."
}
],
"how'd": [
{
"F": "how"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"u.": [
{
"F": "u."
}
],
"we're": [
{
"F": "we"
},
{
"F": "'re"
}
],
"vs.": [
{
"F": "vs."
}
],
"Hadnt": [
{
"F": "Had",
"L": "have",
"pos": "VBD"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Shant": [
{
"F": "Sha"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Theyve": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Hows": [
{
"F": "How"
},
{
"F": "s"
}
],
"We'll": [
{
"F": "We"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"N.Y.": [
{
"F": "N.Y."
}
],
"x.": [
{
"F": "x."
}
],
"8p.m.": [
{
"F": "8"
},
{
"F": "p.m."
}
],
"i've": [
{
"L": "-PRON-",
"F": "i"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Whove": [
{
"F": "Who"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"2am": [
{
"F": "2"
},
{
"L": "a.m.",
"F": "am"
}
],
"La.": [
{
"F": "La."
}
],
"i'ma": [
{
"L": "-PRON-",
"F": "i"
},
{
"F": "'ma"
}
],
"N.J.": [
{
"F": "N.J."
}
],
"Nebr.": [
{
"F": "Nebr."
}
],
"Howd": [
{
"F": "How"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"hadnt": [
{
"F": "had",
"L": "have",
"pos": "VBD"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"shant": [
{
"F": "sha"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"There'd've": [
{
"F": "There"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Inc.": [
{
"F": "Inc."
}
],
"I'll": [
{
"L": "-PRON-",
"F": "I"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"Why's": [
{
"F": "Why"
},
{
"F": "'s"
}
],
"Adm.": [
{
"F": "Adm."
}
],
"Shouldn't've": [
{
"F": "Should"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"n.": [
{
"F": "n."
}
],
"Wasnt": [
{
"F": "Was"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"whove": [
{
"F": "who"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
";-p": [
{
"F": ";-p"
}
],
"hasn't": [
{
"F": "has"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"wouldntve": [
{
"F": "would"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Wheres": [
{
"F": "Where"
},
{
"F": "s"
}
],
"How'll": [
{
"F": "How"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"there'd've": [
{
"F": "there"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Whos": [
{
"F": "Who"
},
{
"F": "s"
}
],
"shes": [
{
"L": "-PRON-",
"F": "she"
},
{
"F": "s"
}
],
"Doesn't": [
{
"F": "Does",
"L": "do",
"pos": "VBZ"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
"Arent": [
{
"F": "Are",
"pos": "VBP",
"number": 2,
"L": "be"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Hasnt": [
{
"F": "Has"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"j.": [
{
"F": "j."
}
],
"He's": [
{
"L": "-PRON-",
"F": "He"
},
{
"F": "'s"
}
],
"wasnt": [
{
"F": "was"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"whyll": [
{
"F": "why"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"co.": [
{
"F": "co."
}
],
"mustnt": [
{
"F": "must"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"He'd": [
{
"L": "-PRON-",
"F": "He"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"I.e.": [
{
"F": "I.e."
}
],
"Shes": [
{
"L": "-PRON-",
"F": "She"
},
{
"F": "s"
}
],
"where've": [
{
"F": "where"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Youll": [
{
"L": "-PRON-",
"F": "You"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"Apr.": [
{
"F": "Apr."
}
],
":')": [
{
"F": ":')"
}
],
"Conn.": [
{
"F": "Conn."
}
],
"8pm": [
{
"F": "8"
},
{
"L": "p.m.",
"F": "pm"
}
],
"9am": [
{
"F": "9"
},
{
"L": "a.m.",
"F": "am"
}
],
"hasnt": [
{
"F": "has"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"theyll": [
{
"L": "-PRON-",
"F": "they"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"it'd've": [
{
"L": "-PRON-",
"F": "it"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"itdve": [
{
"L": "-PRON-",
"F": "it"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"Jr.": [
{
"F": "Jr."
}
],
"Rev.": [
{
"F": "Rev."
}
],
"k.": [
{
"F": "k."
}
],
"wedve": [
{
"F": "we"
},
{
"F": "d",
"L": "would",
"pos": "MD"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"=)": [
{
"F": "=)"
}
],
"Colo.": [
{
"F": "Colo."
}
],
"Mr.": [
{
"F": "Mr."
}
],
"Werent": [
{
"F": "Were"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Therell": [
{
"F": "There"
},
{
"F": "ll",
"L": "will",
"pos": "MD"
}
],
"shan't": [
{
"F": "sha"
},
{
"F": "n't",
"L": "not",
"pos": "RB"
}
],
";-)": [
{
"F": ";-)"
}
],
"Wont": [
{
"F": "Wo"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"hadntve": [
{
"F": "had",
"L": "have",
"pos": "VBD"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"who've": [
{
"F": "who"
},
{
"F": "'ve",
"L": "have",
"pos": "VB"
}
],
"Whatre": [
{
"F": "What"
},
{
"F": "re"
}
],
"'s": [
{
"L": "'s",
"F": "'s"
}
],
"where'd": [
{
"F": "where"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"shouldve": [
{
"F": "should"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"a.": [
{
"F": "a."
}
],
"where's": [
{
"F": "where"
},
{
"F": "'s"
}
],
"Ltd.": [
{
"F": "Ltd."
}
],
"Mass.": [
{
"F": "Mass."
}
],
"neednt": [
{
"F": "need"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Pa.": [
{
"F": "Pa."
}
],
"It'll": [
{
"L": "-PRON-",
"F": "It"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"7am": [
{
"F": "7"
},
{
"L": "a.m.",
"F": "am"
}
],
"We'd": [
{
"F": "We"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"Whats": [
{
"F": "What"
},
{
"F": "s"
}
],
"\u2014": [
{
"pos": ":",
"L": "--",
"F": "\u2014"
}
],
"E.g.": [
{
"F": "E.g."
}
],
"Ms.": [
{
"F": "Ms."
}
],
":3": [
{
"F": ":3"
}
],
"5p.m.": [
{
"F": "5"
},
{
"F": "p.m."
}
],
"Itd": [
{
"L": "-PRON-",
"F": "It"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"May.": [
{
"F": "May."
}
],
"she'd": [
{
"L": "-PRON-",
"F": "she"
},
{
"F": "'d",
"L": "would",
"pos": "MD"
}
],
"Mustnt": [
{
"F": "Must"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"Notve": [
{
"F": "Not",
"L": "not",
"pos": "RB"
},
{
"F": "ve",
"L": "have",
"pos": "VB"
}
],
"you'll": [
{
"L": "-PRON-",
"F": "you"
},
{
"F": "'ll",
"L": "will",
"pos": "MD"
}
],
"Theyd": [
{
"L": "-PRON-",
"F": "They"
},
{
"F": "d",
"L": "would",
"pos": "MD"
}
],
"she's": [
{
"L": "-PRON-",
"F": "she"
},
{
"F": "'s"
}
],
"Couldnt": [
{
"pos": "MD",
"F": "Could"
},
{
"F": "nt",
"L": "not",
"pos": "RB"
}
],
"that's": [
{
"F": "that"
},
{
"F": "'s"
}
],
"4pm": [
{
"F": "4"
},
{
"L": "p.m.",
"F": "pm"
}
],
":))": [
{
"F": ":))"
}
]
}
| [
"honnibal+gh@gmail.com"
] | honnibal+gh@gmail.com |
fdfe941f2d276a821a9342bce3e3e89214a7ecfe | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v7/resources/types/video.py | da5d5c3d0b355e480d86c2f921f4d36b37f58b30 | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 1,756 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v7.resources",
marshal="google.ads.googleads.v7",
manifest={"Video",},
)
class Video(proto.Message):
    r"""A video.
    Attributes:
        resource_name (str):
            Output only. The resource name of the video. Video resource
            names have the form:
            ``customers/{customer_id}/videos/{video_id}``
        id (str):
            Output only. The ID of the video.
        channel_id (str):
            Output only. The owner channel id of the
            video.
        duration_millis (int):
            Output only. The duration of the video in
            milliseconds.
        title (str):
            Output only. The title of the video.
    """
    # Wire-format field numbers mirror the upstream Video proto message and
    # must not be renumbered; `optional=True` gives proto3 field presence.
    resource_name = proto.Field(proto.STRING, number=1,)
    id = proto.Field(proto.STRING, number=6, optional=True,)
    channel_id = proto.Field(proto.STRING, number=7, optional=True,)
    duration_millis = proto.Field(proto.INT64, number=8, optional=True,)
    title = proto.Field(proto.STRING, number=9, optional=True,)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | Z2Xsoft.noreply@github.com |
5980ba5d1e205daa68bf0ca7e142f29d2e8a19f4 | 0969844f258a8a66cce134b28c71c03cbc7317e6 | /Day8/day8.py | f69a89c2b1e208d8a45be47e8632ffc422d863d4 | [] | no_license | vikaBu/Advent-of-code-2020 | de128e11e7a6171f4e6f1e220667cd4e79ba0790 | 01a3940cd1d517eb5d1d1493ae1888689a67b185 | refs/heads/master | 2023-02-05T03:45:13.296933 | 2020-12-22T16:26:57 | 2020-12-22T16:26:57 | 317,599,911 | 0 | 0 | null | 2020-12-04T09:52:49 | 2020-12-01T16:22:29 | Python | UTF-8 | Python | false | false | 1,536 | py | def get_input():
with open("./inputday8.txt") as f:
return f.read().split("\n")
def part1(instructions):
    """Run the boot code once, unmodified.

    Returns the (accumulator, success) pair from runprogram(); for part 1
    the interesting value is the accumulator just before any instruction
    would execute a second time.
    """
    return runprogram(instructions)
def runprogram(instructions, change=-1):
    """Execute the hand-held console boot code (AoC 2020 day 8).

    Args:
        instructions: list of "op arg" strings, e.g. "jmp +4" or "acc -3".
        change: index of one instruction whose opcode is swapped
            (nop <-> jmp) just before it executes; -1 runs unmodified.

    Returns:
        (accumulator, success): the accumulator value, and True when the
        program ran off the end normally, False when an infinite loop was
        detected (an instruction was about to execute a second time).
    """
    pointer = 0
    accumulator = 0
    seen = set()  # executed pointers; a set makes loop detection O(1)
    success = True
    while pointer < len(instructions):
        if pointer in seen:
            # About to repeat an instruction: infinite loop detected.
            success = False
            break
        seen.add(pointer)
        operation, argument = instructions[pointer].split(" ")
        argument = int(argument)
        if pointer == change:
            # BUG FIX: the original used two consecutive `if`s, so a nop
            # flipped to jmp was immediately flipped back to nop and the
            # nop->jmp swap never took effect. `elif` makes it one-way.
            if operation == "nop":
                operation = "jmp"
            elif operation == "jmp":
                operation = "nop"
        if operation == "jmp":
            pointer += argument
        else:
            # nop and acc both fall through to the next instruction.
            if operation == "acc":
                accumulator += argument
            pointer += 1
    return accumulator, success
#_______________________________________________________
def part2(instructions):
    """Find the single nop<->jmp swap that lets the program terminate.

    Tries flipping each jmp/nop instruction in turn and returns
    (accumulator, True) for the first variant that runs to completion.
    Returns None implicitly when no single swap fixes the program.
    """
    for i, instruction in enumerate(instructions):
        # Only jmp/nop swaps can change control flow; acc lines are skipped.
        # The original had two identical branches (one per opcode); they are
        # merged here since a line contains at most one of the two opcodes.
        if "jmp" in instruction or "nop" in instruction:
            accumulator, success = runprogram(instructions, i)
            if success:
                return accumulator, success
def main():
    """Entry point: read the puzzle input and print both answers."""
    input = get_input()  # NOTE(review): shadows the builtin input()
    print(part1(input))
    print(part2(input))
main()
| [
"viktorija.buklajeva@zoopla.co.uk"
] | viktorija.buklajeva@zoopla.co.uk |
c963bd604f8baf0740eedddd93017a4b57ebd135 | 6456554a0836d7ed1b9b37e523daa5a91859f502 | /other/vipscrapy/vipscrapy/middlewares.py | 2a530d2541384228880e279974f55e6a0b319de5 | [] | no_license | zyt19910214/Studys | fb2436f4bd62f28b0c45d86ab92bc1ba48f57899 | d0b0f5d59f5d94e12ed138456a927047b7e55d96 | refs/heads/master | 2020-03-31T08:24:25.217574 | 2018-12-12T11:43:43 | 2018-12-12T11:43:43 | 152,055,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,603 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class VipscrapySpiderMiddleware(object):
    """Spider-middleware scaffold (as generated by Scrapy's project template).

    Every hook keeps Scrapy's default pass-through behaviour; see
    https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy instantiates the middleware here and wires the
        # spider_opened signal so we can log when crawling starts.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Called for each response entering the spider. Returning None
        # lets processing continue unchanged.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every Request/item produced by the spider untouched.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling: returning None defers to other middlewares
        # and Scrapy's default error handling.
        return None

    def process_start_requests(self, start_requests, spider):
        # Pass the spider's start requests straight through.
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class VipscrapyDownloaderMiddleware(object):
    """Downloader-middleware scaffold (as generated by Scrapy's template).

    All hooks keep Scrapy's default behaviour: requests and responses are
    passed through unmodified and exceptions are left to other handlers.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy instantiates the middleware here and subscribes the
        # spider_opened signal for start-of-crawl logging.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None tells Scrapy to keep processing the request
        # through the remaining middlewares and the downloader itself.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Defer to other middlewares / default exception handling.
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
"707150586@qq.com"
] | 707150586@qq.com |
4728768877333c9060e01d5d66cd0b2dc8cd58e2 | 74f04d78486d4986e4f0ef8c3bc480ba00caae4a | /articles/models.py | c9ee25d3e4b0b92deaea58fc10f1b49de02c4ee6 | [] | no_license | kimjy392/reboot-django | e24dd90182ee5d317bf13872ae169ac738a71c6c | 4280c7bffacd759a1b785ae576e9e89a0c2269d8 | refs/heads/master | 2022-12-10T12:18:38.422146 | 2019-10-28T01:12:41 | 2019-10-28T01:12:41 | 207,240,898 | 0 | 10 | null | 2022-12-08T06:13:37 | 2019-09-09T06:36:38 | Python | UTF-8 | Python | false | false | 594 | py | from django.db import models
# Create your models here.
class Reporter(models.Model):
    """A news reporter, identified only by name.

    Currently unlinked to Article (the FK on Article is commented out).
    """
    # Display name of the reporter.
    name = models.CharField(max_length=20)
class Article(models.Model):
    """A news article with a title, body text and an image."""
    title = models.CharField(max_length=50)
    content = models.TextField()
    # NOTE(review): ImageField without upload_to stores uploads directly
    # under MEDIA_ROOT and requires Pillow — confirm that is intended.
    image = models.ImageField()
    # created_at is set once on INSERT; updated_at refreshes on every save().
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # reporter = models.ForeignKey(Reporter, on_delete=models.CASCADE)
class Comment(models.Model):
    """A short comment attached to an Article.

    Deleting the article cascades and removes its comments.
    """
    content = models.CharField(max_length=50)
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
"kimjy392@gmail.com"
] | kimjy392@gmail.com |
2f14e4dbbb349aed3998968c565c70758358ae4e | 23f73a7a0c0ced134f6c18bb9c200617ce31f1d5 | /src/fauxmo/handlers/hass.py | 6b72bc98a0323f0764aee236d59e2be04d96d90a | [
"MIT"
] | permissive | clach04/fauxmo | f586d0024648f3da6d2ff38b8fe06fdb345bcfbd | 06a0b8ff20f4811de9ac08663e0d76f8fdd83764 | refs/heads/master | 2021-11-27T19:13:36.583893 | 2016-07-22T13:02:39 | 2016-07-22T13:02:39 | 66,683,609 | 2 | 0 | null | 2016-08-26T22:42:52 | 2016-08-26T22:42:52 | null | UTF-8 | Python | false | false | 2,671 | py | # -*- coding: utf-8 -*-
import homeassistant.remote
from homeassistant.const import (SERVICE_TURN_ON, SERVICE_TURN_OFF,
SERVICE_MOVE_UP, SERVICE_MOVE_DOWN)
class HassAPIHandler:
    """Handler for the Home Assistant (hass) Python API.

    Lets a config-declared hass entity be toggled with the Echo. The same
    thing is possible over the REST API, but the Python API is simpler to
    drive from here.
    """

    def __init__(self, host, password, entity, port=8123):
        """Bind the handler to one hass entity.

        Args:
            host (str): IP address of the machine running Home Assistant.
            password (str): Home Assistant API password.
            entity (str): hass ``entity_id``; one easy way to find it is to
                curl and grep the REST API, e.g.
                ``curl http://IP/api/bootstrap | grep entity_id``
        Kwargs:
            port (int): port hass listens on (default 8123).
        """
        self.host = host
        self.password = password
        self.entity = entity
        self.port = port
        # Domain is the entity_id prefix, e.g. "switch" in "switch.lamp".
        self.domain = entity.split(".")[0]
        self.api = homeassistant.remote.API(host, password, port=port)
        # Per-domain mapping of logical on/off to hass service constants.
        self.service_map = {
            'switch': {'on': SERVICE_TURN_ON, 'off': SERVICE_TURN_OFF},
            'rollershutter': {'on': SERVICE_MOVE_UP, 'off': SERVICE_MOVE_DOWN},
        }

    def send(self, signal):
        """Invoke *signal* on the bound entity via ``call_service``.

        The hass Python API returns nothing useful here but raises on
        failure, so a plain True is returned on success.

        Args:
            signal (const): a service constant from homeassistant.const
                (e.g. SERVICE_TURN_ON / SERVICE_TURN_OFF).
        """
        homeassistant.remote.call_service(self.api, self.domain, signal,
                                          {'entity_id': self.entity})
        return True

    def on(self):
        """Turn the entity on (move up, for rollershutters)."""
        return self.send(self.service_map[self.domain.lower()]['on'])

    def off(self):
        """Turn the entity off (move down, for rollershutters)."""
        return self.send(self.service_map[self.domain.lower()]['off'])
| [
"nate@n8henrie.com"
] | nate@n8henrie.com |
b70b4c6bf6bc9ff138b72e6f1fd75d39ff2da2a5 | 19f85c71b39d05b19127ab1beb465229c9ad8d1c | /authapi/serializer.py | 3c3f4a69dfd36d190f043dbd982047f702e5027f | [] | no_license | Evgenykurakin1989/art_backend | 3fb1438aa9c755efa1c48d17871ffc84599d7f7d | e7f9ebf48f9ea5a52a0408f88988e73ea4818590 | refs/heads/master | 2020-06-27T23:58:25.458119 | 2019-08-03T11:15:07 | 2019-08-03T11:15:07 | 200,086,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,774 | py | from rest_framework import serializers
# from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from rest_framework_jwt.serializers import JSONWebTokenSerializer
from django.contrib.auth import authenticate
from rest_framework_jwt.settings import api_settings
# Resolve the payload/encode callables from drf-jwt settings once at import
# time so all serializers in this module share the configured implementations.
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
# Use the project's configured user model (supports a custom AUTH_USER_MODEL).
User = get_user_model()
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the project's user model.

    BUG FIX: the original subclassed ``serializers.Serializer``, which
    ignores ``Meta.model``/``Meta.fields`` entirely, so no fields were ever
    (de)serialized. ``ModelSerializer`` is required for Meta-driven fields.
    The password is write-only so it never appears in API responses.
    """
    class Meta:
        model = User
        fields = ('email', 'password', 'first_name', 'last_name')
        extra_kwargs = {
            'password': {
                'write_only': True,
            },
        }
class CustomJWTSerializer(JSONWebTokenSerializer):
    """JWT login serializer that authenticates by email instead of username."""
    email_field = 'email'

    def validate(self, attrs):
        """Check the email/password pair and return a JWT token + user.

        Raises:
            serializers.ValidationError: when the account does not exist,
                a credential is missing, the password is wrong, or the
                account is disabled.
        """
        password = attrs.get("password")
        # Case-insensitive lookup; .first() avoids MultipleObjectsReturned.
        user_obj = User.objects.filter(email__iexact=attrs.get("email")).first()
        if user_obj is None:
            # BUG FIX: the original wrapped every message in _() without
            # importing gettext, so each failure path raised NameError
            # instead of a clean validation error. Plain strings restore
            # the intended 400 responses (message text unchanged).
            raise serializers.ValidationError(
                {"error": 'Account with this email/username does not exists'})
        credentials = {
            'email': user_obj.email,
            'password': password,
        }
        if not all(credentials.values()):
            # BUG FIX: the original left "{email_field}" unformatted in the
            # message (the .format call was commented out).
            msg = 'Must include "{email_field}" and "password".'.format(
                email_field=self.email_field)
            raise serializers.ValidationError({"error": msg})
        user = authenticate(**credentials)
        if not user:
            raise serializers.ValidationError({"error": 'Incorrect password.'})
        if not user.is_active:
            raise serializers.ValidationError('User account is disabled.')
        payload = jwt_payload_handler(user)
        return {
            'token': jwt_encode_handler(payload),
            'user': user,
        }
class LoginSerializer(serializers.Serializer):
    """Plain credential payload: an email/password pair for login requests."""
    email = serializers.CharField()
    password = serializers.CharField()
class ResetEmailSerializer(serializers.Serializer):
    """Serializer for requesting a password reset email."""

    email = serializers.EmailField()

    def validate(self, attrs):
        # Reject the request when no account matches the address; the
        # lookup is case-insensitive, mirroring the login serializer.
        requested_email = attrs.get('email')
        matching_user = User.objects.filter(
            email__iexact=requested_email).first()
        if matching_user is None:
            raise serializers.ValidationError('Invalid Email!')
        return attrs
class PasswordTokenSerializer(serializers.Serializer):
    """Payload for a password change: old/new passwords plus a reset token."""
    old_password = serializers.CharField()
    new_password = serializers.CharField()
    token = serializers.CharField()
"evgenykurakin19891031@gmail.com"
] | evgenykurakin19891031@gmail.com |
0554e077b0db3b39fc887e6b4986a336cc20fc9a | 6a7d8b67aad59c51dafdfb8bcffd53864a3d65b0 | /LintCode/inorderSuccessorBST.py | 4967d1c45481e78e4f3fb69538a9e2576d98cf12 | [] | no_license | dicao425/algorithmExercise | 8bba36c1a08a232678e5085d24bac1dbee7e5364 | 36cb33af758b1d01da35982481a8bbfbee5c2810 | refs/heads/master | 2021-10-07T08:56:18.030583 | 2018-12-04T05:59:17 | 2018-12-04T05:59:17 | 103,611,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | #!/usr/bin/python
import sys
"""
Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
"""
class Solution:
    """
    @param: root: The root of the BST.
    @param: p: You need find the successor node of p.
    @return: Successor of p.
    """
    def inorderSuccessor(self, root, p):
        # Walk from the root towards p's value, remembering the most recent
        # ancestor we turned left at -- that ancestor is the successor
        # whenever p has no right subtree.
        if root is None:
            return None
        candidate = None
        node = root
        while node is not None and node.val != p.val:
            if p.val < node.val:
                candidate = node
                node = node.left
            else:
                node = node.right
        if node is None:
            # p's value is not present in the tree.
            return None
        if node.right is None:
            return candidate
        # Otherwise the successor is the leftmost node of the right subtree.
        node = node.right
        while node.left is not None:
            node = node.left
        return node
def main():
aa = Solution()
return 0
if __name__ == "__main__":
sys.exit(main()) | [
"di.cao425@gmail.com"
] | di.cao425@gmail.com |
25b17dd9edbe9aef94eab4c040eacc6014eb9c6e | 31fed80218632215659e8786d41783b3a493be8c | /app/models.py | 6822c1ee9f12f37e389478a00c52ff9af367f59e | [] | no_license | kahl-l/Flask-Administration | ef80a58b5b801be7c431918fd2441a9ae0ad1d85 | e93bf3793beefd349acb25b2a7482a4d12827d6b | refs/heads/master | 2021-09-09T16:22:06.717868 | 2018-02-15T14:03:34 | 2018-02-15T14:03:34 | 119,517,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,899 | py | from datetime import datetime
from hashlib import md5
from time import time
from flask import current_app
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
from app import db, login
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='author', lazy='dynamic')
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime, default=datetime.utcnow)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def avatar(self, size):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
digest, size)
def get_reset_password_token(self, expires_in=600):
return jwt.encode(
{'reset_password': self.id, 'exp': time() + expires_in},
current_app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8')
@staticmethod
def verify_reset_password_token(token):
try:
id = jwt.decode(token, current_app.config['SECRET_KEY'], algorithm=['HS256'])['reset_password']
except:
return
return User.query.get(id)
def __repr__(self):
return '<User {}>'.format(self.username)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Post {}>'.format(self.body)
@login.user_loader
def load_user(id):
return User.query.get(int(id)) | [
"ludovic.kahl@gmail.com"
] | ludovic.kahl@gmail.com |
2261cf66860e5e03da76218a1e66eb199a78667d | fc66f771e95ee36cd502d3cf7220794e6f263226 | /src/utils/at.py | ca3ac83c786efd55252a4fe7853b8b4d9a002805 | [
"MIT"
] | permissive | yuanniufei/IncetOps | 2bcb7851514f3db6bc409746d245da08032ecc06 | e21185a4931a10996a187e63f4487b4402544c69 | refs/heads/master | 2020-03-25T20:50:02.339329 | 2018-08-09T07:35:02 | 2018-08-09T07:35:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,639 | py | # -*- coding: utf-8 -*-
"""
IncetOps.utils.at
~~~~~~~~~~~~~~
AES加密的实现模式CBC。
CBC使用密码和salt(起扰乱作用)按固定算法(md5)产生key和iv。然后用key和iv(初始向量,加密第一块明文)加密(明文)和解密(密文)。
:copyright: (c) 2018 by staugur.
:license: MIT, see LICENSE for more details.
"""
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
class KeyGenerationClass():
"""密钥生成器"""
def __init__(self, key):
self.key = key
self.mode = AES.MODE_CBC
def encrypt(self, text):
#加密函数,如果text不是16的倍数【加密文本text必须为16的倍数!】,那就补足为16的倍数
cryptor = AES.new(self.key, self.mode, self.key)
#这里密钥key 长度必须为16(AES-128)、24(AES-192)、或32(AES-256)Bytes 长度.目前AES-128足够用
length = 16
count = len(text)
add = length - (count % length)
text = text + ('\0' * add)
self.ciphertext = cryptor.encrypt(text)
#因为AES加密时候得到的字符串不一定是ascii字符集的,输出到终端或者保存时候可能存在问题
#所以这里统一把加密后的字符串转化为16进制字符串
return b2a_hex(self.ciphertext)
def decrypt(self, text):
#解密后,去掉补足的空格用strip() 去掉
cryptor = AES.new(self.key, self.mode, self.key)
plain_text = cryptor.decrypt(a2b_hex(text))
return plain_text.rstrip('\0') | [
"staugur@vip.qq.com"
] | staugur@vip.qq.com |
35ef8ca68b756962f165919cf8232b4e92cf6f2c | 7375c164c1988befb259681c029e21fbc3ff37ee | /todowoo/urls.py | 78a8d9073531e0d5e814f88b55276f758aa8d549 | [] | no_license | python-the-snake/Django3_todowoo | 0ad40ac8b7a0ae14d82568c7fb757108c8d2c875 | 2db5a25401b7663c3d9245901227672aa8cad879 | refs/heads/master | 2022-11-19T14:34:26.176650 | 2020-07-14T10:07:21 | 2020-07-14T10:07:21 | 271,991,868 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | """todowoo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from todo import views
import numba
urlpatterns = [
path('admin/', admin.site.urls),
# Auth
path('signup/', views.signupuser, name='signupuser'),
path('login/', views.loginuser, name='loginuser'),
path('logout/', views.logoutuser, name='logoutuser'),
# Todos
path('', views.home, name='home'),
path('create/', views.createtodo, name='createtodo'),
path('current/', views.currenttodos, name='currenttodos'),
path('completed/', views.completedtodos, name='completedtodos'),
path('todo/<int:todo_pk>', views.viewtodo, name='viewtodo'),
path('todo/<int:todo_pk>/complete', views.completetodo, name='completetodo'),
path('todo/<int:todo_pk>/delete', views.deletetodo, name='deletetodo'),
]
| [
"dmitriy-shalimov@yandex.ru"
] | dmitriy-shalimov@yandex.ru |
7297ba441c8c0b9dab5826a8bc9b803a2e5903ab | 332132ca9ee3471cdeae5a93dc5f255dfb1df627 | /view_counter/apps.py | 068fca926f3af76d612b636a41a93cc36ee8b299 | [] | no_license | shtormnick/Page_counter | 30518ba61daa78728425d03e96b4bf2edb53a6ee | cf3b7feafdb5712d6a65eff098812b7b892cfefe | refs/heads/master | 2022-06-29T17:45:00.815658 | 2020-05-11T09:02:39 | 2020-05-11T09:02:39 | 261,580,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from django.apps import AppConfig
class ViewCaunterConfig(AppConfig):
name = 'view_caunter'
| [
"foxred324@gmail.com"
] | foxred324@gmail.com |
76c7d338cb4b00b333780f614692ef669796fa1e | be0d0e9046ec5a97895dab9177e1d2bed9f575af | /KNN/classification_mnist_knn.py | dbd6609c0e2dcf3aa41584d444070438a56f6d5b | [] | no_license | caothetoan/machine_learning | a091699c3c709f2bc2797c0b0286df50f76fbce1 | fae39c60b61bd852bdd92d0b9f117e958acbc575 | refs/heads/master | 2021-07-04T15:19:02.950457 | 2020-08-16T16:13:03 | 2020-08-16T16:13:03 | 146,393,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | # %reset
import numpy as np
from mnist import MNIST # require `pip install python-mnist`
# https://pypi.python.org/pypi/python-mnist/
import matplotlib.pyplot as plt
from sklearn import neighbors
from sklearn.metrics import accuracy_score
import time
# you need to download the MNIST dataset first
# at: http://yann.lecun.com/exdb/mnist/
mndata = MNIST('../MNIST/') # path to your MNIST folder
mndata.load_testing()
mndata.load_training()
X_test = mndata.test_images
X_train = mndata.train_images
y_test = np.asarray(mndata.test_labels)
y_train = np.asarray(mndata.train_labels)
start_time = time.time()
clf = neighbors.KNeighborsClassifier(n_neighbors = 1, p = 2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
end_time = time.time()
print( "Accuracy of 1NN for MNIST: %.2f %%" %(100*accuracy_score(y_test, y_pred)) )
print("Running time: %.2f (s)" % (end_time - start_time)) | [
"caothetoan@gmail.com"
] | caothetoan@gmail.com |
4e107f975e9b205c04868eafff741a552f4302c0 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/gos_20200614060821.py | 3db5babdabe6fb83a5ab594602a18e4fa77fbc59 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,896 | py | # # Імпорт фажливих бібліотек
# from BeautifulSoup import BeautifulSoup
# import urllib2
# import re
# # Створення функції пошуку силок
# def getLinks(url):
# # отримання та присвоєння контенту сторінки в змінну
# html_page = urllib2.urlopen(url)
# # Перетворення контенту в обєкт бібліотеки BeautifulSoup
# soup = BeautifulSoup(html_page)
# # створення пустого масиву для лінків
# links = []
# # ЗА ДОПОМОГОЮ ЧИКЛУ ПРОХЛДИМСЯ ПО ВСІХ ЕЛЕМЕНТАХ ДЕ Є СИЛКА
# for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
# # Додаємо всі силки в список
# links.append(link.get('href'))
# # повертаємо список
# return links
# -----------------------------------------------------------------------------------------------------------
# # # Імпорт фажливих бібліотек
# import subprocess
# # Створення циклу та використання функції range для генерації послідовних чисел
# for ping in range(1,10):
# # генерування IP адреси базуючись на номері ітерації
# address = "127.0.0." + str(ping)
# # виклик функції call яка робить запит на IP адрес та запис відповіді в змінну
# res = subprocess.call(['ping', '-c', '3', address])
# # За допомогою умовних операторів перевіряємо відповідь та виводимо результат
# if res == 0:
# print "ping to", address, "OK"
# elif res == 2:
# print "no response from", address
# else:
# print "ping to", address, "failed!"
# -----------------------------------------------------------------------------------------------------------
# # Імпорт фажливих бібліотек
# import requests
# # Ітеруємося по масиву з адресами зображень
# for i, pic_url in enumerate(["http://x.com/nanachi.jpg", "http://x.com/nezuko.jpg"]):
# # Відкриваємо файл базуючись на номері ітерації
# with open('pic{0}.jpg'.format(i), 'wb') as handle:
# # Отримуємо картинку
# response = requests.get(pic_url, stream=True)
# # Використовуючи умовний оператор перевіряємо чи успішно виконався запит
# if not response.ok:
# print(response)
# # Ітеруємося по байтах картинки та записуємо батчаси в 1024 до файлу
# for block in response.iter_content(1024):
# # Якщо байти закінчилися, завершуємо алгоритм
# if not block:
# break
# # Записуємо байти в файл
# handle.write(block)
# -----------------------------------------------------------------------------------------------------------
# # Створюємо клас для рахунку
# class Bank_Account:
# # В конструкторі ініціалізуємо рахунок як 0
# def __init__(self):
# self.balance=0
# print("Hello!!! Welcome to the Deposit & Withdrawal Machine")
# # В методі депозит, використовуючи функцію input() просимо ввести суму поповенння та додаємо цю суму до рахунку
# def deposit(self):
# amount=float(input("Enter amount to be Deposited: "))
# self.balance += amount
# print("\n Amount Deposited:",amount)
# # В методі депозит, використовуючи функцію input() просимо ввести суму отримання та віднімаємо цю суму від рахунку
# def withdraw(self):
# amount = float(input("Enter amount to be Withdrawn: "))
# # За допомогою умовного оператора перевіряємо чи достатнього грошей на рахунку
# if self.balance>=amount:
# self.balance-=amount
# print("\n You Withdrew:", amount)
# else:
# print("\n Insufficient balance ")
# # Виводимо бааланс на екран
# def display(self):
# print("\n Net Available Balance=",self.balance)
# # Створюємо рахунок
# s = Bank_Account()
# # Проводимо операції з рахунком
# s.deposit()
# s.withdraw()
# s.display()
# -----------------------------------------------------------------------------------------------------------
# Створюємо рекурсивну функцію яка приймає десяткове число
def decimalToBinary(n):
# перевіряємо чи число юільше 1
if(n > 1):
# Якщо так, ділемо на 2 юез остачі та рекурсивно викликаємо функцію
decimalToBinary(n//2)
# Якщо ні, виводимо на остачу ділення числа на 2
print(n%2, end=' ')
# Створюємо функцію яка приймає бі число
def binaryToDecimal(binary):
binary1 = binary
decimal, i, n = 0, 0, 0
while(binary != 0):
dec = binary % 10
decimal = decimal + dec * pow(2, i)
binary = binary//10
i += 1
print(decimal)
| [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
99f6eb1dcbc89a09e00875214f3d250ae91479e9 | d4d817614f870d2f1b1ad9edb5f6c40079b3f067 | /server/mysite/urls.py | efd846a3768fa02ba140fd4a15a4d1f1f03a8641 | [] | no_license | zloy276/ulsu_students_project | 46d02aff8ee820eb480acf15735015a41104b4f5 | 3723a4d3824a582b97d78968cd009799c233be2f | refs/heads/main | 2023-05-30T23:22:43.496404 | 2021-04-19T09:18:39 | 2021-04-19T09:18:39 | 350,739,637 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('nlp.urls'))
]
# test
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"mihan276@icloud.com"
] | mihan276@icloud.com |
046c163a076d51434d40ffd17124b8cbff4044dd | 2542514d6e9d87381ce9ef3ab15e2ca07f1e9bff | /2_Grading-132452.py | 09b9b443d67f16a1f201f7e476eb1fa0d7daf143 | [] | no_license | gaivits/code_Kidding | cb2404945866d76064d6a4982047b3bd3e830c0b | c97c1a78537dcbe52d23da0beac9a7debe70f544 | refs/heads/master | 2023-06-12T15:03:57.295712 | 2021-07-03T17:51:04 | 2021-07-03T17:51:04 | 237,955,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | a = int(input())
b = int(input())
c = int(input())
d = int(input())
n = a+b+c+d
if n >= 80:
print('A')
elif n >=75:
print('B+')
elif n>=70:
print('B')
elif n>=65:
print('C+')
elif n>=60:
print('C')
elif n>=55:
print('D+')
elif n>=50:
print('D')
else:
print('F')
| [
"noreply@github.com"
] | gaivits.noreply@github.com |
b0d54a8361f3157a93dad51a6fd6feb72e161750 | 28aa379cb384e964225e144d1c4dbd18996073d8 | /kick_project/manage.py | 5d9b9f41ae76280bdb056a0ac9f6b31844f30e5a | [] | no_license | mblasko09/Trace-Camp-projects | 43b7285d1f18ec96f6ef1676a997d46a293aab53 | 9401bfc476d21da1cd87cbb6404d31ef8b7133af | refs/heads/master | 2020-04-12T11:22:34.364887 | 2018-12-20T21:38:24 | 2018-12-20T21:38:24 | 162,457,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kick_project.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"mblask@194.161.21.198.tigernet.wifi.dyn.clemson.edu"
] | mblask@194.161.21.198.tigernet.wifi.dyn.clemson.edu |
786e8f6d3c14740e8ce83eac7bf53c2ad28f21ca | 5bdf57bb65a0798023fe73a95ae397440e80d4df | /n-mer_freq.py | 2859da21cc6eb2a8cfa9aeb35db10a274d794907 | [] | no_license | tanbwen/ViraMiner | 475eeb02666b3d59cabf57f57faaaf591e7e2818 | 941ee13c90119b7a5f42a16ae52517bc54daa6c8 | refs/heads/master | 2023-03-16T12:54:41.403473 | 2020-08-24T12:12:47 | 2020-08-24T12:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,903 | py | import numpy as np
from sklearn.metrics import confusion_matrix,roc_auc_score,accuracy_score
from helper_with_N import *
import itertools
import argparse
import pandas as pd
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
################################
##Read in the parameter values##
################################
parser = argparse.ArgumentParser()
parser.add_argument("--save_path", default = "models/kmer_FFnet") #model save name and location
parser.add_argument("--input_path", default = "data/cccccccccc") # data location
parser.add_argument("--nmer",type=int, default = 3) # which n-nmers we use
parser.add_argument("--NN",type=str2bool, default = False) # data location
parser.add_argument("--RF",type=str2bool, default = False) # data location
parser.add_argument("--LReg",type=str2bool, default = False) # data location
args = parser.parse_args()
#read in data
train_data = np.loadtxt(args.input_path+"_train.csv",delimiter="\t",dtype=np.uint8)
train_counts = train_data[:,:-1]
train_labels = train_data[:,-1]
del train_data
test_data = np.loadtxt(args.input_path+"_test.csv",delimiter="\t",dtype=np.uint8)
test_counts = test_data[:,:-1]
test_labels = test_data[:,-1]
del test_data
print "train data", train_counts.shape, " test data", test_counts.shape
if args.NN:
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=1)
model.fit(train_counts,train_labels) #neigbour
knn_preds = model.predict(test_counts)
print "KNN confusion_matrix\n", confusion_matrix(test_labels, knn_preds)
print "KNN accuracy", accuracy_score(test_labels, knn_preds)
assert False #stop here
###########
if args.RF:
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=1000,n_jobs=4)
print "starting to fit RF"
model.fit(train_counts,train_labels)
print "done fitting RF"
rf_preds_train = model.predict_proba(train_counts)
print "Random Forest TRAIN ROC area under the curve \n", roc_auc_score(train_labels, rf_preds_train[:,1])
rf_preds_test = model.predict_proba(test_counts)
print "Random Forest TEST ROC area under the curve \n", roc_auc_score(test_labels, rf_preds_test[:,1])
np.savetxt("rf_7mers_preds_test.txt", rf_preds_test[:,1], fmt="%.5f")
np.savetxt("rf_7mers_labels_test.txt", test_labels, fmt="%d")
assert False #stop here
############
if args.LReg:
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(fit_intercept=True, max_iter=1000,penalty="l1",C=0.01)
print "Lreg input data", train_counts.shape
model.fit(train_counts,train_labels)
print "done fitting Log Reg"
log_preds = model.predict(test_counts)
print "LogRegr confusion_matrix\n", confusion_matrix(test_labels, log_preds)
accuracy = model.score(train_counts,train_labels)
print "overall train accuracy", accuracy
accuracy = model.score(test_counts, test_labels)
print "overall test accuracy", accuracy
log_preds_vals = model.predict_proba(test_counts)
print "LogRegr TEST ROC area under the curve \n", roc_auc_score(test_labels, log_preds_vals[:,1]) # AUC=0.70
log_preds_vals = model.predict_proba(train_counts)
print "LogRegr TRAIN ROC area under the curve \n", roc_auc_score(train_labels, log_preds_vals[:,1]) # AUC=0.70
#prms = model.coef_
#print prms , "\n and bias/intercept: ", model.intercept_
#np.savetxt("log_weights_reference_2mers_500.txt",prms,delimiter=',')
assert False #stop here
###############################
####Defining the model#########
###############################
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Conv1D, concatenate, Dropout, Flatten, MaxPooling1D,Reshape
from keras.layers import GlobalMaxPooling1D,GlobalAveragePooling1D
from keras.callbacks import ModelCheckpoint, Callback, LearningRateScheduler
from keras.optimizers import Adam, Nadam, SGD,RMSprop,Adagrad,Adadelta
from keras.initializers import RandomUniform
model = Sequential()
if args.nmer==3:
model.add(Dense(256,input_shape=(64,),activation="relu"))
elif args.nmer==1:
model.add(Dense(256,input_shape=(4,),activation="relu"))
elif args.nmer==2:
model.add(Dense(256,input_shape=(16,),activation="relu"))
elif args.nmer==4:
model.add(Dense(256,input_shape=(256,),activation="relu"))
elif args.nmer==5:
model.add(Dense(256,input_shape=(1024,),activation="relu"))
else:
assert False
model.add(Dense(256,activation="relu"))
model.add(Dense(256,activation="relu"))
model.add(Dense(1,activation="sigmoid", use_bias=True))
model.compile(optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True,clipnorm=0.5), loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
###############################
####Fitting the model##########
###############################
#saving the model
callbacks = [ModelCheckpoint(filepath=args.save_path+".hdf5", verbose=1, save_best_only=False)]
callbacks.append(LearningRateScheduler(char_lrate_decay))
# This fitting procedure feeds datapoints to the network one by one
model.fit(train_counts,train_labels,batch_size=100, epochs=200, callbacks=callbacks,verbose=1, validation_data=(test_counts,test_labels)) #steps per epoch is set by the smallest training set size
###############################
####Testing the model##########
###############################
print "TRAIN eval:", model.evaluate(train_counts,train_labels)
print "TEST eval:", model.evaluate(test_counts,test_labels)
print "##########################"
pred_probas = model.predict(test_counts)
print np.shape(pred_probas), type(pred_probas)
preds = pred_probas>0.5
print pred_probas[:10],preds[:10]
print "confusion_matrix\n", confusion_matrix(test_labels, np.array(preds,dtype=int))
print "ROC area under the curve \n", roc_auc_score(test_labels, pred_probas)
#np.savetxt("30N_cls_cnf_predictions.csv",pred_matrix[:,:3],delimiter=',', fmt=["%d","%d","%.3f"])
| [
"neurocsut@gmail.com"
] | neurocsut@gmail.com |
b0a03beaebad91464458a98d735bd08142322c1c | f5402ef17bab15cc2512616db8e00c2e25b27e3e | /sngconnect/announcements/forms.py | cb0d5cad499b429953f9ae39f2f40ce976a8b52c | [] | no_license | fikander/sngtec-sngconnect | 2b5b804513d5ff94be8a0a9f5b7ee62aa3151034 | 34f6188247d2e7d6dd8cc5f358e8910c9f722560 | refs/heads/master | 2021-01-17T20:01:05.694515 | 2013-09-07T16:40:11 | 2013-09-07T16:40:11 | 62,445,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from wtforms import fields, validators
from sngconnect.forms import SecureForm
from sngconnect.translation import _
class CreateAnnouncementForm(SecureForm):
content = fields.TextAreaField(
_("Content"),
validators=(
validators.DataRequired(message=_("This field is required.")),
validators.Length(min=5, max=100000),
)
)
| [
"m.siedlarek@fatico.com"
] | m.siedlarek@fatico.com |
7b39357643d6f4b36235e1711c80c6434fd3e428 | 4c0ca5fa550efc572e18169328b0b961fe5b3849 | /edabit_challenges/get_sum_of_elements.py | 671198e0d30c7c29d3483b1e0f91dd772e0bd625 | [] | no_license | c-romano/code_practice | fd5e6f3b0ede8d103ddef1235ec6b3ba0c173e10 | 60b3f0806f1c055497bd10701d7498b672247d0b | refs/heads/master | 2023-07-03T11:50:14.140941 | 2021-07-22T21:15:02 | 2021-07-22T21:15:02 | 274,518,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | # takes a list and returns sum
# optimal solution
get_sum_of_elements=sum
# alternate
def get_sum_of_elements2(lst):
return sum(lst)
# my corrected solution
def get_sum_of_elements3(lst):
sum = 0
if len(lst) > 0:
for x in lst:
sum += x
return sum
else:
return ''
print(get_sum_of_elements([1,2,3]))
| [
"conortheginger@gmail.com"
] | conortheginger@gmail.com |
225cc84d1b8df33afa6b99407f6dad6ab6b09f7f | 1d007e58c7739f36bdb85cb9aa13b3f4584cdfb9 | /day1/day1/urls.py | 28f2a39c7c890b2c071f918d1dcef7609bddfad4 | [] | no_license | rahuladream/Django-Challenge | 65410f053c06f2556628b449b817244dac32e1ac | 0b81a6b69b743799f3d8562d6ec784950980716c | refs/heads/master | 2020-06-14T14:19:54.370195 | 2019-07-03T13:02:07 | 2019-07-03T13:02:07 | 195,024,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | """day1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# from . import admin_site
urlpatterns = [
path('polls/', include('polls.urls', namespace="polls")),
# path('myadmin/', include('admin_site.urls')), # grappelli URLS
path('admin/', admin.site.urls),
]
| [
"rahul.singh4800@gmail.com"
] | rahul.singh4800@gmail.com |
c435c4a601960625cb708fde9fbd0bf1c3785c31 | ecb97bcfe87c76ae37446bbb715e9e703f00a789 | /udpdebug.py | ae5703ace398c9604387d58e628254ab9134c956 | [
"MIT"
] | permissive | winxos/python | 056eed745746146cae46130ce657217e8e37845e | 22f57a5dfa6366b0d149dfd81203b11ed258f9d3 | refs/heads/master | 2020-06-04T06:23:07.262773 | 2017-06-20T03:46:04 | 2017-06-20T03:46:04 | 37,751,553 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | # -*- coding: utf-8 -*-
'''
p2p server.
winxos 2015-12-04
'''
import socket
import threading
import os
import time
port = 9010
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # DGRAM -> UDP
is_exit = False
class getcmd(threading.Thread):
global s, clients
def __init__(self):
threading.Thread.__init__(self)
def run(self):
while not is_exit:
try:
cmd = raw_input()
cmds = cmd.split()
op = cmds[0]
if op == "to":
if cmds[3].isdigit():#port range
for i in range(int(cmds[2]),int(cmds[3])):
s.sendto(" ".join(cmds[4:]), (cmds[1],i))
if i%1000==0:
time.sleep(1);
else:
s.sendto(" ".join(cmds[3:]), (cmds[1],int(cmds[2])))
except Exception, e:
print("[shell err] %s" % e)
class listener(threading.Thread):
global s, clients
def __init__(self):
threading.Thread.__init__(self)
def run(self):
while not is_exit:
try:
data, addr = s.recvfrom(1024)
print("%s:%s" % (addr, data))
except Exception, e:
print("[listen err] %s" % e)
if __name__ == '__main__':
port = raw_input("input port:")
s.bind(('0.0.0.0', int(port)))
t = getcmd()
t.setDaemon(True) # important
t.start()
l = listener()
l.setDaemon(True)
l.start()
print("listening...")
try:
while t.isAlive() and l.isAlive():
pass
except KeyboardInterrupt:
print("[sys err] user stop.")
is_exit = True
print("server exit.")
os._exit(0)
| [
"winxos@hotmail.com"
] | winxos@hotmail.com |
1b6741fe528edc00851aec30898bc2139cc337c5 | 7a76bec230b228c9778d69b0aaa8cdfd542b59d6 | /Demo/retest.py | 181fd19a9393e6b131faec0a5b7188edc0bee026 | [] | no_license | liyongc0427/120Django | 260f4321fe173d59cc5ae107d0b9c4bcb21cd616 | 04a73760daf68726caf7429dbcf865bc5db78a6a | refs/heads/master | 2021-01-02T19:05:02.834866 | 2020-02-11T12:32:14 | 2020-02-11T12:32:14 | 239,756,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | py | """
正则:是一种字符串的处理方式,用于字符串匹配
字符串的匹配分为两种方式:
内容匹配:
例如:
python中的re模块(re模块含义:处理正则的一个模块)
js中匹配
通过要匹配的内容类型,长度进行匹配
结构匹配:
xpath(获取到某个内容的标签进行匹配),通过获取内容在整个文档中的结构 进行匹配
html div p img
内容匹配讲解:
类型匹配
原样匹配 . \d \D \w \W [] | ()
长度匹配
* + ? {}
特殊匹配
^ $
"""
import re
# re.findall() 尽可能 以列表的形式 返回结果
#原样匹配 . \d \D \w \W [] | ()
# str = "hello \n \t wORld ___ 123"
#原样匹配 在str中匹配前面的内容
# res = re.findall("ll",str)
# print(res)
#. 匹配除了\n 的所有内容
# res = re.findall(".",str)
# print(res)
#\d 匹配数字
#\D 匹配除了数字的所有内容
# \w 匹配字母 数字 下划线
# \W 匹配除了字母 数字 下划线的内容
#[] 返回符合 括号 中的内容
# res = re.findall("[a-zA-Z1-9]",str)
# print(res)
# | 返回任意一边符合的内容 (在str中存在的内容)
# res = re.findall("hello|wORld",str)
# print(res)
# 组匹配 # () 组 含义:先以 将括号外面的内容当做条件进行匹配
# str = "hllo \n \t wORld ___ 123"
# res = re.findall("(\w)l",str)
# #先将str中内容 以\w为条件进行匹配后,并且满足后面字母为l。 注意明白ll re.findall() 匹配所有的不重叠的匹配成功的部分
# print(res)
# str = "123 444 554"
# res = re.findall("(\d)4",str)
# print(res)
# 组匹配 起 组名
#(?P<id>\d):起组名 组名叫id; 不会影响结果与上面结果相同
# str = "123 444 554"
# res = re.findall("(?P<id>\d)4",str)
# print(res) # 4 5
##调用组名的内容
##(?P=id):使用 前面的匹配结果 \d44 \d45
# str = "123 444 554"
# res = re.findall("(?P<id>\d)4(?P=id)",str)
# print(res)
#长度匹配
str = "hllo \n \t wORld ___ 123"
# # * 匹配 0个:(0个所以打印"") 或 多个 满足条件的
# res = re.findall("\d*",str)
# # + 匹配1个或者多个 (:不存在0个 所以没有"")
# res = re.findall("\d+",str)
# #? 匹配0个 或者 1个
# res = re.findall("\d?",str)
# #{} 匹配多次,匹配{}内指定的次数
# res = re.findall("\d{1}",str)
# # print(res)
#特殊匹配
#^ 匹配以什么开头
res = re.findall("^hl",str)
print(res)
# $ 匹配以什么结束
res = re.findall("3$",str)
print(res)
| [
"1073029629@qq.com"
] | 1073029629@qq.com |
aadea7c7b59d25826bf1369514b6a919f0c140a6 | 752898bc395a17b2d646278915c7d60186517bfd | /EBV genome deletion/virus_deletion_main.py | 6873f79b2b1d6beeb6644a7b53fe81e244e95261 | [] | no_license | hanbw/ebv_script | 3677eabdf236a26fb40ff442e157286472ca56b2 | 67e18854115ced3f62a8b324be42fe6e2bade12b | refs/heads/master | 2020-03-16T17:36:20.004646 | 2018-05-13T08:08:03 | 2018-05-13T08:08:03 | 132,839,791 | 4 | 1 | null | 2018-05-13T08:04:12 | 2018-05-10T02:48:49 | Python | UTF-8 | Python | false | false | 13,665 | py | #! /data/home/hanbw/software/anaconda2/bin/python2.7
import os
import sys
import optparse
import time
import threading
LIB_PATH = './'
sys.path.append(LIB_PATH)
import virus_deletion_subs
# PATH to software
Trim_galore_PATH = "~/bin/trim_galore"
BWA_PATH = "~/bin/bwa-0.7.12/bwa"
Samtools_PATH = "~/bin/samtools-1.2/samtools"
SortSam_PATH = "~/bin/picard-tools-1.119/SortSam.jar"
Picard_PATH = "~/bin/picard/1.127.11/dist/picard.jar"
AdapterRemoval_PATH = "~/bin/AdaptorRemoval/2.1.7/bin/AdapterRemoval"
HISAT_PATH = "~/bin/hisat2"
bedtools_intersectBed_path = "~/bin/bedtools/2.23.0/bin/intersectBed"
bedtools_path = "~/bin/bedtools/2.23.0/bin/bedtools"
# PATH to annotation
hg19_PATH = "~/database/hg19/ucsc_hg19"
hg19_EBV_PATH = "~/database/hg19_EBV/hg19_ebv_all"
EBV_NCBI = "~/database/hg19_EBV/ebv_NC_007605.1"
EBV_PATH = "~/hg19_EBV/EBV_genome_common"
repeatmasker_annotation_file_path = "~/database/hg19/hg19.repeatmasker.anno"
# get arguments
optParser = optparse.OptionParser(
usage="%prog -g [host_genome] -v [virus_genome] -s [virus_standard_genome] -c [combined_genome] -1 <fastq_file_1> -2 <fastq_file_2>",
description=
"This script analyse virus integration sites in both virus and host genome",
epilog=
"Written by Han Bowei (hanbw@sysucc.org), SYSUCC, Dec 2016 \n"
)
optParser.add_option("-1", "--fastq1", type="string", dest="fastq1", default="",
help="input fastq file 1")
optParser.add_option("-2", "--fastq2", type="string", dest="fastq2", default="",
help="input fastq file 2")
optParser.add_option("-g", "--host_genome", type="string", dest="host_genome", default=hg19_PATH,
help="pathway to host genome (default: hg19)")
optParser.add_option("-v", "--virus_genome", type="string", dest="virus_genome", default=EBV_PATH,
help="pathway to virus genome (default: EBV)")
optParser.add_option("-s", "--virus_standard_genome", type="string", dest="virus_standard_genome", default=EBV_NCBI,
help="pathway to standard virus genome (default: EBV_NC_007605.1)")
optParser.add_option("-c", "--combined_genome", type="string", dest="combined_genome", default=hg19_EBV_PATH,
help="pathway to combined genome (default: hg19 + EBV)")
optParser.add_option("-p", "--project_prefix", type="string", dest="prefix", default="",
help="sample prefix")
optParser.add_option("-m", "--merge", type="choice", dest="merge_reads",
choices = ( "True", "T", "False", "F" ), default="False",
help="merge paired reads into a long read (True/False, default: False)")
optParser.add_option("-t", "--thread", type="string", dest="thread", default="8",
help="thread to use (default: 8)")
optParser.add_option("-r", "--run_merging_result", type="choice", dest="run_merge",
choices = ("True", "T", "False", "F"), default="True",
help="Run steps to merge result (default: True)")
optParser.add_option("-i", "--insertion_length", type="int", dest="insertion_length",
default=450, help="Max insertion length of sequencing library (default: 450)")
# optParser.add_option("-t", "--type", type="choice", dest="vcf_type",
# choices = ( "RNA", "DNA" ), default="RNA",
# help="VCF file generate from RNA-seq or DNA-seq (default: RNA)")
if len(sys.argv) == 1:
optParser.print_help()
sys.exit(1)
(opts, args) = optParser.parse_args()
if opts.fastq1 == "" or opts.fastq2 == "":
if opts.prefix != "" and os.path.exists(opts.prefix + "step01.sig"):
opts.fastq1 = opts.prefix + "_R1.fastq.gz"
opts.fastq2 = opts.prefix + "_R2.fastq.gz"
else:
sys.stderr.write(sys.argv[0] + ": Error: Please provide fastq file or file prefix.\n")
sys.stderr.write(" Call with '-h' to get usage information.\n")
sys.exit(1)
if opts.prefix == "":
prefix = opts.fastq1.split("/")[-1].split("_R1")[0]
else:
prefix = opts.prefix
# Step 1. Data cleaning
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sys.stdout.write("Step 1. Data cleaning " + current_time +"\n")
if os.path.exists(prefix+"_Vdl_step01.sig"):
sys.stdout.write("Step 1 result files exist, pass.\n")
fastq_clean_1 = opts.fastq1.split(".")[0] + "_val_1.fq"
fastq_clean_2 = opts.fastq2.split(".")[0] + "_val_2.fq"
else:
fastq_clean_1, fastq_clean_2 = virus_deletion_subs.data_clean(Trim_galore_PATH, opts.fastq1, opts.fastq2)
print opts.fastq1 + " and " + opts.fastq2
with open(prefix+"_Vdl_step01.sig", "w") as sig_file:
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sig_file.write(current_time)
# Step 2. Mapping to hg19 and EBV
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sys.stdout.write("Step 2. Mapping to EBV " + current_time +"\n")
if os.path.exists(prefix + "_Vdl_step02.sig"):
sys.stdout.write("Step 2 result files exist, pass. \n")
fastq_EBV_1 = fastq_clean_1.split(".")[0] + "_read1.EBV.fq"
fastq_EBV_2 = fastq_clean_2.split(".")[0] + "_read2.EBV.fq"
else:
outsam_hg19_1 = prefix + "_read1.hg19.sam"
outsam_hg19_2 = prefix + "_read2.hg19.sam"
postfix = ".hg19.fq"
virus_deletion_subs.mapping_single_end(BWA_PATH, prefix, hg19_PATH, fastq_clean_1, outsam_hg19_1, opts.thread)
virus_deletion_subs.mapping_single_end(BWA_PATH, prefix, hg19_PATH, fastq_clean_2, outsam_hg19_2, opts.thread)
fastq_hg19_1, fastq_hg19_2 = virus_deletion_subs.removing_mapped_intersection(outsam_hg19_1, outsam_hg19_2, fastq_clean_1, fastq_clean_2, postfix)
outsam_EBV_1 = prefix + "_read1.EBV.sam"
outsam_EBV_2 = prefix + "_read2.EBV.sam"
postfix = ".EBV.fq"
virus_deletion_subs.mapping_single_end(BWA_PATH, prefix, EBV_PATH, fastq_hg19_1, outsam_EBV_1, opts.thread)
virus_deletion_subs.mapping_single_end(BWA_PATH, prefix, EBV_PATH, fastq_hg19_2, outsam_EBV_2, opts.thread)
fastq_EBV_1, fastq_EBV_2 = virus_deletion_subs.removing_mapped_intersection(outsam_EBV_1, outsam_EBV_2, fastq_hg19_1, fastq_hg19_2, postfix)
os.system("rm -f %s %s %s %s" % (outsam_EBV_1, outsam_EBV_2, outsam_hg19_1, outsam_hg19_2))
with open(prefix+"_Vdl_step02.sig", "w") as sig_file:
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sig_file.write(current_time)
# Step 3. Collapsing reads and remapping
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sys.stdout.write("Step 3. Collapsing reads " + current_time +"\n")
if os.path.exists(prefix + "_Vdl_step03.sig"):
sys.stdout.write("Step 3 result files exist, pass. \n")
outsam_merge_reads = prefix + "_merge.sam"
outsam_single_read = prefix + "_single.sam"
if opts.merge_reads == "True" or "T":
merge_reads, single_reads_1, single_reads_2 = (prefix + ".collapsed", prefix + ".pair1.truncated", prefix + ".pair2.truncated")
outsam_merge_reads, outsam_single_read = (prefix + "_merge.sam", prefix + "_single.sam")
else:
merge_reads, single_reads_1, single_reads_2 = ("", fastq_EBV_1, fastq_EBV_2)
outsam_merge_reads, outsam_single_read = ("", prefix + "single.sam")
else:
virus_deletion_subs.fastq_tab(fastq_EBV_1)
virus_deletion_subs.fastq_tab(fastq_EBV_2)
try:
virus_deletion_subs.fastq_tab(fastq_hg19_1)
virus_deletion_subs.fastq_tab(fastq_hg19_2)
except:
pass
if opts.merge_reads == "True" or "T": # reads go through insert fragments (e.g. MISEQ for targeting sequencing)
merge_reads, single_reads_1, single_reads_2 = virus_deletion_subs.collapsing_reads(AdapterRemoval_PATH, prefix, fastq_EBV_1, fastq_EBV_2, opts.thread)
outsam_merge_reads = prefix + "_merge.sam"
outsam_single_read = prefix + "_single.sam"
virus_deletion_subs.hisat_mapping_single_end(HISAT_PATH, EBV_PATH, merge_reads, outsam_merge_reads, opts.thread)
virus_deletion_subs.hisat_mapping_paired_ends(HISAT_PATH, EBV_PATH, single_reads_1, single_reads_2, outsam_single_read, opts.thread)
else: # reads not go through insert fragments (e.g. HISEQ for WGS)
outsam_merge_reads, merge_reads = ("", "")
outsam_single_read = prefix + "single.sam"
single_reads_1,single_reads_2 = fastq_EBV_1, fastq_EBV_2
virus_deletion_subs.hisat_mapping_paired_ends(HISAT_PATH, EBV_PATH, fastq_EBV_1, fastq_EBV_2, outsam_single_read, opts.thread)
with open(prefix + "_Vdl_step03.sig", "w") as sig_file:
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sig_file.write(current_time)
# Step 4. Picking gap reads
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sys.stdout.write("Step 4. Picking gap reads " + current_time +"\n")
merge_clipping_reads_fq = merge_reads + ".clipping.fq"
single_clipping_reads_fq_1 = prefix + ".clipping_read1.fq"
single_clipping_reads_fq_2 = prefix + ".clipping_read2.fq"
if os.path.exists(prefix + "_Vdl_step04.sig"):
sys.stdout.write("Step 4 result files exist, pass. \n")
else:
if opts.merge_reads == "True" or "T":
merge_clipping_reads = virus_deletion_subs.analyze_merged_deletion_reads(outsam_merge_reads)
virus_deletion_subs.delete_reads_ori(merge_reads, merge_clipping_reads_fq, merge_clipping_reads)
#del merge_clipping_reads
single_clipping_reads = virus_deletion_subs.analyze_single_deletion_reads(outsam_single_read)
virus_deletion_subs.delete_reads_ori(single_reads_1, single_clipping_reads_fq_1, single_clipping_reads)
virus_deletion_subs.delete_reads_ori(single_reads_2, single_clipping_reads_fq_2, single_clipping_reads)
#del single_clipping_reads
# os.system("rm -f %s %s " % (outsam_merge_reads, outsam_single_read))
else:
single_clipping_reads = virus_deletion_subs.analyze_single_deletion_reads(outsam_single_read)
virus_deletion_subs.delete_reads_ori(single_reads_1, single_clipping_reads_fq_1, single_clipping_reads)
virus_deletion_subs.delete_reads_ori(single_reads_2, single_clipping_reads_fq_2, single_clipping_reads)
#del single_clipping_reads
# os.system("rm -f %s" % (outsam_single_read))
# os.system("rm -f %s %s %s " % (merge_reads, single_reads_1, single_reads_2))
with open(prefix + "_Vdl_step04.sig", "w") as sig_file:
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sig_file.write(current_time)
# Step 5. Remapping gap reads
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sys.stdout.write("Step 5. Remapping gap reads " + current_time +"\n")
all_pos_list = prefix + "_all_del_pos.txt"
if os.path.exists(prefix + "_Vdl_step05.sig"):
sys.stdout.write("Step 5 result files exist, pass. \n")
else:
outsam_clipping_merge_reads = prefix + ".merge_clipping.sam"
outsam_clipping_single_reads = prefix + ".single_clipping.sam"
library_max_size = 2000 # sequencing library insertion size
if opts.merge_reads == "True" or "T":
virus_deletion_subs.hisat_mapping_single_end(HISAT_PATH, EBV_NCBI, merge_clipping_reads_fq, outsam_clipping_merge_reads, opts.thread)
virus_deletion_subs.hisat_mapping_paired_ends(HISAT_PATH, EBV_NCBI, single_clipping_reads_fq_1, single_clipping_reads_fq_2, outsam_clipping_single_reads, opts.thread)
merge_pos_list = virus_deletion_subs.output_merged_deletion_reads(outsam_clipping_merge_reads)
single_pos_list = virus_deletion_subs.output_single_deletion_reads(outsam_clipping_single_reads, library_max_size)
os.system("cat %s %s > %s" % (single_pos_list, merge_pos_list, all_pos_list))
os.system("rm -f %s %s %s %s " % (outsam_clipping_merge_reads, outsam_clipping_single_reads, single_pos_list, merge_pos_list))
else:
virus_deletion_subs.hisat_mapping_paired_ends(HISAT_PATH, EBV_NCBI, single_clipping_reads_fq_1, single_clipping_reads_fq_2, outsam_clipping_single_reads, opts.thread)
single_pos_list = virus_deletion_subs.output_single_deletion_reads(outsam_clipping_single_reads, library_max_size)
os.system("mv %s %s" % (single_pos_list, all_pos_list))
os.system("rm -f %s " % (outsam_clipping_single_reads))
with open(prefix + "_Vdl_step05.sig", "w") as sig_file:
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sig_file.write(current_time)
# Step 6. Repeatmasker and GC filter
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sys.stdout.write("Step 6. Repeatmasker and GC filter " + current_time +"\n")
if os.path.exists(prefix + "_Vdl_step06.sig"):
sys.stdout.write("Step 6 result files exist, pass. \n")
all_pos_list_filter = prefix + "_all_del_pos_filter.txt"
pos_dict = virus_deletion_subs.read_pos_list(all_pos_list_filter)
else:
# read position list
pos_dict = virus_deletion_subs.read_pos_list(all_pos_list)
# bedtools filtering repeatmasker region
bedtools_tmp_name = prefix + "_repeatmasker_tmp.bed"
pos_dict = virus_deletion_subs.remove_repeat_masker_bedtools(pos_dict, bedtools_tmp_name, bedtools_intersectBed_path, repeatmasker_annotation_file_path)
# bedtools filtering GC percentage
bedtools_tmp_name = prefix + "_GC_tmp.bed"
pos_dict = virus_deletion_subs.gc_adjust(pos_dict, bedtools_tmp_name, bedtools_path, EBV_NCBI)
# output result
all_pos_list_filter = prefix + "_all_del_pos_filter.txt"
with open(all_pos_list_filter, "w") as pos_list_filter:
pos_list_filter.write("read_ID\tread_start\tgap_start\tgap_end\tread_end\tdel_size\tother_info\n")
for bam_id, pos_list in pos_dict.iteritems():
pos_list_filter.write("\t".join(pos_list) + "\n")
with open(prefix + "_Vdl_step06.sig", "w") as sig_file:
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sig_file.write(current_time)
# Step 7. Group positions into windows and write positions with read counts
if opts.run_merge == "True":
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sys.stdout.write("Step 7. Grouping positions into windows and writing positions with read counts " + current_time +"\n")
if os.path.exists(prefix + "_Vdl_step07.sig"):
sys.stdout.write("Step 7 result files exist, pass. \n")
else:
output_file_name = prefix + "_del_distribution.txt"
virus_deletion_subs.group_pos(pos_dict, output_file_name)
with open(prefix + "_Vdl_step07.sig", "w") as sig_file:
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
sig_file.write(current_time)
| [
"hanbw@users.noreply.github.com"
] | hanbw@users.noreply.github.com |
6e23a61c7d0442b27a1eda15329cfefdd1f45cd2 | d807a3baa054d00fb0136cc2b04187b04c8bf9f2 | /checkout/models.py | 06eb6730f32bebcaccfcea0cfbf99061ec16ace1 | [] | no_license | Code-Institute-Submissions/SuperGibbon-MS4 | c14a093cd3329a7cb1e696d8cbb028dfeaf44d50 | cb9143058895d3b1e2365df256dea7416f184d44 | refs/heads/main | 2023-07-09T12:31:33.139733 | 2021-08-09T13:02:13 | 2021-08-09T13:02:13 | 394,693,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | import uuid
from django.db import models
from django.conf import settings
from profiles.models import UserProfile
class Order(models.Model):
order_number = models.CharField(max_length=32, null=False, editable=False)
user_profile = models.ForeignKey(UserProfile, on_delete=models.SET_NULL,
null=True, blank=True,
related_name='orders')
full_name = models.CharField(max_length=50, null=False, blank=False)
email = models.EmailField(max_length=254, null=False, blank=False)
date = models.DateTimeField(auto_now_add=True)
order_total = models.DecimalField(max_digits=10, decimal_places=2,
null=False, default=0)
def _generate_order_number(self):
return uuid.uuid4().hex.upper()
def save(self, *args, **kwargs):
if not self.order_number:
self.order_number = self._generate_order_number()
super().save(*args, **kwargs)
def __str__(self):
return self.order_number
| [
"super_gibbon@hotmail.com"
] | super_gibbon@hotmail.com |
949ec44663b9181db5548c8f3a054238b153f16c | ba628db2302825adbb1151613248405e7dfba2b5 | /排序算法/插入排序.py | 1404ab2606e4f030b2654e1b01000cd5bf84b8a9 | [] | no_license | EricWangyz/Exercises | 5257840503e2f22d8c8fc8f26e0d1d77fcc627fd | 5647b418cc932cceb9e311fec016c089ca7d22d5 | refs/heads/master | 2022-04-21T02:31:46.768537 | 2020-04-20T17:10:20 | 2020-04-20T17:10:20 | 257,349,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/7 11:15
# @Author : Eric Wang
# @File : 插入排序.py
def insertionSort(nums):
'''
分为有序列表和无序列表,将无序列表中小于有序列表最后一个数的数插入到有序列表中合适的位置
:param nums:
:return:
'''
length = len(nums)
for i in range(1, length):
j = i -1
if nums[i] < nums[j]:
tmp = nums[i]
nums[i] = nums[j]
j = j - 1
while j >= 0 and nums[j] > tmp:
nums[j+1] = nums[j]
j = j - 1
nums[j+1] = tmp
return nums
# def insert(nums):
# length = len(nums)
# for i in range(1,length):
# j = i - 1
#
# if nums[i] < nums[j]:
# tmp = nums[i]
# nums[i] = nums[j]
#
# j = j - 1
# while j > 0 and nums[j] > tmp:
# nums[j+1] = nums[j]
# j = j -1
# nums[j+1] = tmp
def insert(nums):
length = len(nums)
for i in range(1, length):
j = j - 1
if nums[i] < nums[j]:
tmp = nums[i]
nums[i] = nums[j]
j = j - 1
while j > 0 and nums[j] > tmp:
nums[j+1] = nums[j]
j = j - 1
nums[j+1] = tmp
return nums
| [
"eric_wangyz@foxmail.com"
] | eric_wangyz@foxmail.com |
661db6856baca6b9f360cd409aee8237450c09dc | 9e64e562f73cad9f74da1cd643ba74e920915ec9 | /player.py | 3c5db729e2c74accbe3cc2a37af1bf4fba7e1c5a | [] | no_license | tommyli3318/DiscordBattleBot | 53c3a9ef648bf92f5d621206ae240f8a3d7edba2 | e6f8986ab4220783bc728087c134808276fe31f0 | refs/heads/master | 2023-05-19T16:25:07.165245 | 2021-06-09T00:24:26 | 2021-06-09T00:24:26 | 367,477,770 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | class Player:
def __init__(self, name, base_damage=10):
self.name = name
self.health = 100
self.base_damage = base_damage
self.armor = 0
self.lifesteal = 0
self.crit = .05 # crit chance
self.spell = None
self.items = set()
| [
"tommyli3318@gmail.com"
] | tommyli3318@gmail.com |
d66c46d60deb88604a47bf22874b86741218b0cb | 6bbe7809113378c56fc18637d8983ee82dee5ad7 | /main.py | ff0f557d192f8060475ab1cdbe63cf2512c9ac49 | [] | no_license | jmattern/FastSLAM-Humanoid-Robot | a25483ecec55918870c7fdedfe1a09c20f97fe2c | d92dee8650ae7c842ef4e7a601cd1604c44db102 | refs/heads/master | 2020-03-21T01:48:57.004377 | 2018-06-22T01:01:12 | 2018-06-22T01:01:12 | 137,964,239 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,442 | py | import matplotlib.pyplot as plt
import numpy as np
import os
import argparse
import sys
from robot_data_loader import *
from humanoidrobot import *
parser = argparse.ArgumentParser(description='FastSLAM on Humanoid Robot')
parser.add_argument('-jp', '--joint-path', default='', type=str,
help='path to joint data')
parser.add_argument('-lp', '--lidar-path', default='', type=str,
help='path to lidar data')
parser.add_argument('-uf', '--update-frequency', default='', type=int,
help='Update every n data points')
def main(arg_string = ""):
global args
# if arg_string is empty then running from console
if not arg_string:
args = parser.parse_args()
else:
arg_list = arg_string.split()
args = parser.parse_args(arg_list)
# create dataloader object and robot object
dataloader = RobotDataLoader(args.joint_path, args.lidar_path)
robot = HumanoidRobot(sensor_head_dist = 0.15, head_body_dist = 0.33,
floor_body_dist = 0.93, N_eff_threshold = 5)
# customize the fastSLAM parameters
robot.fastslam = FastSLAM(x_range = (-30,30), y_range = (-30,30), map_res = 0.06,
dtheta_res = 1, pos_sweep_ind = np.arange(-4,5,1),
lidar_angles = np.arange(-135,135.25,0.25), lidar_range = (0.1,30),
lidar_sweep_ind = np.arange(-4,5,1),
num_particles = 50, x_sigma = 0.001, y_sigma = 0.001, theta_sigma = 0.015)
# Run FastSLAM
x_path = [] #path output
y_path = [] #path output
data_length = len(dataloader)
for i in range(data_length):
sys.stdout.flush()
sys.stdout.write('\rFastSLAM Progress: {}/{}'.format(i+1, data_length))
(l_scan, l_pose_x, l_pose_y, l_pose_yaw, h_pitch, h_yaw, ts) = dataloader.get_item(i)
if i == 0:
robot.setup_map(l_scan, l_pose_x, l_pose_y, l_pose_yaw, h_pitch, h_yaw, ts)
T_w2b_best = robot.predict(l_scan, l_pose_x, l_pose_y, l_pose_yaw, h_pitch, h_yaw, ts)
x_path.append(T_w2b_best[0,3])
y_path.append(T_w2b_best[1,3])
elif (i+1)%args.update_frequency == 0:
T_w2b_best = robot.predict_and_update(l_scan, l_pose_x, l_pose_y, l_pose_yaw, h_pitch, h_yaw, ts)
x_path.append(T_w2b_best[0,3])
y_path.append(T_w2b_best[1,3])
else:
T_w2b_best = robot.predict(l_scan, l_pose_x, l_pose_y, l_pose_yaw, h_pitch, h_yaw, ts)
print("")
print("FastSLAM Completed")
return robot, x_path, y_path
if __name__ == "__main__":
# Run FastSLAM on Humanoid robot
robot, x_path, y_path = main()
# Get pixel locations for path
x_path_is = np.ceil((np.array(x_path) - robot.fastslam.xmin) / robot.fastslam.map_res ).astype(np.int16)-1
y_path_is = np.ceil((np.array(y_path) - robot.fastslam.ymin) / robot.fastslam.map_res ).astype(np.int16)-1
# Get map output
slam_map = robot.get_map()
# Create threshold map
map_out = np.zeros(slam_map.shape).astype(np.uint8)
thresh = slam_map < 0.
map_out[thresh] = 0
thresh = slam_map > 0.
map_out[thresh] = 255
thresh = slam_map == 0.
map_out[thresh] = 125
# Plot outputs
plt.figure(figsize = (10,10))
plt.imshow(map_out,cmap="gray")
plt.plot(y_path_is,x_path_is,linewidth=2, color='blue')
plt.show() | [
"wayclef.mattern@gmail.com"
] | wayclef.mattern@gmail.com |
8bf604e5545abce9b382470def829932c7d40127 | 7e7780dc97dd39316dfeac8d8c95713ad35258a6 | /heat/optim/tests/__init__.py | 7f22d97477344da5575a454e6865b721475ef603 | [
"MIT"
] | permissive | Cdebus/heat | 9bddd71a24c620e72e5ad9af1c66f8b3db7c23ff | 38ef4a95957956769be1a00de0d9b219fc5dc8c7 | refs/heads/master | 2021-06-22T15:16:33.348969 | 2021-02-26T14:05:07 | 2021-02-26T14:05:07 | 169,711,012 | 0 | 0 | null | 2019-02-08T09:26:38 | 2019-02-08T09:26:38 | null | UTF-8 | Python | false | false | 26 | py | from .test_optim import *
| [
"daniel.coquelin@gmail.com"
] | daniel.coquelin@gmail.com |
027aac9247c4f8604733246af3f42d7bf613df87 | b543aa765ee0a7df07a6d2c2d1c5451007f4fd4d | /sub.py | 4613b408bc4873949891d86307e05888b2134e5c | [] | no_license | sztosz/delme | 948fce7ba52d0afdc38616bf4bb83dd5054806f3 | 69a15d96604f2934596041add74242cd77787ef1 | refs/heads/master | 2020-07-14T10:57:52.084537 | 2016-09-11T07:02:22 | 2016-09-11T07:02:22 | 67,915,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | import something.storage as storage
def run():
print(storage.retrieve("The"))
| [
"sztosz@gmail.com"
] | sztosz@gmail.com |
1f638f6038f33df2aa4f2e79d8b32c4280c955fd | 3db8bc4c7297895c687be374a206b63d5d329e5e | /Python3/019_Remove_Nth_Node_From_End_of_List.py | 0d1d00b05fb6965d0c5f5762555d56236207eb67 | [
"MIT"
] | permissive | Jian-jobs/Leetcode-Python3 | dd06d3238b69ae1419754810dec68705d3344a41 | f2d3bb6ecb7d5d0bca4deaed26162fbe0813a73e | refs/heads/master | 2020-05-15T00:59:28.160898 | 2018-11-16T04:44:32 | 2018-11-16T04:44:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | #!usr/bin/env python3
# -*- coding:utf-8 -*-
'''
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
# Define this to check if it works well
def myPrint(self):
print(self.val)
if self.next:
self.next.myPrint()
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
if not head:
return head
point = ListNode(-1)
point.next = head
prev = point
cur = point
while prev and n >=0:
prev = prev.next
n -= 1
while prev:
prev = prev.next
cur = cur.next
cur.next = cur.next.next
return point.next
if __name__ == "__main__":
n5 = ListNode(5)
n4 = ListNode(4)
n3 = ListNode(3)
n2 = ListNode(2)
n1 = ListNode(1)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
result = Solution().removeNthFromEnd(n1, 5)
result.myPrint()
| [
"gzr2008@vip.qq.com"
] | gzr2008@vip.qq.com |
eed66ce765aa9eae0228a51ffc68c16ad9405ae4 | 1816378da612c7db376934b033e4fd64951338b6 | /gui/jails/migrations/0007_add_model_JailTemplate.py | dc21d06fbe871543b3648239738a169c72011b35 | [] | no_license | quater/freenas-9.2-xen | 46517a7a23546764347d3c91108c70a8bd648ec6 | 96e580055fa97575f0a0cb23a72495860467bcfb | refs/heads/master | 2021-01-16T22:21:38.781962 | 2014-02-07T05:59:13 | 2014-02-07T05:59:13 | 16,609,785 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,422 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from freenasUI.jails.utils import get_jails_index
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'JailTemplate'
db.create_table(u'jails_jailtemplate', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('jt_name', self.gf('django.db.models.fields.CharField')(max_length=120)),
('jt_url', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'jails', ['JailTemplate'])
#
# The standard jail types
#
db.execute("insert into jails_jailtemplate (jt_name, jt_url) "
"values ('pluginjail', '%s/freenas-pluginjail.tgz')" % get_jails_index())
db.execute("insert into jails_jailtemplate (jt_name, jt_url) "
"values ('portjail', '%s/freenas-portjail.tgz')" % get_jails_index())
db.execute("insert into jails_jailtemplate (jt_name, jt_url) "
"values ('standard', '%s/freenas-standard.tgz')" % get_jails_index())
#
# And... some Linux jail templates
#
db.execute("insert into jails_jailtemplate (jt_name, jt_url) "
"values ('debian-7.1.0', '%s/linux-debian-7.1.0.tgz')" % get_jails_index())
db.execute("insert into jails_jailtemplate (jt_name, jt_url) "
"values ('gentoo-20130820', '%s/linux-gentoo-20130820.tgz')" % get_jails_index())
db.execute("insert into jails_jailtemplate (jt_name, jt_url) "
"values ('ubuntu-13.04', '%s/linux-ubuntu-13.04.tgz')" % get_jails_index())
db.execute("insert into jails_jailtemplate (jt_name, jt_url) "
"values ('centos-6.4', '%s/linux-centos-6.4.tgz')" % get_jails_index())
db.execute("insert into jails_jailtemplate (jt_name, jt_url) "
"values ('suse-12.3', '%s/linux-suse-12.3.tgz')" % get_jails_index())
def backwards(self, orm):
# Deleting model 'JailTemplate'
db.delete_table(u'jails_jailtemplate')
models = {
u'jails.jails': {
'Meta': {'object_name': 'Jails'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jail_alias_bridge_ipv4': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_alias_bridge_ipv6': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_alias_ipv4': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_alias_ipv6': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_autostart': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_bridge_ipv4': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_bridge_ipv6': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_defaultrouter_ipv4': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_defaultrouter_ipv6': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_host': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'jail_ipv4': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_ipv6': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_mac': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_nat': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'jail_status': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'jail_type': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'jail_vnet': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'})
},
u'jails.jailsconfiguration': {
'Meta': {'object_name': 'JailsConfiguration'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jc_collectionurl': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'jc_ipv4_network': ('freenasUI.freeadmin.models.Network4Field', [], {'max_length': '18', 'blank': 'True'}),
'jc_ipv4_network_end': ('freenasUI.freeadmin.models.Network4Field', [], {'max_length': '18', 'blank': 'True'}),
'jc_ipv4_network_start': ('freenasUI.freeadmin.models.Network4Field', [], {'max_length': '18', 'blank': 'True'}),
'jc_ipv6_network': ('freenasUI.freeadmin.models.Network6Field', [], {'max_length': '43', 'blank': 'True'}),
'jc_ipv6_network_end': ('freenasUI.freeadmin.models.Network6Field', [], {'max_length': '43', 'blank': 'True'}),
'jc_ipv6_network_start': ('freenasUI.freeadmin.models.Network6Field', [], {'max_length': '43', 'blank': 'True'}),
'jc_path': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'jails.jailtemplate': {
'Meta': {'object_name': 'JailTemplate'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jt_name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'jt_url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'jails.nullmountpoint': {
'Meta': {'object_name': 'NullMountPoint'},
'destination': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jail': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '300'})
}
}
complete_apps = ['jails']
| [
"john@ixsystems.com"
] | john@ixsystems.com |
56d822b095f416bf950641e5022a67600623868e | df55f2149144920b0cbca1ea0913ccc1a89beda2 | /lib/mod/crop_and_livestock_production.py | a2716de4897bc4496020e88940ee8c0ee557fdd2 | [] | no_license | thomasball355/C-LLAMA1.0 | 5ae2e478410a7ebee1b3c2831ec1d68feab6568b | 1d02709bdeab72b28dc2c42cd47531694536f3fd | refs/heads/main | 2023-07-11T17:55:26.104688 | 2021-08-13T11:33:27 | 2021-08-13T11:33:27 | 329,949,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,208 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import lib.funcs.perc_contributions_WRAP
import lib.funcs.dat_io as io
import lib.funcs.foodsupply_trajectory
import lib.dat.colours
import lib.dat.food_commodity_seperation
def main(area_index):
vegetal_commodity_production_ratios = io.load("lib\\dat\\production", "vegetal_commodity_production_ratios")
animal_commodity_production_ratios = io.load("lib\\dat\\production", "animal_commodity_production_ratios")
vegetal_production_required = pd.DataFrame( index = vegetal_commodity_production_ratios.columns,
columns = np.arange(2013, 2051, 1), data = 0)
animal_production_required = pd.DataFrame( index = animal_commodity_production_ratios.columns,
columns = np.arange(2013, 2051, 1), data = 0)
for continent in area_index.Continent.unique():
for region in area_index[area_index.Continent == continent].Region.unique():
for area in area_index[area_index.Region == region].index.to_list():
area_production_vegetal = pd.DataFrame( index = vegetal_production_required.index,
columns = vegetal_production_required.columns)
production_energy_for_human = io.load( f"data\\{continent}\\{region}\\food_supply",
f"production_energy_for_human_{area}")
PEfH_V = production_energy_for_human.xs("Vegetal Products",
level = "Group").xs("MJ/year",
level = "Unit")
PEfH_V.index.name = None
PEfH_A = production_energy_for_human.xs("Animal Products",
level = "Group").xs("MJ/year",
level = "Unit")
PEfH_A.index.name = None
vegetal_production_required = vegetal_production_required.add(PEfH_V.values,
fill_value = 0)
animal_production_required = animal_production_required.add(PEfH_A.values,
fill_value = 0)
crop_properties = pd.read_excel("lib\\dat\\vegetal_product_properties.xlsx", index_col = 0)
animal_properties = pd.read_excel("lib\\dat\\animal_product_properties.xlsx", index_col = 0)
for continent in area_index.Continent.unique():
for region in area_index[area_index.Continent == continent].Region.unique():
for area in area_index[area_index.Region == region].index.to_list():
area_multiplier_vegetal = vegetal_commodity_production_ratios.loc[area]
area_multiplier_animal = animal_commodity_production_ratios.loc[area]
area_production_vegetal = (vegetal_production_required.T \
* area_multiplier_vegetal).T #energy MJ/year
area_production_animal = (animal_production_required.T \
* area_multiplier_animal).T #energy MJ/year
crop_properties = crop_properties[np.logical_not(crop_properties.index.isin(["Sugar cane", "Sugar Crops"]))]
area_production_vegetal_mass = (area_production_vegetal.T / crop_properties["energy_density"]).T # kilograms
area_production_animal_mass = (area_production_animal.T / animal_properties["energy_density"]).T #
path = f"data\\{continent}\\{region}\\production"
io.save(path, f"production_energy_for_human_vegetal_{area}", area_production_vegetal)
io.save(path, f"production_mass_vegetal_for_human_{area}", area_production_vegetal_mass)
io.save(path, f"production_energy_for_human_animal_{area}", area_production_animal)
io.save(path, f"production_mass_animal_for_human_{area}", area_production_animal_mass)
| [
"thomasball355@googlemail.com"
] | thomasball355@googlemail.com |
1e2bdc3a3def52d296cd26d604daa8f574a89953 | 08fec2f65d42feeff81b980670f6e0849aead18f | /tools/norm_flat_uyghur_dict.py | 8c3bf71f67039eb73948ae9a4628f646e95b7b8a | [] | no_license | sjmielke/uyghur-dictprocessing | a868e71caffe3ed508dd89ce210906c0ccca5711 | e54091027f7fa3f1fc956993fe4e054eb59065dd | refs/heads/master | 2020-07-05T04:33:54.195562 | 2016-11-18T12:30:42 | 2016-11-18T12:30:42 | 74,128,134 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 12,229 | py | #!/usr/bin/env python3
import argparse
import sys
import codecs
import itertools
import subprocess
import re
import unicodedata
from collections import defaultdict
import spacy
en_nlp = spacy.load('en')
def is_between(x1, c, x2):
return ord(x1) <= ord(c) <= ord(x2)
def get_strippable_char_set(s):
def is_messed_up_line(s):
s = unicodedata.normalize('NFKC', s)
# Something went very wrong.
for weirdchar in list("()ª³µ¹º¼ÀÆÉÊËÎÏÐÑÒÔ×àô،") + ['§', '+', '`']:
if weirdchar in s:
return True
# no cyrillic/chinese stuff
for c in s:
if is_between('А', c, 'я') or is_between('一', c, chr(99999)):
return True
return False
s = unicodedata.normalize('NFKD', "".join(filter(lambda x: not is_messed_up_line(x), s.splitlines())))
strippables = []
for c in sorted(list(set(s))):
# A-Za-z0-9 is fine
if is_between('a', c, 'z') or is_between('A', c, 'Z') or is_between('0', c, '9'):
continue
# Arab chars are fine
if is_between('', c, 'ۿ') or c in list("¯"):
continue
# Some punctuation is to be expected
if c in list(" !?.,;-()[]{}|/=:_@\"'~&%\n\t«»—–”“’<>"):
continue
# Consciously stripping:
# - # (that will leave src sep marks |, -<-<, and »»»)
# - ¡<soft hyphen>
# - *
# - all accent marks!
# - left-to-right marker
try:
name = unicodedata.name(c)
except:
name = '?'
print(c + '\t' + str(ord(c)) + '\t' + name)
strippables.append(c)
return strippables
def apply_until_convergence(f, val):
while True:
next_val = f(val)
if val == next_val:
break
else:
val = next_val
return val
def strip_parens(s):
s = apply_until_convergence(lambda v: re.sub(r'\[[^\[]*?\]', '', re.sub(r'\([^(]*?\)', '', v)), s)
return s
def no_of_upper_chars(s):
return len(list(filter(lambda c: c.isupper(), s)))
def pick_uppercased(args, l):
if not args.removelowercased:
return l
results = []
for entry1 in l:
best_entry = entry1
for entry2 in l:
if entry1[1].lower() == entry2[1].lower():
if no_of_upper_chars(entry1[1]) < no_of_upper_chars(entry2[1]):
best_entry = entry2
elif no_of_upper_chars(entry1[1]) == no_of_upper_chars(entry2[1]) and entry1[1] != entry2[1]:
print("Error ", entry1, " == ", entry2, file = sys.stderr)
results.append(best_entry)
return results
def cleanabbrevs(s):
return re.sub(r'([^\w]|^)th\. ' , "\\1the ",\
re.sub(r'([^\w]|^)esp\. ', "\\1especially ",\
re.sub(r'([^\w]|^)sb\.' , "\\1somebody",\
re.sub(r'([^\w]|^)smb\.' , "\\1somebody",\
re.sub(r'([^\w]|^)sth\.' , "\\1something",\
re.sub(r'([^\w]|^)smth\.', "\\1something",\
s))))))
def spacysplit(in_trg):
results = []
trg = in_trg.strip()
if trg == '':
return trg
# Spacy really chokes on non-ascii chars, which is bad in "see XYZBLA BLUBB"
if len(trg) > 3 and trg[0:4] == 'see ' and not len(trg) == len(trg.encode()):
real_sentences = [trg]
else:
# Recursive split for sentences, spacy isn't idempotent...
def gimme_all_sents(s):
s = s.strip()
# Or don't, if thats better
sents = [str(se) for se in list(en_nlp(s).sents)]
# Add a dot to make sure the last thing will be it's own sentence, if that's better
sents_dot = [str(se) for se in list(en_nlp(s + '.').sents)]
if len(sents_dot) > len(sents):
sents = sents_dot
sents[-1] = sents[-1][:-1]
if sents[-1] == '':
sents = sents[:-1]
# Or remove last punctuation, if it's possible and helps!
if len(s) > 1 and s[-1] in ['.', '!', '?']:
removed_dot = s[-1]
sents_antidot = [str(se) for se in list(en_nlp(s[:-1]).sents)]
if len(sents_antidot) > len(sents):
sents = sents_antidot
sents[-1] = sents[-1] + removed_dot
# Stop recursion
if len(sents) == 1:
return [str(s)]
# Recurse at will!
results = []
for sent in sents:
results += gimme_all_sents(sent)
#print(str(s), ' ~> ', results)
return results
# Sadly, spacy oversplits at the dash
real_sentences = []
for spacy_sentence in gimme_all_sents(trg):
if real_sentences != [] and real_sentences[-1][-1] in ['–', '\'']:
real_sentences[-1] = real_sentences[-1] + ' ' + str(spacy_sentence)
else:
real_sentences.append(str(spacy_sentence))
# Now we have proper sentences
for sentence in real_sentences:
# Remove "..." and "word." and trim
clipped_sent = re.sub(r'\.\.+', '', \
re.sub(r'([^\.A-Z])\.$','\\1', \
str(sentence))).strip()
# If theres anything useful left, okay!
if clipped_sent.replace('.','') != "":
results.append(clipped_sent)
return results
def main():
parser = argparse.ArgumentParser(description="Given LRLP lexicon flat representation attempt to normalize it to short phrase form",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--infile", "-i", nargs='?', type=argparse.FileType('r'), default=sys.stdin, help="input lexicon file")
parser.add_argument("--outfile", "-o", nargs='?', type=argparse.FileType('w'), default=sys.stdout, help="output instruction file")
parser.add_argument("--explainfile", "-e", nargs='?', type=argparse.FileType('w'), default=None, help="explanation output file")
parser.add_argument("--nosplit", "-n", action='store_true', default=False, help="don't split target on commas/semicolons/or/slash")
parser.add_argument("--targetlimit", "-l", type=int, default=4, help="maximum length of target entry after splitting")
parser.add_argument("--earlytargetlimit", "-L", type=int, default=20, help="maximum length of target entry (number of words) before splitting")
parser.add_argument("--singletons", "-s", action='store_true', default=False, help="only split src/trg pairs")
parser.add_argument("--removelowercased", action='store_true', default=False, help="remove lowercased variants of uppercased translations")
parser.add_argument("--nospacysplit", action='store_true', default=False, help="don't split targets using spacy.io parser")
try:
args = parser.parse_args()
except IOError as msg:
parser.error(str(msg))
outfile = args.outfile
stderr = sys.stderr
infile = args.infile.read()
strippables = get_strippable_char_set(infile)
originaldict = defaultdict(list)
cleaneddict = defaultdict(list)
resultdict = defaultdict(list)
bad = 0
toomanywords = 0
wrote = 0
for line in infile.splitlines():
# Do global cleanup
line = "".join(filter(lambda c: c not in strippables, unicodedata.normalize('NFKD', line.replace('ı', 'i'))))
try:
srcs, origin, trgs = line.strip().split("\t")
originaldict[srcs].append((origin, trgs))
except:
bad += 1
continue
if len(trgs.split()) > args.earlytargetlimit:
toomanywords += 1
continue
# clean up source side
srcs = strip_parens(srcs)
# normalize whitespace
srcs = " ".join(srcs.split())
# now split
srcs = [srcs] if args.nosplit else re.split(r'[;,/،]|-<-<|\||»+| or |. [0-9]+', srcs)
# clean up target side
trgs = strip_parens(trgs)
trgs = re.sub(r'e\.g\..*', '', trgs) # e.g. comes before garbage
trgs = re.sub(r'«[A-Z]+»', '', trgs) # strip categories like «BOT»
trgs = trgs.replace("«MEC]", "").replace("«TEX]", "").replace("«СINE»", "") # a few unclean ones
# normalize whitespace
trgs = " ".join(trgs.split())
# clean sth. -> something
trgs = cleanabbrevs(trgs)
# delete full stop at end
#trgs = trgs[0:-1] if len(trgs) > 0 and trgs[-1] == '.' else trgs
# split on commas, semicolons, "or" and sense disambiguations/ends on target side
trgs = [trgs] if args.nosplit else re.split(r'[;,/،]|-<-<|\||»+| or |. [0-9]+', trgs)
for src in srcs:
src = src.strip()
for coarse_trg in trgs:
# filter unsure entries
if "??" in coarse_trg or "\"" in coarse_trg:
continue
# Otherwise do spacy.io's fine splitting
fine_targets = spacysplit(coarse_trg.strip()) if not args.nospacysplit else [coarse_trg.strip()]
for trg in fine_targets:
# Only strip actual infinitives, not prepositions!
if len(trg) > 3 and trg[3].islower():
trg = re.sub(r'^to ', '', trg)
trg = re.sub(r'^be ', '', trg)
trg = re.sub(r'^NO_GLOSS$', '', trg)
trg = re.sub(r'^dial> ', '', trg)
trg = trg.strip()
# cleaned out all junk?
if len(trg) == 0 or len(src) == 0:
continue
# nothing too long
if len(trg.split()) > args.targetlimit:
toomanywords += 1
continue
# Now NFKC again
src = unicodedata.normalize('NFKC', src)
origin = unicodedata.normalize('NFKC', origin)
trg = unicodedata.normalize('NFKC', trg)
cleaneddict[src].append((origin, trg))
def is_arab_char(c):
return ord(c) >= 1536 and ord(c) <= 1791
def is_weird_char(c):
return is_arab_char(c) or not c.isalpha()
# Resolve all mentions!
def trans_through(word: str, path: [str] = []) -> [(str, str)]:
# Stop recursion
if word in path:
return []
# Check for mentions
res = []
for (origin, t) in cleaneddict[word]:
next_word = re.sub('^see|^form of|^[a-z]+ form of', '', t)
if next_word != t: # we did find a match
next_word = "".join(itertools.takewhile(is_weird_char, next_word)).strip()
if next_word != "":
# We found a match so let's resolve that instead of appending
res += trans_through(next_word, path + [word])
continue
res.append((origin, t))
return res
for src in list(cleaneddict.keys()):
uppercaseds = pick_uppercased(args, set(trans_through(src)))
translations = sorted(list(set(uppercaseds)))
# Singletons or "normal" entries?
if not args.singletons:
resultdict[src] = translations
else:
allsrcs = src.split()
if len(allsrcs) > 1:
for (origin, trg) in translations:
alltrgs = trg.split()
if len(allsrcs) == len(alltrgs):
for (singleton_src, singleton_trg) in zip(allsrcs, alltrgs):
if all(map(is_arab_char, singleton_src)) and (origin, singleton_trg) not in cleaneddict[singleton_src]: # 25255
resultdict[singleton_src].append((origin, singleton_trg))
for src in sorted(list(resultdict.keys())):
for (origin, trg) in sorted(resultdict[src]):
outfile.write("%s\t%s\t%s\n" % (src, origin, trg))
wrote += 1
if args.explainfile != None:
# Now print what we did!
all_sources = sorted(list(set(list(resultdict.keys()) + list(originaldict.keys()))))
# First romanize all sources.
#romanized, _ = subprocess.Popen("/home/sjm/documents/ISI/uroman-v0.5/bin/uroman.pl", stdin=subprocess.PIPE, stdout=subprocess.PIPE).communicate("\n".join(all_sources).encode('utf-8'))
#romanized_sources = romanized.decode('utf-8').splitlines().reversed()
for src in all_sources:
print("»{}«".format(src), file = args.explainfile)
print("(original)", file = args.explainfile)
for (origin, t) in sorted(originaldict[src]):
print(" " + t, file = args.explainfile)
print("(result)", file = args.explainfile)
for (origin, t) in sorted(resultdict[src]):
print(" " + t, file = args.explainfile)
print("", file = args.explainfile)
#assert romanized_sources == []
assert len(sorted(list(resultdict.keys()))) == len(set(sorted(list(resultdict.keys()))))
for src in sorted(list(resultdict.keys())):
assert len(resultdict[src]) == len(set(resultdict[src]))
stderr.write("%d bad %d too many target words %d wrote\n" % (bad, toomanywords, wrote))
if __name__ == '__main__':
main()
| [
"sjm@sjmielke.com"
] | sjm@sjmielke.com |
e00c9f5ecffc4ec7a8b30e5e05d2c50b3a35acf7 | 34073967e0b1706b1b9ba9ea7b903b1b56d211cc | /application.py | 2246097bdf56e00419bf53f43e57f9c3abdc00c3 | [] | no_license | anandsainath/visual-analytics-for-web-and-documents | 5d1dfc694952232c5d85f37721113aac49a7a446 | 3d0cabfb897150dee01d66c1c0ff82a73f8902af | refs/heads/master | 2021-01-21T17:06:33.135740 | 2015-07-22T12:34:57 | 2015-07-22T12:34:57 | 23,821,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from flask import Flask, render_template, url_for
from mongoengine import *
app = Flask(__name__)
@app.route('/')
def index():
return render_template('home.html')
@app.route('/testdb')
def test():
connect('jigsaw')
return "Connection Success"
if __name__ == '__main__':
app.run(debug=True) | [
"anand.sainath@gmail.com"
] | anand.sainath@gmail.com |
1e585984517286d083e98c957b055e80ae1e2398 | dd6bd3b39fead49cd2e48baf0fc28bd7792fc13a | /tests/test_service_handler.py | 868a33f559f2c47175fa1b32900998eda48c2157 | [] | no_license | skensell201/devops_test | 5eb78b9970a82b5e97a2d95020db69125db06e48 | 5cf9abda1c464df257050169818ac585d1b5db3e | refs/heads/master | 2022-07-02T05:04:23.578234 | 2020-03-11T19:09:12 | 2020-03-11T19:09:12 | 263,444,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from aiohttp.test_utils import TestClient
async def test(client: TestClient):
async with client.get("/api/service") as resp:
assert resp.status == 200
data = await resp.json()
assert set(data) == {"service", "version", "build"}
| [
"drwebfrontend@gmail.com"
] | drwebfrontend@gmail.com |
498f5c40fc4c91683d8e499bd23fa154777797d6 | d421c531e17b8292a9a3aec965dfa335301d3bdc | /simple_env.py | 72af90c60119c170f0e10366673d78afadc42b98 | [] | no_license | matthew-haines/financial-drl | 57ed9dc73ae57e680790896ac4252c0b2d3f3828 | 0a2005c32f0ae5020f5a2f396abcbb0871f6498c | refs/heads/master | 2020-05-02T01:27:27.259570 | 2019-03-26T00:42:01 | 2019-03-26T00:42:01 | 177,686,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,741 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Environment():
def __init__(self, data, starting_capital=0.5, starting_step=10, initial_position=1):
"""starting_capital in BTC"""
self.data = data # Features by timesteps
self.initial_position = initial_position
self.position = self.initial_position
self.starting_capital = starting_capital
self.capital = self.starting_capital
self.current_step = starting_step
self.state = torch.Tensor(self.data[self.current_step])
self.fee = 1 - 0.000750 # Percentage
self.spread = 0.5 # USD
self.done = False
def reset(self):
self.current_step = self.initial_position
self.capital = self.starting_capital
self.state = torch.Tensor(self.data[self.current_step])
def get_value(self):
return (self.position ** 2) ** (1/2.0) * self.data[self.current_step] # At close
def step(self, action):
# Action space of (Buy, Sell), whichever one is highest gets picked
if self.current_step == self.data.shape[1]:
self.done = True
new_state = self.data[self.current_step]
self.state = new_state
return new_state, "Done"
if self.get_value() <= 0:
self.done = True
new_state = self.data[self.current_step]
self.state = new_state
return new_state, "Bankrupted"
chosen_action = torch.argmax(action)
if chosen_action == 0:
# Buy Order
old_capital = self.capital
self.capital = self.get_value()
reward = self.capital - old_capital
self.position = self.fee * (self.capital - self.spread / self.data[self.current_step])
new_state = self.data[self.current_step]
self.state = new_state
return new_state, "Bought"
if chosen_action == 1:
# Sell Order
old_capital = self.capital
self.capital = self.get_value()
reward = self.capital - old_capital
self.position = -(self.fee * (self.capital - self.spread / self.data[self.current_step]))
new_state = self.data[self.current_step]
self.state = new_state
return new_state, "Sold"
self.current_step += 1
class policy(nn.Module):
def __init__(self, input_dim):
super(policy, self).__init__()
self.l1 = nn.Linear(input, 128)
self.l2 = nn.Linear(128, 128)
self.output = nn.Linear(128, 2)
def forward(self, x)
x = F.elu(self.l1(x))
x = F.elu(self.l2(x))
return F.sigmoid(self.output(x)) | [
"m.haines1000@gmail.com"
] | m.haines1000@gmail.com |
e2cb2c8e89a8b49e48345e5c5ac0b7f4d4038e0c | d913bac9fa42473aa8cee68c8ad8b4eba5484b89 | /Scripts/features/VoidTender_POS.py | 6f88141ad64358009955cec6efcfc5ed742ca805 | [] | no_license | priyatam0509/Automation-Testing | 07e7c18b4522976f0ade2b72bd46cffd55c5634e | d24805456e5a0126c036c1688a5d112bdcf4467a | refs/heads/main | 2023-02-26T19:07:41.761905 | 2021-01-30T10:13:34 | 2021-01-30T10:13:34 | 334,376,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,424 | py | """
File name: VoidTender_POS.py
Tags:
Description:
Author: Gene Todd
Date created: 2020-04-16 09:40:28
Date last modified:
Python Version: 3.7
"""
import logging, time
from app import Navi, mws, pos, system
from app.framework.tc_helpers import setup, test, teardown, tc_fail
class VoidTender_POS():
"""
Description: Test class that provides an interface for testing.
"""
def __init__(self):
self.log = logging.getLogger()
@setup
def setup(self):
"""
Performs any initialization that is not default.
"""
#if not system.restore_snapshot():
# self.log.debug("No snapshot to restore, if this is not expected please contact automation team")
pos.connect()
pos.sign_on()
@test
def test_voidCash(self):
"""
Basic void tender case using cash. Reason codes enabled.
"""
self.prep_trans()
self.log.info("Adding tender")
pos.enter_keypad(100, after="Enter")
# Assume the tender has already been selected when it was added
self.log.info("Voiding cash tender")
pos.click_tender_key("Void")
# Confirms the reason codes appeared
pos.select_list_item("Cashier Error")
pos.click("Enter")
# Confirm the tender is gone
jrnl = pos.read_transaction_journal()
for line in jrnl:
if "Cash" in line:
tc_fail("Cash tender found in transaction after being voided")
self.log.info("Cash confirmed no longer in transaction journal")
# Pay out the transaction for the next test
self.log.info("Paying out transaction")
pos.click_tender_key("Exact Change")
pos.is_element_present(pos.controls['function keys']['tools'], timeout=5)
@test
def test_noReasonCodes(self):
"""
Tests our ability to void tenders without reason codes enabled
"""
# Disable reason codes
pos.close()
self.log.info("Removing void tender reason code")
Navi.navigate_to('Register Group Maintenance')
mws.click_toolbar('Change')
mws.select_tab('Reason Codes')
mws.set_value('Available Functions', 'Void Tender')
mws.set_value('Require Reason Code', False)
mws.click_toolbar('Save')
pos.connect()
tries = 0
while mws.get_top_bar_text() and tries < 10:
self.log.info("Waiting for reload options...")
tries = tries + 1
time.sleep(.5)
self.prep_trans()
self.log.info("Adding tender")
pos.enter_keypad(100, after="Enter")
# Assume the tender has already been selected when it was added
self.log.info("Voiding cash tender")
pos.click_tender_key("Void")
# Wait for void to process
pos.is_element_present(pos.controls['pay']['exact_amount'], timeout=5)
# Confirm the tender is gone
jrnl = pos.read_transaction_journal()
for line in jrnl:
if "Cash" in line:
tc_fail("Cash tender found in transaction after being voided")
self.log.info("Cash confirmed no longer in transaction journal")
# Pay out the transaction for the next test
self.log.info("Paying out transaction")
pos.click_tender_key("Exact Change")
pos.is_element_present(pos.controls['function keys']['tools'], timeout=5)
@teardown
def teardown(self):
"""
Performs cleanup after this script ends.
"""
pos.close()
# Re-enable reason codes
self.log.info("Removing void tender reason code")
Navi.navigate_to('Register Group Maintenance')
mws.click_toolbar('Change')
mws.select_tab('Reason Codes')
mws.set_value('Available Functions', 'Void Tender')
mws.set_value('Require Reason Code', True)
mws.click_toolbar('Save')
def prep_trans(self):
"""
Helper function for adding an item and getting to the pay screen for tests
"""
self.log.info("Setting up transaction for VoidTender test...")
pos.click("Item 1")
pos.enter_keypad(1000, after="Enter")
pos.click("Pay")
self.log.info("... Setup complete")
| [
"piyushpriya34@gmail.com"
] | piyushpriya34@gmail.com |
714f10f1cdf810ee577d228a32f31af48c09c4ca | 93e55f080779f16f47a7382a3fb0b29a4189e074 | /convertor/huawei/te/lang/cce/te_compute/conv3d_compute.py | 41cc76d90ec5f7cf33e700dfbfa39e91468a5d7b | [] | no_license | jizhuoran/caffe-huawei-atlas-convertor | b00cfdec3888da3bb18794f52a41deea316ada67 | 148511a31bfd195df889291946c43bb585acb546 | refs/heads/master | 2022-11-25T13:59:45.181910 | 2020-07-31T07:37:02 | 2020-07-31T07:37:02 | 283,966,371 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 29,385 | py | """
Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use this file
except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
conv3d compute
"""
from __future__ import division
from te import tvm
from te.platform import CUBE_MKN
from te.platform import get_soc_spec
from te.domain.tiling.tiling_query import tiling_query
from te.utils import op_utils
from . import util as te_util
from .cube_util import im2col_fractal_3d, im2col_row_major
OP_TAG = "conv3d_"
TENSOR_MAP = {}
DIM_MAP = {}
NAME_INDEX = [0]
SQRT = {}
# filterD must be in [1,255]
FILTER_DHW_MIN = 1
FILTER_DHW_MAX = 255
# pad must be in [0,255]
PAD_MIN = 0
PAD_MAX = 255
# stride must be in [1,63]
STRIDE_MIN = 1
STRIDE_MAX = 63
# fmap H and W must be in [1, 4096]
FMAP_HW_MIN = 1
FMAP_HW_MAX = 4096
def check_d_dimension(fmap_d, filter_d, pad_d, stride_d):
if filter_d < FILTER_DHW_MIN or filter_d > FILTER_DHW_MAX:
raise RuntimeError("kernel D must be in [1,255].")
if (fmap_d + pad_d[0] + pad_d[1]) < filter_d:
raise RuntimeError(
"the depth of feature map after padding"
"can not be less than shape_filter's")
if pad_d[0] < PAD_MIN or pad_d[1] < PAD_MIN or \
pad_d[0] > PAD_MAX or pad_d[1] > PAD_MAX:
raise RuntimeError("padd must be in [0,255].")
if pad_d[0] >= filter_d or pad_d[1] >= filter_d:
raise RuntimeError("padd must be less than shape_filter's")
if stride_d < STRIDE_MIN or stride_d > STRIDE_MAX:
raise RuntimeError("strided must be in [1,63].")
def check_h_dimension(fmap_h, filter_h, pad_h, stride_h):
if fmap_h < FMAP_HW_MIN or fmap_h > FMAP_HW_MAX:
raise RuntimeError("feature H must be in [1,4096].")
if filter_h < FILTER_DHW_MIN or filter_h > FILTER_DHW_MAX:
raise RuntimeError("kernel H must be in [1,255].")
if pad_h[0] < PAD_MIN or pad_h[1] < PAD_MIN or \
pad_h[0] > PAD_MAX or pad_h[1] > PAD_MAX:
raise RuntimeError("padh must be in [0,255].")
if filter_h > (fmap_h + pad_h[0] + pad_h[1]):
# Chip Design demand, Load3D
raise RuntimeError("feature H(after pad) must >= kernel H")
if stride_h < STRIDE_MIN or stride_h > STRIDE_MAX:
raise RuntimeError("strideh must be in [1,63].")
if pad_h[0] >= filter_h or pad_h[1] >= filter_h:
raise RuntimeError("kernel H must > Pad H")
def check_w_dimension(fmap_w, filter_w, pad_w, stride_w):
if fmap_w < FMAP_HW_MIN or fmap_w > FMAP_HW_MAX:
raise RuntimeError("feature W must be in [1,4096].")
if filter_w < FILTER_DHW_MIN or filter_w > FILTER_DHW_MAX:
raise RuntimeError("kernel W must be in [1,255].")
if pad_w[0] < PAD_MIN or pad_w[1] < PAD_MIN or \
pad_w[0] > PAD_MAX or pad_w[1] > PAD_MAX:
raise RuntimeError("padw must be in [0,255].")
if filter_w > (fmap_w + pad_w[0] + pad_w[1]):
# Chip Design demand, Load3D
raise RuntimeError("feature W(after pad) must >= kernel W")
if stride_w < STRIDE_MIN or stride_w > STRIDE_MAX:
raise RuntimeError("stridew must be in [1,63].")
def check_conv3d_shape(shape_fm, shape_filter, pads, stride_dhw, fmp_dtype,
w_dtype):
"""
algorithm: check the input params of conv3d
Parameters
----------
shape_fm: the shape of feature, format is 'NCDHW'.
a list/tuple of 'int' that has length `== 5`
shape_filter: the shape of filter, format is 'NCDHW'.
a list of 'int' that has length `== 5`
pads: tuple/list of 6 integers
[pad_head, pad_tail, pad_top, pad_bottom, pad_left, pad_right]
stride_dhw: A list of `ints` that has length `== 3`.
fmp_dtype: the dtype of feature
w_dtype: the dtype of filter
Returns
-------
None
"""
if shape_fm[1] != shape_filter[1]:
raise RuntimeError("input feature map channel should"
"equal to filter channel")
fmap_n, fmap_c, fmap_d, fmap_h, fmap_w = shape_fm
filter_n, filter_c, filter_d, filter_h, filter_w = shape_filter
pad_d = [pads[0], pads[1]]
check_d_dimension(fmap_d, filter_d, pad_d, stride_dhw[0])
pad_h = [pads[2], pads[3]]
check_h_dimension(fmap_h, filter_h, pad_h, stride_dhw[1])
pad_w = [pads[4], pads[5]]
check_w_dimension(fmap_w, filter_w, pad_w, stride_dhw[2])
# C dimension should align 16
block_size_k = CUBE_MKN[fmp_dtype]['mac'][1]
block_size_m = CUBE_MKN[fmp_dtype]['mac'][0]
famp_c = ((fmap_c + block_size_k - 1) //
block_size_k) * block_size_k
filter_c = fmap_c
block_size_n = CUBE_MKN[w_dtype]['mac'][2]
filter_n = ((filter_n + block_size_n - 1) //
block_size_n) * block_size_n
# calculated by h_i and w_i
h_out = (fmap_h + (pad_h[0] + pad_h[1]) - filter_h) // stride_dhw[1] + 1
w_out = (fmap_w + (pad_w[0] + pad_w[1]) - filter_w) // stride_dhw[2] + 1
d_out = (fmap_d + (pad_d[0] + pad_d[1]) - filter_d) // stride_dhw[0] + 1
load2d_pass_flag = (filter_d == 1) and (filter_h == 1) and \
(filter_w == 1) and \
(list(pads) == [0, 0, 0, 0, 0, 0]) and \
(list(stride_dhw) == [1, 1, 1])
# Chip Design demand only h_dimesion constraint
only_fhkh_pass_flag = (1 <= filter_h <= 11) and \
(stride_dhw[1] == 1) and \
(h_out == 1)
# Chip Design demand both h_dimesion and w_dimension constraint
fhkh_fwkw_pass_flag = (1 <= filter_w <= 11) and (1 <= filter_h <= 11) and \
(stride_dhw[1] == 1) and (stride_dhw[2] == 1) and \
(h_out == 1) and (w_out == 1)
if load2d_pass_flag or only_fhkh_pass_flag or fhkh_fwkw_pass_flag:
pass
else:
if w_out < 2:
# Chip Design demand w_out must >=2
raise RuntimeError("FM_W + pad_left + pad_right - KW>=strideW")
if h_out < 2:
# Chip Design demand h_out must >=2
raise RuntimeError("FM_H + pad_top + pad_bottom - KH>=strideH")
# check for not bigger than L1
l1_buffer_size = get_soc_spec("L1_SIZE")
m_bit_ratio = {"float16": 2, "int8": 1}
point_per_w = (fmap_w - filter_w +
pad_w[0] + pad_w[1]) // stride_dhw[2] + 1
w_in = block_size_m // point_per_w + 2
tmp = ((w_in - 1) * stride_dhw[1] + filter_h) * fmap_w
max_feature_map_l1 = block_size_k * tmp * m_bit_ratio[w_dtype]
if max_feature_map_l1 > l1_buffer_size:
raise RuntimeError(
"Input feature is too large, "
"the minimum tiling may exceeds L1_Buffer")
class Conv3DParam:
"""
class of ConvParam
"""
def __init__(self):
pass
def get_tensor_map(self):
"""
get the tensor_map in convparam
"""
return self.TENSOR_MAP
TENSOR_MAP = {}
dim_map = {}
tiling = None
tiling_query_param = {}
def cube_3d_compute(fmap,
weight,
mad_dtype,
res_dtype,
pads,
stride_dhw,
shape_filter_ncdhw,
cyclebuffer_flag,
bias=False,
tiling=None):
"""
conv
Parameters
----------
fmap : tvm.tensor, Feature Map
weight: tvm.tensor, Filter
mad_dtype : the compute data type
res_dtype : the result data type
pads: the padding shape
[head, tail, top, bottom, left, right]
stride_dhw: the stride value
[stride_d, stride_h, stride_w]
shape_filter_ncdhw: the filter shape
bias: the tag for bias or not
tiling: default none, tiling
-------
Returns
wrapped_tensor
"""
in_dtype = fmap.dtype
w_dtype = weight.dtype
TENSOR_MAP["fmap"] = fmap
TENSOR_MAP["filter"] = weight
if isinstance(bias, tvm.tensor.Tensor):
TENSOR_MAP["bias"] = bias
fmap_shape = te_util.shape_to_list(fmap.shape)
batch_size = fmap_shape[0]
fmap_d = fmap_shape[1]
fmap_c1 = fmap_shape[2]
fmap_h = fmap_shape[3]
fmap_w = fmap_shape[4]
fmap_c0 = fmap_shape[5]
filter_cout, _, filter_d, filter_h, filter_w = shape_filter_ncdhw
pad_head, pad_tail, pad_top, pad_bottom, pad_left, pad_right = pads
stride_d, stride_h, stride_w = stride_dhw
TENSOR_MAP["filter_d"] = filter_d
height_out = (fmap_h + pad_top + pad_bottom - filter_h) // stride_h + 1
width_out = (fmap_w + pad_left + pad_right - filter_w) // stride_w + 1
d_out = (fmap_d + pad_head + pad_tail - filter_d) // stride_d + 1
config = CUBE_MKN[in_dtype]
block_size_k = config['mac'][1]
block_size_m = config['mac'][0]
opti_h_flag = filter_h == 1 and stride_h > 1
TENSOR_MAP["opti_h_flag"] = opti_h_flag
TENSOR_MAP["d_out"] = d_out
TENSOR_MAP["d_dim"] = tiling["block_dim"][-1]
fmap_fuse_shape = (batch_size * d_out, filter_d * fmap_c1, fmap_h, fmap_w,
fmap_c0)
fuse_fmap_tensor = get_fuse_fmap_tensor(fmap_fuse_shape,
fmap,
d_out,
filter_d,
stride_d,
stride_h,
pad_head,
tiling,
opti_h_flag,
cyclebuffer_flag,
tag=OP_TAG)
TENSOR_MAP["fmap_do_tensor"] = fuse_fmap_tensor
# set_fmatrix
# new data layout (N,C1,H,W,C0) -> (N,HoWo,C1,Hk,Wk,C0)
fmap_im2col_row_major_shape = (fmap_fuse_shape[0], height_out * width_out,
fmap_fuse_shape[1], filter_h, filter_w,
fmap_c0)
pad_hw = pads[2:]
stride_hw = [stride_h, stride_w]
fmap_im2col_row_major_res = im2col_row_major(fmap_im2col_row_major_shape,
fuse_fmap_tensor,
filter_w,
pad_hw,
stride_hw,
fmap.dtype,
opti_h_flag,
tag=OP_TAG)
TENSOR_MAP["fmap_im2col_row_major_res"] = fmap_im2col_row_major_res
# im2col
# small-z-big-Z
howo_mad = (height_out * width_out + block_size_m -
1) // block_size_m * block_size_m
# new data layout (N,HoWo,C1,Hk,Wk,C0) -> (N,loop_m,loop_k,cube_m,cube_k)
fmap_im2col_fractal_shape = (fmap_fuse_shape[0], howo_mad // block_size_m,
fmap_fuse_shape[1] * filter_h * filter_w,
block_size_m, block_size_k)
fmap_im2col_fractal_res = im2col_fractal_3d(fmap_im2col_fractal_shape,
fmap_im2col_row_major_res,
fmap_c1,
d_out,
filter_d,
stride_d,
cyclebuffer_flag,
tag=OP_TAG)
TENSOR_MAP["fmap_im2col_fractal_res"] = fmap_im2col_fractal_res
config = CUBE_MKN[res_dtype]
l0a_load2d_flag = get_load2d_flag(stride_dhw, pads, shape_filter_ncdhw)
TENSOR_MAP["l0a_load2d_flag"] = l0a_load2d_flag
mad_shape = (fmap_fuse_shape[0],
(filter_cout + config['mac'][2] - 1) // (config['mac'][2]),
howo_mad, config['mac'][2])
config = CUBE_MKN[w_dtype]
if l0a_load2d_flag:
c_col = mad_by_load2d(mad_shape, fmap, weight, config, mad_dtype, pads,
stride_d, d_out, filter_d)
else:
c_col = mad(mad_shape, fmap_im2col_fractal_res, weight, config,
mad_dtype, pads, stride_d, d_out, fmap_d, filter_d)
TENSOR_MAP["c_col"] = c_col
conv_shape = (fmap_fuse_shape[0],
(filter_cout + config['mac'][2] - 1) // (config['mac'][2]),
height_out * width_out, config['mac'][2])
DIM_MAP["out_img_shape"] = conv_shape
c_ub = tvm.compute(mad_shape,
lambda n, i, j, k: c_col(n, i, j, k).astype(res_dtype),
name='C_UB',
tag=OP_TAG + "C_UB",
attrs={
'true_shape': conv_shape,
'sqrt': False,
'res_dtype': res_dtype,
'kernel_h': filter_h,
'kernel_w': filter_w,
'padding': pads[2:],
'stride': stride_dhw[1:]
})
TENSOR_MAP["c_ub"] = c_ub
dim_map1 = im2col_dim(te_util.shape_to_list(fuse_fmap_tensor.shape),
shape_filter_ncdhw, list(pads), list(stride_dhw),
config)
dim_map_copy = DIM_MAP.copy()
dim_map_copy.update(dim_map1)
Conv3DParam.TENSOR_MAP = TENSOR_MAP
Conv3DParam.dim_map = dim_map_copy
Conv3DParam.tiling = None
return c_ub
def get_fuse_fmap_tensor(fmap_fuse_shape, fmap, d_out, kernel_d, stride_d,
stride_h, pad_head, tiling, opti_h_flag,
cyclebuffer_flag, tag):
"""
calculate expand tensor
Parameters
----------
fmap_fuse_shape : the shape of new tensor
fmap : the input feature
d_out : the D dimension of out shape
stride_d : the D dimension of strides
pad_head : the pad head of pads
tag : the tensor tag
Returns
-------
new tensor
"""
_, fmap_d, fmap_c1, _, _, _ = fmap.shape
# multi core
d_dim = tiling["block_dim"][-1]
if cyclebuffer_flag:
if opti_h_flag:
fmap_fuse_shape = list(fmap_fuse_shape)
fmap_fuse_shape[2] = (fmap_fuse_shape[2] - 1) // stride_h + 1
fuse_fmap_tensor = tvm.compute(
fmap_fuse_shape,
lambda n, dc, h, w, c0: tvm.select(
tvm.all(
n % d_out * stride_d + (dc // fmap_c1 + n % d_out *
(kernel_d - stride_d)) % kernel_d -
pad_head >= 0, n % d_out * stride_d + \
(dc // fmap_c1 + n % d_out * \
(kernel_d - stride_d)) % kernel_d - pad_head < fmap_d,
tvm.any(
n % d_out * stride_d + \
(dc // fmap_c1 + n % d_out * \
(kernel_d - stride_d)) % kernel_d > \
(n % d_out - 1) * stride_d + kernel_d - 1, n % \
(d_out // d_dim) == 0)),
fmap(
n // d_out, n % d_out * stride_d +\
(dc // fmap_c1 + n % d_out * (kernel_d - stride_d)\
) % kernel_d - pad_head, dc % fmap_c1, h*stride_h, w, c0)),
name='fuse_fmap_tensor',
tag=tag + 'fuse_fmap_tensor')
else:
fuse_fmap_tensor = tvm.compute(
fmap_fuse_shape,
lambda n, dc, h, w, c0: tvm.select(
tvm.all(
n % d_out * stride_d + (dc // fmap_c1 + n % d_out *
(kernel_d - stride_d)) % kernel_d -
pad_head >= 0, n % d_out * stride_d + \
(dc // fmap_c1 + n % d_out * \
(kernel_d - stride_d)) % kernel_d - pad_head < fmap_d,
tvm.any(
n % d_out * stride_d + \
(dc // fmap_c1 + n % d_out * \
(kernel_d - stride_d)) % kernel_d > \
(n % d_out - 1) * stride_d + kernel_d - 1, n % \
(d_out // d_dim) == 0)),
fmap(
n // d_out, n % d_out * stride_d +\
(dc // fmap_c1 + n % d_out * (kernel_d - stride_d)\
) % kernel_d - pad_head, dc % fmap_c1, h, w, c0)),
name='fuse_fmap_tensor',
tag=tag + 'fuse_fmap_tensor')
else:
if opti_h_flag:
fmap_fuse_shape = list(fmap_fuse_shape)
fmap_fuse_shape[2] = (fmap_fuse_shape[2] - 1) // stride_h + 1
fuse_fmap_tensor = tvm.compute(
fmap_fuse_shape,
lambda n, dc, h, w, c0: tvm.select(
tvm.all((n % d_out) * stride_d - pad_head + dc // fmap_c1 >= 0,
(n % d_out) * \
stride_d - pad_head + dc // fmap_c1 < fmap_d),
fmap(n // d_out, (n % d_out) * stride_d - pad_head + dc // \
fmap_c1, dc % fmap_c1, h*stride_h, w, c0)),
name='fuse_fmap_tensor',
tag=tag + 'fuse_fmap_tensor')
else:
fuse_fmap_tensor = tvm.compute(
fmap_fuse_shape,
lambda n, dc, h, w, c0: tvm.select(
tvm.all((n % d_out) * stride_d - pad_head + dc // fmap_c1 >= 0,
(n % d_out) * \
stride_d - pad_head + dc // fmap_c1 < fmap_d),
fmap(n // d_out, (n % d_out) * stride_d - pad_head + dc // \
fmap_c1, dc % fmap_c1, h, w, c0)),
name='fuse_fmap_tensor',
tag=tag + 'fuse_fmap_tensor')
return fuse_fmap_tensor
def mad_by_load2d(mad_shape, fmap, weight, config, mad_dtype, pads, stride_d,
                  d_out, filter_d):
    """
    calculate mad via the load2d path (1x1x1 kernel, no padding, unit
    stride): the feature map is copied into the cube fractal layout with
    plain reshapes instead of an im2col transform, then fed to mad().
    Parameters
    ----------
    mad_shape : the shape of new tensor
    fmap : the input feature (N, D, C1, H, W, C0 layout)
    weight : the input filter
    config : the MKN config
    mad_dtype : the compute dtype of mad
    Returns
    -------
    new tensor
    """
    fmap_shape = te_util.shape_to_list(fmap.shape)
    batch_size = fmap_shape[0]
    fmap_d = fmap_shape[1]
    fmap_c1 = fmap_shape[2]
    fmap_h = fmap_shape[3]
    fmap_w = fmap_shape[4]
    fmap_c0 = fmap_shape[5]
    # Fold batch and depth together; each (n, d) plane becomes one row block
    # and H*W is flattened into a single matrix axis.
    shape_al1_load2d = (batch_size * fmap_d, fmap_c1, fmap_h * fmap_w, fmap_c0)
    al1_load2d = tvm.compute(
        shape_al1_load2d,
        lambda n, c1, m, c0: fmap(n // fmap_d, n % fmap_d, c1, m // fmap_w, m %
                                  fmap_w, c0),
        name=OP_TAG + "al1_load2d")
    TENSOR_MAP["al1_load2d"] = al1_load2d
    # Tile the flattened H*W axis into mac-M-sized fractal blocks for L0A.
    hw_dim = te_util.int_ceil_div(fmap_h * fmap_w,
                                  CUBE_MKN[fmap.dtype]["mac"][0])
    shape_al0_load2d = (batch_size * fmap_d, hw_dim, fmap_c1,
                        CUBE_MKN[fmap.dtype]["mac"][0], fmap_c0)
    al0_load2d = tvm.compute(
        shape_al0_load2d,
        lambda n, m1, c1, m0, c0: al1_load2d(
            n, c1, m0 + CUBE_MKN[fmap.dtype]["mac"][0] * m1, c0),
        name=OP_TAG + "al0_load2d")
    TENSOR_MAP["al0_load2d"] = al0_load2d
    c_col = mad(mad_shape, al0_load2d, weight, config, mad_dtype, pads,
                stride_d, d_out, fmap_d, filter_d)
    return c_col
def get_load2d_flag(stride, pads, shape_filter_ncdhw):
    """Decide whether the load2d fast path may replace im2col.

    load2d is only legal when the convolution degenerates to a plain copy:
    no padding on any side, every stride equal to 1 and a 1x1x1 kernel.

    Parameters
    ----------
    stride : (stride_d, stride_h, stride_w)
    pads : six paddings (head, tail, top, bottom, left, right)
    shape_filter_ncdhw : filter shape as (n, c, d, h, w)

    Returns
    -------
    bool : True when the load2d path applies.
    """
    _, _, filter_d, filter_h, filter_w = shape_filter_ncdhw
    return (list(pads) == [0] * 6
            and list(stride) == [1, 1, 1]
            and [filter_d, filter_h, filter_w] == [1, 1, 1])
def get_cyclebuffer_flag(tiling, shape_w, w_dtype, channel_c1, stride_d,
                         l0a_load2d_flag):
    """Return True when cyclic buffering of the feature map is worthwhile.

    Parameters
    ----------
    tiling : tiling_new dict (uses the "AL1_shape" entry)
    shape_w : filter shape in NDC1HWC0 order (shape_w[1] is filter_d)
    w_dtype : weight dtype, used to look up the cube K size
    channel_c1 : fmap c1
    stride_d : d channel stride
    l0a_load2d_flag : whether fmap goes through the load2d path

    Returns
    -------
    bool : cyclebuffer flag
    """
    al1_shape = tiling["AL1_shape"]
    filter_d = shape_w[1]
    # Cyclebuffer never applies on the load2d path, and is impossible when
    # consecutive d-windows do not overlap (filter_d <= stride_d).
    if l0a_load2d_flag or filter_d <= stride_d:
        return False
    if not al1_shape:
        # cyc_size stays 0 in this case.
        return filter_d * channel_c1 == 0
    khkw_k0 = shape_w[-3] * shape_w[-2] * CUBE_MKN[w_dtype]['mac'][1]
    cyc_size = int(al1_shape[0] * al1_shape[-1] // khkw_k0)
    # The AL1 tile must hold exactly one full d * c1 cycle of the fmap.
    return cyc_size == filter_d * channel_c1
def im2col_dim(shape_fmap, shape_filter_ncdhw, pads, stride_dhw, config):
    """Compute the im2col matrix shapes for one 2-D slice of the 3-D conv.

    Parameters
    ----------
    shape_fmap : (batch, c1, h, w, c0) feature-map shape
    shape_filter_ncdhw : (n, c, d, h, w) filter shape
    pads : (head, tail, top, bottom, left, right) paddings
    stride_dhw : (stride_d, stride_h, stride_w)
    config : cube config; config['mac'] is the (M, K, N) tuple

    Returns
    -------
    dict with the raw image shape and the MKN-aligned fmap/filter matrix
    dimensions.
    """
    mac_dim = config['mac']

    def _ceil_div(value, factor):
        # Round `value` up to a whole number of `factor`-sized blocks.
        return (value + factor - 1) // factor

    batch, fmap_c1, fmap_h, fmap_w, fmap_c0 = shape_fmap
    filter_cout, _, _, filter_h, filter_w = shape_filter_ncdhw
    _, _, pad_top, pad_bottom, pad_left, pad_right = pads
    # Spatial output extent of the 2-D convolution slice.
    out_h = (fmap_h + pad_top + pad_bottom - filter_h) // stride_dhw[1] + 1
    out_w = (fmap_w + pad_left + pad_right - filter_w) // stride_dhw[2] + 1
    # Reduction (K) axis length of the im2col matrix.
    reduce_dim = fmap_c1 * filter_h * filter_w * fmap_c0
    fmap_matrix_dim = (batch,
                       _ceil_div(out_h * out_w, mac_dim[0]),
                       _ceil_div(reduce_dim, mac_dim[1]),
                       mac_dim[0], mac_dim[1])
    filter_matrix_dim = (_ceil_div(reduce_dim, mac_dim[1]),
                         _ceil_div(filter_cout, mac_dim[2]),
                         mac_dim[2], mac_dim[1])
    return {
        "img_shape": shape_fmap,
        "fmap_matrix_dim": fmap_matrix_dim,
        "filter_matrix_dim": filter_matrix_dim,
        "shape_filter_ncdhw": shape_filter_ncdhw
    }
def mad(mad_shape, fmap, weight, config, mad_dtype, pads, stride_d, d_out,
        fmap_d, filter_d):
    """
    Declare the cube matrix-multiply (mad) stage of the 3-D convolution.

    Fix: removed the unused local `c1khkw` (weight.shape[0] // filter_d),
    which was computed but never referenced.

    Parameters
    ----------
    mad_shape : shape of mad result
    fmap : feature map (im2col / load2d fractal tensor)
    weight : filter fractal tensor
    config: the config of cube
    mad_dtype: dtype of mad output
    pads: input pad
    stride_d: stride for d channel
    d_out: output d channel
    fmap_d: input fmap d channel
    filter_d: input filter d channel
    Returns
    -------
    mad result tensor
    """
    block_size = config['mac'][1]    # K dimension of the cube
    block_size_m = config['mac'][0]  # M dimension of the cube
    pad_head = pads[0]
    # Reduce over the whole K axis: k1 fractal blocks of k0 lanes each.
    axis_k1 = tvm.reduce_axis((0, weight.shape[0]), name='k1')
    axis_k0 = tvm.reduce_axis((0, block_size), name='k0')
    # NOTE(review): int32 also selects 'f162f16' here; looks intentional
    # (mode names the cube conversion), but confirm against the ISA spec.
    if mad_dtype in ["float16", "int32"]:
        mode = 'f162f16'
    else:
        mode = 'f162f32'
    c_col = tvm.compute(
        mad_shape,
        lambda n, index_j1, i, index_j0: tvm.sum(
            (fmap[n, i // block_size_m, axis_k1, i % block_size_m, axis_k0] *
             weight[axis_k1, index_j1, index_j0, axis_k0]).astype(mad_dtype),
            axis=[axis_k1, axis_k0]),
        name='mad1',
        tag=OP_TAG + "c_col",
        attrs={
            'mode': mode,
            'pad_head': pad_head,
            'fmap_d': fmap_d,
            'stride_d': stride_d,
            'd_out': d_out
        })
    return c_col
def bias_add(in_tensor0, in_tensor1):
    """
    calculate conv res + bias in UB
    Parameters
    ----------
    in_tensor0: conv res tensor (NC1HWC0-style layout)
    in_tensor1: bias vector, indexed by absolute output channel
    Returns
    -------
    in_tensor0 + in_tensor1 tensor
    """
    dim_map = {}
    dim_map["out_img_shape"] = te_util.shape_to_list(in_tensor0.shape)
    # NAME_INDEX is a module-level counter used to keep tensor names unique.
    NAME_INDEX[0] += 1
    with tvm.tag_scope('conv_vector_bias_add'):
        c_add_vector = tvm.compute(
            dim_map["out_img_shape"],
            # Bias channel = c1 block (indice[1]) * N + c0 lane (indice[3]);
            # assumes the N cube dimension equals the c0 block size.
            lambda *indice: in_tensor0(*indice) + in_tensor1(indice[
                1] * CUBE_MKN[in_tensor0.dtype]['mac'][2] + indice[3]),
            name='bias_add_vector' + "_cc_" + str(NAME_INDEX[0]))
    return c_add_vector
def remove_pad(res, res_remove_pad_shape):
    """
    remove pad: crop `res` down to its true (unaligned) shape by declaring
    an identity compute over the smaller extent
    Parameters
    ----------
    res: input tensor
    res_remove_pad_shape: true shape
    Returns
    -------
    res_remove_pad tensor
    """
    # NAME_INDEX is a module-level counter used to keep tensor names unique.
    NAME_INDEX[0] += 1
    with tvm.tag_scope('conv_vector_remove_pad'):
        res_tensor = tvm.compute(res_remove_pad_shape,
                                 lambda *indice: res(*indice),
                                 name='remove_pad' + "_cc_" +
                                 str(NAME_INDEX[0]))
    return res_tensor
@tvm.target.generic_func
def conv3d(data, weight, para_dict):
    """
    conv3d compute entry: query a tiling for the given shapes, decide the
    load2d / cyclebuffer fast paths, build the cube dataflow, optionally
    add bias, then strip the H-alignment padding from the result.
    Parameters
    ----------
    data: feature map
    weight: filter
    para_dict: dict of params (pads, strides, filter shape, dtypes, bias)
    Returns
    -------
    tensor : res
    """
    in_dtype = data.dtype
    w_dtype = weight.dtype
    bias_tensor = para_dict["bias_tensor"]
    bias_flag = (bias_tensor is not None)
    pads = para_dict["pads"]
    pad_head, pad_tail, pad_top, pad_bottom, pad_left, pad_right = pads
    pad_d = [pad_head, pad_tail]
    pad_w = [pad_left, pad_right]
    pad_h = [pad_top, pad_bottom]
    stride_dhw = para_dict["stride_dhw"]
    stride_d, stride_h, stride_w = stride_dhw
    shape_filter_ncdhw = para_dict["shape_filter_ncdhw"]
    filter_n, filter_c, filter_d, filter_h, filter_w = shape_filter_ncdhw
    mad_dtype = para_dict["mad_dtype"]
    res_dtype = para_dict["res_dtype"]
    block_size_k = CUBE_MKN[w_dtype]['mac'][1]
    # Round the filter input-channel count up to whole c0 (K) blocks.
    filter_c1 = (filter_c + block_size_k - 1) // block_size_k
    shape_w_ndc1hwc0 = (filter_n, filter_d, filter_c1, filter_h, filter_w,
                        block_size_k)
    fmap_shape_ndc1hwc0 = te_util.shape_to_list(data.shape)
    # Record the query parameters so the schedule phase can reuse them.
    Conv3DParam.tiling_query_param = {
        "fmap_shape_ndc1hwc0": fmap_shape_ndc1hwc0,
        "shape_w_ndc1hwc0": shape_w_ndc1hwc0,
        "in_dtype": in_dtype,
        "w_dtype": w_dtype,
        "res_dtype": res_dtype,
        "mad_dtype": mad_dtype,
        "padw": pad_w,
        "padh": pad_h,
        "padd": pad_d,
        "strideh": stride_h,
        "stridew": stride_w,
        "strided": stride_d,
        "bias_flag": bias_flag,
        "default_tiling": False
    }
    tiling_new = tiling_query(a_shape=fmap_shape_ndc1hwc0,
                              b_shape=shape_w_ndc1hwc0,
                              a_dtype=in_dtype,
                              b_dtype=w_dtype,
                              c_dtype=res_dtype,
                              mad_dtype=mad_dtype,
                              padl=pad_w[0],
                              padr=pad_w[1],
                              padu=pad_h[0],
                              padd=pad_h[1],
                              padf=pad_d[0],
                              padb=pad_d[1],
                              strideh=stride_h,
                              stridew=stride_w,
                              strided=stride_d,
                              bias_flag=bias_flag,
                              op_tag="convolution_3d")
    TENSOR_MAP["tiling_new"] = tiling_new
    # Decide which fast paths apply for this shape/tiling combination.
    l0a_load2d_flag = get_load2d_flag(stride_dhw, pads, shape_filter_ncdhw)
    cyclebuffer_flag = get_cyclebuffer_flag(tiling_new, shape_w_ndc1hwc0,
                                            w_dtype, fmap_shape_ndc1hwc0[2],
                                            stride_d, l0a_load2d_flag)
    TENSOR_MAP["cyclebuffer_flag"] = cyclebuffer_flag
    conv_res = cube_3d_compute(data,
                               weight,
                               mad_dtype,
                               res_dtype,
                               pads,
                               stride_dhw,
                               shape_filter_ncdhw,
                               cyclebuffer_flag,
                               bias=False,
                               tiling=tiling_new)
    res = conv_res
    if bias_flag:
        res = bias_add(conv_res, bias_tensor)
    # Remove H-aligned data in the output shape
    res_remove_pad_shape = list(res.shape)
    res_remove_pad_shape[2] = conv_res.op.attrs['true_shape'][2].value
    res_remove_pad = remove_pad(res, res_remove_pad_shape)
    return res_remove_pad
| [
"jizr@connect.hku.hk"
] | jizr@connect.hku.hk |
5d05881a5a8f294a903f300d5329edd75b5f38ba | d27f9109deedf2b5309eaf51a8e6bfe2b75fcf17 | /djangoapp/views.py | 8d632b654d2aed1c70ef488b2535a2e940ce85d5 | [] | no_license | vccorimanya/django-app | 0b93986f78ae14bb4c85285d0982089eeb6a5abe | fb993150e7d3659456b1ede0cddaae78fcf8fe1a | refs/heads/master | 2023-07-18T12:37:43.075407 | 2021-08-20T19:28:06 | 2021-08-20T19:28:06 | 398,094,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | #Django
from djangoapp.settings import DEBUG
from django.http import HttpResponse
#utilities
from datetime import datetime
import json
def hello_world(request):
    """Respond with the server's current time, e.g. "Mar 3rd, 2024 - 14:05 hrs".

    Fixes: the original hard-coded "th" for every day, producing wrong
    ordinals such as "1th"/"2th"/"3th", and the user-facing message said
    "sever" instead of "server".
    """
    now = datetime.now()
    day = now.day
    if 11 <= day % 100 <= 13:  # 11th, 12th, 13th are irregular ordinals
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(day % 10, 'th')
    time_now = f"{now:%b} {day}{suffix}, {now:%Y} - {now:%H:%M} hrs"
    return HttpResponse(f'Current time server is: {time_now}')
def sorted_numbers(request):
    """Sort the comma-separated integers in ?numbers=... and reply as JSON."""
    raw_values = request.GET['numbers'].split(',')
    values = sorted(int(item) for item in raw_values)
    payload = json.dumps(
        {
            'status': 'ok',
            'numbers': values,
            'message': 'Integer sorted succesfully.',
        },
        indent=4,
    )
    return HttpResponse(payload, content_type="application/json")
def say_hi(request, name, age):
    """Greet *name* as plain text, refusing visitors younger than 12."""
    message = (
        f'Sorry {name},you are not allowed here.'
        if age < 12
        else f'Welcome {name}'
    )
    return HttpResponse(message, content_type="text/plain")
| [
"raulccorimanyaalfaro@gmail.com"
] | raulccorimanyaalfaro@gmail.com |
d4a7b316aa7e13c5cf8deae0242001b381605f9f | 34594886705081059385e50c7b071cc42f3f2c67 | /NewsPaper/NewsPaper/settings.py | 6f01e61face427ca9dc6b02479dda278901c4de6 | [] | no_license | Mistyhops/task-D_2 | 870c1fa72d5fa4232bf654f59006daa35383cd00 | 2dad7d12f7e53a5cd30bf33fffc5d7611bd163c6 | refs/heads/master | 2023-03-17T11:27:53.606278 | 2021-02-24T16:57:18 | 2021-02-24T16:57:18 | 341,962,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,084 | py | """
Django settings for NewsPaper project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '7t12en#e_rsn&jy**r%y8vwb^toc1u(baud14@uu^+^_0rdo62'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'news',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'NewsPaper.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'NewsPaper.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"nedgalkin@gmail.com"
] | nedgalkin@gmail.com |
de3a0c28cc1023aa05a34f5fd437c0431ba35fee | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/42142e465a234cfaa158392bdda680b9.py | 2e0e27725c2144af3babc0a50be01d1f5932c483 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 353 | py | def is_leap_year(year):
if year % 4 == 0:
if year % 100 == 0:
if year % 400 == 0:
return True
return False
return True
return False
"""
on every year that is evenly divisible by 4
except every year that is evenly divisible by 100
unless the year is also evenly divisible by 400
"""
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
a50552a2fa86c9f1ea3036c1f58014cfb6e6b073 | c3d20923c699e7459c4d3deadbdc71de3e9a02c0 | /gta_data_to_tfrecords_simple.py | 16a9bb26b727ecb1bfd3bfba08ac8f05a72c556c | [] | no_license | qzramiz/DeepDrive | bc8f76ee05c7180564c075483f841c81886e845a | 0d8dae48787c1477d1b6c83e9719a3a2f18d93fc | refs/heads/master | 2020-04-11T19:20:33.539945 | 2018-12-16T18:58:58 | 2018-12-16T18:58:58 | 162,030,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,078 | py | # coding: utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/image0.jpeg
data_dir/image1.jpg
...
label_dir/weird-image.jpeg
label_dir/my-image.jpeg
...
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import pandas as pd
import numpy as np
import tensorflow as tf
# Command-line flag controlling how many worker threads preprocess images.
tf.app.flags.DEFINE_integer('num_threads', 1 ,
                            'Number of threads to preprocess the images.')
FLAGS = tf.app.flags.FLAGS
def _float_feature(value):
    """Wrap a float (or list of floats) in a tf.train.Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _int64_feature(value):
    """Wrap an int64 (or list of int64s) in a tf.train.Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(in_example,labels):
    """Build an Example proto for one training sample.
    Args:
      in_example : PNG-encoded image contents as a byte string
      labels: a list containing [steering, throttle, brake, speed]
        (this ordering matches the feature assignments below)
    Returns:
      Example proto
    """
    example = tf.train.Example(features=tf.train.Features(feature={
        'steering': _float_feature(labels[0]),
        'throttle':_float_feature(labels[1]),
        'brake':_float_feature(labels[2]),
        'speed': _float_feature(labels[3]),
        'image': _bytes_feature(tf.compat.as_bytes(in_example))
    }))
    return example
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities and
    holds the label CSV (one row of [steering, throttle, brake, speed]
    per image)."""
    def __init__(self,labels_filename):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()
        # Initializes function that decodes RGB png data.
        self._png_data = tf.placeholder(dtype=tf.string)
        self._decode_png = tf.image.decode_image(self._png_data, channels=3)
        # Row i of the CSV holds the labels for img<i>.png.
        self.csv = pd.read_csv(labels_filename,header=None)
    def decode_png(self,image_data):
        # Despite the name this uses decode_image, which also accepts other
        # encodings; returns an HxWx3 uint8 array.
        return self._sess.run(self._decode_png,feed_dict={self._png_data:image_data})
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
    """Read a single image file and return its raw, still-encoded bytes.

    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder (unused here; kept for interface
        compatibility with callers and the commented-out decode path).

    Returns:
      image_data: string, the encoded file contents, undecoded.
    """
    # Read the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()
    # tf.read_file()
    # sess = tf.Session()
    # image=sess.run(tf.image.decode_png(tf.read_file(filename),channels=3))
    # Decode the RGB JPEG.
    # image = coder.decode_png(image_data)
    # # Check that image converted to RGB
    # assert len(image.shape) == 3
    # height = image.shape[0]
    # width = image.shape[1]
    # assert image.shape[2] == 3
    return image_data
def _process_image_files_batch(coder, thread_index, ranges, name, image_filenames, num_shards, output_directory):
    """Processes and saves a slice of the images as TFRecords in one thread.
    Args:
      coder: instance of ImageCoder holding the label CSV.
      thread_index: integer, index within [0, len(ranges)).
      ranges: list of [start, end) index pairs, one per thread.
      name: string, unique identifier specifying the data set
      image_filenames: string, directory containing img<i>.png files.
      num_shards: integer number of shards for this data set.
      output_directory: directory the shard files are written to.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            # Image i lives at <image_filenames>/img<i>.png; its labels are
            # row i of the coder's CSV.
            orig = image_filenames+'/img'+str(i)+'.png'
            label = coder.csv.values[i].tolist()
            image_buffer = _process_image(orig, coder)
            example = _convert_to_example(image_buffer,label)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()
        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, orig_filenames, label_filenames, num_shards, output_directory):
    """Process and save images as TFRecords of Example protos.
    Args:
      name: string, unique identifier specifying the data set
      orig_filenames: string, directory holding the input img<i>.png files
      label_filenames: string, path of the CSV file (one label row per image)
      num_shards: integer number of shards for this data set.
      output_directory : Directory for output files
    """
    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder(labels_filename = label_filenames)
    print ('labels_length : ',coder.csv.shape[0])
    # Partition the label rows evenly across the worker threads.
    spacing = np.linspace(0, coder.csv.shape[0], FLAGS.num_threads + 1).astype(np.int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i+1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, name, orig_filenames, num_shards, output_directory)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    # NOTE(review): orig_filenames is a directory path string here, so
    # len(orig_filenames) counts characters, not images.
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(orig_filenames)))
    sys.stdout.flush()
def main(orignal_image_folder, label_filename, output_directory, num_shards):
    # Build "train-xxxxx-of-xxxxx" TFRecord shards from the image folder
    # and its CSV label file.
    # orig_img_paths = [os.path.join(orignal_image_folder,im) for im in os.listdir(orignal_image_folder) if os.path.isfile (os.path.join(orignal_image_folder,im))]
    _process_image_files("train", orignal_image_folder, label_filename, num_shards, output_directory)
if __name__ == '__main__':
    # CLI: <input_images_folder> <labels_csv> <output_folder> <num_shards>
    if len(sys.argv) < 5:
        print ("Usage imagesToTfrecords <input_images_folder> <label_images_folder> <output_folder> <num partitions (multiples of 4)>")
    else:
        main(sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]))
# For reading files
# import tensorflow as tf
# import matplotlib.pyplot as plt
# filename = "../Data/tfrecords/cool-00000-of-00004"
# sess = tf.Session()
# for serialized_example in tf.python_io.tf_record_iterator(filename):
# example = tf.train.Example()
# example.ParseFromString(serialized_example)
# # traverse the Example format to get data
# img = example.features.feature['origimage/encoded']
# # get the data out of tf record
# orignal_image_height = example.features.feature['orig/image/height']
# orignal_image_width = example.features.feature['orig/image/width']
# orignal_image_colors = example.features.feature['orig/image/colorspace']
# orignal_image_channels = example.features.feature['orig/image/channels']
# orignal_image_format = example.features.feature['orig/image/format']
# orignal_image_filename = example.features.feature['orig/image/filename']
# orignal_image_data = example.features.feature['orig/image/encoded']
# noisy_image_height = example.features.feature['label/image/height']
# noisy_image_width = example.features.feature['label/image/width']
# noisy_image_colors = example.features.feature['label/image/colorspace']
# noisy_image_channels = example.features.feature['label/image/channels']
# noisy_image_format = example.features.feature['label/image/format']
# noisy_image_filename = example.features.feature['label/image/filename']
# noisy_image_data = example.features.feature['label/image/encoded']
# orignal_image = sess.run(tf.image.decode_jpeg(orignal_image_data.bytes_list.value[0], channels=3))
# noisy_image = sess.run(tf.image.decode_jpeg(noisy_image_data.bytes_list.value[0], channels=3))
# plt.subplot(121)
# plt.title("Image Name : " + str(orignal_image_filename.bytes_list.value[0]) + "\n" +
# "Image Height : " + str(orignal_image_height.int64_list.value[0]) + "\n" +
# "Image Weight : " + str(orignal_image_width.int64_list.value[0]) + "\n" +
# "Image ColourSpace : " + str(orignal_image_colors.bytes_list.value[0]) + "\n" +
# "Image Channels : " + str(orignal_image_channels.int64_list.value[0]) + "\n" +
# "Image format : " + str(orignal_image_format.bytes_list.value[0]) + "\n")
# plt.imshow(orignal_image)
# plt.subplot(122)
# plt.title("Image Name : " + str(noisy_image_filename.bytes_list.value[0]) + "\n" +
# "Image Height : " + str(noisy_image_height.int64_list.value[0]) + "\n" +
# "Image Weight : " + str(noisy_image_width.int64_list.value[0]) + "\n" +
# "Image ColourSpace : " + str(noisy_image_colors.bytes_list.value[0]) + "\n" +
# "Image Channels : " + str(noisy_image_channels.int64_list.value[0]) + "\n" +
# "Image format : " + str(noisy_image_format.bytes_list.value[0]) + "\n")
# plt.imshow(noisy_image)
# plt.show()
# break | [
"noreply@github.com"
] | qzramiz.noreply@github.com |
b75083697cb6e23fef46d711154c9b757a164777 | 65eaa08df32cdb13159a0b3b98a69f5bbd392493 | /BetterBuy/urls.py | b60e3b58c051757972912e03d28c5ece7c507dae | [] | no_license | AKAaayush/MW | 7730b50ccd1a3ddcc26bd516a6b0ce32dd43506f | ca90290d11a6b7261d019866b88057836883e3ce | refs/heads/master | 2020-12-29T13:18:15.161281 | 2020-02-14T03:46:41 | 2020-02-14T03:46:41 | 238,619,449 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | """BetterBuy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from app import views
urlpatterns = [
    path('', views.index, name="index"),
    # path('', views.laptos),
    path('laptos', views.laptos, name="laptos"),
    path('about', views.about, name="about"),
    path('apple', views.apple, name="apple"),
    path('userdetail', views.userdetail, name='userdetail'),
    # NOTE(review): routes without a name= cannot be reversed with
    # {% url %} / reverse(); consider naming them for consistency.
    path('admintable', views.admintable),
    # CRUD
    path('create', views.create, name="create"),
    path('loginvalid', views.loginvalid, name="loginvalid"),
    path('login', views.login, name="login"),
    path('adminentry', views.adminentry, name="adminentry"),
    path('userentry', views.userentry),
    path('edit/<int:id>', views.edit, name="edit"),
    path('update/<int:id>', views.update, name="update"),
    path('delete/<int:id>', views.delete, name="delete"),
    # admin
    path('admindelete/<int:adminid>', views.admindelete),
    path('create1', views.create1),
    path('edit1/<int:adminid>', views.edit1),
    path('update1/<int:adminid>', views.update1),
    path('admin', views.adminlogin),
]
| [
"shresthaaayush81@gmail.com"
] | shresthaaayush81@gmail.com |
b5e435d58d0527b0a10b2c3d2ddb08609b44daa9 | da9c4a9a92d49d2fb2983a54e0f64c2a1ce8aa19 | /symphony/cli/pysymphony/graphql/input/add_image.py | 8a71be0cda0a9d98d166adf96a4a1fc7a8c266e2 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | rohan-prasad/magma | 347c370347724488215a0783504788eac41d8ec7 | 2c1f36d2fd04eae90366cc8b314eaab656d7f8ad | refs/heads/master | 2022-10-14T14:08:14.067593 | 2020-06-11T23:52:03 | 2020-06-11T23:54:27 | 271,671,835 | 0 | 0 | NOASSERTION | 2020-06-12T00:20:23 | 2020-06-12T00:17:39 | null | UTF-8 | Python | false | false | 748 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from functools import partial
from gql.gql.datetime_utils import DATETIME_FIELD
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import DataClassJsonMixin
from gql.gql.enum_utils import enum_field
from ..enum.image_entity import ImageEntity
@dataclass
class AddImageInput(DataClassJsonMixin):
    # NOTE(review): this file is marked "@generated ... Do not Change!" —
    # regenerate it instead of editing the fields by hand.
    entityType: ImageEntity = enum_field(ImageEntity)
    entityId: str
    imgKey: str
    fileName: str
    fileSize: int
    modified: datetime = DATETIME_FIELD
    contentType: str
    category: Optional[str] = None
    annotation: Optional[str] = None
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
03c5c7703d45d7df69e6b66301140f861aecac0e | 332038c76407980256a2f158d96f20ad3238290f | /bbcpex_script_hook.py | 6798eb38a9e25bbbc155f26f4fe03da82fb46dd1 | [] | no_license | SirRouzel/bbtools | 006df8ef121ddd44e83f204e485f9748a8d2d9c0 | 7388d18630d219dab4e1935e6dd8871c19e43aff | refs/heads/master | 2021-01-15T07:53:32.509996 | 2016-09-02T23:25:51 | 2016-09-02T23:25:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | from __future__ import print_function
import frida
import sys
import atexit
def cleanup():
    # Unload the injected frida script.
    # NOTE(review): `atexit` is imported above but this handler is never
    # registered anywhere in this file (no atexit.register(cleanup));
    # confirm whether unloading on exit is intended.
    global script
    script.unload()
session = frida.attach("BBCPEX.exe")
'''
script = session.create_script("""
Interceptor.detachAll()
Interceptor.attach(ptr("%s"), function(args) {
var cmd = Memory.readUInt(args[0]);
var message = "";
if(cmd == 0)
message = Memory.readCString(args[0].add(4));
//if(cmd < 3 && cmd != 2)
if(cmd == 9190)
Memory.writeUint()
if(message != "")
send([args[0].toInt32(),this.context.ecx.toInt32(),Memory.readUInt(args[0]),message]);
});
""" % 0x4D4870)
'''
script = session.create_script("""
Interceptor.detachAll()
Interceptor.attach(ptr("%s"), function(args) {
if(this.context.edi == 0)
{
send(Memory.readCString(this.context.esi.add(4)))
for (var i = 0; i < 0x3B; i++) {
var p = 0x207E0478+ 52*i;
var data = Memory.readUInt(ptr(p));
var s = Memory.readCString(ptr(p+4));
if(data != 0 || s != ""){
send(["upon...",i,data.toString(16),s]);
}
}
}
if(false)
{
send(this.context.esi);
this.context.edi = 0xFFFFF;
}
});
""" % 0x4D4882)
def on_message(message, data):
    """Frida message handler: dump each message's payload to stdout."""
    payload = message["payload"]
    print(payload)
script.on('message', on_message)
script.load()
print("loaded")
# Block until stdin closes so the injected script stays loaded.
sys.stdin.read()
| [
"dantarion@gmail.com"
] | dantarion@gmail.com |
52d4fbd81177b916ffa44311d25f2d7ac9ba02b3 | f60a800a22ca6906c1b5574a02c8346779e1d048 | /DL/kaggle/mnist/dataArgumentation.py | e5af7292b2202adc53d9c8250494934b211fc378 | [] | no_license | supeng0924/DeepLearning-Module | d56fe44fa76e9df4387a9aa26d2600a2c92948f1 | d43cec1074822abc5f3adc3635af5d8f269550d2 | refs/heads/master | 2021-02-14T16:08:15.507012 | 2020-03-07T12:38:39 | 2020-03-07T12:38:39 | 244,818,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | from dataFunction import *
import pandas as pd
import numpy as np
import cv2
def main():
    """Augment every training image and append the results to a CSV.

    For each input image, ImgArgumentation produces 9 augmented variants;
    each output row is [label, pixel...] appended to output_path.
    """
    input_path="data/train.csv"
    output_path="data/argumore.csv"
    x_train, y_train=get_data(input_path)
    x_train_img = converVecToImg(x_train)
    print(x_train_img.shape)
    for i in range(x_train_img.shape[0]):
        if i%1000==0:
            print(i)  # progress marker every 1000 images
        mat = ImgArgumentation(x_train_img[i])
        # Prepend the label column to the 9 augmented rows, then append.
        res = np.ones((9, 1), dtype=np.uint8) * y_train[i]
        ressss = np.c_[res, mat]
        results = pd.DataFrame(ressss)
        results.to_csv(output_path, index=False, header=False, mode='a')
    # # # data verification
    # x_train, y_train = get_data(output_path)
    # showImg(x_train, 10000, y_train)
    # showImg(x_train, 10001, y_train)
    # showImg(x_train, 10002, y_train)
    # showImg(x_train, 10003, y_train)
if __name__ == '__main__':
    # Script entry point: run the augmentation pipeline.
    main()
| [
"supeng819@163.com"
] | supeng819@163.com |
15f2cb2b424e8fec8fc75639a1b731e71c9a1848 | 5383b3232e214e5363618569d4890d8c1170ef87 | /chainer_computational_cost/cost_calculators/cost_calculators.py | 4d28f3b28cd72265c93ac0c38fb6752b9907a3ce | [
"MIT"
] | permissive | suga93/chainer_computational_cost | 7b291a71f8327b255023820ea7a814ee86edbd53 | 50869d3efebdff3e681213b98bebe097460369ab | refs/heads/master | 2020-05-23T03:44:51.389786 | 2019-05-14T14:40:12 | 2019-05-14T14:40:12 | 186,622,433 | 0 | 0 | MIT | 2019-05-14T12:57:12 | 2019-05-14T12:57:12 | null | UTF-8 | Python | false | false | 2,936 | py | from collections import OrderedDict
import importlib
import inspect
import sys
import warnings

import six
# Registry of "active" cost calculators: maps a Chainer Function/FunctionNode
# type (importable in the running Chainer version) to its calculator function.
calculators = OrderedDict()  # active calculators
# Every defined calculator, including those that could not be activated
# because the target Function does not exist in the installed Chainer version
# (keyed by the function's fully qualified name string in that case).
# (Not disclosed to outside, but used by make_details_md.py.)
all_calculators = OrderedDict()
def check_signature(func):
    """Check cost calculator's signature

    Cost calculator has to have the following parameters (names can differ):
    - func
    - in_data
    - **kwargs

    Args:
        func: candidate cost calculator callable.

    Returns:
        bool: ``True`` if ``func`` is callable and accepts exactly two
        positional parameters plus a ``**kwargs`` catch-all.
    """
    if not callable(func):
        return False
    # Version check done with the stdlib instead of the third-party ``six``
    # (``six.PY2`` is just ``sys.version_info[0] == 2``).
    if sys.version_info[0] == 2:
        # Python 2: exactly 2 positional args, no *varargs, must have **kwargs.
        p = inspect.getargspec(func)
        if len(p.args) != 2 or p.varargs is not None or p.keywords is None:
            return False
    else:
        p = inspect.signature(func).parameters
        if len(p) != 3:
            return False
        # The third (last) parameter must be the **kwargs catch-all.
        _, _, kwargs = p.keys()
        if p[kwargs].kind != inspect.Parameter.VAR_KEYWORD:
            return False
    return True
def register(func):
    """A decorator to register cost calculator function (internal use only)

    This registers the function as a cost calculator function for the
    specified type of Chainer Function.

    You can specify the target Chainer Function by the following ways.
    (1) Type of Chainer Function (FunctionNode)
      You can directly pass the type object to the decorator.
      If the type may not exist in some Chainer versions, try the second way.
    (2) Fully qualified name of a Chainer Function.
      chainer-computational-cost tries to import it and registers the cost
      calculator for the Function.
      In case the specified Chainer Function is not found, for example the
      current chainer version doesn't support that Function yet,
      the cost calculator will not be registered (it still appears in
      ``all_calculators``).
      For example, `"chainer.functions.activation.relu.ReLU"`

    args:
        func: Chainer Function (type or fully qualified name string) that you
            want the cost calculator function to be registered for.
    """
    if type(func) is str:
        func_name = func
        try:
            # F.activation.relu.ReLU -> ['F.activation.relu', 'ReLU']
            func_module, func_class = func.rsplit('.', 1)
            m = importlib.import_module(func_module)
            func = getattr(m, func_class)
        except ImportError:
            # Module path does not exist in this Chainer version.
            func = None
        except AttributeError:
            # Module exists but the Function class does not.
            func = None
    else:
        func_name = func.__name__

    def reg(calculator):
        if not check_signature(calculator):
            warnings.warn("cost calculator signature mismatch: {}"
                          .format(func_name))
        elif func is not None:
            # If the function exists
            calculators[func] = calculator
            all_calculators[func] = calculator
        else:
            # register all the defined calculators including those cannot be
            # activated (e.g. chainer in this env is too old)
            all_calculators[func_name] = calculator
        # Return the calculator unchanged so the decorated module-level name
        # keeps referring to the function instead of being rebound to None.
        return calculator
    return reg
| [
"belltailjp@gmail.com"
] | belltailjp@gmail.com |
c7353fa19e9a9036da3f7516e782c9ad790622b3 | 018c4f87b34abfd8bf257efed60f6bcad4dc70c9 | /scrapy/taobao_comment/taobao_comment/spiders/taobao.py | bb28fa1b8175cdddbeb3eda61eeb4b23fdc1318c | [] | no_license | bidcms/crawler | 2881a62bd58db59f380a4f6ba11540cd9ae11a79 | 13bf1f02bc6c6817badf7d162c55169e276725b8 | refs/heads/master | 2023-06-07T10:26:57.520868 | 2021-07-17T17:21:41 | 2021-07-17T17:21:41 | 13,981,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,339 | py | import scrapy
from scrapy.spiders import CrawlSpider
from selenium import webdriver
import re,requests
#构建评论页表
def makeURL(itemId, sellerId, i):
    """Build the Tmall comment-list API URL for one page of ratings.

    Args:
        itemId: item id string extracted from the product page.
        sellerId: seller id string extracted from the product page.
        i: 1-based page number, passed as a string.

    Returns:
        The ``list_detail_rate`` URL for page ``i`` of the given item.
    """
    # Bug fix: the original literal contained the mojibake "¤tPage=" —
    # "&curren" of "&currentPage" had been eaten by HTML-entity decoding
    # ("&curren;" is U+00A4 "¤") — which broke the page query parameter.
    url = ('http://rate.tmall.com/list_detail_rate.htm?itemId='
           + itemId + '&sellerId=' + sellerId + '&currentPage=' + i)
    return url
class taobaospider(CrawlSpider):
    """Crawl Taobao search results for Python books and scrape, for each
    product, its sales count, buyer comments and book attributes.

    NOTE(review): ``GameItem`` is not defined or imported in this file;
    presumably it lives in the project's items.py — confirm the class name.
    """
    name = "taobao"
    start_urls = [
        "https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&q=python%E4%B9%A6%E7%B1%8D&suggest=history_1&_input_charset=utf-8&wq=p&suggest_query=p&source=suggest&sort=sale-desc&bcoffset=0&p4ppushleft=%2C44&s=0"
    ]
    # Append further search-result pages (44 products per page). The comment
    # in the original said 20 pages, but the range below only covers one.
    for i in range(1,2):
        url="https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&q=python%E4%B9%A6%E7%B1%8D&suggest=history_1&_input_charset=utf-8&wq=p&suggest_query=p&source=suggest&sort=sale-desc&bcoffset=0&p4ppushleft=%2C44&s="+str(i*44)
        start_urls.append(url)
    def parse(self, response):
        """Render one search-result page in a real browser and yield a
        Request per product link, carrying its sales count in meta."""
        # The page is JavaScript-rendered, so it is loaded through a
        # Selenium-driven Chrome; change this line for another browser.
        driver=webdriver.Chrome()
        driver.get(response.url)
        driver.implicitly_wait(30)
        driver.refresh()
        driver.implicitly_wait(30)
        html = driver.page_source
        driver.close()
        # Collect product-detail URLs, deduplicated with original order kept.
        list=re.findall('href=\"//detail.tmall.com.*?\"', html)
        linkList = sorted(set(list), key=list.index)
        # Sales figures, e.g. ">123人收货" ("123 people received it").
        # NOTE(review): after deduplication linkList and sales may no longer
        # align index-by-index — verify against a live page.
        sales=re.findall('>[0-9]*人收货', html)
        i=0
        for href in linkList:
            link = 'https:'+href.split('"')[1]
            sale=sales[i].replace('>', '').replace("人收货", "")
            i=i+1
            yield scrapy.Request(url=link, meta={'sales':sale}, callback=self.parse_item)
    def parse_item(self, response):
        """Scrape one product page: sales, comments and book attributes."""
        # Replace with the actual Item class used by this project.
        item = GameItem()
        # Sales count forwarded from parse() via Request.meta.
        item['sales']=response.meta['sales']
        # Extract itemId/sellerId from the page source to build the
        # comment-list API URLs; give up on this item if either is missing.
        try:
            str1 = re.findall('itemId:\".*?\"', response.text)[0]
            itemId = str1.split(' ')[1]
            str2 = re.findall('sellerId:\".*?\"', response.text)[0]
            sellerId = str2.split(' ')[1]
        except:
            return
        # Accumulators for comment texts and their timestamps.
        comments = []
        comment_times = []
        # Fetch comment pages (intended up to 100 pages; the range below
        # covers page 1 only). Stop as soon as a page request fails.
        for i in range(1,2):
            try:
                url_comment = makeURL(itemId, sellerId, str(i))
                page=requests.get(url_comment)
                # No-op attribute access; presumably leftover from debugging.
                page.status_code
                comment = re.findall('\"rateContent\":(\".*?\")', page.text)
                comment_time=re.findall('\"rateDate\":(\".*?\")', page.text)
                comments.extend(comment)
                comment_times.extend(comment_time)
            except:
                break
        # Store the collected comments.
        item['comment'] = comments
        item['comment_time'] = comment_times
        # Book attribute list from the page, joined with '@' as separator.
        details=response.xpath('//ul[@id="J_AttrUL"]/li/text()').extract()
        details="@".join(details)
        details=details+"@"
        # Fill item fields; each label is a Chinese attribute name
        # (书名=title, ISBN编号=ISBN, 作者=author, 定价=list price,
        # 出版社名称=publisher). Missing attributes are simply skipped.
        try:
            item['name']=re.findall('书名:(.*?)@',details)[0].replace('书名:', '').replace("\xa0","")
        except:
            pass
        try:
            item['ISBN']=re.findall('ISBN编号:(.*?)@',details)[0].replace('ISBN编号:', '').replace("\xa0","")
        except:
            pass
        try:
            item['writer']=re.findall('作者:(.*?)@',details)[0].replace('作者:', '').replace("\xa0","")
        except:
            pass
        try:
            item['price']=re.findall('定价:(.*?)@',details)[0].replace('定价:', '').replace("\xa0","")
        except:
            pass
        try:
            item['company'] = re.findall('出版社名称:(.*?)@', details)[0].replace('出版社名称:', '').replace("\xa0","")
        except:
            pass
        yield item
| [
"bidcms@gmail.com"
] | bidcms@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.