index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
12,500 | e33b92e58866e85ec83c316c6ec053995de1d522 | import pandas as pd
from sklearn.datasets import load_wine
wine = pd.read_csv(filepath_or_buffer='wine.data',header=0)
df.columns=['class', 'Alcohol', 'Malic_Acid', 'Ash', 'Alcalinity of Ash', 'Magnesium',
'Total Phenols', 'Flavanoids', 'Nonflavanoid Phenols', 'Proanthocyanins', 'Colour Intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline']
plt.style.use('ggplot')
# Plotting line chart
plt.plot(df['Ash'], color='blue')
plt.title('Ash by Index')
plt.xlabel('Index')
plt.ylabel('Ash')
#creating a Histogram plot
fig, axes = plt.subplots(1, 1, figsize=(5, 5))
axes.hist(df['Ash'], bins=30, color='g', label='Ash')
axes.set_title('Ash')
axes.set_xlabel('Index')
axes.set_ylabel('Ash')
axes.legend()
#creating mulitiple scatter plots on same figure
fig,axes = plt.subplots(1, 1, figsize=(5,5))
axes.scatter(df['Alcohol'], df['Malic_Acid'], s=8, label='Alc_Mali_Ash', color='Blue', marker='^')
axes.set_title('Alco vs Malic_Acid')
axes.set_xlabel('Alcohol')
axes.set_ylabel('Malic_Acid')
axes.legend()
#creating multipe plots on the same axes
fig, axes = plt.subplots(1, 1, figsize=(5, 5))
axes.scatter(df['Malic_Acid'], df['Ash'], alpha=0.9, label='Ash')
axes.scatter(df['Malic_Acid'], df['Alcalinity of Ash'], alpha=0.9, label='Alcalinity of Ash')
axes.scatter(df['Malic_Acid'], df['Magnesium'], alpha=0.9, label='Magnesium')
axes.scatter(df['Malic_Acid'], df['Alcohol'], alpha=0.9, label='Alcohol')
axes.set_title(f'Malic_Acid_comparsion')
axes.set_xlabel('Malic_Acid')
axes.set_ylabel('Ash/Alcalinity of Ash/Magnesium/Alcohol')
axes.legend()
plt.tight_layout()
#creating a Pie plot
fig, axes = plt.subplots(1, 1, figsize=(5, 5))
axes.pie(df['class'].value_counts(), labels=df['class'].value_counts().index.tolist(), autopct='%1.1f%%')
axes.set_title('class')
axes.legend()
#creating a Bar plot
fig, axes = plt.subplots(1, 1, figsize=(5, 5))
axes.bar(np.arange(0, len(df['Ash'])), df['Ash'], color='y', label='Ash')
axes.set_title('Ash')
axes.set_xlabel('Index')
axes.set_ylabel('Ash')
axes.legend()
#creating a Correlation Heatmap plot
fig, axes = plt.subplots(1, 1, figsize=(20, 20))
df['encoded_class']=df['class'].map({'B': 0, 'M': 1})
correlation = df.corr().round(2)
im = axes.imshow(correlation)
cbar = axes.figure.colorbar(im, ax=axes)
cbar.ax.set_ylabel('Correlation', rotation=-90, va="bottom")
numrows = len(correlation.iloc[0])
numcolumns = len(correlation.columns)
axes.set_xticks(np.arange(numrows))
axes.set_yticks(np.arange(numcolumns))
axes.set_xticklabels(correlation.columns)
axes.set_yticklabels(correlation.columns)
plt.setp(axes.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
for i in range(numrows):
for j in range(numcolumns):
text = axes.text(j, i, correlation.iloc[i, j], ha='center', va='center', color='w')
axes.set_title('Heatmap of Correlation of Dimensions')
fig.tight_layout()
# creating a 3D plot
c1 = df[df['class'] == 1]
c2 = df[df['class'] == 2]
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1, projection='3d')
line1 = axes.scatter(c1['Alcohol'], c1['Malic_Acid'], c1['Ash'])
line2 = axes.scatter(c2['Alcohol'], c2['Malic_Acid'], c2['Ash'])
axes.legend((line1, line2), ('c1', 'c2'))
axes.set_xlabel('Alcohol')
axes.set_ylabel('Malic_Acid')
axes.set_zlabel('Ash')
plt.show()
plt.close() |
12,501 | 509eec4a09556408d9785ad9eeffb07ca545d54a | from django.shortcuts import render
from .models import Article
# Create your views here.
def news(request):
    """Render the news page with the Article objects.

    NOTE(review): this passes the bare model manager (``Article.objects``)
    rather than a queryset; presumably the template calls ``.all`` on it —
    confirm, or switch to ``Article.objects.all()`` here.
    """
    news_articles = Article.objects
    return render(request, 'news.html', {'news_articles': news_articles}) |
12,502 | 454f068d4b88865db55d1a8ea9d9b72954d82026 | import pygame
from pygame.locals import *
import sys
import random
# Global variable for game
FPS = 32
SCREENWIDTH = 289
SCREENHEIGHT = 511
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) # initialising display for game
GROUND_Y = SCREENHEIGHT * 0.8
GAME_SPRITES = {} # this is use to store images
GAME_SOUNDS = {} # this is use to store sound
PLAYER = 'gallery/sprites/bird.png'
BACKGROUND = 'gallery/sprites/background.png'
PIPE = 'gallery/sprites/pipe.png'
def welcome_screen():
    """
    It will use to show images on initial screen.

    Blocks in its own event loop: quits on window close / ESC, returns to
    the caller when SPACE or UP is pressed (which starts a round). Relies
    on the module-level SCREEN, GAME_SPRITES, GROUND_Y and FPS_CLOCK
    globals, so it must only be called after the __main__ block has loaded
    the assets.
    """
    # player_position_at_x = int(SCREENWIDTH/5)
    # player_position_at_y = int(SCREENHEIGHT - GAME_SPRITES['player'].get_height())/2 # (H-h)/2
    # NOTE(review): get_height() is used in this horizontal centring —
    # looks like get_width() was intended; confirm.
    message_screen_at_x = int(SCREENWIDTH - GAME_SPRITES['message'].get_height())/2+40
    # 40 is offset value which I have set after running game
    message_screen_at_y = int(SCREENHEIGHT * 0.25)
    base_at_x = 0
    while True:
        for event in pygame.event.get():
            # if user clicks on cross button, close the game
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            # If the user presses space or up key, start the game for them
            elif event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
                return
            else:
                # No relevant input this event: redraw the idle screen.
                SCREEN.blit(GAME_SPRITES['background'], (0, 0))
                # SCREEN.blit(GAME_SPRITES['player'], (player_position_at_x, player_position_at_y))
                SCREEN.blit(GAME_SPRITES['message'], (message_screen_at_x, message_screen_at_y))
                SCREEN.blit(GAME_SPRITES['base'], (base_at_x, GROUND_Y))
                pygame.display.update()
                FPS_CLOCK.tick(FPS)
def start_game():
    """Run one round of the game until the bird crashes.

    Handles input (SPACE / LEFT to flap, ESC / close to quit the program),
    scrolls and recycles the pipe pairs, applies per-frame physics and
    renders the score. Relies on the module-level SCREEN / GAME_SPRITES /
    GAME_SOUNDS / GROUND_Y / FPS_CLOCK globals set up in the __main__
    block. Returns when is_collide() reports a crash.
    """
    score = 0
    player_position_at_x = int(SCREENWIDTH/5)
    player_position_at_y = int(SCREENHEIGHT/2)
    base_position_at_x = 0
    # Two pipe pairs are pre-generated; only their y-offsets are used here.
    new_pipe1 = random_pipe()
    new_pipe2 = random_pipe()
    upper_pipes = [
        {'x': SCREENWIDTH+200, 'y': new_pipe1[0]['y']},
        {'x': SCREENWIDTH+200+(SCREENWIDTH/2), 'y': new_pipe1[0]['y']}
    ]
    lower_pipes = [
        {'x': SCREENWIDTH+200, 'y': new_pipe2[1]['y']},
        {'x': SCREENWIDTH+200+(SCREENWIDTH/2), 'y': new_pipe2[1]['y']}
    ]
    pipe_velocity_at_x = -4        # pipes scroll left 4 px per frame
    player_velocity_y = -9         # current vertical speed of the bird
    player__max_velocity_y = 10    # speed cap used by the gravity check below
    player_min_velocity_y = -8
    # NOTE(review): gravity is negative while the cap is positive, so the
    # velocity drifts downward numerically; behaviour preserved as-is —
    # confirm against the intended physics.
    player_acceleration_y = -1
    player_flap_velocity = -8      # velocity applied on a flap
    is_player_flapped = False
    while True:
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_LEFT):
                if player_position_at_y > 0:
                    player_velocity_y = player_flap_velocity
                    is_player_flapped = True
                    GAME_SOUNDS['wing'].play()
        crash_test = is_collide(player_position_at_x, player_position_at_y, upper_pipes, lower_pipes)
        if crash_test:
            return
        # check for score: one point when the bird's centre passes a pipe's centre
        player_mid_position = player_position_at_x + GAME_SPRITES['player'].get_width()/2
        for pipe in upper_pipes:
            pipe_mid_pos = pipe['x'] + GAME_SPRITES['pipe'][0].get_width()/2
            if pipe_mid_pos <= player_mid_position < pipe_mid_pos + 4:
                score += 1
                print(f'your score is {score}')
                GAME_SOUNDS['point'].play()
        if player_velocity_y < player__max_velocity_y and not is_player_flapped:
            player_velocity_y += player_acceleration_y
        if is_player_flapped:
            is_player_flapped = False
        player_height = GAME_SPRITES['player'].get_height()
        # Move the bird, clamping so it never sinks below the ground line.
        player_position_at_y = player_position_at_y + min(player_velocity_y, GROUND_Y - player_position_at_y - player_height)
        for upper_pipe, lower_pipe in zip(upper_pipes, lower_pipes):
            upper_pipe['x'] += pipe_velocity_at_x
            lower_pipe['x'] += pipe_velocity_at_x
        # adding new pipe just before the leftmost pair leaves the screen
        if 0 < upper_pipes[0]['x'] < 5:
            new_pipe = random_pipe()
            upper_pipes.append(new_pipe[0])
            lower_pipes.append(new_pipe[1])
        # Removing pipe when they are out of display
        if upper_pipes[0]['x'] < -GAME_SPRITES['pipe'][0].get_width():
            upper_pipes.pop(0)
            lower_pipes.pop(0)
        SCREEN.blit(GAME_SPRITES['background'], (0, 0))
        for upper_pipe, lower_pipe in zip(upper_pipes, lower_pipes):
            # BUG FIX: the blit y coordinate previously reused ['x'],
            # drawing every pipe on the diagonal instead of at the
            # vertical offset produced by random_pipe().
            SCREEN.blit(GAME_SPRITES['pipe'][0], (upper_pipe['x'], upper_pipe['y']))
            SCREEN.blit(GAME_SPRITES['pipe'][1], (lower_pipe['x'], lower_pipe['y']))
        SCREEN.blit(GAME_SPRITES['base'], (base_position_at_x, GROUND_Y))
        SCREEN.blit(GAME_SPRITES['player'], (player_position_at_x, player_position_at_y))
        # Render the score as individual digit sprites, centred horizontally.
        my_digits = [int(x) for x in list(str(score))]
        width = 0
        for digit in my_digits:
            width += GAME_SPRITES['numbers'][digit].get_width()
        x_offset = (SCREENWIDTH - width)/2
        for digit in my_digits:
            SCREEN.blit(GAME_SPRITES['numbers'][digit], (x_offset, SCREENWIDTH * 0.12))
            x_offset += GAME_SPRITES['numbers'][digit].get_width()
        pygame.display.update()
        FPS_CLOCK.tick(FPS)
def is_collide(player_position_at_x, player_position_at_y, upper_pipes, lower_pipes):
    # Stub: collision detection is not implemented yet, so the round never ends.
    return False
def random_pipe():
    """
    Generate random position of pipe for upper and lower one's.

    Returns a two-element list [upper pipe dict, lower pipe dict], each
    carrying 'x'/'y' pixel coordinates. Only the lower pipe's y is random;
    the upper pipe's y is derived from it so a gap of roughly `offset`
    pixels separates the pair.
    """
    pipe_height = GAME_SPRITES['pipe'][0].get_height()
    offset = SCREENHEIGHT/3  # approximate vertical gap between the two pipes
    position_for_lower_pipe_at_y = random.randrange(0, int(SCREENHEIGHT - GAME_SPRITES['base'].get_height() - 1.2 * offset))
    # NOTE(review): 10x the screen width spawns pipes far off-screen;
    # similar tutorials use SCREENWIDTH + 10 — confirm this is intended.
    pipe_x = SCREENWIDTH * 10
    position_for_upper_pipe_at_y = pipe_height - position_for_lower_pipe_at_y + offset
    pipe = [
        {'x': pipe_x, 'y': position_for_upper_pipe_at_y},  # upper pipe
        {'x': pipe_x, 'y': position_for_lower_pipe_at_y}   # lower pipe
    ]
    return pipe
if __name__ == '__main__':
pygame.init()
FPS_CLOCK = pygame.time.Clock()
pygame.display.set_caption('Flappy Bird design by Rahul')
# adding number into sprites to blit score on screen
GAME_SPRITES['numbers'] = (
pygame.image.load('gallery/sprites/0.png').convert_alpha(),
pygame.image.load('gallery/sprites/1.png').convert_alpha(),
pygame.image.load('gallery/sprites/2.png').convert_alpha(),
pygame.image.load('gallery/sprites/3.png').convert_alpha(),
pygame.image.load('gallery/sprites/4.png').convert_alpha(),
pygame.image.load('gallery/sprites/5.png').convert_alpha(),
pygame.image.load('gallery/sprites/6.png').convert_alpha(),
pygame.image.load('gallery/sprites/7.png').convert_alpha(),
pygame.image.load('gallery/sprites/8.png').convert_alpha(),
pygame.image.load('gallery/sprites/9.png').convert_alpha()
)
# adding message image in sprite to blit on screen
GAME_SPRITES['message'] = pygame.image.load('gallery/sprites/message.png').convert_alpha()
GAME_SPRITES['base'] = pygame.image.load('gallery/sprites/base.png').convert_alpha()
GAME_SPRITES['player'] = pygame.image.load(PLAYER).convert_alpha()
GAME_SPRITES['background'] = pygame.image.load(BACKGROUND).convert()
GAME_SPRITES['pipe'] = (
pygame.transform.rotate(pygame.image.load(PIPE).convert_alpha(), 180),
pygame.image.load(PIPE).convert_alpha()
)
# loading sound in dictionary
GAME_SOUNDS['die'] = pygame.mixer.Sound('gallery/audio/die.wav')
GAME_SOUNDS['hit'] = pygame.mixer.Sound('gallery/audio/hit.wav')
GAME_SOUNDS['point'] = pygame.mixer.Sound('gallery/audio/point.wav')
GAME_SOUNDS['swoosh'] = pygame.mixer.Sound('gallery/audio/swoosh.wav')
GAME_SOUNDS['wing'] = pygame.mixer.Sound('gallery/audio/wing.wav')
while True:
welcome_screen()
start_game()
|
12,503 | 3891fc2584ff0230f891cf0d4732ce23e2526472 | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 1234))
s.listen(10) # кол-во очереди за соединением
while True:
conn, addr = s.accept()
while True:
data = conn.recv(1024)
if not data: break
else:
conn.send(data)
conn.close() |
12,504 | c175739b838959b6554b4d81886c7c826f6b3b9c | from django.core.management import setup_environ
import settings
setup_environ(settings)
import psycopg2
import os
from datetime import datetime
from utilities.notifiers.Email import Email
from utilities.notifiers.emailNotifier import emailNotifier
def flaggedSessions(curr):
    """Return the names of all sessions that have at least one flag set.

    Parameters
    ----------
    curr : DB-API cursor used to run the query.

    Returns
    -------
    Sorted list of session-name strings.
    """
    # The original query listed f.rfi_night and f.optical_night twice;
    # OR is idempotent, so dropping the duplicates leaves the result set
    # unchanged while simplifying the query.
    q = """
    select s.name
    from pht_sessions as s
    join pht_session_flags as f on f.id = s.flags_id
    where f.thermal_night or
          f.rfi_night or
          f.optical_night or
          f.transit_flat or
          f.guaranteed
    order by s.name
    """
    curr.execute(q)
    # fetchall() yields 1-tuples; unpack each to the bare name.
    return [s for s, in curr.fetchall()]
def writeToFile(sessions):
    """Write the current flagged-session list to flaggedSessions.txt.

    Rotates the two previous snapshots first (flaggedSessions.txt ->
    flaggedSessions_old.txt -> flaggedSessions_older.txt) so that
    checkSessions() can compare against the previous run. File format:
    first line is the session count, then one session name per line.
    """
    # Rotate older snapshots; `mv` failing on a first run is harmless.
    os.system('mv flaggedSessions_old.txt flaggedSessions_older.txt')
    os.system('mv flaggedSessions.txt flaggedSessions_old.txt')
    # Context manager guarantees the file is closed even on write errors.
    with open('flaggedSessions.txt', 'w') as f:
        f.write('%s\n%s' % (len(sessions), '\n'.join(sessions)))
def checkSessions(sessions):
    """Compare the new session list against the previous snapshot.

    Reads the count stored on the first line of flaggedSessions_old.txt
    and, if the number of flagged sessions has DROPPED, sends an alarm
    email with both lists via soundTheAlarm().
    """
    # `with` replaces the manual open/close pair and closes the file even
    # if parsing or the alarm raises.
    with open('flaggedSessions_old.txt', 'r') as f:
        contents = f.readlines()
    numSessions = int(contents[0].replace('\n', ''))
    if numSessions > len(sessions):
        soundTheAlarm(sessions, contents)
def soundTheAlarm(sessions, contents):
    """Email an alarm saying the number of flagged sessions decreased.

    Parameters
    ----------
    sessions : current list of flagged session names.
    contents : raw lines (count followed by names) from the old snapshot.
    """
    body = """
The number of sessions with flags has gone down!
from:
%s
to:
%s
""" % (''.join(contents), '%s\n%s' % (len(sessions), '\n'.join(sessions)))
    # Hard-coded sender/recipients; mail goes out through the GB SMTP relay.
    email = Email(sender = 'dss@gb.nrao.edu'
                , recipients = ['mmccarty@nrao.edu', 'pmargani@nrao.edu']
                , subject = 'PHT Session Flags'
                , body = body
                , date = datetime.now()
                )
    emailN = emailNotifier(smtp = 'smtp.gb.nrao.edu')
    emailN.Send(email)
if __name__ == '__main__':
conn = psycopg2.connect(host = settings.DATABASES['default']['HOST']
, user = settings.DATABASES['default']['USER']
, password = settings.DATABASES['default']['PASSWORD']
, database = settings.DATABASES['default']['NAME']
)
curr = conn.cursor()
sessions = flaggedSessions(curr)
writeToFile(sessions)
checkSessions(sessions)
|
12,505 | 060977d2c50cb4906537464ef984c69907d39eb0 | import cv2 #pip install opencv-python
# Load the cascade
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)# if you have more than one cam change to 1
# To use a video file as input
# cap = cv2.VideoCapture('filename.mp4')
print("Press q If you Want to Exit")
while True:
# Read the frame
ret, frame = cap.read()
# Convert to grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect the faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
# Draw the rectangle around each face
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Display
cv2.imshow('Face Detection', frame)
# Stop if q key is pressed
if cv2.waitKey(10) == ord('q'):
break
# Release the VideoCapture object
cap.release()
# Destroys all the windows we created
cv2.destroyAllWindows()
|
12,506 | 9aae60506928c519789416e365eb914e926ac6b7 | import re
#read files and store lines into a list of strings
with open(r'C:\Users\xrist\OneDrive\Υπολογιστής\two_cities_ascii.txt')as f:
lines=f.readlines()
romain_numbers_list=['I','II','III','IV','V','VI','VII','VIII','IX','X','XI','XII','XIII','XIV','XV','XVI','XVII','XVIII','XIX'
,'XX','XXI','XXII','XXIII','XXIV']
words=[]
for line in lines:
for word in line.split():
if word not in romain_numbers_list:
words.append(''.join(filter(str.isalpha,word)))
words_duplicated=words.copy()
for word1 in words:
words_duplicated.remove(word1)
for word2 in words_duplicated:
if len(word2)+len(word1)==20:
words.remove(word1)
words.remove(word2)
words_duplicated.remove(word2)
print(word1+"-"+word2)
break
|
12,507 | a28d8890d5221775d92cb2b0efd762e1d5c7199d | dicts={
"hola":15,
"chau":65,
"adios":4154,
}
print(list(dicts.keys())) |
12,508 | 768d040a6b969abdc7920f144d28a7b8c26f041d | from django.db import migrations, transaction
def copy_slack_account(apps, schema_editor):
    """Forward data migration: populate BigEmoji.owner from the author.

    Loops until no NULL-owner rows remain, wrapping each pass in its own
    atomic transaction so a huge table is not updated in one giant commit
    (the Migration sets atomic = False to allow this).
    """
    BigEmoji = apps.get_model('bigemoji', 'BigEmoji')
    while BigEmoji.objects.filter(owner__isnull=True).exists():
        with transaction.atomic():
            for row in BigEmoji.objects.filter(owner__isnull=True):
                # Owner mirrors the author's linked slack account.
                row.owner = row.author.slackaccount
                row.save()
def reverse_copy_slack_account(apps, schema_editor):
    """Reverse data migration: null out BigEmoji.owner again.

    Mirror of copy_slack_account: batches the reset in per-pass atomic
    transactions until every owner is NULL.
    """
    BigEmoji = apps.get_model('bigemoji', 'BigEmoji')
    while BigEmoji.objects.filter(owner__isnull=False).exists():
        with transaction.atomic():
            for row in BigEmoji.objects.filter(owner__isnull=False):
                row.owner = None
                row.save()
class Migration(migrations.Migration):
    """Data migration filling BigEmoji.owner from each author's slack account."""
    # Non-atomic so copy_slack_account can commit its own per-batch transactions.
    atomic = False
    dependencies = [
        ('bigemoji', '0006_add_owner'),
    ]
    operations = [
        migrations.RunPython(
            copy_slack_account,
            reverse_copy_slack_account
        )
    ]
|
12,509 | c44411b89821cfa1ed2b1403d5cbbdbdd0bcdb01 | f = open("새 파일.txt",'w') #파이썬 소스코드가 있는 경로가 기본 경로임
f.close() |
12,510 | 4984bcc657c473e377a8a363db156d8c29df4aeb | from scrapy import Spider
from scrapy.http import FormRequest
from scrapy.utils.response import open_in_browser
class QuotesSpider(Spider):
    """Spider that logs in to quotes.toscrape.com before scraping.

    Flow: GET /login (start_urls) -> parse() extracts the CSRF token and
    submits the login form -> scrape_pages() receives the post-login page.
    """
    name = 'quotes'
    start_urls = ('http://quotes.toscrape.com/login',)

    def parse(self, response):
        """Submit the login form, forwarding the page's CSRF token."""
        token = response.xpath('//*[@name="csrf_token"]/@value').extract_first()
        return FormRequest.from_response(response,
                                         formdata={'csrf_token': token,
                                                   'password': 'foobar',
                                                   'username': 'foobar'},
                                         callback=self.scrape_pages)

    def scrape_pages(self, response):
        """Handle the authenticated response (currently just opens a browser)."""
        open_in_browser(response)
        # Complete your code here to scrape the pages that you are redirected to after logging in
        # ....
        # ....
|
12,511 | ef963089e8e2eebda1fad7b5befffbaf31889bd2 |
import ruamel.yaml
import canonical # NOQA
def test_canonical_scanner(canonical_filename, verbose=False):
    """Scan a .canonical YAML file and assert that tokens are produced."""
    with open(canonical_filename, 'rb') as fp0:
        data = fp0.read()
    tokens = list(ruamel.yaml.canonical_scan(data))
    assert tokens, tokens
    if verbose:
        for token in tokens:
            print(token)

# Tells the test harness (test_appliance) which fixture extensions to feed in.
test_canonical_scanner.unittest = ['.canonical']
def test_canonical_parser(canonical_filename, verbose=False):
    """Parse a .canonical YAML file and assert that events are produced."""
    with open(canonical_filename, 'rb') as fp0:
        data = fp0.read()
    events = list(ruamel.yaml.canonical_parse(data))
    assert events, events
    if verbose:
        for event in events:
            print(event)

# Fixture extensions consumed by the test harness.
test_canonical_parser.unittest = ['.canonical']
def test_canonical_error(data_filename, canonical_filename, verbose=False):
    """Load an invalid .data file and require that YAMLError is raised."""
    with open(data_filename, 'rb') as fp0:
        data = fp0.read()
    try:
        output = list(ruamel.yaml.canonical_load_all(data))  # NOQA
    except ruamel.yaml.YAMLError as exc:
        if verbose:
            print(exc)
    else:
        # No exception means the fixture unexpectedly parsed cleanly.
        raise AssertionError('expected an exception')

# Fixture extensions consumed by the harness; '.empty' fixtures are skipped.
test_canonical_error.unittest = ['.data', '.canonical']
test_canonical_error.skip = ['.empty']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals())
|
12,512 | 9ecdef13d6afef4ff81ed05171e54bc71678f7ac | # Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
"""Sent when a channel is created/joined on the client."""
from __future__ import annotations
from typing import TYPE_CHECKING
from ..core.dispatch import GatewayDispatch
from ..objects.guild.channel import Channel
from ..utils.conversion import construct_client_dict
if TYPE_CHECKING:
from typing import List, Tuple
from ..core.dispatch import GatewayDispatch
def channel_create_middleware(
        self,
        payload: GatewayDispatch
) -> Tuple[str, List[Channel]]:
    """|coro|
    Middleware for ``on_channel_creation`` event.

    Parameters
    ----------
    payload : :class:`~pincer.core.dispatch.GatewayDispatch`
        The data received from the ready event.

    Returns
    -------
    Tuple[:class:`str`, List[:class:`~pincer.objects.guild.channel.Channel`]]
        ``"on_channel_creation"`` and a channel.
    """
    # Build the Channel from the raw payload, injecting the client
    # reference so the resulting object can make API calls later.
    return "on_channel_creation", [
        Channel.from_dict(construct_client_dict(self, payload.data))
    ]
def export():
    """Return the middleware entry point used by the event dispatcher."""
    return channel_create_middleware
|
12,513 | 226a91cf43a3fca38edd5ed67de85bcd8309b067 | from .forms import Forms
from .form import Form
__all__ = ['Forms', 'Form'] |
12,514 | 5378c22b3a2c5566f060f911734a3b8c42b191fe | import sys
import math
import numpy as np
class Seidel:
    """Gauss-Seidel iterative solver for a linear system.

    `exercise` is an augmented matrix (one row per equation: coefficients
    followed by the constant term in the last column); `error` is the
    per-variable convergence tolerance. Solving happens immediately on
    construction: a convergence criterion is checked (rows, then columns,
    then Sassenfeld) and iteration only starts if one of them holds.
    """
    def __init__(self, exercise, error):
        self.exercise = exercise  # augmented matrix [A | b]
        self.error = error        # stop once every per-variable delta <= error
        self.start()              # solve as a side effect of construction

    def start(self):
        """Try each convergence criterion in turn; iterate if any passes."""
        if self.verifyLines(self.exercise):
            self.seidel(self.exercise)
        else:
            if self.verifyColumns(self.exercise):
                self.seidel(self.exercise)
            else:
                if self.verifySassenfield(self.exercise):
                    self.seidel(self.exercise)
                else:
                    # Portuguese: "does not converge"
                    print('Não da Convergência')

    def verifyLines(self, matrix):
        """Row criterion: for each row, the sum of the off-diagonal
        coefficients divided by the diagonal one must be < 1."""
        for line, countLines in zip(matrix, range(len(matrix))):
            for number, countColums in zip(line, range(len(line))):
                sums = 0
                if countLines == countColums:
                    # range(len(line)-1) skips the constant (b) column.
                    for countColumsLine in range(len(line)-1):
                        if countColums != countColumsLine:
                            sums += line[countColumsLine]
                    if (sums / number) >= 1:
                        return False
                    break
        return True

    def verifyColumns(self, matrix):
        """Column criterion: per column, the sum of off-diagonal entries
        over the diagonal entry must be < 1."""
        for countColumns in range(len(matrix[0]) -1):
            sums = 0
            for countLines in range(len(matrix)):
                if countLines != countColumns:
                    sums += matrix[countLines][countColumns]
                else:
                    div = matrix[countLines][countLines]
            if (sums / div) >= 1:
                return False
        return True

    def verifySassenfield(self, matrix):
        """Sassenfeld criterion: multipliers start at 1 and are refined
        row by row; every computed ratio must stay < 1."""
        newValueOfMultiplier = []
        countMultiplier = 0
        for countLines in range(len(matrix)):
            newValueOfMultiplier.append(1)
        for countLines in range(len(self.exercise)):
            sums = 0
            for countColumns in range(len(self.exercise[0])-1):
                if countLines != countColumns:
                    sums += (self.exercise[countLines][countColumns] * newValueOfMultiplier[countColumns])
                else:
                    divisor = self.exercise[countLines][countColumns]
            division = sums/divisor
            if division >= 1:
                return False
            else:
                if countLines != (len(self.exercise)-1):
                    newValueOfMultiplier[countMultiplier] = division
                    countMultiplier+=1
        return True

    def verifyStop(self, tableErrorVerify):
        """True once every per-variable delta is within tolerance."""
        for count in range(len(tableErrorVerify)):
            if tableErrorVerify[count] > self.error:
                return False
        return True

    def seidel(self, matrix):
        """Iterate x_i = (b_i - sum_j a_ij * x_j) / a_ii until verifyStop().

        First rewrites each row into the update form `equacao`
        (off-diagonal coefficients negated, the diagonal slot kept as
        None, constant term last, everything scaled by 1/a_ii), then
        repeatedly applies seidelEquation until the deltas pass the
        tolerance test.
        """
        equacao = []
        for linha, count in zip(matrix, range(len(matrix))):
            soma = []
            for countColunaLinha in range(len(linha)):
                if linha[countColunaLinha] is not None:
                    if (countColunaLinha == (len(linha) - 1)):
                        # constant term b_i
                        soma.append(linha[countColunaLinha])
                    else:
                        if count != countColunaLinha:
                            soma.append(linha[countColunaLinha] * -1)
                        else:
                            soma.append(None)  # placeholder for the diagonal slot
            for countSoma in range(len(soma)):
                if soma[countSoma] is not None:
                    soma[countSoma] = (1/linha[count]) * soma[countSoma]
            equacao.append(soma)
        tableValues = [0 for i in range(len(equacao))]
        tableErrorVerify = [0 for i in range(len(equacao))]
        validator = True
        while(validator):
            tableValues, tableErrorVerify = self.seidelEquation(tableValues, equacao)
            if(self.verifyStop(tableErrorVerify)):
                validator = False
                # Portuguese: "final table" (the solution vector)
                print(f' Tabela Final - {tableValues}')
        return True

    def seidelEquation(self, tableValues, equation):
        """Perform one Gauss-Seidel sweep; returns (new values, deltas).

        NOTE(review): `values` drops entry i but `j` still runs over every
        coefficient column, so the pairing of equation[i][j] with
        values[j] (and the None diagonal slot) looks inconsistent — verify
        against a worked example before trusting the results.
        """
        oldTableValues = tableValues[:]
        tableErrorVerify = []
        for i in range(len(equation)):
            linha = 0
            values = tableValues[:]
            values.pop(i)
            for j in range(len(equation[0])-1):
                linha += (equation[i][j] * values[j])
            if equation[i][len(equation[0])-1] is not None:
                linha += equation[i][len(equation[0])-1]
            tableValues[i] = linha
        for i in range(len(tableValues)):
            # Deltas rounded to 5 decimals before the tolerance check.
            tableErrorVerify.append(abs(round(tableValues[i] - oldTableValues[i], 5)))
        print(f'{tableValues} new')
        # Portuguese: "verification table" (per-variable deltas)
        print(f'{tableErrorVerify} - Tabela de Verificação')
        return tableValues, tableErrorVerify |
12,515 | dd263598528cc835b08287e558a631b3dc822110 | import sys
import pwm
def shell():
    """Interactive UART loop reading brightness percentages from stdin.

    Runs on an ESP32 under MicroPython (sys.stdout.write accepting bytes
    is MicroPython behaviour). Each line typed is parsed as an integer
    percentage and applied to both PWM controllers; typing 'repl' stops
    the PWM outputs and returns to the MicroPython REPL.
    """
    sys.stdout.write(b"ESP32 UART brightness control loop...\n")
    sys.stdout.write(b"Type 'repl' to return to micropython REPL.\n")
    # Two PWM channels; args (2, 1000, 0) presumably pin/frequency/initial
    # duty — confirm against the pwm module.
    led_ctrl = pwm.iMacBrightnessPWM(2, 1000, 0)
    imac_ctrl = pwm.iMacBrightnessPWM()
    brightness = 0.2  # initial brightness (20%)
    led_ctrl.set_brightness(brightness)
    imac_ctrl.set_brightness(brightness)
    while True:
        input = sys.stdin.readline().strip()  # NOTE: shadows the builtin input()
        if input == "repl":
            led_ctrl.stop()
            imac_ctrl.stop()
            return
        try:
            # Percent -> fraction in [0, 1].
            brightness = int(input) / 100
            led_ctrl.set_brightness(brightness)
            imac_ctrl.set_brightness(brightness)
        except:
            # Bare except keeps the loop alive on non-numeric input;
            # deliberate best-effort behaviour.
            pass
|
12,516 | 1f542986b06dfe90987e5fb33d6be4dae460e6e5 | """
MODEL
"""
from system.core.model import Model
class Product(Model):
    """Data-access model for the `products` table.

    Every query goes through the framework's ``self.db.query_db`` helper
    with named ``:param`` placeholders, so values are parameterized rather
    than interpolated into the SQL string.
    """
    def __init__(self):
        super(Product, self).__init__()

    def get_all_products(self):
        """Return every product row, newest first."""
        return self.db.query_db("SELECT * FROM products ORDER BY created_at DESC")

    def add_product(self, product):
        """Insert a product dict with keys 'name', 'description', 'price'."""
        query = "INSERT INTO products (name, description, price, created_at, updated_at) VALUES (:name, :description, :price, NOW(), NOW() )"
        data = { 'name': product['name'], 'description': product['description'], 'price': product['price'] }
        return self.db.query_db(query, data)

    def show_product_by_id(self, product_id):
        """Fetch a single product row by primary key."""
        query = "SELECT * FROM products WHERE id = :product_id"
        data = { 'product_id': product_id }
        return self.db.query_db(query, data)

    def update_product_by_id(self, product):
        """Update name/description/price for the row with product['id']."""
        query = "UPDATE products SET name=:name, description=:description, price=:price, updated_at=NOW() WHERE id = :id"
        data = { 'name': product['name'], 'description': product['description'], 'price': product['price'], 'id': product['id'] }
        return self.db.query_db(query, data)

    def delete_product_by_id(self, product):
        """Delete the product with product['id'].

        Bug fix: the Python 2 statement ``print product`` is a syntax
        error on Python 3; the parenthesized single-argument form behaves
        identically on both interpreters.
        """
        print(product)  # debug trace of the incoming payload
        query = "DELETE FROM products WHERE id = :product_id"
        data = { "product_id": product['id'] }
        return self.db.query_db(query, data)
12,517 | 9a25fe77691538ec15311021f54b4d48996fc24f | import requests
import json
from bs4 import BeautifulSoup
import time
url = 'https://kantan.vn/postrequest.ashx'
headers= {'content-type': "application/x-www-form-urlencoded", "accept": "*/*"}
data = {
'm': 'dictionary',
'fn': 'kanji_detail',
}
column_title_map = {
'ý nghĩa': 'mean',
'giải thích': 'explain',
'onyomi': 'onyomi',
'kunyomi': 'kunyomi',
'hình ảnh gợi nhớ': 'memorable_img',
'cách ghi nhớ': 'remember_trick',
'trình độ': 'level',
'số nét': 'stroke_count',
'bộ phận cấu thành': 'constructs',
'ví dụ': 'examples'
}
def fromLabelToColumnTitle(label):
    """Map a Vietnamese section label to its internal column name.

    Lower-cases the label, strips a single trailing ':', and looks the
    result up in column_title_map; unknown labels yield ''.
    """
    normalized = label.lower()
    if normalized[-1] == ':':
        normalized = normalized[:-1]
    return column_title_map.get(normalized, '')
def getYomi(search_block, class_name='ony'):
    """Collect on/kun reading strings from <a> tags of the given class.

    Returns [] when no matching anchors exist.
    """
    anchors = search_block.findAll('a', class_=class_name)
    if not anchors:
        return []
    return [anchor.getText() for anchor in anchors]
def getExamples(search_block):
    """Collect example phrases from the block's <ul class="kanji-search-ul">.

    Returns the text of every <li>, or [] when the list is absent/empty.
    """
    ul_element = search_block.find('ul', class_='kanji-search-ul')
    if ul_element is None:
        return []
    items = ul_element.findAll('li')
    if not items:
        return []
    return [item.getText() for item in items]
def getConstructs(search_block):
    """Extract the component radicals listed in a kanji detail block.

    The components live as <span> tags inside the SECOND <li> of the
    block's list; each span's text plus its title attribute (the Han-Viet
    reading) form one entry. Returns a list of {'text', 'hanviet'} dicts,
    or [] when the expected structure is missing.
    """
    ul_element = search_block.find('ul', class_='kanji-search-ul')
    if ul_element is None:
        return []
    items = ul_element.findAll('li')
    if len(items) < 2 or items is None:
        return []
    spans = items[1].findAll('span')
    if spans is None or len(spans) == 0:
        return []
    return [{'text': span.getText(), 'hanviet': span.get('title')} for span in spans]
def getImage(search_block):
    """Return the src URL of the first <img> in the block, or '' if none."""
    img = search_block.find('img')
    return '' if img is None else img.get('src')
def getText(search_block):
    """Return the text of the first <p> in the block, or '' if none."""
    paragraph = search_block.find('p')
    return '' if paragraph is None else paragraph.getText()
kanjis = []
for i in range(2, 4000):
data['id'] = i
response = requests.post(url, data= data, headers=headers)
response_json = json.loads(response.text)
response_content = response_json['Content']
if response_content == '':
continue
kanji_obj = {
'kanji_text': '',
'hanviet': '',
'onyomis': [],
'kunyomis': [],
'examples': [],
'constructs': [],
'image_url': '',
'mean': '',
'explain': '',
'remember_trick': '',
'level': '',
'stroke_count': ''
}
soup = BeautifulSoup(response_content, 'html.parser')
kanji_header = soup.find('div', class_='kanji-search-header')
if kanji_header is None:
continue
kanji_text_element = kanji_header.find('span', class_='qqq')
hanviet_element = kanji_header.find('span', class_='qqe')
if kanji_text_element is None or hanviet_element is None:
continue
kanji_obj['kanji_text'] = kanji_text_element.getText()
kanji_obj['hanviet'] = hanviet_element.getText()
kanji_search_blocks = soup.findAll('div', class_='kanji-search-block')
for block in kanji_search_blocks:
label = block.find('label')
if label is None:
continue
label_text = label.getText()
if label_text is None or label_text == '':
continue
column_title = fromLabelToColumnTitle(label_text)
if column_title == '':
continue
if column_title == 'onyomi':
kanji_obj['onyomis'] = getYomi(block, class_name='ony')
elif column_title == 'kunyomi':
kanji_obj['kunyomis'] = getYomi(block, class_name='kuny')
elif column_title == 'examples':
kanji_obj['examples'] = getExamples(block)
elif column_title == 'constructs':
kanji_obj['constructs'] = getConstructs(block)
elif column_title == 'memorable_img':
kanji_obj['image_url'] = getImage(block)
else:
kanji_obj[column_title] = getText(block)
kanjis.append(kanji_obj)
time.sleep(15)
with open('kanji.json', 'w', encoding='utf-8') as f:
json.dump(kanjis, f, ensure_ascii=False) |
12,518 | 56d9bc24789139292f4dc08edf656544a05de0d1 | from django.db import connection, transaction, utils
from core.utils.transform import from_dict_list_to_gen, from_csv_file_to_gen
from core.utils.csv_helpers import gen_to_csv
from django.conf import settings
from postgres_copy import CopyManager
from io import StringIO
import itertools
import csv
import random
import math
import os
import json
import logging
logger = logging.getLogger('app')
def execute(sql):
    """Execute a raw SQL statement inside its own transaction.

    Errors are logged rather than re-raised; the atomic block guarantees
    any partial work is rolled back when the statement fails.
    """
    with connection.cursor() as curs:
        try:
            with transaction.atomic():
                curs.execute(sql)
        except Exception as e:
            logger.error("Database - Execute error: {}".format(e))
def create_gen_from_csv_diff(original_file_path, new_file_path):
    """Yield the header plus each row of new_file absent from original_file.

    Deliberately brute force: for every line of the new CSV the whole
    original file is re-read and scanned line by line (O(n*m)), trading
    CPU time for low memory — see the inline comments about not caching
    the original in a list. Each yielded value is a parsed CSV row (list
    of strings).
    """
    # NOTE(review): new_reader below is created but never consumed — the
    # rows are actually read again via readlines() further down.
    new_file = open(new_file_path, 'r')
    new_reader = csv.reader(new_file, delimiter=',', quotechar='"', doublequote=True,
                            quoting=csv.QUOTE_ALL, skipinitialspace=True)
    logger.debug(" * Beginning CSV diff process.")
    # *** if you want to speed this up, open the file and put the original_reader into a List
    # I'm not doing so because I don't have confidence that the server can handle 10+ million rows in Memory
    # original_reader = list(csv.reader(open(original_file_path, 'r'))
    cursor = 0
    count = -1  # offset for headers
    # iterate through each csv row
    # for new_row in new_reader:
    # # pass headers first
    with open(new_file_path, 'r') as nf:
        new_content = nf.readlines()
    for new_row in new_content:
        # The header line (count == -1) is always passed through first.
        if count == -1:
            count = count + 1
            yield list(csv.reader(StringIO(new_row), delimiter=',', quotechar='"',
                                  doublequote=True, quoting=csv.QUOTE_ALL, skipinitialspace=True))[0]
            continue
        found = False
        # search for csv row in old file
        # original_reader = csv.reader(open(original_file_path, 'r'), delimiter=',', quotechar='"',
        # doublequote=True, quoting=csv.QUOTE_ALL, skipinitialspace=True)
        # for original_row in original_reader:
        #
        with open(original_file_path, 'r') as of:
            original_content = of.readlines()
        for original_row in original_content:
            # Raw string comparison — any formatting difference counts as new.
            if new_row == original_row:
                found = True
                break
            cursor = cursor + 1
        # if cursor % settings.BATCH_SIZE == 0:
        logger.debug("Diff cursor at: {}".format(cursor))
        if not found:
            count = count + 1
            if count % settings.BATCH_SIZE == 0:
                logger.debug('Performed csv diff on {} records'.format(count))
            yield list(csv.reader(StringIO(new_row), delimiter=',', quotechar='"',
                                  doublequote=True, quoting=csv.QUOTE_ALL, skipinitialspace=True))[0]
def write_gen_to_temp_file(gen_rows):
    """Drain a row generator (header first) into a fresh temp CSV file.

    Returns the path of the file written; the caller is responsible for
    deleting it.
    """
    # Bug fix: the conditional expression previously parsed as
    #   (prefix + '.mock') if settings.TESTING else '.csv'
    # so outside of tests the whole filename collapsed to just '.csv'.
    # Computing the extension separately keeps the random prefix in both
    # modes (matching the pattern used in bulk_insert_from_file).
    extension = '.mock' if settings.TESTING else '.csv'
    temp_file_path = os.path.join(settings.MEDIA_TEMP_ROOT,
                                  'set_diff' + str(random.randint(1, 10000000)) + extension)
    headers = iter(next(gen_rows))
    with open(temp_file_path, 'w') as temp_file:
        writer = csv.writer(temp_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL, skipinitialspace=True)
        writer.writerow(headers)
        for row in gen_rows:
            writer.writerow(row)
    return temp_file_path
def seed_from_csv_diff(original_file_path, new_file_path, model, **kwargs):
    """
    takes new file, filters it down in size, adds to Set()
    takes old file, adds to Set()
    saves to temporary file for read to avoid high memory usage
    Diff Set() = New file Set() - Old file Set()
    - preserves new records
    - preserves altered/updated records
    - removes duplicate, non updated records
    seeds Diff Set() in batches
    """
    original_diff_set = set()
    new_diff_set = set()
    new_file = open(new_file_path, 'r')
    headers = new_file.readline().replace('\n', '').split(',')
    new_reader = model.update_set_filter(csv.reader(new_file), headers)
    original_file = open(original_file_path, 'r')
    original_reader = csv.reader(original_file)
    next(original_reader, None)  # skip the original file's header row
    logger.debug(" * Beginning CSV diff process.")
    # Rows are serialized to JSON strings so they can live in a set.
    for row in new_reader:
        new_diff_set.add(json.dumps(row))
    for row in original_reader:
        original_diff_set.add(json.dumps(row))
    diff = new_diff_set - original_diff_set
    # Bug fix: the ternary previously swallowed the directory and random
    # prefix whenever settings.TESTING was False, yielding a bare '.csv'
    # path; the extension is now computed separately.
    extension = '.mock' if settings.TESTING else '.csv'
    temp_file_path = os.path.join(settings.MEDIA_TEMP_ROOT,
                                  'set_diff' + str(random.randint(1, 10000000)) + extension)
    with open(temp_file_path, 'w') as temp_file:
        writer = csv.writer(temp_file, delimiter=',')
        writer.writerow(headers)
        for row in diff:
            writer.writerow(json.loads(row))
    diff_gen = from_csv_file_to_gen(temp_file_path, kwargs['update'])
    logger.debug(" * Csv diff completed, beginning batch upsert.")
    batch_upsert_from_gen(model, diff_gen, settings.BATCH_SIZE, **kwargs)
    # Clean up the temp file, then fire the optional completion callback.
    if os.path.isfile(temp_file_path):
        os.remove(temp_file_path)
    if 'callback' in kwargs and kwargs['callback']:
        kwargs['callback']()
def bulk_insert_from_file(model, file_path, **kwargs):
    """Clean a source CSV into a temp file, bulk-load it into `model`'s table,
    then remove the temp file and fire the optional `callback` kwarg."""
    table_name = model._meta.db_table
    logger.debug('creating temp csv with cleaned rows and seeding...')
    # create new csv with cleaned rows
    temp_file_extension = '.mock' if settings.TESTING else '.csv'
    temp_file_path = os.path.join(
        settings.MEDIA_TEMP_ROOT,
        'clean_csv_' + str(random.randint(1, 10000000)) + temp_file_extension)
    # Idiom: dict.get() replaces the membership-test-plus-subscript pattern.
    rows = model.transform_self_from_file(file_path, update=kwargs.get('update'))
    logger.debug("writing temp file for {} at {}".format(table_name, temp_file_path))
    gen_to_csv(rows, temp_file_path)
    logger.debug("temp file complete for {}".format(table_name))
    copy_file(model, file_path=temp_file_path, **kwargs)
    if os.path.isfile(temp_file_path):
        os.remove(temp_file_path)
    if kwargs.get('callback'):
        kwargs['callback']()
def copy_file(model, file_path=None, **kwargs):
    """Bulk-load a CSV into `model`'s table via COPY, falling back to batch
    upserts if the COPY fails for any reason.

    DEAD-CODE FIX: the old body opened the file, read its header, and built a
    COPY statement that was never used (copy_insert_from_csv recomputes both).
    """
    try:
        copy_insert_from_csv(model._meta.db_table, file_path, **kwargs)
    except Exception as e:
        logger.warning("Database - Bulk Import Error - beginning Batch seeding. Error: {}".format(e))
        rows = from_csv_file_to_gen(file_path, kwargs['update'])
        batch_upsert_from_gen(model, rows, settings.BATCH_SIZE, **kwargs)
def copy_insert_from_csv(table_name, temp_file_path, **kwargs):
    """COPY a CSV file into `table_name`, optionally clearing the table first
    (kwargs['overwrite']) and recording row counts on kwargs['update']."""
    with open(temp_file_path, 'r') as temp_file:
        # First line holds the column names; strip quotes/newline for COPY.
        columns = temp_file.readline().replace('"', '').replace('\n', '')
        sql = copy_query(table_name, columns)
        with transaction.atomic():
            if kwargs.get('overwrite'):
                logger.debug('Overwriting table...')
                connection.cursor().execute('DELETE FROM {};'.format(table_name))
            logger.debug("* Beginning Bulk CSV copy.")
            # The file is already positioned past the header line.
            connection.cursor().copy_expert(sql, temp_file)
            logger.debug(" * Bulk CSV copy completed successfully.")
    if kwargs.get('update'):
        # BUG FIX: the counting reader's file handle was never closed.
        with open(temp_file_path, 'r') as count_file:
            reader = csv.reader(count_file)
            next(reader, None)  # skip headers
            kwargs['update'].rows_created = sum(1 for row in reader)
        kwargs['update'].save()
    if os.path.isfile(temp_file_path):
        os.remove(temp_file_path)
def upsert_query(table_name, row, primary_key, ignore_conflict=False):
    """Build a parameterized INSERT ... ON CONFLICT statement from one row.

    `row` supplies the column names (values become %s placeholders). With
    ignore_conflict=True the conflict action is DO NOTHING, otherwise every
    column is updated from EXCLUDED.
    """
    columns = list(row.keys())
    if ignore_conflict:
        conflict_action = "DO NOTHING"
    else:
        conflict_action = "DO UPDATE SET {}".format(
            ', '.join('{0}= EXCLUDED.{0}'.format(column) for column in columns))
    template = ("INSERT INTO {table_name} ({fields}) VALUES ({values}) "
                "ON CONFLICT ({primary_key}) {conflict_action};")
    return template.format(
        table_name=table_name,
        fields=', '.join(columns),
        values=', '.join(['%s'] * len(columns)),
        primary_key=primary_key,
        conflict_action=conflict_action)
def insert_query(table_name, row):
    """Build a plain parameterized INSERT statement from one row's columns."""
    return "INSERT INTO {table_name} ({fields}) VALUES ({values})".format(
        table_name=table_name,
        fields=', '.join(row.keys()),
        values=', '.join('%s' for _ in row))
def update_query(table_name, row, primary_key):
    """Build a parameterized UPDATE statement.

    `primary_key` is a comma-separated column list (e.g. "bbl, version");
    the WHERE clause ANDs an equality test for each key column.
    """
    set_clause = ', '.join('{key} = %s'.format(key=key) for key in row)
    where_clause = ' AND '.join(
        '{key} = %s'.format(key=key) for key in primary_key.split(', '))
    return 'UPDATE {table_name} SET {fields} WHERE({pk});'.format(
        table_name=table_name, fields=set_clause, pk=where_clause)
def copy_query(table_name, columns):
    """Build a PostgreSQL COPY ... FROM STDIN statement for the given columns."""
    template = 'COPY {table_name} ({fields}) FROM STDIN WITH (format csv)'
    return template.format(table_name=table_name, fields=columns)
def build_row_values(row):
    """Return the row's values as a tuple, mapping empty strings to None
    (so they land as SQL NULL rather than empty text)."""
    return tuple(value if value != '' else None for value in row.values())
def build_pkey_tuple(row, pkey):
    """Return the values of `row` for a comma-separated primary-key string.

    `pkey` is e.g. "id" or "bbl, version"; values come back in key order.
    Idiom: tuple comprehension replaces the manual tuple-concatenation loop.
    """
    return tuple(row[key] for key in pkey.split(', '))
def batch_upsert_from_gen(model, rows, batch_size, **kwargs):
    """Consume `rows` (a generator) in batches of `batch_size`, upserting each.

    Optional kwargs:
        update          tracking object whose counters the batch helpers maintain
        ignore_conflict use ON CONFLICT DO NOTHING instead of DO UPDATE
        callback        called once after all batches complete
    """
    update = kwargs.get('update')
    ignore_conflict = kwargs.get('ignore_conflict')
    with connection.cursor() as curs:
        try:
            count = 0
            while True:
                batch = list(itertools.islice(rows, 0, batch_size))
                if not batch:
                    logger.info("Database - Batch upserts completed for {}.".format(model.__name__))
                    if kwargs.get('callback'):
                        kwargs['callback']()
                    break
                with transaction.atomic():
                    logger.debug("Seeding next batch for {}.".format(model.__name__))
                    batch_upsert_rows(model, batch, batch_size, update=update, ignore_conflict=ignore_conflict)
                # BUG FIX: count the actual batch length; the final batch is
                # usually smaller than batch_size, so the old count drifted.
                count += len(batch)
                logger.debug("Rows touched: {}".format(count))
        except Exception as e:
            logger.warning("Unable to batch upsert: {}".format(e))
            raise
# ignore_conflict=True means DO NOTHING on conflict; False means update on conflict.
def batch_upsert_rows(model, rows, batch_size, update=None, ignore_conflict=False):
    """Insert/update many rows in a single transaction.

    All rows must share the same column set (the query is built from rows[0]).
    On any failure the whole batch is retried row-by-row via
    upsert_single_rows so one bad record cannot sink the rest.

    FIX: the docstring used to sit after two statements, where it is a bare
    string expression rather than the function's docstring.
    """
    table_name = model._meta.db_table
    primary_key = model._meta.pk.name
    rows_length = len(rows)
    with connection.cursor() as curs:
        try:
            starting_count = model.objects.count()
            with transaction.atomic():
                curs.executemany(
                    upsert_query(table_name, rows[0], primary_key, ignore_conflict=ignore_conflict),
                    tuple(build_row_values(row) for row in rows))
            if update:
                # Rows that grew the table were created; the remainder of the
                # batch must have taken the ON CONFLICT update path.
                rows_created = model.objects.count() - starting_count
                update.rows_created = update.rows_created + rows_created
                update.rows_updated = update.rows_updated + (rows_length - rows_created)
                update.save()
        except Exception as e:
            logger.info('Database - error upserting rows. Doing single row upsert. - Error: {}'.format(e))
            upsert_single_rows(model, rows, update=update, ignore_conflict=ignore_conflict)
def upsert_single_rows(model, rows, update=None, ignore_conflict=False):
    """Upsert `rows` one at a time, logging and skipping records that fail.

    Fallback path for batch_upsert_rows. Each success counts toward both
    rows_created and rows_updated on the `update` tracker (the single-row
    path cannot tell the two apart); the tracker is flushed every
    BATCH_SIZE successes and once more at the end.
    """
    table_name = model._meta.db_table
    primary_key = model._meta.pk.name
    succeeded = 0  # total successful upserts
    flushed = 0    # successes already persisted to the update tracker
    for row in rows:
        try:
            with connection.cursor() as curs:
                with transaction.atomic():
                    curs.execute(upsert_query(table_name, row, primary_key, ignore_conflict=ignore_conflict),
                                 build_row_values(row))
            succeeded += 1
            if succeeded % settings.BATCH_SIZE == 0:
                logger.debug("{} - seeded {}".format(table_name, succeeded))
                if update:
                    # BUG FIX: only add the delta since the last flush. The old
                    # code re-added the cumulative total on every flush (and
                    # again at the end), double-counting rows.
                    delta = succeeded - flushed
                    update.rows_created = update.rows_created + delta
                    update.rows_updated = update.rows_updated + delta
                    update.save()
                    flushed = succeeded
        except Exception as e:
            logger.error("Database Error * - unable to upsert single record. Error: {}".format(e))
            continue
    if update:
        delta = succeeded - flushed
        update.rows_created = update.rows_created + delta
        update.rows_updated = update.rows_updated + delta
        update.save()
# https://djangosnippets.org/snippets/1400/
import time
import traceback
import logging
import sys
class Status(object):
    """Progress tracker for queryset_foreach.

    Callers set `total`, call start(), advance `cur_idx`, and call finished().
    `failed_ids` collects the ids of items whose callback raised.
    """

    def __init__(self):
        self.num_successful = 0
        self.failed_ids = []
        self.done = False
        self.cur_idx = 0

    def __repr__(self):
        # total/cur_idx may not be set yet; fall back to '-'.
        return u'<Status: %s/%s, %s failed>' % (
            getattr(self, 'cur_idx', '-'),
            getattr(self, 'total', '-'),
            self.num_failed)

    @property
    def num_failed(self):
        return len(self.failed_ids)

    def start(self):
        self.start_time = time.time()

    def finished(self):
        self.cur_idx = self.total
        self.done = True
        self.end_time = time.time()

    @property
    def rate(self):
        """Items processed per second; 0.0 until any time has elapsed."""
        end_time = self.end_time if self.done else time.time()
        elapsed = end_time - self.start_time
        # BUG FIX: guard ZeroDivisionError when called immediately after
        # start() (elapsed can be exactly 0 on coarse clocks).
        if elapsed <= 0:
            return 0.0
        return self.cur_idx / elapsed

    @property
    def time_left(self):
        """Estimated seconds remaining at the current rate (0 if unknown)."""
        rate = self.rate
        if rate == 0:
            return 0
        # FIX: use the rate computed above instead of recomputing self.rate.
        return (self.total - self.cur_idx) / rate
def progress_callback(status):
    """Default per-batch reporter for queryset_foreach: prints and logs progress.

    FIX: the two branches duplicated identical print/log calls; only the
    message prefix differs, so compute it once.
    """
    message = '%d/%d failed=%d, rate~%.2f per second, left~%.2f sec \r' % (
        status.cur_idx, status.total, status.num_failed, status.rate, status.time_left)
    prefix = "DONE!" if status.done else "Progress"
    message = "{} - {}".format(prefix, message)
    print(message)
    logger.debug(message)
def queryset_foreach(queryset, f, batch_size=1000,
                     progress_callback=progress_callback, transaction=True):
    '''
    Call a function for each element in a queryset (actually, any list).
    Features:
    * stable memory usage (thanks to Django paginators)
    * progress indicators
    * wraps batches in transactions
    * can take managers or even models (e.g., Assertion.objects)
    * warns about DEBUG.
    * handles failures of single items without dying in general.
    * stable even if items are added or removed during processing
    (gets a list of ids at the start)
    Returns a Status object, with the following interesting attributes
    total: number of items in the queryset
    num_successful: count of successful items
    failed_ids: list of ids of items that failed
    '''
    from django.conf import settings
    if settings.DEBUG:
        logger.debug('Warning: DEBUG is on. django.db.connection.queries may use up a lot of memory.')
    # Get querysets corresponding to managers
    from django.shortcuts import _get_queryset
    queryset = _get_queryset(queryset)
    # Get a snapshot of all the ids that match the query
    logger.debug('qs4e: Getting list of objects')
    ids = list(queryset.values_list(queryset.model._meta.pk.name, flat=True))
    # Initialize status
    status = Status()
    status.total = len(ids)
    def do_all_objects(objects):
        # NOTE(review): this import shadows the `transaction` parameter, so
        # every batch is wrapped in a transaction regardless of the flag (the
        # conditional wrapping below is commented out).
        from django.db import transaction
        with transaction.atomic():
            for id, obj in objects.items():
                try:
                    f(obj)
                    status.num_successful += 1
                except Exception as e: # python 2.5+: doesn't catch KeyboardInterrupt or SystemExit
                    logger.error(e)
                    status.failed_ids.append(id)
    # if transaction:
    #     # Wrap each batch in a transaction
    #     with transaction.atomic():
    #         do_all_objects = transaction.commit_on_success(do_all_objects)
    from django.core.paginator import Paginator
    paginator = Paginator(ids, batch_size)
    status.start()
    progress_callback(status)
    for page_num in paginator.page_range:
        status.page = page = paginator.page(page_num)
        # start_index() is 1-based; cur_idx is 0-based.
        status.cur_idx = page.start_index() - 1
        progress_callback(status)
        # in_bulk fetches the page's objects in one query, keyed by pk.
        objects = queryset.in_bulk(page.object_list)
        do_all_objects(objects)
    status.finished()
    progress_callback(status)
    return status
|
12,519 | 8dc998e73a58ec1e1e41163e107e020ae7209419 | #!/usr/bin/env python3
"""
Calculate gene lengths for each gene from an annotation
"""
__author__ = "Scott Teresi"
import argparse
import os
import numpy as np
import pandas as pd
import sys
from gene_lengths import import_genes
from count_matrix import import_count_matrix
from fpkm import calc_fpkm
def process(gene_annotation, count_matrix, selection, output_dir):
    """Merge per-gene exon lengths with a count matrix, compute FPKM values,
    and write them to `<selection>_FPKM_Out.tsv` in `output_dir`."""
    gene_lengths = import_genes(gene_annotation)
    counts = import_count_matrix(count_matrix, selection)

    # Join on the GSVIV gene identifier, drop bookkeeping columns, and index
    # the frame by gene id.
    merged = (
        pd.merge(gene_lengths, counts, on="GSVIV")
        .drop(columns=["Chromosome", "PAC_ID"])
        .set_index("GSVIV")
    )

    # pop() both extracts the lengths column and removes it from the counts.
    exon_lengths = merged.pop("Total_Exon_Length")
    fpkm = pd.DataFrame(
        calc_fpkm(merged, exon_lengths),
        columns=merged.columns,
        index=merged.index,
    )
    fpkm.to_csv(os.path.join(output_dir, selection + "_FPKM_Out.tsv"), sep="\t")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="calculate gene lengths")
    # NOTE(review): this is the script FILE path, not its directory; the
    # os.path.join default below therefore treats the file name itself as a
    # path component. Verify whether os.path.dirname(__file__) was intended.
    path_main = os.path.abspath(__file__)
    parser.add_argument("genes_input_file", type=str, help="parent path of gene file")
    parser.add_argument("count_matrix", type=str, help="parent path of counts file")
    # NOTE(review): help text is missing its closing parenthesis.
    parser.add_argument(
        "selection", type=str, help="type of counts file (specific to grape analyses"
    )
    parser.add_argument(
        "--output_dir",
        "-o",
        type=str,
        default=os.path.join(path_main, "../../../../", "Grape_Data/Results"),
        help="parent directory to output results",
    )
    args = parser.parse_args()
    # Normalise user-supplied paths to absolute form before processing.
    args.genes_input_file = os.path.abspath(args.genes_input_file)
    args.count_matrix = os.path.abspath(args.count_matrix)
    args.selection = str(args.selection)
    args.output_dir = os.path.abspath(args.output_dir)
    process(args.genes_input_file, args.count_matrix, args.selection, args.output_dir)
|
12,520 | c3327320e2500058ce587752752b906c26a23a39 | """
Configuration file for the application, sets key parameters
Variables denotes with (*) at the end of the corresponding comment must be set before running the program
"""
from common import secrets
import logging, os
# REQUIRED
DATA_PATH = '/home/martmichals/stockwatch/app/data/' # Path for data folder *
# NOTE(review): 'Norhtrop' looks like a typo for 'Northrop' — confirm before
# changing, since the value may be matched/displayed elsewhere.
COMPANIES_OF_INTEREST = {'NOC': 'Norhtrop Grumman'} # Stock tickers of interest, no more than 450 *
# Script info
TAG = 'config.py -'
LOG_FILEPATH = '/var/tmp/stockwatch.log'
MODES = ['production', 'debug']
# News data path and limit
# NOTE(review): DATA_PATH already ends with '/', so these produce a double
# slash — harmless on POSIX, but worth normalising.
NEWS_PATH = DATA_PATH + '/news/'
NEWS_DAILY_LIMIT = 500
# File path for stock data folder
STOCK_PATH = DATA_PATH + '/stocks/'
# People for which news is pulled - market "influencers"
PEOPLE = [
    'Warren Buffet',
    'Bill McNabb',
    'Jamie Dimon',
    'Lloyd Blankfein',
    'Larry Fink',
    'Carl Icahn',
    'Sergio Ermotti',
    'Jeffery Gundlach',
    'John Stumpf']
# Countries for which top headlines are pulled
COUNTRIES = ['us', 'ch', 'ru']
# Function to check that the configuration parameters are properly instantiated
# Function to check that the configuration parameters are properly instantiated
def check_config():
    """Validate module configuration; return True if usable, otherwise raise.

    Raises RuntimeError for bad COMPANIES_OF_INTEREST / DATA_PATH and
    ValueError for empty API keys. FIXES: the old flow compared key lengths
    with `is 0` (identity, not equality), raised ValueError for *all*
    failures, and its final RuntimeError was unreachable. logging.exception
    is also replaced by logging.error since no exception is being handled.
    """
    if len(COMPANIES_OF_INTEREST) == 0:
        logging.error('Improper instantiation of COMPANIES_OF_INTEREST')
        raise RuntimeError('Improper config.py parameters')
    if not os.path.isdir(DATA_PATH):
        logging.error('Improper instantiation of DATA_PATH')
        raise RuntimeError('Improper config.py parameters')
    if len(secrets.NEWS_API_KEY) == 0:
        logging.error('%s The news API key is empty, please fill before re-running', TAG)
        raise ValueError('Key should not be empty')
    if len(secrets.FINNHUB_API_KEY) == 0:
        logging.error('%s The finnhub API key is empty, please fill before re-running', TAG)
        raise ValueError('Key should not be empty')
    return True
# Function that configures everything required for the program to run
# Function that configures everything required for the program to run
def configure(mode):
    """Initialise logging (truncating any previous log file), validate the
    configuration, and create the news/stock data directories."""
    # Truncate any previous log.
    open(LOG_FILEPATH, 'w').close()

    levels = {'debug': logging.DEBUG, 'production': logging.WARNING}
    if mode not in levels:
        raise RuntimeError('Incorrect mode passed as an argument')
    logging.basicConfig(format='%(asctime)s %(message)s', level=levels[mode], filename=LOG_FILEPATH)
    # Echo log records to the console as well as the file.
    logging.getLogger().addHandler(logging.StreamHandler())

    check_config()

    # Initialize folders for news and stocks
    for directory in (NEWS_PATH, STOCK_PATH):
        if not os.path.isdir(directory):
            os.mkdir(directory)

    logging.info('%s configure() completed successfully', TAG)
|
12,521 | 24951f807ec2a7747bb3f7cf04ffd6f93c4920b2 | #################################
# Twitter Wiggler: Twitter Plays With Bug The Cat
# v1.0
# python3 <--- remember that for print statements!
# Nadine Lessio and Bijun Chen
#
# Some Limitations:
# --> This only works if your account is public
#################################
### IMPORTS #########################################################################
import random, sys, json, datetime, re, os, unicodedata, logging, time, struct, requests
import serial
from random import choice
from time import sleep
### twython ###
from twython import Twython, TwythonError
from twython import TwythonStreamer
### LOGGING ##########################################
logging.basicConfig(filename='twitterWarn.log',level=logging.INFO)
### SERIAL ############################################
try:
    # macOS-style tty path; adjust for the Arduino's actual port elsewhere.
    ser = serial.Serial('/dev/cu.usbmodem1421', 9600, timeout=0) ## open port
    time.sleep(2) ## sleep two seconds
    print("Connection to arduino established succesfully!\n") ## signal success
except Exception as e:
    print(e)
    # NOTE(review): with sys.exit() commented out, execution continues with
    # `ser` undefined, so the streamer below will NameError on its first write.
    #sys.exit() ## exit the program if there is an error
### GET KEYS #########################################################################
### local ###
### try and find a way to do os.environ things locally
# keys.txt must contain exactly four lines, in this order:
# app key, app secret, oauth token, oauth token secret.
keys = []
with open('keys.txt','r') as my_file:
    keys = my_file.read().splitlines()
APP_KEY = keys[0]
APP_SECRET = keys[1]
OAUTH_TOKEN = keys[2]
OAUTH_TOKEN_SECRET = keys[3]
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
# Raises if the credentials are invalid, failing fast at startup.
twitter.verify_credentials()
### CAT STREAMER TRACKER ###############################################################
class catStream(TwythonStreamer):
    """ Twitter Streamer Class

    Reacts to @replies to @smolcatbug by writing single-byte commands to the
    Arduino over `ser` (5 = advance position, 3/4 = faster/slower,
    7/2 = stop/go, 8 = hard reset).
    """
    ## can be accessed by all methods in the class
    numberOfReplies = 0 # number of replies
    topLimit = 20 # setting this to 20 but could be like 100 or 1000

    def on_success(self, data):
        # Called by TwythonStreamer for every status matching the filter.
        if 'text' in data:
            ## get your data as a str
            body = data['text']
            user = data['user']['screen_name']
            ## set up some specific things to track
            acct = "@smolcatbug"
            if body.startswith(acct): # if it is an @reply not a mention
                bodyStrip = re.sub(r'[^\w\s]','',body.lower()) # strip everything but words
                self.numberOfReplies+=1 # update the counter by 1
                if not self.numberOfReplies % 2: # update the position every other tweet
                    print("multiple of two:",self.numberOfReplies)
                    ser.write(struct.pack('>B',5))
                ## this will only find the first incident of each word. whatever comes first. ok with that.
                ## particular comands will not increase position.
                if "faster" in bodyStrip:
                    print("faster")
                    ser.write(struct.pack('>B', 3)) # any one can increase servo speed
                elif "slower" in bodyStrip:
                    print("slower")
                    ser.write(struct.pack('>B', 4)) # anyone can decrease servo speed
                elif "stop" in bodyStrip and user == "smolcatbug":
                    print("stop")
                    ser.write(struct.pack('>B', 7)) # only smolcatbug can stop the servo
                elif "go" in bodyStrip and user == "smolcatbug":
                    print("go")
                    ser.write(struct.pack('>B', 2)) # only smolcatbug can start the servo
                else:
                    print("no specific commands")
                ## This is to basically manipulate people to keep interacting with it.
                ## it also puts in some natural high and low movements into the toy
                if self.numberOfReplies >= self.topLimit:
                    # NOTE(review): the assignment below is immediately
                    # overwritten by hard_reset(), which zeroes the counter.
                    self.numberOfReplies = self.topLimit # keep it high.
                    self.hard_reset() # start winding it down ?
                print("!on_success: ", self.numberOfReplies)

    def hard_reset(self):
        """ reset everything """
        ser.write(struct.pack('>B', 8))
        self.numberOfReplies = 0; # reset the reply number
        # NOTE(review): windDownCount is never read anywhere in this file.
        self.windDownCount = 3 # reset the wind down count

    def on_error(self, status_code, data):
        """ Print The Error Code """
        print("error_code: ",status_code)
# Start the stream; statuses.filter blocks and dispatches to catStream.
cStream = catStream(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
cStream.statuses.filter(track="@smolcatbug") # only works if you are public
|
12,522 | acbb4b1a27da30337fe6b382412722db63ff1095 | try:
import cv2
_available = True
except ImportError:
_available = False
def resize(x, output_shape):
    """Resize image to match the given shape.

    A bilinear interpolation is used for resizing.

    Args:
        x (~numpy.ndarray): array to be transformed. This is in CHW format.
        output_shape (tuple): this is a tuple of length 2. Its elements are
            ordered as (height, width).

    Returns:
        ~numpy.ndarray: a resized array in CHW format.
    """
    if not _available:
        # BUG FIX: the old message was copied from a plotting helper and
        # claimed "nothing will be plotted"; this function resizes images.
        raise ValueError('cv2 is not installed on your environment, '
                         'so images cannot be resized. '
                         'Please install OpenCV, e.g. '
                         '$ pip install opencv-python\n')
    H, W = output_shape
    # cv2.resize expects HWC layout and takes dsize as (width, height).
    x = x.transpose(1, 2, 0)
    x = cv2.resize(x, dsize=(W, H))
    return x.transpose(2, 0, 1)
|
12,523 | 8c8703a91657e8a9a4c78a96dd4ff7f166cee3a3 | import random as r
class sim:
    """Event-scheduling simulation of a single-server queue.

    Arrival and service times are drawn from uniform(0, 2). `sstatus` tracks
    the number of packets in the system; `t_arrival`/`t_depart` hold the next
    scheduled event times (inf departure = idle server).
    """
    def __init__(self):
        self._simclock = None              # current simulation time (via the simclock property)
        self.t_arrival = self.g_packet()   # time of the next arrival event
        self.t_depart = float('inf')       # time of the next departure (inf = server idle)
        self.t_service = 0
        self._maxque = None                # maximum queue size (prompted in s_nq; currently unused elsewhere)
        self.cqs = 0
        self.sstatus = 0                   # packets currently in the system
        self.nodes = None                  # number of events to simulate (prompted in s_nq)
        self.npdrop = 0
        self.n_depart = 0
        self.t_event = 0
        self.n_arrival = 0
        self.tmp_time = self.g_service()
    #def sch(self):
    #    print(1)
    def a_event(self):
        # Arrival: one more packet; if the server was idle, schedule its
        # departure. A new arrival is always scheduled.
        self.sstatus += 1
        self.n_arrival += 1
        if self.sstatus <= 1:
            self.temp1 = self.g_service()
            print(">>>>Service time>>>>",self.temp1)
            self.tmp_time=self.temp1
            self.t_depart = self.simclock + self.temp1
        self.t_arrival = self.simclock + self.g_packet()
    def d_event(self):
        # Departure: if packets remain, start the next service; otherwise
        # mark the server idle with an infinite departure time.
        self.sstatus -=1
        self.n_depart += 1
        if self.sstatus > 0:
            self.temp2=self.g_service()
            print(">>>>Service time<<<<",self.temp2)
            self.tmp_time = self.temp2
            self.t_depart = self.simclock + self.temp2
        else:
            self.t_depart = float('inf')
    def u_clock(self):
        # Advance the clock to the earliest pending event.
        self.t_event = min(self.t_arrival,self.t_depart)
        self.simclock = self.t_event
        print("event time:",self.simclock)
        print("arrival time:",self.t_arrival,"departure time:",self.t_depart)
        print('---------------------------')
    def event_type(self):
        # Dispatch whichever event is due first (ties favour the arrival).
        if self.t_arrival <= self.t_depart:
            self.a_event()
        else:
            self.d_event()
    def g_service(self):
        # Random service time, uniform on [0, 2], rounded to 2 decimals.
        return round(r.uniform(0,2),2)
    def g_packet(self):
        # Random inter-arrival time, uniform on [0, 2], rounded to 2 decimals.
        return round(r.uniform(0,2),2)
    def s_nq(self):
        # Interactively prompt for node count and max queue size until the
        # user enters valid integers; returns the node count.
        if self.nodes is None:
            while self.nodes is None:
                self.nodes = input("Insert number of nodes:")
                try:
                    self.nodes = int(self.nodes)
                except:
                    self.nodes = None
                    print("Insert valid integer!")
        if self._maxque is None:
            while self._maxque is None:
                self._maxque = input("Insert maximum que size:")
                print('---------------------------')
                try:
                    self._maxque = int(self._maxque)
                except:
                    self._maxque = None
                    print("Insert valid integer!")
        return self.nodes
    @property
    def simclock(self):
        return self._simclock
    @simclock.setter
    def simclock(self,clock):
        self._simclock = clock
    def ssc(self):
        print("sstatus number",self.sstatus)
if __name__ == "__main__":
    a = sim()
    # Prompts for node count / max queue size; returns the node count,
    # i.e. how many event iterations to run.
    x = a.s_nq()
    for i in range(0,x):
        #a.sch()
        a.ssc()
        a.u_clock()      # advance the clock to the next event
        a.event_type()   # process that event (arrival or departure)
|
12,524 | fab9eee98d580491419eb50c6a58befed29be431 | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class Review(models.Model):
    """A product review: a 0-5 integer rating plus free-text content,
    optionally linked to a Product (SET_NULL keeps the review when the
    product is deleted)."""
    product = models.ForeignKey('products.Product', null=True, blank=True, on_delete=models.SET_NULL)
    review_rating = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(5)],
                                        null=False, blank=False)
    # Stored as plain text rather than a FK to the auth user model.
    user = models.CharField(max_length=254, null=False, blank=False, default="User not found")
    review_content = models.TextField(default="")

    def get_friendly_name(self):
        # NOTE(review): despite the name, this returns the numeric rating.
        return self.review_rating

    def get_review_content(self):
        return self.review_content
|
12,525 | 9048a7e18ecec0f2ac2334af017db6b12e2c4616 | from rest_framework import serializers
from ..models import Models, Trim, ModelsShown, Accessories, Location, Colours, Interiors, Engines, Gallery, Transmissions, Features, InteriorColour, ExteriorColour
class TransmissionsSerializer(serializers.ModelSerializer):
    """Full transmission record, including the UI `selected` flag."""
    class Meta:
        model = Transmissions
        # NOTE(review): 'abberviation' is misspelled but must match the model
        # field name exactly — fix it on the model (with a migration) first.
        fields = ('id', 'name', 'abberviation', 'selected')
class FeaturesSerializer(serializers.ModelSerializer):
    """Minimal feature payload (id and name)."""
    class Meta:
        # BUG FIX: this serializer previously pointed at the Transmissions
        # model (copy-paste error); it serializes Features, which is imported
        # at the top of the file and was otherwise unused.
        model = Features
        fields = ('id', 'name',)
class ColoursSerializer(serializers.ModelSerializer):
    """Exposes every field of the Colours model unchanged."""
    class Meta:
        model = Colours
        fields = '__all__'
class InteriorsSerializer(serializers.ModelSerializer):
    """Interior option; `path` resolves the stored file's public URL."""
    path = serializers.SerializerMethodField('get_url_url')

    class Meta:
        model = Interiors
        fields = ('id', 'name', 'url', 'path', 'link')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }

    def get_url_url(self, obj):
        # Resolve the FileField to its URL.
        return obj.url.url
class EnginesSerializer(serializers.ModelSerializer):
    """Full engine spec sheet."""
    class Meta:
        model = Engines
        # NOTE(review): 'recommendedFueld' and 'ecoAssis' look misspelled but
        # must match the model field names — fix on the model first.
        fields = ('id', 'name', 'twoLiter', 'onePointFiveLiter', 'hp', 'torque', 'displacement', 'emissionsRating', 'boreAndStroke', 'compression', 'driveByWire', 'ecoAssis', 'recommendedFueld')
class InteriorsImageSerializer(serializers.ModelSerializer):
    """Lightweight interior image payload: stored name, resolved URL, md5."""
    path = serializers.SerializerMethodField('get_url_url')

    class Meta:
        model = Interiors
        fields = ('url', 'path', 'md5')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }

    def get_url_url(self, obj):
        return obj.url.url
class ExteriorColourSerializer(serializers.ModelSerializer):
    """Exterior colour option; `path` exposes the swatch image URL."""
    path = serializers.SerializerMethodField('get_url_url')

    class Meta:
        model = ExteriorColour
        fields = ('id', 'colour', 'path', 'selected')

    def get_url_url(self, obj):
        return obj.url.url
class ExteriorColourImageSerializer(serializers.ModelSerializer):
    """Lightweight exterior-colour image payload: name, resolved URL, md5."""
    path = serializers.SerializerMethodField('get_url_url')

    class Meta:
        model = ExteriorColour
        fields = ('url', 'path', 'md5')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }

    def get_url_url(self, obj):
        return obj.url.url
class InteriorColourSerializer(serializers.ModelSerializer):
    """Interior colour option with its UI `selected` flag."""
    class Meta:
        model = InteriorColour
        fields = ('id', 'colour', 'selected')
class TrimNSerializer(serializers.ModelSerializer):
    """Nested trim payload used inside ModelsSerializer: engine, colours,
    interiors, transmissions (many) and features fully expanded."""
    path = serializers.SerializerMethodField('get_url_url')
    engine = EnginesSerializer(read_only=True)
    colour = ColoursSerializer(read_only=True)
    interiors = InteriorsSerializer(read_only=True, many=True)
    # model = ModelsSerializer(read_only=True)
    transmission = TransmissionsSerializer(read_only=True, many=True)
    features = FeaturesSerializer(read_only=True, many=True)
    exteriorColours = ExteriorColourSerializer(read_only=True, many=True)
    interiorColours = InteriorColourSerializer(read_only=True, many=True)

    class Meta:
        model = Trim
        # fields = ('id', 'model', 'name', 'url', 'link', 'heading', 'description', 'features', 'engine', 'colour', 'interiors', 'transmission_type', 'price', 'fuel_city', 'fuel_highway', 'fuel_combined')
        # fields = ('id', 'name', 'base_price', 'engine', 'transmission', 'features', 'highlights', 'exteriorColours', 'interiorColours', 'url', 'path', 'link', 'heading', 'description', 'colour', 'interiors', 'model', 'fuel_city', 'fuel_highway', 'fuel_combined')
        fields = ('id', 'name', 'base_price', 'engine', 'transmission', 'features', 'highlights', 'exteriorColours', 'interiorColours', 'url', 'path', 'link', 'heading', 'description', 'colour', 'interiors', 'fuel_city', 'fuel_highway', 'fuel_combined')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }
        depth = 1

    def get_url_url(self, obj):
        return obj.url.url
class GallerySerializer(serializers.ModelSerializer):
    """Gallery image reference; `path` resolves the stored file's URL."""
    path = serializers.SerializerMethodField('get_url_url')
    # vehicle = ModelsSerializer(read_only=True)

    class Meta:
        model = Gallery
        fields = ('id', 'url', 'path')

    def get_url_url(self, obj):
        return obj.url.url
class ModelsSerializer(serializers.ModelSerializer):
    """Full vehicle-model payload with nested trims and gallery images."""
    path = serializers.SerializerMethodField('get_url_url')
    trims = TrimNSerializer(many=True)
    gallery = GallerySerializer(many=True)

    class Meta:
        model = Models
        # fields = ('id', 'gallery', 'path', 'trims')
        fields = ('id', 'trims', 'name', 'year', 'subhead', 'url', 'path', 'link', 'disclaimer', 'base_price', 'freight_DPI', 'special_offers', 'line1', 'line2', 'percentage', 'price', 'gallery',)
        # fields = '__all__'
        depth = 1

    def get_url_url(self, obj):
        return obj.url.url
class ModelsImageSerializer(serializers.ModelSerializer):
    """Lightweight model image payload: stored name, resolved URL, md5."""
    path = serializers.SerializerMethodField('get_url_url')

    class Meta:
        model = Models
        fields = ('url', 'path', 'md5')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }

    def get_url_url(self, obj):
        return obj.url.url
class LocationSerializer(serializers.ModelSerializer):
    """Exposes every field of the Location model unchanged."""
    class Meta:
        model = Location
        fields = '__all__'
class AccessoriesSerializer(serializers.ModelSerializer):
    """Accessory summary: id, name and base price."""
    class Meta:
        model = Accessories
        fields = ('id', 'name', 'base_price')
        depth = 1
class TrimSerializer(serializers.ModelSerializer):
    """Standalone trim payload. Same shape as TrimNSerializer except that
    `transmission` is a single object here rather than a list."""
    path = serializers.SerializerMethodField('get_url_url')
    engine = EnginesSerializer(read_only=True)
    colour = ColoursSerializer(read_only=True)
    interiors = InteriorsSerializer(read_only=True, many=True)
    # model = ModelsSerializer(read_only=True)
    transmission = TransmissionsSerializer(read_only=True)
    features = FeaturesSerializer(read_only=True, many=True)
    exteriorColours = ExteriorColourSerializer(read_only=True, many=True)
    interiorColours = InteriorColourSerializer(read_only=True, many=True)

    class Meta:
        model = Trim
        # fields = ('id', 'model', 'name', 'url', 'link', 'heading', 'description', 'features', 'engine', 'colour', 'interiors', 'transmission_type', 'price', 'fuel_city', 'fuel_highway', 'fuel_combined')
        fields = ('id', 'name', 'base_price', 'engine', 'transmission', 'features', 'highlights', 'exteriorColours', 'interiorColours', 'url', 'path', 'link', 'heading', 'description', 'colour', 'interiors', 'fuel_city', 'fuel_highway', 'fuel_combined')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }
        # depth = 1

    def get_url_url(self, obj):
        return obj.url.url
class TrimImageSerializer(serializers.ModelSerializer):
    """Lightweight trim image payload: stored name, resolved URL, md5."""
    path = serializers.SerializerMethodField('get_url_url')

    class Meta:
        model = Trim
        fields = ('url', 'path', 'md5')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }

    def get_url_url(self, obj):
        return obj.url.url
class ModelsShownSerializer(serializers.ModelSerializer):
    """Showroom entry: a vehicle/trim combination with locations,
    accessories and an optional price override."""
    path = serializers.SerializerMethodField('get_url_url')
    location = LocationSerializer(read_only=True, many=True)
    vehicle = ModelsSerializer(read_only=True)
    trim = TrimSerializer(read_only=True)
    accessory = AccessoriesSerializer(read_only=True, many=True)

    class Meta:
        model = ModelsShown
        fields = ('id', 'vehicle', 'trim', 'url', 'path', 'link', 'disclaimer', 'wheels', 'drivetrain', 'accessory', 'price_override', 'location')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }
        # depth = 1

    def get_url_url(self, obj):
        return obj.url.url
class ModelsShownImageSerializer(serializers.ModelSerializer):
    """Lightweight showroom image payload: stored name, resolved URL, md5."""
    path = serializers.SerializerMethodField('get_url_url')

    class Meta:
        model = ModelsShown
        fields = ('url', 'path', 'md5')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }

    def get_url_url(self, obj):
        return obj.url.url
class GalleryImageSerializer(serializers.ModelSerializer):
    """Lightweight gallery image payload: stored name, resolved URL, md5."""
    path = serializers.SerializerMethodField('get_url_url')

    class Meta:
        model = Gallery
        fields = ('url', 'path', 'md5')
        extra_kwargs = {
            'url': {
                'required': False,
            }
        }

    def get_url_url(self, obj):
        return obj.url.url
|
12,526 | cef53e2254e0d07531d05d83aaeb0cbea3cc5d29 | import contextlib
from typing import Dict
from urllib.parse import urlparse
from azure.identity import ClientSecretCredential
from chaoslib.exceptions import InterruptExecution
@contextlib.contextmanager
def auth(secrets: Dict) -> ClientSecretCredential:
    """
    Create an Azure ClientSecretCredential from the provided secrets.

    Secrets must contain a `client_id`, `client_secret` and `tenant_id`:
    ```python
    {
        "client_id": "AZURE_CLIENT_ID",
        "client_secret": "AZURE_CLIENT_SECRET",
        "tenant_id": "AZURE_TENANT_ID"
    }
    ```

    If you are not working with Public Global Azure, e.g. China Cloud,
    you can provide an `msrestazure.azure_cloud.Cloud` object under the
    "cloud" key; its active-directory host is used as the authority.
    If omitted, the Public Cloud default of azure-identity is used.

    Using this function goes as follows:
    ```python
    with auth(secrets) as cred:
        subscription_id = configuration.get("subscription_id")
        resource_client = ResourceManagementClient(cred, subscription_id)
        compute_client = ComputeManagementClient(cred, subscription_id)
    ```

    Raises InterruptExecution if the credential rejects the inputs.
    """
    cloud = secrets.get('cloud')
    credential_kwargs = {}
    # BUG FIX: the docstring promised a Public Cloud default, but the old
    # code unconditionally dereferenced secrets['cloud'] and crashed with an
    # AttributeError when the key was omitted. Only set the authority when a
    # cloud object is actually supplied.
    if cloud is not None:
        credential_kwargs['authority'] = urlparse(
            cloud.endpoints.active_directory).hostname
    try:
        credential = ClientSecretCredential(
            tenant_id=secrets.get('tenant_id'),
            client_id=secrets.get('client_id'),
            client_secret=secrets.get('client_secret'),
            **credential_kwargs
        )
    except ValueError as e:
        raise InterruptExecution(str(e)) from e
    yield credential
|
12,527 | c4f0515daeb2d114a146550adf50a68c04e9bd10 | from elixir import *
from models.medline import *
import time, datetime
import pprint
import networkx as nx
import numpy
import itertools
# NOTE(review): database credentials are embedded in source — move to config.
metadata.bind = 'mysql+oursql://caopsci:G@localhost/medline'
setup_all()
# MeSH tree branches are truncated to this many levels before co-occurrence
# counting.
level = 3
for year in range(1987,2014):
    start = datetime.datetime(year,1,1)
    end = datetime.datetime(year+1,1,1)
    year_query = Citation.query.filter(Citation.date_created>=start,
                                       Citation.date_created<=end)
    G = nx.Graph()
    for cit in year_query.all():
        # term -> truncated branch key for this citation only.
        msh_branch = {}
        try:
            for msh in cit.meshterms:
                for branch in msh.term.branches:
                    leaves = branch.branch.split('.')
                    if len(leaves) >= level:
                        truncated = '.'.join( leaves[0:level] )
                        msh_branch[msh.term] = truncated
            # Count co-occurrence of every term pair within the citation,
            # keyed by truncated branch; weight accumulates across citations.
            for pair in itertools.combinations( cit.meshterms, 2 ):
                key_s = msh_branch[pair[0].term]
                key_t = msh_branch[pair[1].term]
                e = G.get_edge_data(key_s, key_t)
                if not e:
                    G.add_edge(key_s, key_t, weight=1)
                else:
                    G.add_edge(key_s, key_t, weight=e['weight']+1)
        except:
            # NOTE(review): this bare except is load-bearing — terms shorter
            # than `level` never enter msh_branch, so the pair loop raises
            # KeyError for them and the whole citation is skipped. It also
            # hides any other error; consider `except KeyError` instead.
            pass
    # Normalise edge weights to a 0-100 scale before writing the edge list.
    weights = []
    for e in G.edges():
        weights.append(float(G.get_edge_data(e[0], e[1])['weight']))
    arr = numpy.array(weights)
    with open('branch_nx_l'+str(level)+'_'+str(year)+'.csv', 'w') as f:
        for e in G.edges():
            z = ( float(G.get_edge_data(e[0], e[1])['weight']) /arr.max()) * 100
            f.write("%s,%s,%s\n" % (e[0], e[1], z))
|
12,528 | 37622f64569bfa998980a3ac8a8358a1c1c6728c | """
NT2 Kombinacio
Mester / Halado / Kombinatorikai algoritmusok / 48. Kombinacio
"""
from sys import stdin, stdout
# Read n (size of the ground set) and m (combination size), then the current
# m-combination A (strictly increasing integers), all from stdin.
n,m = tuple(map(int,stdin.readline()[:-1].split(" ")))
A = list(map(int,stdin.readline()[:-1].split(" ")))
Aprev = A[:]  # untouched copy used when computing the lexicographic predecessor
if m==n:
    # Only one combination exists; it is its own predecessor and successor.
    for i in range(m):
        stdout.write("{} ".format(A[i]))
    stdout.write("\n")
    for i in range(m):
        stdout.write("{} ".format(A[i]))
else:
    if A[m-1]==m:
        # A is the lexicographically first combination (1..m): the
        # predecessor wraps around to the last one (n-m+1 .. n).
        for i in range(m):
            stdout.write("{} ".format(A[i]+n-m))
        stdout.write("\n")
        A[m-1]=m+1  # successor of the first combination bumps the last element
        for i in range(m):
            stdout.write("{} ".format(A[i]))
    else:
        if A[0]==n-m+1:
            # A is the lexicographically last combination (n-m+1 .. n):
            # predecessor decrements the first element; successor wraps
            # around to the first combination (1..m).
            A[0]-=1
            for i in range(m):
                stdout.write("{} ".format(A[i]))
            stdout.write("\n")
            A[0]+=1
            for i in range(m):
                stdout.write("{} ".format(A[i]-n+m))
        else:
            #prev: general predecessor computation
            if Aprev[m-1]-1==Aprev[m-2]:
                # Last element is glued to its neighbour: push it to n, then
                # decrement the rightmost movable element and maximise the
                # tail right up against n.
                Aprev[m-1]=n
                for i in range(m-2,0,-1):
                    if Aprev[i]-1!=Aprev[i-1]:
                        Aprev[i]-=1
                        for j in range(m-2,i,-1):
                            Aprev[j]=Aprev[j+1]-1
                        break
            else:
                Aprev[m-1]-=1
            for i in range(m):
                stdout.write("{} ".format(Aprev[i]))
            stdout.write("\n")
            #next: general successor computation
            if A[m-1]==n:
                # Last element saturated: increment the rightmost element
                # that can grow and pack the tail immediately after it.
                for i in range(m-2,-1,-1):
                    if A[i]+1!=A[i+1]:
                        A[i]+=1
                        for j in range(i+1,m):
                            A[j]=A[j-1]+1
                        break
            else:
                A[m-1]+=1
            for i in range(m):
                stdout.write("{} ".format(A[i]))
|
12,529 | 14c2c9b6ff63ec66a44806cb14813f20e3c09947 | import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
import sys
from scipy.ndimage import rotate
'''
dat=fits.open('psf1.fits')
#21theta 21phi 5energies 1defocus 256x256 pixels
image=dat[0].data[10][20][1][0]
plt.imshow(image,origin='lower')
plt.show()
sys.exit()
fig = plt.figure(figsize=(21,21))
fig.subplots_adjust(hspace=0, wspace=0)
k=1
for i in range(21):
for j in range(21):
print(i,j)
ax=fig.add_subplot(21,21,k)
k+=1
image=dat[0].data[i][j][0][0]
plt.imshow(image,origin='lower')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
#ax.text(125, 200, str((i,j)),fontsize=10, color='w', ha='center')
plt.tight_layout()
plt.savefig('psf.png',format='png')
dat.close()
'''
# Pointing offset of the source: polar angle theta and position angle phi
# (units presumed to match the PSF grid spacing -- TODO confirm), plus the
# spacecraft roll angle to apply later.
theta=9
phi=90
roll=270
phi_eq=phi
# Decompose the polar offset into elevation/azimuth components.
elev=theta*np.sin(phi_eq*np.pi/180.)
azim=theta*np.cos(phi_eq*np.pi/180.)
# Index into the 21x21 PSF grid; (10, 10) is the on-axis centre.
ii=10+int(round(elev))
jj=10+int(round(azim))
dat=fits.open('psf1_rebinned.fits')
#21theta 21phi 5energies 1defocus 256x256 pixels
image=dat[0].data[ii][jj][1][0]  # energy index 1, defocus index 0
#image2=dat[0].data[ii][jj][3][0]
plt.imshow(image,origin='lower')
plt.show()
#plt.imshow(image2,origin='lower')
#plt.show()
'''
#data_orig = misc.face()
data_orig = image
x0,y0 = 128,128 # left eye; (xrot,yrot) should point there
def rot(image, xy, angle):
im_rot = rotate(image,angle)
org_center = (np.array(image.shape[:2][::-1])-1)/2.
rot_center = (np.array(im_rot.shape[:2][::-1])-1)/2.
org = xy-org_center
a = np.deg2rad(angle)
new = np.array([org[0]*np.cos(a) + org[1]*np.sin(a),
-org[0]*np.sin(a) + org[1]*np.cos(a) ])
return im_rot, new+rot_center
plt.imshow(data_orig,origin='lower')
plt.show()
data_rot, (x1,y1) = rot(data_orig, np.array([x0,y0]), roll)
print(data_rot.shape)
'''
# Rotate the selected PSF by the roll angle (scipy.ndimage.rotate expands the
# output array to fit the rotated image) and display it.
im_rot = rotate(image,roll)
plt.imshow(im_rot,origin='lower')
plt.show()
'''
plt.imshow(image,origin='lower')
plt.show()
larger_image=np.zeros((400,400))
n=0
for jj in range(72,328):
m=0
for ii in range(72,328):
larger_image[ii][jj]=image[m][n]
m=m+1
n=n+1
plt.imshow(larger_image,origin='lower')
plt.show()
alfa=roll
r=np.array([[np.cos(alfa*np.pi/180.),-np.sin(alfa*np.pi/180.)],[np.sin(alfa*np.pi/180.),np.cos(alfa*np.pi/180.)]])
newimage=np.zeros_like(larger_image)
plt.imshow(newimage,origin='lower')
plt.show()
n=0
for jj in range(72,328):
m=0
for ii in range(72,328):
x=r[0][0]*(ii-127.5)+r[0][1]*(127.5-jj)+127.5
y=r[1][0]*(ii-127.5)+r[1][1]*(jj-127.5)+127.5
#newimage[ii][jj]=image[m][n]
if (int(round(x)) < 256) and (int(round(y)) < 256):
newimage[ii][jj]=image[int(round(x))][int(round(y))]
m=m+1
n=n+1
for i in range(len(larger_image)):
for j in range(len(larger_image[i])):
x=r[0][0]*(i-200)+r[0][1]*(200-j)+200
y=r[1][0]*(i-200)+r[1][1]*(j-200)+200
#if (int(round(x)) < 256) and (int(round(y)) < 256):
# print(int(round(x)),int(round(y)))
newimage[i][j]=larger_image[int(round(x))][int(round(y))]
plt.imshow(newimage,origin='lower')
plt.show()
'''
'''
for i, angle in enumerate([66,-32,90]):
data_rot, (x1,y1) = rot(data_orig, np.array([x0,y0]), angle)
axes.flatten()[i+1].imshow(data_rot,origin='lower')
axes.flatten()[i+1].scatter(x1,y1,c="r" )
axes.flatten()[i+1].set_title("Rotation: {}deg".format(angle))
plt.show()
'''
|
12,530 | 4834b72efa37039d04033b5ff6cc1d3f02508f20 | from __future__ import unicode_literals
from django.db import models
class User(models.Model):
    """A registered user with leaderboard rankings."""
    name = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add = True)  # set once on insert
    # Presumably a 0/1 boolean flag for admin rights -- verify against callers.
    admin = models.IntegerField(default = 0)
    # Leaderboard positions for timed and AMRAP workouts respectively.
    rank = models.IntegerField(default = 0)
    rank_amrap = models.IntegerField(default = 0)
class Wod(models.Model):
    """A workout of the day posted by a user."""
    created_at = models.DateTimeField(auto_now_add = True)  # set once on insert
    title = models.CharField(max_length=100)
    description = models.TextField()
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0, where the argument became mandatory. Behavior unchanged.
    user = models.ForeignKey(User, related_name = "Wods", on_delete=models.CASCADE)
    # Workout style, e.g. timed vs AMRAP -- presumed; confirm valid values.
    style = models.CharField(max_length=50)
class Score(models.Model):
    """A user's result on a WOD: an AMRAP rep count or a timed score."""
    created_at = models.DateTimeField(auto_now_add = True)  # set once on insert
    amrap_score = models.IntegerField()
    # Presumably a display string for the score (e.g. "12:34") -- verify.
    string = models.CharField(max_length = 30)
    timed_score = models.IntegerField()
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0, where the argument became mandatory. Behavior unchanged.
    wod = models.ForeignKey(Wod, related_name = "wods_score", on_delete=models.CASCADE)
    user = models.ForeignKey(User, related_name = "user_score", on_delete=models.CASCADE)
|
12,531 | 7cf5494b5f900d742bfbc61eaac15a6f803085aa |
import matplotlib.pyplot as plt
from sitka_highs_lows import highs as sitka_highs
from death_valley_highs_lows import highs as deathvalley_highs
from sitka_highs_lows import dates
plt.style.use('seaborn')
fig, ax = plt.subplots()
# Overlay both cities' daily highs on one set of axes.
ax.plot(dates, deathvalley_highs, c='red', alpha=0.5)
ax.plot(dates, sitka_highs, c='blue', alpha=0.5)
# Shade the gap between the two series to emphasise the difference.
plt.fill_between(dates, deathvalley_highs, sitka_highs, facecolor='blue', alpha=0.1)
# Format plot.
plt.title("Daily high temperature comparison, 2018\nDeath Valley, CA and Sitka", fontsize=24)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()  # slant date labels so they don't overlap
plt.ylabel("Temperature (F)", fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
12,532 | f517920c93e06edcfbbf94782b709b01c608baab | #!/usr/bin/env python
#
#
# Main parser for PDDL - JOe's version (don't overwrite!)
# Written by Joseph Kim
from Problem import Problem
import ast
#==============================================================================
# MAIN
#==============================================================================
# Reading the main problem file
# NOTE(review): hard-coded absolute Windows path -- consider taking the
# problem file as a command-line argument.
with open('C:/Users/liwan/Desktop/myStuff/spring2016/summer/UROP/prelim_work/pfile2', 'r') as f:
    filein = f.read().splitlines()
p = Problem(filein)
# Parse the individual sections of the PDDL problem file; these presumably
# populate state on the Problem instance as well as returning it -- confirm.
p.getObjects()
p.getInitStates()
p.getInitNumerics()
p.getGoalStates()
p.getMetric()
#print (p.getInitNumerics())
def parseToWorkableInitStates(initState):
    """Group single-key predicate dicts into {predicate_name: [arg_tuples]}.

    *initState* is a list of one-key dicts mapping a predicate name to a list
    of arguments; the result collects every argument list (as a tuple) under
    its predicate name.
    """
    grouped = {}
    for entry in initState:
        for name, args in entry.items():
            grouped.setdefault(name, []).append(tuple(args))
    return grouped
def parseToWorkableNumerics(initNumbers):
    """Parse numeric init strings like ``"(= (fuel truck1) 10)"``.

    Returns {fluent_name: {arg_tuple: value}} where the value is the float
    after the closing parenthesis of the fluent expression.
    """
    fluents = {}
    for line in initNumbers:
        pieces = line.split(")")          # isolate the trailing number
        value = float(pieces[1])
        inner = pieces[0].split("(")[-1]  # "fluent arg1 arg2 ..."
        tokens = inner.split(" ")
        fluents.setdefault(tokens[0], {})[tuple(tokens[1:])] = value
    return fluents
print (p.getObjects())
|
12,533 | b5647487c8593ca45cf1bdbf1c8cef59ee79660a | import copy
import math
import torch
import torch.nn as nn
import numpy as np
from constants import *
import torch.nn.functional as F
from utils.model_utils import device, init_weights
def get_nll_criterion(reduction="sum"):
    """Return an NLLLoss criterion using the given *reduction* mode."""
    return nn.NLLLoss(reduction=reduction)
def get_bce_criterion(reduction="sum"):
    """Return a BCELoss criterion using the given *reduction* mode."""
    return nn.BCELoss(reduction=reduction)
def get_masked_nll_criterion(vocab_size, pad_idx=0, reduction="sum"):
    """NLL loss whose per-class weights zero out the padding index."""
    class_weights = torch.ones(vocab_size)
    class_weights[pad_idx] = 0  # padding tokens contribute nothing to the loss
    return nn.NLLLoss(reduction=reduction, weight=class_weights)
def get_masked_bce_criterion(vocab_size, pad_idx=0, reduction="sum"):
    """BCE loss whose per-class weights zero out the padding index."""
    class_weights = torch.ones(vocab_size)
    class_weights[pad_idx] = 0  # padding tokens contribute nothing to the loss
    return nn.BCELoss(reduction=reduction, weight=class_weights)
def make_std_mask(tgt, pad):
    """Build the standard decoder self-attention mask for *tgt*.

    Combines a padding mask (positions equal to *pad* are hidden; when *pad*
    is None every position is visible) with the causal subsequent-position
    mask, and moves the result to the active device.
    """
    if pad is not None:
        tgt_mask = (tgt != pad).unsqueeze(-2)
    else:
        # NOTE(review): ByteTensor masks are deprecated in newer torch
        # releases; bool would be the modern dtype.
        tgt_mask = torch.ones(tgt.size()).type(torch.ByteTensor).unsqueeze(-2)
    tgt_mask = tgt_mask & subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data)
    return tgt_mask.to(device())
def clones(module, n):
    """Return an nn.ModuleList holding *n* independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(n))
def subsequent_mask(size):
    """Causal mask of shape (1, size, size): True at and below the diagonal."""
    upper = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
    return torch.from_numpy(upper) == 0
def batch_subsequent_mask(seq_size, batch_size):
    """Causal mask of shape (batch_size, seq_size, seq_size), replicated per batch item."""
    upper = np.triu(np.ones((batch_size, seq_size, seq_size)), k=1).astype('uint8')
    return torch.from_numpy(upper) == 0
def get_mh_attention_weights(query, key, mask=None, dropout=None):
    """Return the softmax attention weights for multi-head attention.

    Scores are scaled dot products of *query* and *key*; masked positions get
    -1e10 before the softmax so their weight is effectively zero. *dropout*
    (a module), when given, is applied to the weights.
    """
    scale = math.sqrt(query.size(-1))
    scores = torch.matmul(query, key.transpose(-2, -1)) / scale
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e10)
    weights = F.softmax(scores, dim=-1)  # batch x heads x seq x seq
    return dropout(weights) if dropout is not None else weights
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Returns ``(attended_values, attention_weights)``. Masked positions get
    -1e10 before the softmax; *dropout* (a module), when given, is applied to
    the weights before they are used to mix *value*.
    """
    scale = math.sqrt(query.size(-1))
    scores = torch.matmul(query, key.transpose(-2, -1)) / scale
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e10)
    weights = F.softmax(scores, dim=-1)  # batch x heads x seq x seq
    if dropout is not None:
        weights = dropout(weights)
    return torch.matmul(weights, value), weights
class NoamOptimizer:
    """Optimizer wrapper implementing the "Noam" learning-rate schedule
    (linear warmup then inverse-sqrt decay, scaled by model size), as used in
    "Attention Is All You Need"."""
    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer    # wrapped torch optimizer
        self._step = 0                # number of step() calls so far
        self.warmup = warmup          # warmup steps before the decay regime
        self.factor = factor          # overall lr multiplier
        self.model_size = model_size  # d_model, used in the size**-0.5 scale
        self._rate = 0                # last lr actually applied
    def step(self):
        "Update parameters and rate"
        self._step += 1
        rate = self.rate()
        # Push the scheduled lr into every param group before stepping.
        for p in self.optimizer.param_groups:
            p['lr'] = rate
        self._rate = rate
        self.optimizer.step()
        # print("noam lr {}".format(self._rate))
    def rate(self, step=None):
        "Implement `lrate` above"
        if step is None:
            step = self._step
        # min(...) switches from the linear-warmup term to inverse-sqrt decay
        # once step exceeds warmup.
        return self.factor * \
            (self.model_size ** (-0.5) *
             min(step ** (-0.5), step * self.warmup ** (-1.5)))
    def zero_grad(self):
        self.optimizer.zero_grad()
    def state_dict(self):
        # Serialize the schedule state together with the wrapped optimizer's.
        rv = {}
        rv["_step"] = self._step
        rv["warmup"] = self.warmup
        rv["factor"] = self.factor
        rv["model_size"] = self.model_size
        rv["_rate"] = self._rate
        rv["opt_state_dict"] = self.optimizer.state_dict()
        return rv
    def load_state_dict(self, state_dict):
        self._step = state_dict["_step"]
        self.warmup = state_dict["warmup"]
        self.factor = state_dict["factor"]
        self.model_size = state_dict["model_size"]
        self._rate = state_dict["_rate"]
        self.optimizer.load_state_dict(state_dict["opt_state_dict"])
        # Move any restored optimizer-state tensors onto the active device.
        for state in self.optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device())
class LabelSmoothing(nn.Module):
    """KL-divergence loss against a label-smoothed target distribution.

    The gold class keeps probability ``1 - smoothing``; the remaining mass is
    spread uniformly over the other classes (excluding the padding index),
    which penalises over-confident predictions.
    """
    def __init__(self, size, padding_idx, smoothing=0.0, reduction="sum"):
        super(LabelSmoothing, self).__init__()
        self.criterion = nn.KLDivLoss(reduction=reduction)
        self.padding_idx = padding_idx
        self.confidence = 1.0 - smoothing  # mass kept on the gold label
        self.smoothing = smoothing
        self.size = size                   # vocabulary size; must equal x.size(1)
    def forward(self, x, target):
        # x: log-probabilities (batch, size); target: gold class indices.
        assert x.size(1) == self.size
        x = x.to(device())
        target = target.to(device())
        true_dist = x.data.clone()
        # size - 2 excludes the gold label and the padding index from the
        # uniform share.
        true_dist.fill_(self.smoothing / (self.size - 2))
        indices = target.data.unsqueeze(1)
        true_dist.scatter_(1, indices, self.confidence)
        if self.padding_idx is not None:
            # Never predict padding, and zero out rows whose target IS padding.
            true_dist[:, self.padding_idx] = 0
            mask = torch.nonzero(target.data == self.padding_idx)
            if mask.shape[0] > 0: true_dist.index_fill_(0, mask.squeeze(), 0.0)
        return self.criterion(x, true_dist)
class LRDecayOptimizer:
    """A simple wrapper class for learning rate scheduling.

    Keeps a sliding window of recent scores and multiplies the learning rate
    by ``shrink_factor`` (never dropping below ``min_lr``) once
    ``max_fail_limit`` consecutive "bad" scores are observed.
    """
    def __init__(self, optimizer, initial_lr,
                 shrink_factor=0.5,
                 min_lr=0.0001,
                 past_scores_considered=2,
                 verbose=False,
                 score_method="max",
                 max_fail_limit=1):
        self.optimizer = optimizer
        self.curr_lr = initial_lr
        self.shrink_factor = shrink_factor
        self.past_scores_considered = past_scores_considered  # window size
        self.verbose = verbose
        self.min_lr = min_lr
        self.past_scores_list = []        # recent scores, newest last
        self.score_method = score_method  # "max" or "min" comparison mode
        self.max_fail_limit = max_fail_limit
        self.curr_fail_count = 0          # consecutive "bad" scores so far
        self._commit_lr()                 # push initial_lr into the optimizer
    def state_dict(self):
        # Serialize scheduler state together with the wrapped optimizer's.
        sd = {
            "opt_sd": self.optimizer.state_dict(),
            "curr_lr": self.curr_lr,
            "shrink_factor": self.shrink_factor,
            "past_scores_considered": self.past_scores_considered,
            "verbose": self.verbose,
            "min_lr": self.min_lr,
            "past_scores_list": self.past_scores_list,
            "score_method": self.score_method,
            "max_fail_limit": self.max_fail_limit,
            "curr_fail_count": self.curr_fail_count,
        }
        return sd
    def load_state_dict(self, state_dict):
        self.curr_lr = state_dict["curr_lr"]
        self.shrink_factor = state_dict["shrink_factor"]
        self.past_scores_considered = state_dict["past_scores_considered"]
        self.verbose = state_dict["verbose"]
        self.min_lr = state_dict["min_lr"]
        self.past_scores_list = state_dict["past_scores_list"]
        self.score_method = state_dict["score_method"]
        self.max_fail_limit = state_dict["max_fail_limit"]
        self.curr_fail_count = state_dict["curr_fail_count"]
        self.optimizer.load_state_dict(state_dict["opt_sd"])
        # Move any restored optimizer-state tensors onto the active device.
        for state in self.optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device())
    def step(self):
        self.optimizer.step()
    def zero_grad(self):
        self.optimizer.zero_grad()
    def _commit_lr(self):
        # Clamp and write the current lr into the optimizer's param group.
        self.curr_lr = max(self.min_lr, self.curr_lr)
        self.optimizer.param_groups[0]['lr'] = self.curr_lr
    def _score_func(self, new_score, new_score_method=None):
        # NOTE(review): callers treat a True return as a *failure*, yet for
        # "max" this fires when new_score EXCEEDS the recent maximum (and for
        # "min" when it goes below the minimum). Confirm the intended polarity
        # -- it only makes sense if "exceeding the window" is the bad case
        # (e.g. scores are losses under "max").
        score_method = self.score_method if new_score_method is None else new_score_method
        if score_method=="max":
            return new_score > max(self.past_scores_list)
        elif score_method == "min":
            return new_score < min(self.past_scores_list)
        else:
            raise NotImplementedError("Unknown score method " + self.score_method)
    def shrink_learning_rate(self):
        # directly shrink lr
        self.curr_lr *= self.shrink_factor
        self.curr_lr = max(self.curr_lr, self.min_lr)
        if self.verbose: print("lr updated: ", self.curr_lr)
        self._commit_lr()
    def update_learning_rate(self, new_score, new_score_method=None):
        # shrink_factor ~1.0 means scheduling is effectively disabled.
        if self.shrink_factor >= 0.999:
            return
        # Not enough history yet: just record the score.
        if len(self.past_scores_list) < self.past_scores_considered:
            self.past_scores_list.append(new_score)
            return
        if self._score_func(new_score, new_score_method):
            self.curr_fail_count += 1
            if self.verbose: print("lrd bad_count: ", self.curr_fail_count)
            if self.curr_fail_count >= self.max_fail_limit:
                # Enough consecutive bad scores: shrink lr and reset history.
                self.curr_lr *= self.shrink_factor
                self.curr_lr = max(self.curr_lr, self.min_lr)
                if self.verbose: print("lr updated: ", self.curr_lr)
                self._commit_lr()
                self.past_scores_list = [new_score]
                self.curr_fail_count = 0
            else:
                self.past_scores_list.append(new_score)
                if len(self.past_scores_list) > self.past_scores_considered:
                    self.past_scores_list = self.past_scores_list[-self.past_scores_considered:]
        else:
            # Good score: reset the failure streak and record it.
            self.curr_fail_count = 0
            self.past_scores_list.append(new_score)
            if len(self.past_scores_list) > self.past_scores_considered:
                self.past_scores_list = self.past_scores_list[-self.past_scores_considered:]
|
12,534 | 9f3a46ee7e55435e16b387593a62f121d08a7356 | from flask.views import MethodView
from flask import Blueprint, request, jsonify
from flask_jwt_extended import jwt_required, get_jwt_identity
from app.app import app
from owner.utils.utils import OwnerActions
owner_bp = Blueprint('owner_bp', __name__, )
class OwnerList(MethodView):
    """Paginated, optionally filtered list of owners for the current user."""

    decorators = [jwt_required, ]

    def get(self):
        page_param = request.args.get('page')
        search_param = request.args.get('search')
        if page_param is None:
            return jsonify({"detail": "page required"}), 400
        ok, payload = OwnerActions().get_owner_list(
            get_jwt_identity()['email'], page_param, search_param)
        if not ok:
            return jsonify({'detail': payload}), 400
        return jsonify(payload), 200
class OwnerAccount(MethodView):
    """Retrieve a single owner (GET) or create a new one (POST)."""

    decorators = [jwt_required, ]

    def get(self):
        owner_id = request.args.get('id')
        if owner_id is None:
            return jsonify({"detail": "id required"}), 400
        ok, payload = OwnerActions().get_owner_detail(
            get_jwt_identity()['email'], owner_id)
        if not ok:
            return jsonify({'detail': payload}), 400
        return jsonify(payload), 200

    def post(self):
        if not request.is_json:
            return jsonify({"detail": "Data is missing in request"}), 400
        # Required fields, validated in declaration order so the error for
        # the first missing one matches the original endpoint's messages.
        field_names = ('name', 'email', 'phone', 'company', 'website',
                       'address', 'country', 'state', 'district', 'city',
                       'postalcode', 'aadhar')
        values = {field: request.json.get(field, None) for field in field_names}
        for field in field_names:
            if values[field] is None:
                return jsonify({"detail": "{} required".format(field)}), 400
        ok, payload = OwnerActions().create_owner(
            get_jwt_identity()['email'], values['name'], values['email'],
            values['phone'], values['company'], values['website'],
            values['address'], values['country'], values['state'],
            values['district'], values['city'], values['postalcode'],
            values['aadhar'], app.config.get('SITE_ORIGIN'))
        if not ok:
            return jsonify({'detail': payload}), 401
        return jsonify(payload), 200
app.add_url_rule(
"/api/owner/list", view_func=OwnerList.as_view("owner-list"), methods=["GET"]
)
app.add_url_rule(
"/api/owner/account", view_func=OwnerAccount.as_view("owner-account"), methods=["GET", "POST"]
)
|
12,535 | d11591c1494783ea7c03d4c77c5333a3ededcab3 |
#calss header
class _CLIMAX():
def __init__(self,):
self.name = "CLIMAX"
self.definitions = [u'the most important or exciting point in a story or situation, especially when this happens near the end: ', u'the highest point of sexual pleasure']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
12,536 | 7ebc8dcfe54f613c29c8bde87c2995e88377f9ea | #!/usr/bin/env python
# -*- coding:utf8 -*-
"""
for 迭代对象 in 对象:
循环体
#基本应用,进行数值循环
range(start, stop[, step]) -> range object
start:开始数值
stop:结束数值
step:步长
"""
'''
for i in range(1, 11, 2):
print(i, end=" ")
for i in range(1, 11):
print(i, end=" ")
print()
for i in range(11):
print(i, end=" ")
'''
'''
print("计算1+2+3+4....100的结果")
result = 0 #保存累加结果的变量
for i in list(range(101)):
result = result + i
print(result)
'''
# Classic Chinese-remainder puzzle: brute-force the number that leaves
# remainder 2 mod 3, remainder 3 mod 5 and remainder 2 mod 7.
print("今有物,不知其数,三三数之余2,五五数之余3,七七数之余2,问何物?")
for i in range(1001):
    if i % 3 == 2 and i % 5 == 3 and i % 7 == 2:
        print("答曰 这个数值是:{}".format(i))
# A for loop iterates a string character by character.
string1 = "不要再说我不能"
print(string1)
for ch in string1:
    print(ch)
|
12,537 | 268ea278571eea680956a172c2dbd6c3eed9223d | #
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from . import models, __version__
from collections import MutableMapping
import pprint
import pdb
import logging
logger = logging.getLogger(__name__)
# MutableMapping should be enough, but it causes problems with py2
DICTCLASSES = (MutableMapping, dict, models.BaseModel)
def check_template(indict, template):
    """Recursively assert that *indict* matches the shape of *template*.

    Mappings: every template key must exist in *indict* and its value must
    match recursively.  Lists: every template element must match at least one
    input element (order-independent).  Anything else: must compare equal.
    Raises ``models.Error`` on the first mismatch.
    """
    if isinstance(template, DICTCLASSES) and isinstance(indict, DICTCLASSES):
        for k, v in template.items():
            if k not in indict:
                raise models.Error('{} not in {}'.format(k, indict))
            check_template(indict[k], v)
    elif isinstance(template, list) and isinstance(indict, list):
        for e in template:
            # Accept the first input element that matches this template
            # element; fail only when none of them does (for/else).
            for i in indict:
                try:
                    check_template(i, e)
                    break
                except models.Error as ex:
                    # raise
                    continue
            else:
                raise models.Error(('Element not found.'
                                    '\nExpected: {}\nIn: {}').format(pprint.pformat(e),
                                                                     pprint.pformat(indict)))
    else:
        if indict != template:
            raise models.Error(('Differences found.\n'
                                '\tExpected: {}\n'
                                '\tFound: {}').format(pprint.pformat(template),
                                                      pprint.pformat(indict)))
def convert_dictionary(original, mappings):
    """Return a copy of *original* with keys renamed through *mappings*.

    Keys absent from *mappings* are kept unchanged; values are untouched.
    """
    return {mappings.get(key, key): value for key, value in original.items()}
def easy_load(app=None, plugin_list=None, plugin_folder=None, **kwargs):
    '''
    Create (or reuse) a Flask app, attach a Senpy instance, load and activate
    the given plugins, and return ``(senpy, app)``.

    When *plugin_list* is empty, plugins are discovered from ``__main__`` so
    a plugin script can simply call this on itself.
    '''
    from flask import Flask
    from .extensions import Senpy
    if not app:
        app = Flask(__name__)
    sp = Senpy(app, plugin_folder=plugin_folder, **kwargs)
    if not plugin_list:
        from . import plugins
        import __main__
        plugin_list = plugins.from_module(__main__)
    for plugin in plugin_list:
        sp.add_plugin(plugin)
    sp.install_deps()
    sp.activate_all()
    return sp, app
def easy_test(plugin_list=None, debug=True):
    '''
    Run the self-tests of every plugin (discovered from ``__main__`` when no
    list is given).  With *debug* enabled, a failure drops into a pdb
    post-mortem session instead of propagating.
    '''
    logger.setLevel(logging.DEBUG)
    logging.getLogger().setLevel(logging.INFO)
    try:
        if not plugin_list:
            import __main__
            logger.info('Loading classes from {}'.format(__main__))
            from . import plugins
            plugin_list = plugins.from_module(__main__)
        plugin_list = list(plugin_list)
        for plug in plugin_list:
            plug.test()
            plug.log.info('My tests passed!')
        logger.info('All tests passed for {} plugins!'.format(len(plugin_list)))
    except Exception:
        if not debug:
            raise
        pdb.post_mortem()
def easy(host='0.0.0.0', port=5000, debug=False, **kwargs):
    '''
    Run a server with a specific plugin: load it, run its self-tests, then
    start the Flask development server.
    '''
    logging.getLogger().setLevel(logging.DEBUG)
    logging.getLogger('senpy').setLevel(logging.INFO)
    sp, app = easy_load(**kwargs)
    easy_test(sp.plugins())
    # The original assigned app.debug twice (before and after logging);
    # once is enough.
    app.debug = debug
    import time
    logger.info(time.time())
    logger.info('Senpy version {}'.format(__version__))
    logger.info('Server running on port %s:%d. Ctrl+C to quit' % (host,
                                                                  port))
    app.run(host,
            port,
            debug=app.debug)
|
12,538 | a93876f46e8ccd91d9e6c1355b3e501066fe6906 | #! /usr/bin/env python2
import os
import sys
import psycopg2
DB_NAME = "news"
# 1. The most popular three articles of all time?
query1 = ("SELECT articles.title, count(*) AS counting_view FROM log"
",articles WHERE log.status = '200 OK' AND substr(log.path,10,100)"
"= articles.slug GROUP BY articles.title,log.path ORDER BY (counting_view)" # noqa
"DESC limit 3;")
# 2. The most popular author of all time?
query2 = ("SELECT authors.name, count(*) AS counting_view1 FROM log"
",authors,articles WHERE substr(log.path,10,100) = articles.slug AND " # noqa
"authors.id = articles.author AND log.status = '200 OK'"
"GROUP BY authors.name ORDER BY (counting_view1) DESC limit 3;")
# 3. Days that lead to more than 1% of requests lead to errors?
query3 = ("SELECT * FROM v8 WHERE Errorr > 1 ORDER BY Errorr DESC;")
def getresult(query):
    """Open a fresh connection to DB_NAME, execute *query*, return all rows.

    A new connection per call keeps the report queries independent; the
    connection is closed before returning.  NOTE(review): the cursor is not
    explicitly closed, but closing the connection releases it.
    """
    db = psycopg2.connect(database=DB_NAME)
    cur = db.cursor()
    cur.execute(query)
    output = cur.fetchall()
    db.close()
    return output
# get query result
q1_result = getresult(query1)
q2_result = getresult(query2)
q3_result = getresult(query3)
def printresult(query):
    """Print each (title, count) row as "\\t title || N views", then a blank line."""
    for row in query:
        print("\t %s || %d" % (row[0], row[1]) + " views")
    print("\n")
def printsresult(query):
    """Print each (day, pct) row as "\\t day || X.XX% error", then a blank line."""
    for row in query:
        print("\t %s || %.2f" % (row[0], row[1]) + "% error")
    print("\n")
# displaying query's output
if __name__ == "__main__":
print("What are the most popular three articles of all time?")
printresult(q1_result)
print("Who are the most popular article authors of all time?")
printresult(q2_result)
print("On which days did more than 1% of requests lead to errors?")
printsresult(q3_result)
|
12,539 | 64ae3b7803bcd299dca7aef6e6538f06e3e6a182 | # -*- coding: utf-8 -*-
# Tags constants
DATE_CLASSES_KEYWORDS = [
x.lower()
for x in [
"news_date",
"news-date-time",
"date",
"news-date",
"newslistdate",
"createdate",
"date_block",
"b_date",
"entry-date",
"pub_date",
"g-date",
"post-date",
"textdate",
"datestamp",
"date-time",
"dateBlock",
"date-posted",
"NewsDate",
"newsDate",
"artDate",
"gDate",
"postDate",
"pubDate",
"newsPubDate",
"NewsDateTime",
"timestamp",
"publish_time",
"news_time",
"news-time",
"newsTime",
"newsdate",
"meta-date",
"n_date",
"time_news",
"newsdatetime",
"date_time",
"g-time",
]
]
DATE_CLASSES_KEYS = ["date", "time"]
NEWS_CLASSES_KEYWORDS = [
x.lower()
for x in [
"news-item",
"news",
"latestnews",
"news_title",
"news-title",
"news_item",
"news-text",
"news-list",
"news_text",
"newstitle",
"mainnews",
"newslist",
"news-main",
"firstnews",
"news-link",
"newslink",
"newsblock",
"news-block",
"news-content",
"news_block",
"news-name",
"newsItem",
"newstext",
"news-ann",
"news_img",
"News",
"news_caption",
"newsTitle",
"item_news",
"news-picture",
"news_anons",
"novost",
]
]
NEWS_CLASSES_KEYS = ["news", "novost", "novosti"]
FEED_CONTENT_TYPES = [
"application/rss+xml",
"application/rdf+xml",
"application/atom+xml",
"application/xml",
"text/xml",
]
# Synthetic tag-type markers; the "tag:type:" namespace keeps them from
# colliding with real HTML tag names. Presumably attached to parsed DOM
# nodes during extraction -- confirm against the consumers.
TAG_TYPE_TEXT = "tag:type:text"
TAG_TYPE_TAIL = "tag:type:tail"
TAG_TYPE_WRAPPER = "tag:type:wrapper"
TAG_TYPE_LAST = "tag:type:last"
TAG_TYPE_EMPTY = "tag:type:empty"
TAG_TYPE_HREF = "tag:type:url"
TAG_TYPE_BOLD = "tag:type:bold"  # Shows title type of texts
TAG_TYPE_IMG = "tag:type:img"
TAG_TYPE_DATE = "tag:type:date"
# Query-string parameters stripped from URLs (session ids and
# analytics/tracking parameters).
CLEANABLE_QUERY_KEYS = [
    "PHPSESSID",
    "utm_source",
    "utm_campaign",
    "utm_medium",
    "utm_content",
    "utm_hp_ref",
]
|
12,540 | e20258a35189e593f37dee7c8a2754f0bb3d902f | from forumapp.models import Channel, Thread, Comment
from forumapp.tests import create_channel, create_thread, create_comment
from django.contrib.auth.models import User
|
12,541 | e2a010985c2673cdc35e9882064075002ecb72d6 | from lib2to3.fixer_base import BaseFix
class FixBadOrder(BaseFix):
    """Fixer stub with an invalid `order` value.

    lib2to3 only accepts "pre" or "post"; "crazy" is presumably deliberate,
    used to exercise the refactoring engine's validation -- TODO confirm.
    """
    order = "crazy"
|
12,542 | 52706010dcf8612342387c8560675c4eb70ca946 | import flatten_dict
import requests
import requests.exceptions
# from typing import Any, _VT, _KT
import ruamel.yaml as yaml
class FlatKeysDict(dict):
    """dict whose keys are dotted paths flattened from a nested mapping.

    ``{"a": {"b": 1}}`` becomes ``{"a.b": 1}`` via ``flatten_dict``.
    """
    @staticmethod
    def dot_reducer(k1, k2):
        # Reducer for flatten_dict: join parent/child keys with a dot; the
        # root parent is None and contributes nothing.
        if k1 is None:
            return k2
        else:
            return k1 + "." + k2
    def __init__(self, dict_like: dict = None) -> None:
        # Flatten the (possibly nested) input before handing it to dict.
        dd = {}
        if dict_like is not None:
            dd = flatten_dict.flatten(dict_like, reducer=FlatKeysDict.dot_reducer)
        super().__init__(dd)
    @classmethod
    def from_yaml(cls, yaml_string):
        # Parse YAML text and flatten the resulting mapping.
        return cls(yaml.safe_load(yaml_string))
    @classmethod
    def from_file(cls, filename, fmt='yaml'):
        """Load and flatten a config file; only the 'yaml' format is supported."""
        with open(filename, 'r') as f:
            yml_str = f.read()
        if fmt == 'yaml':
            o = cls.from_yaml(yml_str)
        else:
            raise ValueError("Format '{}' not recognized".format(fmt))
        return o
    # Abandoned prefix-lookup / attribute-set experiments, kept for reference:
    # def __getitem__(self, k: _KT) -> _VT:
    #     try:
    #         item = super().__getitem__(k)
    #     except KeyError:
    #         for kk in self.keys():
    #             if str(kk).startswith(k)
    #     return item
    # def __setattr__(self, name: str, value: Any) -> None:
    #     try:
    #         super().__setattr__(name, value)
    #     except KeyError:
    #         path = name.split(".")
    #         try:
    #             super().__setattr__(name, value)
    #
class FlatConfig(FlatKeysDict):
def __init__(self, filename=None, url=None) -> None:
dict_like = {}
if filename is not None:
dict_like = FlatKeysDict.from_file(filename)
else:
if url is not None:
try:
r = requests.get(url)
except ValueError:
pass
else:
dict_like = FlatKeysDict.from_yaml(r.text)
super().__init__(dict_like)
if __name__ == '__main__':
pass
|
12,543 | b69c13e1b7b7383bc873fdddf017c9c8e65ab25f | import dask.dataframe as dd
import json
import pandas as pd
from pprint import pprint
from pandas.io.json import json_normalize
import time
import numpy as np
from itertools import repeat
import datetime, time
from datetime import datetime
from dateutil import relativedelta
import sklearn
import seaborn as sns
from tslearn.utils import to_time_series
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from tslearn.clustering import TimeSeriesKMeans
from pathlib import Path
from surprise import Reader, Dataset, Trainset
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OrdinalEncoder
import gower
from sklearn.metrics.pairwise import cosine_similarity
import re
import os
import datetime, time
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from pathlib import Path
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OrdinalEncoder
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
material = pd.read_csv('/Users/Julien/Desktop/LBS/Cours/8 - TERM 3/London Lab/Data.nosync/Original_Data/material.csv')
material['material_id'] = material['material_id'].apply(lambda x: x.lower())
material = material.set_index('material_id')
l = ['avg_con_hourly', 'avg_con_daily', 'avg_con_monthly', 'months', 'total_consumption', 'center_or_not', 'classification_new', 'total_pop_in_province', 'total_pop_in_city','male_pop', 'female_pop', 'male_perc', 'female_perc', 'never_social_per_100', 'everyday_social_per_100', 'persons.aged.11.years.and.over.who.consume.wine', 'persons.aged.11.years.and.over..who.consume.beer', 'more.of.half.a.liter.of.wine.a.day', 'he.she.consumes.wine.more.rarely', 'he.she.consumes.beer.only.seasonally', 'he.she.consumes.beer.more.rarely', 'he.she.consumes.beer.everyday', 'X1.2.glasses.of.wine.a.day', 'sd', 'med_income', 'avg_income', 'ci_craft', 'ci_lager', 'ci_specialties', 'ci_craft_prop_beers', 'ci_lager_prop_beers', 'ci_specialties_prop_beers', 'ci_craft_prop_con', 'ci_lager_prop_con', 'ci_specialties_prop_con', 'number_of_brands', 'near_basketball', 'near_cinema', 'near_football', 'near_neighborhood', 'near_shopping', 'near_theatre', 'near_tourist', 'near_university', 'real_name', 'google_address', 'google_classification', 'rating', 'keyword_1', 'keyword_2', 'keyword_3', 'outdoor_seating']
customers = pd.read_excel('/Users/Julien/Desktop/LBS/Cours/8 - TERM 3/London Lab/Data.nosync/2 - Latest/newest_customer.xlsx')
customers = customers.set_index('crl')
customers = customers.drop(columns = 'Unnamed: 128')
beer_columns = [x for x in [*customers.columns] if x.startswith('b9')]
prediction_columns = ['outdoor_seating', 'rating', 'main_classification', 'total_pop_in_city', 'near_basketball', 'near_cinema', 'near_football','near_neighborhood', 'near_shopping', 'near_theatre', 'near_tourist', 'near_university', 'med_income', 'persons_aged_11_years_and_over_who_consume_beer', 'everyday_social_per_100', 'ci_craft_prop_con', 'ci_lager_prop_con', 'ci_craft_prop_beers', 'ci_lager_prop_beers', 'male_perc', 'center_or_not']
prediction_columns_new_outlet = [x for x in prediction_columns if x.startswith('ci') == False]
class DataCleanReco():
    """Prepare the customer (outlet) dataset for the recommenders.

    Selects the prediction columns, imputes missing values, encodes
    ordinal and categorical features, and produces a train/test split.
    Relies on ``SimpleImputer``, ``ColumnTransformer``, ``OrdinalEncoder``
    and ``train_test_split`` being imported at module level.
    """

    def __init__(self, predictions_columns, customers):
        self.predictions_columns = predictions_columns
        # Keep the two possible prediction targets alongside the features.
        self.all_columns = self.predictions_columns + ['total_consumption', 'avg_con_monthly']
        self.customers = customers[self.all_columns]
        self.customers_original = customers

    def test_for_cat(self, df):
        """Split *df*'s columns into (categorical, numerical) name lists.

        A column is categorical when its dtype is ``object``.
        """
        cat_var = []
        num_var = []
        for col in df.columns:
            # BUG FIX: previously inspected the module-level `customers`
            # frame instead of the `df` argument.
            if df[col].dtype == object:
                cat_var.append(col)
            else:
                num_var.append(col)
        return (cat_var, num_var)

    def fill_missing_values(self):
        """Impute missing values: mean for numeric, mode for categorical.

        Returns a DataFrame with columns reordered to numeric-first
        (ColumnTransformer output order) and the original index restored.
        """
        imp_num = SimpleImputer(strategy='mean')
        imp_cat = SimpleImputer(strategy='most_frequent')
        df = self.customers
        cat_var, num_var = self.test_for_cat(df)
        full_pipeline = ColumnTransformer([("num", imp_num, num_var), ("cat", imp_cat, cat_var)])
        # ColumnTransformer returns a bare ndarray; restore labels/index.
        df_new = pd.DataFrame(full_pipeline.fit_transform(df))
        df_new.columns = num_var + cat_var
        df_new.index = df.index
        return df_new

    def clean_data(self):
        """Return the fully imputed and encoded predictor frame."""
        customers_cleaned = self.fill_missing_values()
        # Values carry stray whitespace in the raw export.
        customers_cleaned['outdoor_seating'] = customers_cleaned['outdoor_seating'].apply(lambda x: x.strip())
        # Ordinal (yes/no style) variables -> integer codes.
        columns_to_ordinal = ['near_basketball', 'near_cinema', 'near_football', 'near_neighborhood', 'near_shopping', 'near_theatre', 'near_tourist', 'near_university', 'center_or_not', 'outdoor_seating']
        enc = OrdinalEncoder()
        customers_cleaned[columns_to_ordinal] = enc.fit_transform(customers_cleaned[columns_to_ordinal])
        # Nominal variables -> one-hot; also drops the target columns by
        # subsetting to the prediction columns.
        categorical_variables = ['main_classification']
        customers_cleaned = pd.get_dummies(data=customers_cleaned[self.predictions_columns], columns=categorical_variables)
        return customers_cleaned

    def split_data(self):
        """Build X/y for 'avg_con_monthly' and return a train/test split."""
        clean_data = self.clean_data()
        what_to_predict = 'avg_con_monthly'
        # The target comes from the (un-imputed) selected frame.
        clean_data[what_to_predict] = self.customers[what_to_predict]
        pred_cols_final = [c for c in clean_data.columns if c != what_to_predict]
        X = clean_data[pred_cols_final]
        y = clean_data[what_to_predict]
        X_train_cust, X_test_cust, y_train_cust, y_test_cust = train_test_split(X, y, random_state=42)
        return X_train_cust, X_test_cust, y_train_cust, y_test_cust
class RecoSystem():
    """Neighbour-based beer recommender for existing outlets.

    Outlets are compared by Gower distance over the cleaned feature
    frame; the beers consumed by an outlet's nearest neighbours (the
    columns prefixed 'b9') become its recommendations.

    Depends on module-level globals: `gower`, `pd`, `np` and the
    `material` lookup frame (beer id -> material name).
    """
    def __init__(self, DataCleanReco, n_neighbors, n_beers):
        # `DataCleanReco` is an *instance* of the DataCleanReco class.
        self.DataCleanReco = DataCleanReco
        self.data = self.DataCleanReco.clean_data()
        self.customers_original = DataCleanReco.customers_original
        # Beer-consumption columns all start with 'b9'.
        self.beer_cols = [*filter(lambda string: string.startswith("b9"), self.customers_original.columns)]
        # Parallel names used for the neighbour-derived ("recommended") rows.
        self.beer_cols_recommended = [x+'_reco' for x in self.beer_cols]
        self.n_neighbors = n_neighbors
        self.n_beers = n_beers
    def get_gower_matrix(self):
        """Pairwise Gower distance matrix between outlets (crl x crl).

        Zero distances are replaced by 1000 so an outlet never becomes
        its own neighbour.  NOTE(review): a genuine zero distance between
        two *distinct* outlets would be clobbered too - assumed not to
        occur in this data.
        """
        distances = gower.gower_matrix(self.data)
        distances = pd.DataFrame(distances, index=self.data.index)
        distances.columns = distances.index
        distances=distances.replace(0, 1000)
        return(distances)
    def find_n_neighbours(self, n):
        """Return a frame indexed by crl with columns top1..topn holding
        each outlet's n nearest neighbours, closest first."""
        df = self.get_gower_matrix()
        # NOTE(review): `order` is computed but never used.
        order = np.argsort(df.values, axis=1)[:, :n]
        df = df.apply(lambda x: pd.Series(x.sort_values(ascending=True)
                                          .iloc[:n].index,
                                          index=['top{}'.format(i) for i in range(1, n+1)]), axis=1)
        return df
    def get_reco_db(self, crl):
        """Build a comparison table for outlet `crl`: its own top-n beers
        ('real'), the neighbour averages ('recommended'), and the overall
        best blend ('optimal_combination'), with material names resolved
        through the global `material` frame."""
        x = crl
        neighbors = self.find_n_neighbours(self.n_neighbors)
        df = neighbors.loc[x].values
        # Neighbour rows vs. the outlet's own row, beer columns only.
        df_cust = self.customers_original[self.customers_original.index.isin(df)][self.beer_cols]
        df_real = self.customers_original[self.customers_original.index.isin([x])][self.beer_cols]
        df_cust.columns = self.beer_cols_recommended
        totals = df_cust.sum(axis=0)
        totals_mean = df_cust.mean(axis=0)
        totals_real = df_real.sum(axis=0)
        n = self.n_beers
        sorted_recommended = totals_mean.sort_values(ascending=False)[:n]
        sorted_recommended_values = totals_mean.sort_values(ascending=False)[:n].values
        sorted_real = totals_real.sort_values(ascending=False)[:n]
        sorted_real_values = totals_real.sort_values(ascending=False)[:n].values
        both_compared = df = pd.DataFrame(columns=['real', 'recommended', 'real_values', 'recommended_values'])
        both_compared['real'] = sorted_real.index
        both_compared['recommended'] = sorted_recommended.index
        # Beer id without the '_reco' suffix.
        both_compared['recommended_2'] = both_compared['recommended'].apply(lambda x: x.split('_')[0])
        both_compared['real_values'] = sorted_real_values
        both_compared['recommended_values'] = sorted_recommended_values
        # Optimal combination = overall top-n across the neighbour means
        # and the outlet's own totals.
        both = pd.concat([totals_mean, totals_real], axis = 0)
        both_3 = pd.DataFrame(both)
        both_3 = both_3.reset_index()
        both_3['index'] = both_3['index'].apply(lambda x: x.split('_')[0])
        both_2 = both_3.groupby('index').max().reset_index()
        both_2.columns = both_2.columns.astype(str)
        both_2 = both_2.rename({'0' : 'value'})
        both_2.columns = ['beer', 'value']
        best_combination = both.sort_values(ascending=False)[:n]
        both_compared['optimal_combination'] = best_combination.index
        dict_1 = dict(zip(both_compared.real,both_compared.real_values))
        dict_2 = dict(zip(both_compared.recommended,both_compared.recommended_values))
        z = {**dict_1, **dict_2}
        both_compared['optimal_values'] = both_compared['optimal_combination'].apply(lambda x: z[x])
        # Resolve the beer ids (prefix before '_reco') to material names.
        both_compared['real_name'] = both_compared['real'].apply(lambda x: material.loc[x.split('_')[0]]['material'])
        both_compared['recommended_name'] = both_compared['recommended'].apply(lambda x: material.loc[x.split('_')[0]]['material'])
        both_compared['optimal_combination_name'] = both_compared['optimal_combination'].apply(lambda x: material.loc[x.split('_')[0]]['material'])
        return(both_compared)
    def get_recommended_beers(self, crl):
        """Return only the named beers of the optimal combination for `crl`."""
        reco_db = self.get_reco_db_v2(crl)
        reco_db['optimal_combination_stripped'] = reco_db['optimal_combination'].apply(lambda x: x.split('_')[0])
        reco_db['optimal_combination_names'] = reco_db['optimal_combination_stripped'].apply(lambda x: material.loc[x]['material'])
        return(reco_db['optimal_combination_names'])
    def my_func(self, db):
        """Drop every '<beer>_reco' row of `db` whose real '<beer>' row has
        a non-zero value, so an already-purchased beer is not re-recommended.

        NOTE(review): assumes the real '<beer>' row exists for every
        '<beer>_reco' row, otherwise `db.loc[real]` raises.
        """
        index = [*db.index]
        index_filtered = [*filter(lambda string: string.endswith("reco"),index)]
        # NOTE(review): unused.
        index_filtered_split = [x.split('_')[0] for x in index_filtered]
        for x in index_filtered:
            db_row = db.loc[x]
            value_reco = db_row['value']
            real = x.split('_')[0]
            real_value = db.loc[real]['value']
            if real_value != float(0):
                db = db.drop(x)
        return(db)
    def get_reco_db_v2(self, crl):
        """Like get_reco_db, but the optimal combination excludes beers the
        outlet already consumes (filtered through my_func)."""
        x = crl
        neighbors = self.find_n_neighbours(self.n_neighbors)
        df = neighbors.loc[x].values
        df_cust = self.customers_original[self.customers_original.index.isin(df)][self.beer_cols]
        df_real = self.customers_original[self.customers_original.index.isin([x])][self.beer_cols]
        df_cust.columns = self.beer_cols_recommended
        totals = df_cust.sum(axis=0)
        totals_mean = df_cust.mean(axis=0)
        totals_real = df_real.sum(axis=0)
        n = self.n_beers #Top n beers
        sorted_recommended = totals_mean.sort_values(ascending=False)
        sorted_recommended_values = totals_mean.sort_values(ascending=False).values
        sorted_real = totals_real.sort_values(ascending=False)
        sorted_real_values = totals_real.sort_values(ascending=False).values
        sorted_recommended_n = totals_mean.sort_values(ascending=False)[:n]
        sorted_recommended_values_n = totals_mean.sort_values(ascending=False)[:n].values
        sorted_real_n = totals_real.sort_values(ascending=False)[:n]
        sorted_real_values_n = totals_real.sort_values(ascending=False)[:n].values
        concat_all = pd.concat([sorted_recommended, sorted_real], axis = 0)
        concat_all = pd.DataFrame(concat_all)
        concat_all.columns = ['value']
        # Remove recommendations the outlet already buys.
        concat_all_v2 = self.my_func(concat_all)
        concat_all_v2 = concat_all_v2.sort_values(ascending = False, by = 'value')
        both_compared = df = pd.DataFrame(columns=['real', 'recommended', 'real_values', 'recommended_values'])
        both_compared['real'] = sorted_real_n.index
        both_compared['recommended'] = sorted_recommended_n.index
        both_compared['real_values'] = sorted_real_values_n
        both_compared['recommended_values'] = sorted_recommended_values_n
        both = pd.concat([totals_mean, totals_real], axis = 0)
        best_combination = concat_all_v2[:n]
        both_compared['optimal_combination'] = best_combination.index
        dict_1 = dict(zip(both_compared.real,both_compared.real_values))
        dict_2 = dict(zip(both_compared.recommended,both_compared.recommended_values))
        z = {**dict_1, **dict_2}
        both_compared['optimal_values'] = both_compared['optimal_combination'].apply(lambda x: z[x])
        both_compared['real_name'] = both_compared['real'].apply(lambda x: material.loc[x.split('_')[0]]['material'])
        both_compared['recommended_name'] = both_compared['recommended'].apply(lambda x: material.loc[x.split('_')[0]]['material'])
        both_compared['optimal_combination_name'] = both_compared['optimal_combination'].apply(lambda x: material.loc[x.split('_')[0]]['material'])
        return(both_compared)
    def get_similar_outlets_db(self, crl):
        """Return the full original records of the 10 outlets most similar
        to `crl`."""
        db_clean = self.DataCleanReco.customers_original
        outlet_neighbors = self.find_n_neighbours(10).loc[crl]
        outlet_neighbors = outlet_neighbors.reset_index()
        outlet_neighbors.columns = ['index', 'crl']
        outlet_neighbors = outlet_neighbors.set_index('crl')
        similar_outlets = outlet_neighbors.join(db_clean, how = 'left')
        return(similar_outlets)
class RecoSystemNewOutlet():
    """Variant of RecoSystem for outlets with no sales history of their
    own: recommendations come purely from neighbour consumption."""
    def __init__(self, DataCleanReco, n_neighbors, n_beers):
        # `DataCleanReco` is an *instance* of the DataCleanReco class.
        self.DataCleanReco = DataCleanReco
        self.data = self.DataCleanReco.clean_data()
        self.customers_original = DataCleanReco.customers_original
        # Beer-consumption columns all start with 'b9'.
        self.beer_cols = [*filter(lambda string: string.startswith("b9"), self.customers_original.columns)]
        self.beer_cols_recommended = [x+'_reco' for x in self.beer_cols]
        self.n_neighbors = n_neighbors
        self.n_beers = n_beers
    def my_func(self, db):
        """Same filter as RecoSystem.my_func.  NOTE(review): never called
        inside this class - looks like copy/paste leftover."""
        index = [*db.index]
        index_filtered = [*filter(lambda string: string.endswith("reco"),index)]
        index_filtered_split = [x.split('_')[0] for x in index_filtered]
        for x in index_filtered:
            db_row = db.loc[x]
            value_reco = db_row['value']
            real = x.split('_')[0]
            real_value = db.loc[real]['value']
            if real_value != float(0):
                db = db.drop(x)
        return(db)
    def get_reco_db_new_outlet(self, crl):
        """Top-n beers averaged over the nearest neighbours of `crl`, with
        material names resolved through the global `material` frame."""
        x = crl
        neighbors = self.find_n_neighbours(self.n_neighbors)
        df = neighbors.loc[x].values
        df_cust = self.customers_original[self.customers_original.index.isin(df)][self.beer_cols]
        df_cust.columns = self.beer_cols_recommended
        totals = df_cust.sum(axis=0)
        totals_mean = df_cust.mean(axis=0)
        n = self.n_beers #Top n beers
        sorted_recommended = totals_mean.sort_values(ascending=False)
        sorted_recommended_values = totals_mean.sort_values(ascending=False).values
        sorted_recommended_n = totals_mean.sort_values(ascending=False)[:n]
        sorted_recommended_values_n = totals_mean.sort_values(ascending=False)[:n].values
        sorted_recommended = pd.DataFrame(sorted_recommended).reset_index()
        sorted_recommended.columns = ['beer', 'value']
        # NOTE(review): concat_all_v2 is built and sorted but never used.
        concat_all_v2 = sorted_recommended
        concat_all_v2 = concat_all_v2.sort_values(ascending = False, by = 'value')
        both_compared = df = pd.DataFrame(columns=['recommended', 'recommended_values'])
        both_compared['recommended'] = sorted_recommended_n.index
        both_compared['recommended_values'] = sorted_recommended_values_n
        # NOTE(review): dict_2 is built but never used.
        dict_2 = dict(zip(both_compared.recommended,both_compared.recommended_values))
        both_compared['recommended_name'] = both_compared['recommended'].apply(lambda x: material.loc[x.split('_')[0]]['material'])
        return(both_compared)
    def get_gower_matrix(self):
        """Pairwise Gower distances; zeros replaced by 1000 so an outlet
        never neighbours itself."""
        distances = gower.gower_matrix(self.data)
        distances = pd.DataFrame(distances, index=self.data.index)
        distances.columns = distances.index
        distances=distances.replace(0, 1000)
        return(distances)
    def find_n_neighbours(self, n):
        """n nearest neighbours per outlet (columns top1..topn)."""
        df = self.get_gower_matrix()
        # NOTE(review): `order` is computed but never used.
        order = np.argsort(df.values, axis=1)[:, :n]
        df = df.apply(lambda x: pd.Series(x.sort_values(ascending=True)
                                          .iloc[:n].index,
                                          index=['top{}'.format(i) for i in range(1, n+1)]), axis=1)
        return df
# Wire everything together: a cleaning pipeline over the new-outlet
# feature set and a 10-neighbour / 5-beer recommender on top of it.
my_class = DataCleanReco(prediction_columns_new_outlet, customers)
my_next_class = RecoSystem(my_class, 10, 5)
|
12,544 | 7762206ee4c929c1c7db8a984d5e817d7eb2c48c | local_env = Environment() # initialize the environment
# Shared library built from the chapter sources; headers live in src/.
library = local_env.SharedLibrary(
    target="chapter_one",
    source=["src/chapter_one.cpp"],
    CPPPATH=["src"])
# Driver executable linked against the library above.  RPATH lets the
# binary locate the shared object from the source and build trees at
# runtime without LD_LIBRARY_PATH.
main = local_env.Program(
    target="chapter_one_driver",
    source=["src/main.cpp"],
    CPPPATH=["src"],
    LIBS=[library],
    LIBPATH=["src"],
    RPATH=["src", "../build"])
# Explicit Depends() is unnecessary: LIBS already records the dependency.
# Depends(main, library)
|
12,545 | 5e76e8aaf3d92c7b1e1c7b9d4149b61085be13f2 | # coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
def is_installed():
    # type: () -> bool
    """
    Returns whether the C extension is installed correctly.

    True only when the ``ccurl`` extension can be imported *and* the
    pure-Python ``Curl`` has been swapped out for the C implementation.
    """
    try:
        # noinspection PyUnresolvedReferences
        from ccurl import Curl as CCurl
    except ImportError:
        # No C extension at all.
        return False

    # noinspection PyUnresolvedReferences
    from iota.crypto import Curl
    return issubclass(Curl, CCurl)
def check_installation():
    """
    Outputs a message indicating whether the C extension is installed
    correctly.
    """
    if is_installed():
        print('Hooray! CCurl is installed correctly!')
    else:
        print('Aww, man! CCurl is NOT installed correctly!')

    print('For support, visit the #iota-libs-pyota channel on the IOTA Slack.')
    print('https://slack.iota.org/')
|
12,546 | 4bb12ea70ddf168bfa914e3771dcdd54edaba0b3 | from django.contrib import admin
from .models import Order, OrderDetail, Product, Student, ClassModel, Skill, Subject, SubjectRegistration, Profile
from students import models
# Register your models here.
class StudentAdmin(admin.ModelAdmin):
    """Admin for Student: grouped edit form plus change-list filters."""
    # fields = ['name', 'gender', 'address', 'student_code', 'class_model']
    # Grouped sections of the edit form (section titles in Vietnamese).
    fieldsets = [
        ('Thông tin sinh viên', {'fields': [ 'name', 'gender', 'address', 'student_code']}),
        ('Lớp học', {'fields': ['class_model']}),
        ('Tình trạng thanh toán', {'fields': ['is_paid']}),
        ('Ngày nhập học', {'fields': ['go_to_school_at']}),
        ('Kỹ năng', {'fields': ['skills']}),
        ('Profile', {'fields': ['profile']}),
    ]
    # NOTE(review): 'isPaid', 'genderStr' and 'skillStr' are presumably
    # display helpers defined on the Student model - verify they exist.
    list_display = ('name', 'address', 'student_code', 'isPaid', 'genderStr', 'class_model', 'skillStr')
    list_filter = ['gender', 'class_model', 'is_paid', 'go_to_school_at']
    search_fields = ['name', 'address', 'student_code']
# class StudentInline(admin.StackedInline):
#     model = Student
#     extra = 2
class StudentInline(admin.TabularInline):
    """Inline table of Students shown on the ClassModel admin page."""
    model = Student
    # Two extra empty rows for quick entry.
    extra = 2
class ClassAdmin(admin.ModelAdmin):
    """Admin for ClassModel with its students editable inline."""
    fields = ['name', 'teacher_name']
    inlines = [StudentInline]
class SubjectRegistrationAdmin(admin.ModelAdmin):
    """Admin for SubjectRegistration (student/subject enrolment + score)."""
    fields = ['student', 'subject', 'reg_at', 'score']
    list_display = ['id', 'student', 'subject', 'reg_at', 'score']
    list_filter = ['student', 'subject']
    # inlines = [StudentInline]
class OrderDetailInline(admin.TabularInline):
    """Inline line-items shown on the Order admin page."""
    model = OrderDetail
class OrderAdmin(admin.ModelAdmin):
    """Admin for Order with its details editable inline."""
    fields = ['customer_name', 'address', 'phone', 'total']
    list_display = ['id', 'customer_name', 'address', 'phone', 'total']
    inlines = [OrderDetailInline]
# Model registrations: custom admin classes where configured above,
# Django's default ModelAdmin otherwise.  OrderDetail is only managed
# inline through OrderAdmin, hence its registration stays commented out.
admin.site.register(ClassModel, ClassAdmin)
admin.site.register(Student, StudentAdmin)
admin.site.register(Skill)
admin.site.register(Subject)
admin.site.register(Profile)
admin.site.register(Product)
admin.site.register(Order, OrderAdmin)
# admin.site.register(OrderDetail)
admin.site.register(SubjectRegistration, SubjectRegistrationAdmin)
12,547 | 7ac0bccb9cc66b9a3c830007d47f4ff216b7ee23 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-04-04 08:14
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """No-op migration with two parents and no operations - consistent
    with a merge migration reconciling two branches of the 'blog' app's
    migration history (TODO confirm against the app's migration graph)."""

    dependencies = [
        ('blog', '0042_auto_20160403_0931'),
        ('blog', '0009_userprofile_website'),
    ]

    operations = [
    ]
|
12,548 | c3de87544f48f9b5cf22722f9cac552a43a242be | #!/usr/bin/env python3
"""Computes the proportions of mellizas and invariantly diacriticized tokens in a corpus."""
import argparse
import itertools
import os
from tqdm import tqdm
import unidecode
import diacriticize
if __name__=='__main__':
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('corpus_path', help = 'str: path to corpus; e.g. `dev.txt` ')
    args = parser.parse_args()

    # Instantiates a `Diacriticize` object that will attempt to
    # diacriticize all undiacriticized tokens.
    diacriticizer = diacriticize.Diacriticize()

    os.chdir('..')
    os.chdir('data')

    # Fallback suffix-restoration rules applied, in order, to tokens found
    # in neither lookup table.  The first matching suffix wins and is
    # replaced everywhere it occurs in the token (mirroring the original
    # str.replace behaviour).
    # FIXES vs. the original elif chain:
    #   * several branches sliced fewer characters than the suffix they
    #     tested ('latrias', 'fobicos', 'scopicos', 'aceos', 'oricos'),
    #     so those branches could never fire;
    #   * the 'grafia' branch tested for the already-accented 'grafía'
    #     inside an unaccented string, so it could never fire either.
    # NOTE(review): 'zones' -> 'zónes' and 'cigotico' -> 'cigótigo' look
    # like linguistic typos but are preserved as-is pending confirmation.
    suffix_fixes = [
        ('ion', 'ión'),
        ('patia', 'patía'),
        ('patias', 'patías'),
        ('grafia', 'grafía'),
        ('grafias', 'grafías'),
        ('latria', 'latría'),
        ('eria', 'ería'),
        ('erias', 'erías'),
        ('latrias', 'latrías'),
        ('tomia', 'tomía'),
        ('tomias', 'tomías'),
        ('logia', 'logía'),
        ('logias', 'logías'),
        ('abamos', 'ábamos'),
        ('america', 'américa'),
        ('americas', 'américas'),
        ('logico', 'lógico'),
        ('fobico', 'fóbico'),
        ('fobicos', 'fóbicos'),
        ('logicos', 'lógicos'),
        ('zon', 'zón'),
        ('zones', 'zónes'),
        ('scopico', 'scópico'),
        ('scopicos', 'scópicos'),
        ('onimo', 'ónimo'),
        ('onimos', 'ónimos'),
        ('onicos', 'ónicos'),
        ('onicas', 'ónicas'),
        ('onica', 'ónica'),
        ('onico', 'ónico'),
        ('ectomias', 'ectomías'),
        ('ectomia', 'ectomía'),
        ('cigotico', 'cigótigo'),
        ('cigoticos', 'cigótigos'),
        ('centrico', 'céntrico'),
        ('centricos', 'céntricos'),
        ('aceo', 'áceo'),
        ('aceos', 'áceos'),
        ('orico', 'órico'),
        ('oricos', 'óricos'),
        ('iendose', 'iéndose'),
        ('ificamente', 'íficamente'),
        ('rian', 'rían'),
        ('graficas', 'gráficas'),
        ('graficos', 'gráficos'),
        ('grafico', 'gráfico'),
        ('grafica', 'gráfica'),
    ]

    def print_report(title, correct, incorrect, total, accuracy, leading_blank=False):
        """Print one accuracy section in the fixed report format."""
        print(('\n' + '\n' + title) if leading_blank else title)
        print('==============================================================')
        print(f'Correct: {correct}')
        print(f'Incorrect: {incorrect}')
        print(f'Total: {total}')
        print('==============================================================')
        print(f'Accuracy: {round(accuracy, 4)}' + '\n' + '\n')

    with open(args.corpus_path, 'r') as source:
        # Creates two parallel lists:
        # 1. `original_toks`: casefolded tokens of the original corpus
        # 2. `predicted_toks`: the diacriticization predicted per token
        original_toks = []
        predicted_toks = []
        for sent in tqdm(source):
            tok_sent = sent.split()
            for tok in tok_sent:
                original_toks.append(tok.casefold())
            for index, tok in enumerate(tok_sent):
                unidec_melliza = unidecode.unidecode(tok.casefold())
                if unidec_melliza in diacriticizer.melliza_and_clsfr_dict:
                    # Ambiguous token ("melliza"): use its trained classifier.
                    predicted_toks.append(diacriticizer.predict_token(tok_sent, index, unidec_melliza))
                elif unidec_melliza in diacriticizer.invariantly_diacriticized_tokens_dict:
                    # Token has exactly one diacriticized form.
                    predicted_toks.append(diacriticizer.invariantly_diacriticized_tokens_dict[unidec_melliza])
                else:
                    for suffix, replacement in suffix_fixes:
                        if unidec_melliza.endswith(suffix):
                            predicted_toks.append(unidec_melliza.replace(suffix, replacement))
                            break
                    else:
                        predicted_toks.append(unidec_melliza)

    # Computes percentage of mellizas that are correctly predicted.
    ## Mellizas used to train clsfrs: one pickle file per melliza.  Stored
    ## as a set for O(1) membership tests (was a list).
    os.chdir('pickles')
    picks = {name.replace('.pickle', '') for name in os.listdir()}

    correct = 0
    incorrect = 0
    for original_tok, predicted_tok in tqdm(zip(original_toks, predicted_toks)):
        if unidecode.unidecode(original_tok) in picks:
            if original_tok == predicted_tok:
                correct += 1
            else:
                incorrect += 1
    total_mells = correct + incorrect
    print_report('MELLIZA TOKEN ACCURACY', correct, incorrect, total_mells,
                 correct / total_mells, leading_blank=True)

    # Undiacriticized (accent-stripped) version of every original token.
    unidec_toks = [unidecode.unidecode(tok) for tok in original_toks]

    # Invariantly diacriticized tokens: diacriticized in the original but
    # not handled by a melliza classifier.
    diacriticized_toks = 0
    invariantly_diacriticized_toks = 0
    diacriticized_mellizas = 0
    correct = 0
    incorrect = 0
    for tok, unidec_tok, predicted_tok in zip(original_toks, unidec_toks, predicted_toks):
        if tok != unidec_tok:
            diacriticized_toks += 1
            if unidec_tok in picks:
                diacriticized_mellizas += 1
            else:
                invariantly_diacriticized_toks += 1
                if tok.casefold() == predicted_tok:
                    correct += 1
                else:
                    incorrect += 1
    invar_dia_tok_acc = (correct / (incorrect + correct))
    print_report('INVARIANTLY DIACRITICIZED TOKEN ACCURACY', correct, incorrect,
                 invariantly_diacriticized_toks, invar_dia_tok_acc)
    assert(diacriticized_mellizas + invariantly_diacriticized_toks == diacriticized_toks)

    # Accuracy over every token diacriticized in EITHER the original or
    # the predicted corpus.
    diacriticized_tok_set = set()
    for tok, unidec_tok, predicted_tok in zip(original_toks, unidec_toks, predicted_toks):
        if tok != unidec_tok:
            diacriticized_tok_set.add(unidec_tok)
        if predicted_tok != unidec_tok:
            diacriticized_tok_set.add(unidec_tok)
    correct = 0
    incorrect = 0
    for tok, unidec_tok, predicted_tok in zip(original_toks, unidec_toks, predicted_toks):
        if unidec_tok in diacriticized_tok_set:
            if tok == predicted_tok:
                correct += 1
            else:
                incorrect += 1
    diacriticized_tok_acc = (correct / (incorrect + correct))
    print_report('DIACRITICIZED TOKEN ACCURACY:', correct, incorrect,
                 correct + incorrect, diacriticized_tok_acc)

    # Baseline: accuracy if the corpus were left entirely undiacriticized.
    baseline_accuracy = (len(original_toks) - diacriticized_toks)/len(original_toks)
    print_report('BASELINE TOKEN ACCURACY', len(original_toks) - diacriticized_toks,
                 diacriticized_toks, len(original_toks), baseline_accuracy)

    # Overall token accuracy of the predicted corpus.
    correct = 0
    incorrect = 0
    incorrect_toks = []  # kept for interactive debugging
    for tok, predicted_tok in zip(original_toks, predicted_toks):
        if tok == predicted_tok:
            correct += 1
        else:
            incorrect += 1
            incorrect_toks.append(tok)
    per_correct_pred_toks = (correct / (incorrect + correct))
    print_report('TOKEN ACCURACY', correct, incorrect, len(original_toks),
                 per_correct_pred_toks)
12,549 | 939768ba44afdf271ab9a7080b5dda1f45bb0699 | from MathUtills import integerdivide
from numba import int32, float32, boolean, short, int64
#from FaceNormal import FaceNormal
import numpy as np
import math
from MathUtills import integerdivide
import numba
from Graphics3D import method2798, method2799
from numba.experimental import jitclass
from expModel import Model
Model_type = Model.class_type.instance_type
@jitclass()
class SceneTileModel:
    # Per-vertex world coordinates and per-triangle colour/vertex-index
    # data for one map tile, in the fixed-point units of the renderer.
    vertexX : int32[:]
    vertexY : int32[:]
    vertexZ : int32[:]
    triangleColorA : int32[:]
    triangleColorB : int32[:]
    triangleColorC : int32[:]
    field1772 : int32[:]
    field1774 : int32[:]
    field1778 : int32[:]
    triangleTextureId : numba.optional(int32[:])
    flatShade : boolean
    shape : int32
    rotation : int32
    underlay : int32
    overlay : int32
    def __init__(self, field1790, field1791, var1, var2, var3, var4, var5, var6, var7, var8, var9, var10, var11, var12, var13, var14, var15, var16, var17, var18, var19) -> None:
        """Build the tile model for shape `var1` rotated by `var2`.

        Parameter roles (deobfuscated names unknown; inferred from use):
        field1790/field1791 are the per-shape vertex-code and triangle
        tables (formerly hard-coded statics), var3 the texture id (-1 =
        untextured), var4/var5 the tile x/y, var6..var9 the corner
        heights, var10..var13 underlay colours, var14..var17 overlay
        colours, var18/var19 underlay/overlay ids - TODO confirm the
        corner ordering against the original client.
        """
        self.triangleTextureId = None
        # FIX: the original evaluated this boolean expression and discarded
        # it, then assigned the raw height int var7 to the boolean
        # flatShade field.
        self.flatShade = var7 == var6 and var8 == var6 and var9 == var6
        self.shape = var1
        self.rotation = var2
        self.underlay = var18
        self.overlay = var19
        var20 = 128                          # tile size in fixed-point units
        var21 = integerdivide(var20, 2)      # half tile
        var22 = integerdivide(var20, 4)      # quarter tile
        var23 = integerdivide(var20 * 3, 4)  # three-quarter tile
        var24 = field1790[var1]              # vertex codes for this shape
        var25 = len(var24)
        self.vertexX = np.zeros(shape = (var25), dtype = np.int32)
        self.vertexY = np.zeros(shape = (var25), dtype = np.int32)
        self.vertexZ = np.zeros(shape = (var25), dtype = np.int32)
        var26 = np.zeros(shape = (var25), dtype = np.int32)  # underlay colour per vertex
        var27 = np.zeros(shape = (var25), dtype = np.int32)  # overlay colour per vertex
        var28 = var20 * var4
        var29 = var5 * var20
        for var30 in range(0, var25):
            var31 = var24[var30]
            # Rotate the vertex code within its ring (edge midpoints,
            # outer corners, inner corners) by `var2` quarter turns.
            if((var31 & 1) == 0 and var31 <= 8):
                var31 = (var31 - var2 - var2 - 1 & 7) + 1
            if(var31 > 8 and var31 <= 12):
                var31 = (var31 - 9 - var2 & 3) + 9
            if(var31 > 12 and var31 <= 16):
                var31 = (var31 - 13 - var2 & 3) + 13
            # Map the vertex code to a position (var32, var33), a height
            # (var34) and the underlay/overlay colours (var35, var36).
            if(var31 == 1):
                var32 = var28
                var33 = var29
                var34 = var6
                var35 = var10
                var36 = var14
            elif(var31 == 2):
                var32 = var28 + var21
                var33 = var29
                var34 = var7 + var6 >> 1
                var35 = var11 + var10 >> 1
                var36 = var15 + var14 >> 1
            elif(var31 == 3):
                var32 = var28 + var20
                var33 = var29
                var34 = var7
                var35 = var11
                var36 = var15
            elif(var31 == 4):
                var32 = var28 + var20
                var33 = var29 + var21
                var34 = var8 + var7 >> 1
                var35 = var11 + var12 >> 1
                var36 = var15 + var16 >> 1
            elif(var31 == 5):
                var32 = var28 + var20
                var33 = var29 + var20
                var34 = var8
                var35 = var12
                var36 = var16
            elif(var31 == 6):
                var32 = var28 + var21
                var33 = var29 + var20
                var34 = var8 + var9 >> 1
                var35 = var13 + var12 >> 1
                var36 = var17 + var16 >> 1
            elif(var31 == 7):
                var32 = var28
                var33 = var29 + var20
                var34 = var9
                var35 = var13
                var36 = var17
            elif(var31 == 8):
                var32 = var28
                var33 = var29 + var21
                var34 = var9 + var6 >> 1
                var35 = var13 + var10 >> 1
                var36 = var17 + var14 >> 1
            elif(var31 == 9):
                var32 = var28 + var21
                var33 = var29 + var22
                var34 = var7 + var6 >> 1
                var35 = var11 + var10 >> 1
                var36 = var15 + var14 >> 1
            elif(var31 == 10):
                var32 = var28 + var23
                var33 = var29 + var21
                var34 = var8 + var7 >> 1
                var35 = var11 + var12 >> 1
                var36 = var15 + var16 >> 1
            elif(var31 == 11):
                var32 = var28 + var21
                var33 = var29 + var23
                var34 = var8 + var9 >> 1
                var35 = var13 + var12 >> 1
                var36 = var17 + var16 >> 1
            elif(var31 == 12):
                var32 = var28 + var22
                var33 = var29 + var21
                var34 = var9 + var6 >> 1
                var35 = var13 + var10 >> 1
                var36 = var17 + var14 >> 1
            elif(var31 == 13):
                var32 = var28 + var22
                var33 = var29 + var22
                var34 = var6
                var35 = var10
                var36 = var14
            elif(var31 == 14):
                var32 = var28 + var23
                var33 = var29 + var22
                var34 = var7
                var35 = var11
                var36 = var15
            elif(var31 == 15):
                var32 = var28 + var23
                var33 = var29 + var23
                var34 = var8
                var35 = var12
                var36 = var16
            else:
                var32 = var28 + var22
                var33 = var29 + var23
                var34 = var9
                var35 = var13
                var36 = var17
            self.vertexX[var30] = var32
            self.vertexY[var30] = var34
            self.vertexZ[var30] = var33
            var26[var30] = var35
            var27[var30] = var36
        # Triangle table: 4 entries per triangle
        # (overlay flag, vertex a, vertex b, vertex c).
        var38 = field1791[var1]
        var31 = integerdivide(len(var38), 4)
        self.field1772 = np.zeros(shape = (var31), dtype = np.int32)
        self.field1774 = np.zeros(shape = (var31), dtype = np.int32)
        self.field1778 = np.zeros(shape = (var31), dtype = np.int32)
        self.triangleColorA = np.zeros(shape = (var31), dtype = np.int32)
        self.triangleColorB = np.zeros(shape = (var31), dtype = np.int32)
        self.triangleColorC = np.zeros(shape = (var31), dtype = np.int32)
        if(var3 != -1):
            self.triangleTextureId = np.zeros(shape = (var31), dtype = np.int32)
        var32 = 0
        for var33 in range(0, var31):
            var34 = var38[var32]
            var35 = var38[var32 + 1]
            var36 = var38[var32 + 2]
            var37 = var38[var32 + 3]
            var32 += 4
            # Corner vertex indices (< 4) rotate with the tile.
            if(var35 < 4):
                var35 = var35 - var2 & 3
            if(var36 < 4):
                var36 = var36 - var2 & 3
            if(var37 < 4):
                var37 = var37 - var2 & 3
            self.field1772[var33] = var35
            self.field1774[var33] = var36
            self.field1778[var33] = var37
            if(var34 == 0):
                # Underlay triangle: underlay colours, no texture.
                self.triangleColorA[var33] = var26[var35]
                self.triangleColorB[var33] = var26[var36]
                self.triangleColorC[var33] = var26[var37]
                if(self.triangleTextureId is not None):
                    self.triangleTextureId[var33] = -1
            else:
                # Overlay triangle: overlay colours plus the tile texture.
                self.triangleColorA[var33] = var27[var35]
                self.triangleColorB[var33] = var27[var36]
                self.triangleColorC[var33] = var27[var37]
                if(self.triangleTextureId is not None):
                    self.triangleTextureId[var33] = var3
        # Min/max corner height.
        var33 = var6
        # FIX: var34 (the maximum) previously started from var7 and then
        # tested the no-op `var7 > var7`, so a strictly largest var6 was
        # never reflected in the maximum.
        var34 = var6
        if(var7 < var33):
            var33 = var7
        if(var7 > var34):
            var34 = var7
        if(var8 < var33):
            var33 = var8
        if(var8 > var34):
            var34 = var8
        if(var9 < var33):
            var33 = var9
        if(var9 > var34):
            var34 = var9
        # NOTE(review): var34 is scaled but then unused - the source this
        # was ported from likely continues beyond this point.
        var34 = integerdivide(var34, 14)
12,550 | 8ae4a3c85b426f67e0f0d32b9256e4f424ca03b8 | #!/usr/bin/env python
import json
import argparse
import datetime
import os
from filters import valid_json_filter
from functools import partial
from pyspark import SparkContext, SparkConf
def rmkey(k, o):
    """Delete key `k` from mapping `o` when present; return `o` (mutated in place)."""
    o.pop(k, None)
    return o
def extractKeys(keys, o):
    """Return a new dict with only the entries of `o` whose keys appear in `keys`."""
    return {k: o[k] for k in keys if k in o}
def removeAttachments(x):
    """Strip the base64 payload ('contents64') from every attachment of the record."""
    x['attachments'] = [rmkey('contents64', att) for att in x['attachments']]
    return x
def extractAttachments(x):
    """Flatten an email record into one dict per attachment.

    Each attachment keeps a whitelisted set of its own fields and is stamped
    with the parent email's identifying fields.
    """
    parent_fields = {
        'id': x['id'],
        'datetime': x['datetime'],
        "ingest_id": x["ingest_id"],
        "case_id": x["case_id"],
        "alt_ref_id": x["alt_ref_id"],
        "label": x["label"],
        "original_artifact": x["original_artifact"],
    }
    wanted = [
        'guid',
        'extension',
        'filename',
        'content',
        'contents64',
        'content_extracted',
        'content_encrypted',
        'content_length',
        'content_type',
        'content_hash',
        'content_tika_langid',
        'exif',
        'image_analytics',
        'metadata',
        'size',
    ]
    return [dict(extractKeys(wanted, att), **parent_fields) for att in x['attachments']]
def dump(x):
    """Serialize `x` to its JSON string representation."""
    return json.dumps(x)
if __name__ == "__main__":
    # CLI driver: read joined email JSON, then write (a) emails with attachment
    # base64 stripped and (b) flattened per-attachment records, each to its own
    # Spark output directory.
    desc='newman split emails and attachment for indexing '
    parser = argparse.ArgumentParser(
        description=desc,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=desc)
    parser.add_argument("input_emails_content_path", help="joined email extracted content and base64 attachment")
    parser.add_argument("output_path_emails", help="output directory for spark results emails without base64 attachment")
    parser.add_argument("output_path_raw_attachments", help="output directory for spark results attachments ")
    parser.add_argument("-v", "--validate_json", action="store_true", help="Filter broken json. Test each json object and output broken objects to tmp/failed.")
    args = parser.parse_args()
    # Timestamp used by the JSON filter to tag failed-object output.
    lex_date = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
    # NOTE(review): Python 2 print statement — this script is py2-only.
    print "INFO: Running with json filter {}.".format("enabled" if args.validate_json else "disabled")
    filter_fn = partial(valid_json_filter, os.path.basename(__file__), lex_date, not args.validate_json)
    conf = SparkConf().setAppName("Newman split attachments and emails")
    sc = SparkContext(conf=conf)
    # Both save actions re-evaluate rdd_emails; it is not cached here.
    rdd_emails = sc.textFile(args.input_emails_content_path).filter(filter_fn).map(lambda x: json.loads(x))
    rdd_emails.map(removeAttachments).map(dump).saveAsTextFile(args.output_path_emails)
    rdd_emails.flatMap(extractAttachments).map(dump).saveAsTextFile(args.output_path_raw_attachments)
|
12,551 | c0788353657db6ef8b923770243bec5de01d9ebc | import os
from collections import deque
import requests
from flask import Response
from .constants import PROGRAMS_BASE_URL
def send_health_update():
    """POST a heartbeat to the web service's node-health endpoint.

    Returns a (payload, status_code) pair in Flask view style.
    """
    endpoint = f'{os.getenv("WEB_BASE_URL")}/node-health'
    response = requests.post(endpoint)
    if response.status_code == 200:
        return {'Result': 'Successfully updated node up-time.'}, 200
    return {'Status': 'Failed to update node up-time.'}, 400
def load_test():
    """Return the sample browser test as a deque of single-step driver commands.

    Bug fix: `By` and `Keys` were referenced by the lambdas but never imported
    at module scope, so invoking any command raised NameError. They are imported
    here so the lambdas capture them via closure.
    """
    from selenium.webdriver.common.by import By
    from selenium.webdriver.common.keys import Keys

    sample_test = [
        lambda driver: driver.get("https://www.google.com/"),
        lambda driver: driver.set_window_size(880, 623),
        lambda driver: driver.find_element(By.NAME, "q").send_keys("https://www.google.com/recaptcha/api2/demo"),
        lambda driver: driver.find_element(By.NAME, "q").send_keys(Keys.ENTER),
        lambda driver: driver.find_element(By.CSS_SELECTOR, ".g:nth-child(1) .LC20lb > span").click(),
        lambda driver: driver.find_element(By.ID, "recaptcha-demo-submit").click()
    ]
    return deque(sample_test)
def execute_command(driver, command_function):
    """Run one test step against `driver`; True on success, False on any exception."""
    try:
        command_function(driver)
        return True
    except Exception:
        import traceback
        traceback.print_exc()
        return False
def run_test():
    """Run the loaded test steps in order against a real Chrome driver.

    Fixes over the original:
    - a failing step previously looped forever (``else: pass`` retried the same
      command endlessly); it now aborts the remaining steps instead;
    - the driver is quit in a ``finally`` block so Chrome is cleaned up even if
      a step raises;
    - unused local selenium imports were removed.

    Returns ('No tests loaded.', 400) when the queue is empty, else None.
    """
    from selenium import webdriver

    test_queue = load_test()
    if not test_queue:
        return 'No tests loaded.', 400
    driver = webdriver.Chrome()
    try:
        while test_queue:
            if execute_command(driver, test_queue[0]):
                test_queue.popleft()
            else:
                # Step failed: abandon the run rather than retrying forever.
                break
    finally:
        driver.quit()
    print('finished running test')
|
12,552 | 90f31c4ca0aba01736ff4e62ec41896456bf312b | import random
class Patient(object):
    """A patient with a random numeric id; no bed until a hospital admits them."""

    def __init__(self, name, allergies):
        self.name = name
        self.allergies = allergies
        # Random identifier in [1, 10000].
        self.id = random.randint(1, 10000)
        # Assigned by Hospital.admit(); None means not admitted yet.
        self.bed_number = None
class Hospital(object):
    """A hospital with a randomly chosen capacity that admits patients.

    Fix: the full-capacity message used the Python-2-only print statement;
    the parenthesized call below prints identically on Python 2 and 3.
    """

    def __init__(self, name):
        self.patients = []
        self.name = name
        # Capacity picked at random between 10 and 30 beds (inclusive).
        self.capacity = random.randint(10, 30)

    def admit(self, patient):
        """Admit `patient` if a bed is free, assigning its bed_number.

        Returns self when full (original behavior); otherwise returns None.
        """
        if len(self.patients) == self.capacity:
            print('We are at full capacity')
            return self
        else:
            self.patients.append(patient)
            # Beds are numbered 0..capacity-1 in admission order.
            patient.bed_number = len(self.patients) - 1
class Something(Hospital):
    """Demo subclass: a Hospital whose name is fixed to 'something'."""
    def __init__(self):
        super(Something, self).__init__('something')
        # pass
class Works(Something):
    """Demo subclass proving that chained super() calls reach Hospital.__init__."""
    def __init__(self):
        super(Works, self).__init__()
        # pass
# Demo: the deepest subclass still gets the randomly chosen capacity from
# Hospital.__init__ via the super() chain. (Python 2 print statement.)
x = Works()
print x.capacity
# a = Patient('Bob', [])
# b = Patient('Cody', [])
# c = Hospital('Codys Hospital')
# c.admit(a)
# c.admit(b)
# print a.bed_number
# print b.bed_number
|
def binarySearch(lista, target):
    """Iterative binary search over a sorted list.

    Returns (True, index, count) when `target` is found and (False, count)
    otherwise, where `count` is the number of halving steps taken before the
    result. Fix: `high` was initialised to len(lista) (one past the last valid
    index), which the original papered over with an extra bounds guard; using
    len(lista) - 1 makes the guard unnecessary while preserving results.
    """
    low = 0
    high = len(lista) - 1  # last valid index (was len(lista): off-by-one)
    count = 0
    while low <= high:
        mid = (low + high) // 2
        if lista[mid] == target:
            return True, mid, count
        if lista[mid] > target:
            high = mid - 1
        else:
            low = mid + 1
        count += 1
    return False, count
b=[]  # NOTE(review): appears unused in this file — candidate for removal
|
12,554 | 6a8c26e35a93052b603b687e17d772f1a1061348 | from django.http import HttpResponse
from django.shortcuts import render,get_object_or_404
from django.shortcuts import redirect
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth import login
from .forms import ContactForm
from requests.models import Requests
def home_page(request):
    """Render the landing page, adding premium content for authenticated users."""
    print(request.session.get('first_name', 'Unknown'))
    print(request.session.get('cart_id'))
    username = request.session.get('first_name', 'Unknown')
    context = {
        "title": "BilShare",
        "intro": "This is the home page",
        'username': username,
    }
    # NOTE: is_authenticated is called (old-Django callable form).
    if request.user.is_authenticated():
        context["premium_content"] = "The content on this page is exclusive to this user"
    return render(request, "home_page.html", context)
def about_page(request):
    """Render the about page (reuses the home page template)."""
    return render(request, "home_page.html", {
        "title": "About",
        "intro": "This is the about page",
    })
def contact_page(request):
    """Display the contact form and log cleaned data on a valid POST."""
    contact_form = ContactForm(request.POST or None)
    if contact_form.is_valid():
        print(contact_form.cleaned_data)
    context = {
        "title": "Contact",
        "intro": "This is the con tact page",
        "form": contact_form,
        "brand": "New Brand Name",
    }
    return render(request, "contact/view.html", context)
def my_orders(request):
    """List the current user's requests, most recent first."""
    # [::-1] reverses the default queryset order in Python.
    orders = Requests.objects.filter(user=request.user)[::-1]
    print(orders)
    #print(orders)
    return render(request, "my_orders.html", {'orders': orders})
# def request_update(request):
# order = get_object_or_404(Requests, pk=pk)
# if request.method == 'POST':
# form = RequestForm(request.POST, instance=order)
# else:
# form = RequestForm(instance=order)
# return save_book_form(request, form, 'books/includes/partial_book_update.html') |
12,555 | 65af86ec82d641df9aa2096d917cc7f371a4bbc7 | import allure
import pytest
class TestSetting:
    """Allure severity demo: identical trivial tests tagged with each severity level."""

    @allure.severity("blocker")
    @allure.step(title="用于测试“XXX”功能的测试脚本")
    def test_login1(self):
        print("")
        assert 1

    @allure.severity("critical")
    @allure.step(title="用于测试“XXXX”功能的测试脚本")
    def test_login2(self):
        print("")
        assert 1

    @allure.severity("normal")
    @allure.step(title="用于测试“XXXX”功能的测试脚本")
    def test_login3(self):
        print("")
        assert 1

    # Default severity is "normal" — equivalent to @allure.severity("normal").
    @allure.step(title="用于测试“XXXX”功能的测试脚本")
    def test_login4(self):
        print("")
        assert 1

    @allure.severity("minor")
    @allure.step(title="用于测试“XXXX”功能的测试脚本")
    def test_login5(self):
        print("")
        assert 1

    @allure.severity("trivial")
    @allure.step(title="用于测试“XXXX”功能的测试脚本")
    def test_login6(self):
        print("")
        assert 1
|
12,556 | 276c4b699fe7e78bf1f5e6d659512ac9ba3ed58d | import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split, cross_validate
import dill
def main(N=3, save=True, verbose=False):
    """Train one XGBoost regressor per target stat column and optionally save each.

    N: seasons per training row (3 or 4); the first N-1 year blocks are
    features, the last block supplies targets. save: refit on all data and
    dump each model with dill. verbose: print CV and hold-out metrics.
    """
    data_df = pd.read_csv('data/training_data_%d_years.csv' % N)
    # Columns are N equal-width per-season blocks.
    N_CATS = int(len(data_df.columns) / N)
    # Cut off _Y# from categories
    CATS = [c[:-3] for c in data_df.columns[:N_CATS]]
    # Drop rows whose final-season block is all zeros (no target season played).
    data_df = data_df.drop(data_df.index[data_df.iloc[:,-N_CATS:].sum(axis=1) == 0])
    # Split input and output data
    X = data_df.iloc[:,:(N_CATS*(N-1))]
    y = data_df.iloc[:,-N_CATS:]
    # Process output data
    # Drop output columns Age, GP, MP_* and *_std
    y = y.drop(columns = y.columns[:4])
    y = y.drop(columns=[c for c in y.columns if '_avg' not in c])
    # Strip the '_avg_Y#' suffix (7 chars) from the remaining target names.
    y = y.rename(columns = {c:c[:-7] for c in y.columns})
    # Split train/test data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    # Generate XGBoost Regressor Model
    if verbose:
        print('XGBoost Regressor Results:')
    # One independent regressor per target column.
    for col in y.columns:
        model = xgb.XGBRegressor(learning_rate=0.01,
                                 reg_alpha=0.001,
                                 reg_lambda=0.005,
                                 n_estimators=1000,
                                 max_depth=4,
                                 min_child_weight=1,
                                 gamma=0.2,
                                 subsample=0.7,
                                 colsample_bytree=0.6,
                                 scale_pos_weight=1,
                                 objective='reg:squarederror',eval_metric='rmse')
        model.fit(X_train.values, y_train[col].values)
        model_pred = model.predict(X_test.values)
        model_cv = cross_validate(model, X_train.values, y_train[col].values, cv=3, scoring=['neg_mean_squared_error','r2'], return_train_score=True)
        if verbose:
            print('\t%s:' % col)
            print('\tTraining Data Cross Validation:')
            print('\t\t%.4f\tNegative mean squared error' % np.mean(model_cv['train_neg_mean_squared_error']))
            print('\t\t %.4f\tR^2' % np.mean(model_cv['train_r2']))
            print('\tTesting Data:')
            print('\t\t%.4f\tNegative mean squared error' % -mean_squared_error(y_test[col].values,model_pred))
            print('\t\t %.4f\tR^2' % r2_score(y_test[col].values,model_pred))
        if save:
            with open('models/%s_%d.xgbm' % (col,N),'wb') as fout:
                # Train on all data
                model.fit(X.values,y[col].values)
                model_cv = cross_validate(model, X.values, y[col].values, cv=3, scoring=['neg_mean_squared_error','r2'], return_train_score=True)
                if verbose:
                    print('\tFinal Model Training Metrics:')
                    print('\t\t%.4f\tNegative mean squared error' % np.mean(model_cv['train_neg_mean_squared_error']))
                    print('\t\t %.4f\tR^2' % np.mean(model_cv['train_r2']))
                dill.dump(model,fout)
    if verbose:
        print('\n')
if __name__ == '__main__':
    # Hand-rolled CLI: --N <3|4>, --nosave, --verbose.
    # Default parameters
    N = 3
    save = True
    verbose = False
    # Parse parameters
    import sys
    if len(sys.argv) > 1:
        parameters = sys.argv[1:]
        for idx,p in enumerate(parameters):
if p == '--N':
assert ((idx+1) < len(parameters) and int(parameters[idx+1]) and int(parameters[idx+1]) in [3,4]), '--N requires an additional parameter: \
the number of years included in the \
model (3 or 4)'
N = int(parameters[idx+1])
elif p == '--nosave':
save = False
elif p == '--verbose':
verbose = True
# Generate models
main(N=N,save=save,verbose=verbose)
|
12,557 | 5eaa469a4d79b251f55b8f982d38a131284592c2 | #!/usr/bin/env python
from __future__ import print_function, unicode_literals
import argparse
# Emit one study task per template per chapter, in chapter order.
parser = argparse.ArgumentParser()
parser.add_argument('num_chapters', type=int)
args = parser.parse_args()

task_templates = [
    "read chapter {}",
    "make chapter summary for chapter {}",
    "make flashcards for chapter {}",
]

for chapter in range(1, args.num_chapters + 1):
    for template in task_templates:
        print(template.format(chapter))
|
12,558 | c94e87b789eea19deb6641b23ad6a93fb50503b3 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-26 15:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the blog app: Article, Reply and Tag."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='标题')),
                ('content', models.TextField(verbose_name='内容')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('change_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
            ],
        ),
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=150, verbose_name='内容')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Article', verbose_name='所属文章')),
                # Self-referencing FK: a reply may quote another reply.
                ('quote', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Reply', verbose_name='引用')),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='标签')),
                ('description', models.CharField(max_length=100, verbose_name='描述')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('change_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
                # Self-referencing FK linking a tag to a synonym tag.
                ('synonym', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Tag', verbose_name='关联标签')),
            ],
        ),
        migrations.AddField(
            model_name='article',
            name='tags',
            field=models.ManyToManyField(to='blog.Tag', verbose_name='文章标签'),
        ),
    ]
|
12,559 | fb71e8a07725bd501715fae09e9a8ac5709a6411 | import sys
# Right-aligned '#' pyramid of height n. (Python 2: raw_input / print statement.)
n = int(raw_input('enter number:'))
s = '#'
for i in range( 1 , n+1):
print " "*(n-i) + s*i |
12,560 | 0beabf756f471d80313e7d2d93d2e76ee65c0fd6 | from collections import defaultdict
# from torchtext.vocab import Vocab
from torch.utils.data.dataset import Dataset, TensorDataset
from pathlib import Path
from collections import Counter
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from torch.utils.data.dataloader import DataLoader
from utils import *
import matplotlib.pyplot as plt
from chu_liu_edmonds import *
from os import path
# taken from the paper
MLP_HIDDEN_DIM = 100          # hidden width of the arc-scoring MLP
EPOCHS = 150
WORD_EMBEDDING_DIM = 100
POS_EMBEDDING_DIM = 25
HIDDEN_DIM = 125              # LSTM hidden size (per direction)
LEARNING_RATE = 0.01
EARLY_STOPPING = 10 # num epochs with no validation acc improvement to stop training
PATH = "./basic_model_best_params"
# Module-level loss/device shared by the training helpers below.
cross_entropy_loss = nn.CrossEntropyLoss(reduction='mean')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# class not_efficientMLP(nn.Module):
# def __init__(self, lstm_dim, mlp_hidden_dim):
# super(not_efficientMLP, self).__init__()
# self.first_linear = nn.Linear(2 * lstm_dim, mlp_hidden_dim)
# self.non_linearity = nn.ReLU()
# self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
#
# def forward(self, lstm_out):
# sentence_length = lstm_out.shape[0]
# scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# for i, v_i in enumerate(lstm_out):
# for j, v_j in enumerate(lstm_out):
# if i == j:
# scores[i, j] = 0
# else:
# a = torch.cat((v_i, v_j), dim=0)
# x = self.first_linear(a)
# y = self.non_linearity(x)
# scores[i, j] = self.second_mlp(y)
# return scores
class SplittedMLP(nn.Module):
    """First MLP stage, split into separate head/modifier projections.

    fc_h (with bias) projects candidate-head vectors; fc_m (bias-free)
    projects modifier vectors. Both map lstm_dim -> mlp_hidden_dim.
    """

    def __init__(self, lstm_dim, mlp_hidden_dim):
        super(SplittedMLP, self).__init__()
        self.fc_h = nn.Linear(lstm_dim, mlp_hidden_dim, bias=True)
        self.fc_m = nn.Linear(lstm_dim, mlp_hidden_dim, bias=False)

    def forward(self, lstm_out):
        """Return (head_hidden, modifier_hidden) projections of lstm_out."""
        return self.fc_h(lstm_out), self.fc_m(lstm_out)
class MLP(nn.Module):
    """Scores every (head, modifier) pair: scores[h, m] = score of the arc h -> m."""

    def __init__(self, lstm_dim, mlp_hidden_dim):
        super(MLP, self).__init__()
        self.first_mlp = SplittedMLP(lstm_dim, mlp_hidden_dim)
        self.non_linearity = nn.Tanh()
        # Collapses each hidden pair representation to a scalar arc score.
        self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True)

    def forward(self, lstm_out):
        n_words = lstm_out.shape[0]
        heads_hidden, mods_hidden = self.first_mlp(lstm_out)
        scores = torch.zeros(size=(n_words, n_words)).to(device)
        # Fill one column (one modifier) at a time; broadcasting pairs the
        # modifier's hidden vector with every candidate head at once.
        for mod in range(n_words):
            pair_hidden = self.non_linearity(mods_hidden[mod] + heads_hidden)
            scores[:, mod] = torch.flatten(self.second_mlp(pair_hidden))
            scores[mod, mod] = -np.inf  # a word cannot be its own head
        return scores
class DnnDependencyParser(nn.Module):
    """BiLSTM arc-scoring dependency parser.

    Embeds word and POS indices, encodes the sentence with a 2-layer
    bidirectional LSTM, and scores all head/modifier arcs with an MLP.
    Works on a single sentence at a time (batch size 1).
    """

    def __init__(self, word_embedding_dim, pos_embedding_dim, hidden_dim, word_vocab_size, tag_vocab_size):
        super(DnnDependencyParser, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Lookup tables mapping word / POS indices to dense vectors.
        self.word_embedding = nn.Embedding(word_vocab_size, word_embedding_dim)
        self.pos_embedding = nn.Embedding(tag_vocab_size, pos_embedding_dim)
        self.lstm = nn.LSTM(input_size=word_embedding_dim + pos_embedding_dim, hidden_size=hidden_dim,
                            num_layers=2, bidirectional=True, batch_first=False)
        self.mlp = MLP(2 * hidden_dim, MLP_HIDDEN_DIM)

    def forward(self, word_idx_tensor, pos_idx_tensor):
        """Return a (seq_len x seq_len) arc-score matrix for one sentence."""
        word_vecs = self.word_embedding(word_idx_tensor.to(self.device))   # [1, seq, e_w]
        pos_vecs = self.pos_embedding(pos_idx_tensor.to(self.device))      # [1, seq, e_p]
        # Each token is represented by [word embedding ; POS embedding].
        token_vecs = torch.cat((word_vecs, pos_vecs), dim=2).to(self.device)
        lstm_out, _ = self.lstm(token_vecs.view(token_vecs.shape[1], 1, -1))  # [seq, 1, 2*hidden]
        # One row per token: [seq_length, 2*hidden_dim].
        return self.mlp(lstm_out.view(lstm_out.shape[0], -1))
def NLLL_function(scores, true_tree):
    """Mean cross-entropy loss of the arc scores against the gold tree.

    scores: (sentence_length x sentence_length) head-by-modifier score matrix.
    true_tree: gold head index per token (index 0 is the artificial ROOT).
    """
    clean_scores = scores[:, 1:]       # ROOT can never be a modifier
    clean_true_tree = true_tree[1:]
    n_mods = clean_scores.shape[1]     # number of real (non-ROOT) tokens
    total = 0
    # Each modifier's score column is a distribution over candidate heads.
    for mod in range(n_mods):
        total += cross_entropy_loss(clean_scores[:, mod].unsqueeze(dim=0), clean_true_tree[mod:mod + 1])
    return (1.0 / n_mods) * total
# def NLLL(output, target):
# """
# :param output: The table of MLP scores of each word pair
# :param target: The ground truth of the actual arcs
# :return:
# """
# # loss = -1/|Y|*[S_gt - sum(log(sum(exp(s_j_m))))]
# S_gt = 0
# mod_score = 0
# for idx, head in enumerate(target[0]):
# if idx == 0:
# continue
# head_idx = head.item()
# mod_idx = idx
# S_gt += output[head_idx, mod_idx]
# #
# S_j_m = output[:, mod_idx]
# mod_score += torch.log(torch.sum(torch.exp(S_j_m)))
# Y_i = target[0].shape[0]
# final_loss = (-1./Y_i)*(S_gt - mod_score)
# return final_loss
#
#
# def get_acc_measurements(GT, energy_table):
# predicted_mst, _ = decode_mst(energy=energy_table, length=energy_table.shape[0], has_labels=False)
# y_pred = torch.from_numpy(predicted_mst[1:])
# y_true = GT[1:]
# print("y_pred", y_pred)
# print("y_true = ", y_true)
# print((y_pred == y_true).sum())
# acc = (y_pred == y_true).sum()/float(y_true.shape[0])
# return acc.item()
def accuracy(ground_truth, energy_table):
    """Unlabeled attachment score: fraction of words whose predicted head is correct."""
    predicted_mst, _ = decode_mst(energy=energy_table.detach(), length=energy_table.shape[0], has_labels=False)
    # Index 0 is the artificial ROOT's head entry; exclude it from the score.
    predicted_heads = torch.from_numpy(predicted_mst[1:])
    gold_heads = ground_truth[1:]
    correct = (predicted_heads == gold_heads).sum()
    return (correct / float(gold_heads.shape[0])).item()
def evaluate(model, data_loader):
    """Average UAS of `model` over `data_loader`, with gradients disabled.

    Note: averages per batch; with the batch size of 1 used here this equals
    the per-sentence average.
    """
    total_acc = 0
    n_batches = 0
    with torch.no_grad():
        for words_idx_tensor, pos_idx_tensor, heads_tensor in data_loader:
            scores = model(words_idx_tensor, pos_idx_tensor)
            total_acc += accuracy(heads_tensor[0].cpu(), scores.cpu())
            n_batches += 1
    return total_acc / n_batches
def main():
    """Train the parser with early stopping, checkpointing and metric plots.

    Loads train/test data, splits off a validation set, trains with manual
    gradient accumulation (effective batch 128 over batch-size-1 loaders),
    saves the best-on-validation weights to PATH, and plots loss / train UAS /
    test UAS per epoch.
    """
    # sanity check
    data_dir = "HW2-files/"
    path_train = data_dir + "train.labeled"
    print("path_train -", path_train)
    path_test = data_dir + "test.labeled"
    print("path_test -", path_test)
    paths_list = [path_train, path_test]
    word_cnt, word_dict, pos_dict = get_vocabs(paths_list)
    train = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'train')
    # split into validation
    train_set, val_set = torch.utils.data.random_split(train, [4000, 1000])
    train_dataloader = DataLoader(train_set, shuffle=False)  # TODO return to true after debugging
    val_dataloader = DataLoader(val_set, shuffle=False)
    test = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'test')
    test_dataloader = DataLoader(test, shuffle=False)
    a = next(iter(train_dataloader))
    # a[0] -> word - idx of a sentence
    # a[1] -> pos - idx of a sentence
    # a[2] -> head token per sentence
    assert len(a[0]) == len(a[1]) == len(a[2])
    word_vocab_size = len(train.word2idx)
    print(word_vocab_size)
    tag_vocab_size = len(train.pos_idx_mappings)
    print(tag_vocab_size)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    model = DnnDependencyParser(WORD_EMBEDDING_DIM, POS_EMBEDDING_DIM, HIDDEN_DIM, word_vocab_size, tag_vocab_size).to(device)
    if use_cuda:
        model.cuda()
    # Define the loss function as the Negative Log Likelihood loss (NLLLoss)
    # NOTE(review): loss_function is never used below; NLLL_function is used instead.
    loss_function = nn.NLLLoss()
    # We will be using a simple SGD optimizer to minimize the loss function
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
    acumulate_grad_steps = 128  # This is the actual batch_size, while we officially use batch_size=1
    # Training start
    print("Training Started")
    epoch_loss_list = []
    epoch_train_acc_list = []
    epoch_test_acc_list = []
    best_val_acc = 0
    num_epochs_wo_improvement = 0
    for epoch in range(EPOCHS):
        # Validation is evaluated at the top of each epoch (before training it).
        val_acc = evaluate(model, val_dataloader)
        print("EPOCH = ", epoch)
        print("EPOCH val acc = ", val_acc)
        if val_acc < best_val_acc:  # no improvement
            num_epochs_wo_improvement += 1
            if num_epochs_wo_improvement >= EARLY_STOPPING:
                print("STOPPED TRAINING DUE TO EARLY STOPPING")
                return
        else:  # improvement
            print("saving model since it improved on validation :)")
            torch.save(model.state_dict(), PATH)
            num_epochs_wo_improvement = 0
            best_val_acc = val_acc
            # Checkpoint the metric plots so far alongside the saved weights.
            fig = plt.figure()
            plt.subplot(3, 1, 1)
            plt.plot(epoch_loss_list)
            plt.title("loss")
            plt.subplot(3, 1, 2)
            plt.plot(epoch_train_acc_list)
            plt.title("train UAS")
            plt.subplot(3, 1, 3)
            plt.plot(epoch_test_acc_list)
            plt.title("test UAS")
            print(epoch_train_acc_list)
            plt.savefig('./basic_model_graphs.png')
        # train
        acc = 0  # to keep track of accuracy
        printable_loss = 0  # To keep track of the loss value
        i = 0
        batch_loss = 0
        batch_acc = 0
        epoch_loss = 0
        for batch_idx, input_data in enumerate(train_dataloader):
            i += 1
            words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
            tag_scores = model(words_idx_tensor, pos_idx_tensor)
            loss = NLLL_function(tag_scores, heads_tensor[0].to(device))
            # epoch statistics
            epoch_loss += loss
            #
            # Scale the loss so accumulated gradients average over the
            # effective batch, then step once every acumulate_grad_steps.
            loss = loss / acumulate_grad_steps
            loss.backward()
            batch_loss += loss
            acc = (accuracy(heads_tensor[0].cpu(), tag_scores.cpu())) / acumulate_grad_steps
            batch_acc += acc
            if i % acumulate_grad_steps == 0:
                optimizer.step()
                model.zero_grad()
                print("batch_loss = ", batch_loss.item())
                print("batch_acc = ", batch_acc)
                batch_loss = 0
                batch_acc = 0
        # end of epoch - get statistics
        epoch_loss_list.append(epoch_loss / i)
        epoch_train_acc_list.append(evaluate(model, train_dataloader))
        epoch_test_acc_list.append(evaluate(model, test_dataloader))
    # end of train - plot the two graphs
    fig = plt.figure()
    plt.subplot(3, 1, 1)
    plt.plot(epoch_loss_list)
    plt.title("loss")
    plt.subplot(3, 1, 2)
    plt.plot(epoch_train_acc_list)
    plt.title("train UAS")
    plt.subplot(3, 1, 3)
    plt.plot(epoch_test_acc_list)
    plt.title("test UAS")
    plt.show()
    plt.savefig('basic_model_graphs.png')
if __name__ == "__main__" :
    # NOTE(review): HYPER_PARAMETER_TUNING and hyper_parameter_tuning are not
    # defined in this file — presumably they come via `from utils import *`;
    # verify, otherwise this guard raises NameError at runtime.
    if HYPER_PARAMETER_TUNING:
        hyper_parameter_tuning()
    else:
main() |
12,561 | db205c38ec2209d444d04c73fa021b6651031744 | """
This module provides 1/2-D FFTs for functions taken on the interval
n = [-N/2, ..., N/2-1] in all transformed directions. This is accomplished
quickly by making a change of variables in the DFT expression, leading to
multiplication of exp(+/-jPIk) * DFT{exp(+/-jPIn) * [n]}. Take notice that
BOTH your input and output arrays will be arranged on the negative-to-positive
interval. To take regular FFTs, shifting can be turned off.
"""
import numpy as N
from scipy.weave import inline
import os
from os.path import join, split
from numpy.distutils.system_info import get_info
from path import path
example_code = \
r"""
int32_t *i = (int32_t *)a;
if(ndim == 2) {
BlitzExample<int32_t, 2> t;
blitz::Array<int32_t, 2> A(a, blitz::shape(shape[0], shape[1]),
blitz::neverDeleteData);
t(A);
} else if(ndim == 1) {
BlitzExample<int32_t, 1> t;
blitz::Array<int32_t, 1> A(a, shape[0], blitz::neverDeleteData);
t(A);
}
"""
def example(a):
    """Compile and run the inline C++ snippet on int32 array `a` (1-D or 2-D).

    NOTE(review): scipy.weave was removed from SciPy (last shipped in 0.18);
    this only runs against a legacy SciPy install — confirm the environment.
    """
    ndim = N.int(a.ndim)
    shape = N.array(a.shape, dtype=N.int32)
    # force=1 recompiles on every call; headers/include_dirs pull in the
    # local example.hpp and Blitz++.
    inline(example_code, ['a', 'shape', 'ndim'],
           headers=['<iostream>', '<stdint.h>', '"example.hpp"',
                    '<blitz/array.h>'],
           include_dirs=[os.getcwd()],
           compiler='gcc', force=1)
def go():
    """Exercise the weave example on a 2-D and then a 1-D int32 array."""
    data = N.arange(100, dtype=N.int32).reshape(10, 10)
    example(data)
    example(N.arange(100, dtype=N.int32))
# Script entry point.
if __name__ == '__main__':
    go()
|
12,562 | f55a56700316efe55270afc741a351ce8ab1f0ed | import pandas as pd
import numpy as np
# Aggregate raw AF-score rows per recipient: mean score and row count,
# sorted by descending mean score. 'to_user' is renamed to 'from_user'
# and the 'fid' count column to 'count' in the output.
af_df = pd.read_csv('Milk_2/AF_score/af_raw_data.csv')
af_df = (af_df
         .groupby(['to_user'])
         .agg({'AF score': 'mean', 'fid':'count'})
         .reset_index()
         .sort_values(['AF score'], ascending=False)
         .rename({'to_user': 'from_user', 'fid': 'count'}, axis='columns'))
af_df.to_csv('af_scr.csv') |
12,563 | a1b132633181aa2037eba87db98f53f8eaccbcd0 | #%%
"""
Created on 08 Mar 2019
Pathwise estimation for delta and vega for the Black-Scholes model
@author: Lech A. Grzelak
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import enum
# This class defines puts and calls
class OptionType(enum.Enum):
    """Option flavour constants used throughout the pricing helpers."""
    CALL = 1.0
    PUT = -1.0
# Black-Scholes call option price
def BS_Call_Put_Option_Price(CP, S_0, K, sigma, t, T, r):
    """Black-Scholes price of a European call or put.

    K is reshaped to a column vector so a price is returned per strike.
    """
    K = np.array(K).reshape([len(K), 1])
    d1 = (np.log(S_0 / K) + (r + 0.5 * np.power(sigma, 2.0))
          * (T - t)) / (sigma * np.sqrt(T - t))
    d2 = d1 - sigma * np.sqrt(T - t)
    if CP == OptionType.CALL:
        value = st.norm.cdf(d1) * S_0 - st.norm.cdf(d2) * K * np.exp(-r * (T - t))
    elif CP == OptionType.PUT:
        value = st.norm.cdf(-d2) * K * np.exp(-r * (T - t)) - st.norm.cdf(-d1) * S_0
    return value
def BS_Delta(CP, S_0, K, sigma, t, T, r):
    """Black-Scholes delta (dV/dS) per strike; call: N(d1), put: N(d1) - 1."""
    K = np.array(K).reshape([len(K), 1])
    d1 = (np.log(S_0 / K) + (r + 0.5 * np.power(sigma, 2.0)) * \
          (T - t)) / (sigma * np.sqrt(T - t))
    if CP == OptionType.CALL:
        value = st.norm.cdf(d1)
    elif CP == OptionType.PUT:
        value = st.norm.cdf(d1) - 1
    return value
def BS_Gamma(S_0, K, sigma, t, T, r):
    """Black-Scholes gamma (d2V/dS2), one value per strike in K."""
    K = np.array(K).reshape([len(K), 1])
    d1 = (np.log(S_0 / K) + (r + 0.5 * np.power(sigma, 2.0)) * \
          (T - t)) / (sigma * np.sqrt(T - t))
    return st.norm.pdf(d1) / (S_0 * sigma * np.sqrt(T - t))
def BS_Vega(S_0, K, sigma, t, T, r):
    """Black-Scholes vega (dV/dsigma).

    NOTE(review): unlike the sibling pricers, K is NOT reshaped to a column
    vector here, so the return shape follows K as given — confirm intended.
    """
    d1 = (np.log(S_0 / K) + (r + 0.5 * np.power(sigma, 2.0)) * \
          (T - t)) / (sigma * np.sqrt(T - t))
    return S_0 * st.norm.pdf(d1) * np.sqrt(T - t)
def GeneratePathsGBMEuler(NoOfPaths, NoOfSteps, T, r, sigma, S_0):
    """Simulate geometric Brownian motion paths via an Euler scheme on log-price.

    Returns {"time": grid of NoOfSteps+1 times, "S": (NoOfPaths x NoOfSteps+1)
    array of asset prices exp(X)}.
    NOTE(review): the pre-allocated S array is overwritten only at column 0 and
    otherwise unused — the returned prices come from exp(X).
    """
    Z = np.random.normal(0.0, 1.0, [NoOfPaths, NoOfSteps])
    W = np.zeros([NoOfPaths, NoOfSteps + 1])
    # Approximation
    S = np.zeros([NoOfPaths, NoOfSteps + 1])
    S[:, 0] = S_0
    # X holds the log-price process; exact GBM drift (r - sigma^2/2) per step.
    X = np.zeros([NoOfPaths, NoOfSteps + 1])
    X[:, 0] = np.log(S_0)
    time = np.zeros([NoOfSteps + 1])
    dt = T / float(NoOfSteps)
    for i in range(0, NoOfSteps):
        # Making sure that samples from a normal have mean 0 and variance 1
        if NoOfPaths > 1:
            Z[:, i] = (Z[:, i] - np.mean(Z[:, i])) / np.std(Z[:, i])
        W[:, i + 1] = W[:, i] + np.power(dt, 0.5) * Z[:, i]
        X[:, i + 1] = X[:, i] + (r - 0.5 * sigma ** 2.0) * dt + sigma * (W[:, i + 1] - W[:, i])
        time[i + 1] = time[i] + dt
    # Return S
    paths = {"time": time, "S": np.exp(X)}
    return paths
def EUOptionPriceFromMCPathsGeneralized(CP, S, K, T, r):
    """Discounted Monte Carlo price of European options, one row per strike in K.

    S is a vector of simulated terminal asset prices.
    """
    result = np.zeros([len(K), 1])
    discount = np.exp(-r * T)
    for idx, k in enumerate(K):
        if CP == OptionType.CALL:
            result[idx] = discount * np.mean(np.maximum(S - k, 0.0))
        elif CP == OptionType.PUT:
            result[idx] = discount * np.mean(np.maximum(k - S, 0.0))
    return result
def PathwiseDelta(S0, S, K, r, T):
    """Pathwise Monte Carlo estimator of the call delta at strike K.

    S: path matrix whose last column holds the terminal prices.
    """
    in_the_money = S[:, -1] > K
    return np.exp(-r * T) * np.mean(in_the_money * (S[:, -1] / S0))
def PathwiseVega(S0, S, sigma, K, r, T):
    """Pathwise Monte Carlo estimator of the call vega at strike K.

    Only in-the-money paths (terminal price above K) contribute.
    """
    payoff_active = S[:, -1] > K
    sensitivity = 1.0 / sigma * S[:, -1] * (np.log(S[:, -1] / S0) - (r + 0.5 * sigma ** 2.0) * T)
    return np.exp(-r * T) * np.mean(payoff_active * sensitivity)
def mainCalculation():
    """Compare pathwise MC delta/vega estimates against Black-Scholes exact values.

    Simulates GBM paths for increasing path counts and plots the convergence of
    the pathwise estimators toward the analytic delta and vega (figures 1 and 2).
    """
    CP = OptionType.CALL
    S0 = 1
    r = 0.06
    sigma = 0.3
    T = 1
    K = np.array([S0])   # at-the-money strike
    t = 0.0
    NoOfSteps = 1000
    # Analytic references.
    delta_Exact = BS_Delta(CP, S0, K, sigma, t, T, r)
    vega_Exact = BS_Vega(S0, K, sigma, t, T, r)
    NoOfPathsV = np.round(np.linspace(5, 20000, 50))
    deltaPathWiseV = np.zeros(len(NoOfPathsV))
    vegaPathWiseV = np.zeros(len(NoOfPathsV))
    for (idx, nPaths) in enumerate(NoOfPathsV):
        print('Running simulation with {0} paths'.format(nPaths))
        # Fixed seed: each run differs only in the number of paths used.
        np.random.seed(3)
        paths1 = GeneratePathsGBMEuler(int(nPaths), NoOfSteps, T, r, sigma, S0)
        S = paths1["S"]
        delta_pathwise = PathwiseDelta(S0, S, K, r, T)
        deltaPathWiseV[idx] = delta_pathwise
        vega_pathwise = PathwiseVega(S0, S, sigma, K, r, T)
        vegaPathWiseV[idx] = vega_pathwise
    plt.figure(1)
    plt.grid()
    plt.plot(NoOfPathsV, deltaPathWiseV, '.-r')
    plt.plot(NoOfPathsV, delta_Exact * np.ones([len(NoOfPathsV), 1]))
    plt.xlabel('number of paths')
    plt.ylabel('Delta')
    plt.title('Convergence of pathwise delta w.r.t number of paths')
    plt.legend(['pathwise est', 'exact'])
    plt.figure(2)
    plt.grid()
    plt.plot(NoOfPathsV, vegaPathWiseV, '.-r')
    plt.plot(NoOfPathsV, vega_Exact * np.ones([len(NoOfPathsV), 1]))
    plt.xlabel('number of paths')
    plt.ylabel('Vega')
    plt.title('Convergence of pathwise vega w.r.t number of paths')
    plt.legend(['pathwise est', 'exact'])
# Run the study on import/execution.
mainCalculation()
|
12,564 | 0e57ca4f6620491060ea712738448fcaad6e176f | import pytest
from src.zadanie_kalkulator import ZadanieKalkulator
class TestZadanieKalkulator():
    """Pytest suite for ZadanieKalkulator's add/subtract operations.

    Fix: test_odejmij's first parameter was misspelled `selfs`; it happened
    to work (the instance was still bound to it) but is now the conventional
    `self`.
    """

    @pytest.fixture()
    def return_kalkulator(self):
        """Calculator seeded with (4, 5)."""
        return ZadanieKalkulator(4, 5)

    @pytest.fixture()
    def return_kalkulator2(self):
        """Calculator seeded with (6, 7)."""
        return ZadanieKalkulator(6, 7)

    def test_dodaj(self, return_kalkulator, return_kalkulator2):
        assert return_kalkulator.dodaj() == 9
        assert return_kalkulator2.dodaj() == 13

    def test_odejmij(self, return_kalkulator, return_kalkulator2):
        assert return_kalkulator.odejmij(True) == 1
        assert return_kalkulator2.odejmij() == -1
# Boolean-precedence demo: with x=True, y=False, z=False, `not` binds tighter
# than `and`, which binds tighter than `or`, so only the third branch fires
# and the script prints 3.
x = True
y = False
z = False

if (not x) or y:
    print(1)
elif (not x) or ((not y) and z):
    print(2)
elif (not x) or y or ((not y) and x):
    print(3)
else:
    print(4)
12,566 | 5511361ba73f8739f4900083c0fdf7fa298c423b | """
A walkthrough of the standard time-handling module:
time
"""
import time
# 1. Human-readable time: e.g. 2020-10-27 09:30
# Time tuple fields: year, month, day, hour, minute, second, weekday,
# day of year, DST flag
tuple_time = time.localtime()
print(tuple_time[0])    # year
print(tuple_time[-3])   # weekday
print(tuple_time[3: 6]) # hour, minute, second
# 2. Machine time:
# Timestamp: seconds elapsed since the epoch (1970-01-01)
print(time.time())  # 1603762810.9457989
# 3. Time tuple <---> timestamp
# Syntax: time_tuple = time.localtime(timestamp)
print(time.localtime(1603762810.9457989))
# Syntax: timestamp = time.mktime(time_tuple)
print(time.mktime(tuple_time))
# Syntax: timestamp = time.mktime(9-element tuple)
print(time.mktime((2020, 10, 27, 9, 0, 0, 0, 0, 0)))
# 4. Time tuple <---> string
# Syntax: string = time.strftime(format, time_tuple)
print(time.strftime( "%y/%m/%d %H:%M:%S" ,tuple_time))
# 2020/10/27 09:55:19
print(time.strftime( "%Y/%m/%d %H:%M:%S" ,tuple_time))
# The commented line below shows %-style placeholder formatting for an integer:
# "..%d..."%(100)
# Syntax: time_tuple = time.strptime(time_string, format)
print(time.strptime("2020/10/27 09:55:19","%Y/%m/%d %H:%M:%S")) |
12,567 | 44e1a16a97bfc253815c17d6abe79e441d035da3 | import math
# Check whether the distance between two points is less than r.
def distanceInR(cx, cy, r, x, y):
    """Return True when (x, y) lies strictly inside the circle of radius r
    centred at (cx, cy).

    Fixes the verbose if/else that returned boolean literals; math.hypot is
    the numerically robust Euclidean distance.
    """
    return math.hypot(x - cx, y - cy) < r
def distanceCal(cx, cy, x, y):
    """Return the Euclidean distance between (cx, cy) and (x, y)."""
    dist = math.sqrt(math.pow(x - cx, 2) + math.pow(y - cy, 2))
    return dist
|
12,568 | 67bb622ebc8cef0ba8d1ea1834897c109d764601 | # -*- coding: utf-8 -*-
import os.path
import re
import warnings
# Parse requirements.txt ourselves instead of importing pip internals.
# pip's parse_requirements moved in pip 10 (pip._internal) and
# InstallRequirement lost its `.req` attribute in pip 20, so the old
# Stack Overflow workaround (https://stackoverflow.com/a/49867265)
# breaks on any modern pip. Plain requirement lines only (no -r includes).
from setuptools import setup, find_packages


def _read_requirements(path):
    """Return non-empty, non-comment requirement lines from *path*."""
    requirements = []
    with open(path) as fh:
        for raw in fh:
            line = raw.strip()
            if line and not line.startswith('#'):
                requirements.append(line)
    return requirements


version = '0.3.7'

long_description = """
freezing-model is the database model and message definitions shared by freezing saddles components.
"""

# reqs is a list of requirement strings,
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
reqs = _read_requirements(
    os.path.join(os.path.dirname(__file__), 'requirements.txt'))
# Distribution metadata; `reqs` is built above from requirements.txt.
setup(
    name='freezing-model',
    version=version,
    author='Hans Lellelid',
    author_email='hans@xmpl.org',
    url='http://github.com/freezingsaddles/freezing-model',
    license='Apache',
    description='Freezing Saddles database and messaging models.',
    long_description=long_description,
    packages=['freezing.model', 'freezing.model.msg'],
    include_package_data=True,
    # Ship Alembic/DB migration files with the package.
    package_data={'freezing.model': ['migrations/*']},
    install_requires=reqs,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
    ],
    zip_safe=True
)
|
12,569 | 7338a76fae44463bb2f33c6ae2ddcc6c20315d86 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pymysql
def conn():
    """Open a pymysql connection using parameters read from setup.txt.

    setup.txt must contain, whitespace-separated:
    host user password database
    """
    # Use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open('setup.txt', 'r') as arquivo:
        parametros = arquivo.read().split()
    db = pymysql.connect(host=parametros[0],
                         user=parametros[1],
                         password=parametros[2],
                         database=parametros[3],
                         charset='utf8mb4',
                         cursorclass=pymysql.cursors.DictCursor)
    return db
|
12,570 | de2ba2b5dc3836abad9f97c8482b62fbc1e64383 | #!/usr/bin/env python
"""Test gs-wrap cp many to many live."""
# pylint: disable=missing-docstring
# pylint: disable=too-many-lines
# pylint: disable=protected-access
# pylint: disable=expression-not-assigned
import pathlib
import tempfile
import unittest
import uuid
from typing import List, Sequence, Tuple, Union # pylint: disable=unused-import
import temppathlib
import gswrap
import tests.common
class TestCPManyToMany(unittest.TestCase):
    """Live tests for gswrap.Client.cp_many_to_many against a real GCS bucket."""
    def setUp(self) -> None:
        # Fresh client, unique bucket prefix and scratch directory per test.
        self.client = gswrap.Client()
        self.client._change_bucket(tests.common.TEST_GCS_BUCKET)
        self.bucket_prefix = str(uuid.uuid4())
        self.tmp_dir = tempfile.TemporaryDirectory()
        tests.common.gcs_test_setup(
            tmp_dir_name=self.tmp_dir.name, prefix=self.bucket_prefix)
    def tearDown(self) -> None:
        # Remove the remote fixture objects, then the local scratch dir.
        tests.common.gcs_test_teardown(prefix=self.bucket_prefix)
        self.tmp_dir.cleanup()
    def test_cp_remote_many_to_many(self) -> None:
        """Copy remote->remote: one directory pair plus one file pair."""
        gcs_bucket = 'gs://{}'.format(tests.common.TEST_GCS_BUCKET)
        prefix = self.bucket_prefix
        # yapf: disable
        test_cases = [
            (
                '{}/{}/d1/'.format(gcs_bucket, prefix),
                '{}/{}/d1-m2many'.format(gcs_bucket, prefix)
            ),
            (
                '{}/{}/d1/f11'.format(gcs_bucket, prefix),
                '{}/{}/d1-m2many/files/f11'.format(gcs_bucket, prefix)
            )
        ] # type: Sequence[Tuple[str, str]]
        # yapf: enable
        self.client.cp_many_to_many(srcs_dsts=test_cases, recursive=True)
        # 3 files from d1/ plus the single f11 copy = 4 objects expected.
        # yapf: disable
        self.assertEqual(4, len(tests.common.call_gsutil_ls(
            path='{}/{}/d1-m2many'.format(gcs_bucket, prefix), recursive=True)))
        # yapf: enable
    def test_cp_download_many_to_many(self) -> None:
        """Copy remote->local: one directory pair plus one file pair."""
        gcs_bucket = 'gs://{}'.format(tests.common.TEST_GCS_BUCKET)
        prefix = self.bucket_prefix
        tmp_dir = self.tmp_dir.name
        # yapf: disable
        test_cases = [
            (
                '{}/{}/d1/'.format(gcs_bucket, prefix),
                pathlib.Path('{}/{}/d1-m2many'.format(tmp_dir, prefix))),
            (
                '{}/{}/d1/f11'.format(gcs_bucket, prefix),
                pathlib.Path('{}/{}/d1-m2many/files/f11'.format(tmp_dir, prefix)
                             ))] # type:Sequence[Tuple[str, pathlib.Path]]
        # yapf: enable
        self.client.cp_many_to_many(srcs_dsts=test_cases, recursive=True)
        # yapf: disable
        self.assertEqual(4, len(tests.common.ls_local(
            path='{}/{}/d1-m2many'.format(self.tmp_dir.name, prefix))))
        # yapf: enable
    def test_cp_download_many_to_many_with_creating_local_dir(self) -> None:
        """Download must create missing local parent directories."""
        gcs_bucket = 'gs://{}'.format(tests.common.TEST_GCS_BUCKET)
        with temppathlib.TemporaryDirectory() as tmp_dir:
            # Upload ten small files as fixtures: <prefix>/cp-m2m/<i>/file.
            for index in range(10):
                file = tmp_dir.path / "{}/file".format(index)
                file.parent.mkdir(parents=True, exist_ok=True)
                file.write_text("hello")
                tests.common.call_gsutil_cp(
                    src=file.as_posix(),
                    dst="{}/{}/cp-m2m/{}/file".format(
                        gcs_bucket, self.bucket_prefix, index),
                    recursive=False)
            url = "{}/{}/cp-m2m".format(gcs_bucket, self.bucket_prefix)
            srcs = tests.common.call_gsutil_ls(path=url, recursive=True)
            srcs_dsts = []
            # Destination dirs do not exist yet; cp_many_to_many must create them.
            for index, src in enumerate(srcs):
                srcs_dsts.append(
                    (src, (tmp_dir.path / "cp-m2m/{}/file".format(index))))
            self.client.cp_many_to_many(srcs_dsts=srcs_dsts, recursive=True)
            result = tests.common.ls_local(
                path=(tmp_dir.path / "cp-m2m").as_posix())
            self.assertEqual(10, len(result))
            expected = [
                (tmp_dir.path / "cp-m2m/{}/file".format(index)).as_posix()
                for index in range(10)
            ]
            self.assertListEqual(sorted(expected), sorted(result))
if __name__ == '__main__':
    # Allow running this module directly, outside of a pytest session.
    unittest.main()
|
12,571 | 1c83f81993310c86dccdb8fc266c0195716dfa35 | from PIL import Image
img = Image.open('C:\\Users\\Matt\\codeguild\\javascript\\mole.png')
# Image.save() returns None, so keep the rotated image in its own variable
# before saving (the original bound None to a misspelled 'roated_image').
rotated_image = img.rotate(45)
rotated_image.save('rotated.png')
|
12,572 | 4aec64ef6a540973a3d87392a4c9a6a26a75422b | from django.shortcuts import render_to_response, redirect
from models import *
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def index(request, ):
    """Home page: top three categories, companies and persons by search rank."""
    context = {
        "categories": Comcat1.objects.all().order_by('-search_rank')[:3],
        "companies": Company2.objects.all().order_by('-search_rank')[:3],
        "persons": Person5.objects.all().order_by('-search_rank')[:3],
    }
    return render_to_response('index.html', context)
def search_company(request,):
    """List companies whose name contains the ``q`` GET parameter."""
    query = request.GET.get('q', '')
    results = []
    if query:
        results = Company2.objects.filter(
            Q(company_name__icontains=query)).distinct()
    return render_to_response("search-company.html",
                              {"results": results, "query": query})
def search_person(request,):
    """List persons whose name contains the ``q`` GET parameter."""
    query = request.GET.get('q', '')
    results = []
    if query:
        results = Person5.objects.filter(
            Q(person_name__icontains=query)).distinct()
    return render_to_response("search-person.html",
                              {"results": results, "query": query})
def search_PIN(request,):
    """List addresses whose PIN code contains the ``q`` GET parameter."""
    query = request.GET.get('q', '')
    results = []
    if query:
        results = Address3.objects.filter(
            Q(address_pin__icontains=query)).distinct()
    return render_to_response("search-PIN.html",
                              {"results": results, "query": query})
def search_mobile(request,):
    """List mobile records whose number contains the ``q`` GET parameter."""
    query = request.GET.get('q', '')
    results = []
    if query:
        results = Mobile6.objects.filter(
            Q(mobile_number__icontains=query)).distinct()
    return render_to_response("search-mobile.html",
                              {"results": results, "query": query})
def search_STD(request,):
    """List landlines whose STD (area) code matches the ``q`` GET parameter."""
    query = request.GET.get('q', '')
    results = []
    if query:
        # First resolve matching STD codes, then the landlines under them.
        s = Stdcode1.objects.filter(Q(std_code__icontains=query)).distinct()
        results = (Landline5.objects.filter(std_for_landline = s)
                   .distinct().order_by('landline_of_company'))
    return render_to_response("search-STD.html",
                              {"results": results, "query": query})
def categories(request,):
    """All company categories, most searched first."""
    ranked = Comcat1.objects.all().order_by('-search_rank')
    return render_to_response('categories.html', {"categories": ranked})
def emails(request,):
    """All e-mail records, grouped by domain then name."""
    ordered = Email6.objects.all().order_by('domain_of_email', 'email_name')
    return render_to_response('emails.html', {"emails": ordered})
def category(request,num):
    """Category detail page: bump its search rank and list its companies."""
    cat = Comcat1.objects.filter(id=num)
    if cat:
        # Popularity counter that drives the ordering on the index page.
        cat[0].search_rank = cat[0].search_rank + 1
        cat[0].save()
        # NOTE(review): `cat` is a queryset, not a Comcat1 instance -- confirm
        # the ORM resolves company_categories=cat as intended (cat[0] may
        # have been meant here).
        comps = Company2.objects.filter(company_categories = cat)
        return render_to_response('category.html',
                                  {
                                  "companies": comps,
                                  "category":Comcat1.objects.get(id=num)
                                  })
    else:
        return render_to_response('error.html',)
def companies(request,):
    """Paginated list of all companies, five per page."""
    ordered = Company2.objects.all().order_by('company_name')
    paginator = Paginator(ordered, 5)
    page = request.GET.get('page')
    try:
        page_obj = paginator.page(page)
    except PageNotAnInteger:
        # Missing/non-numeric page parameter: show the first page.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Page number past the end: show the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render_to_response( 'companies.html',
                               {"page": page, "companies": page_obj})
def company(request,num):
    """Company detail page: bump its search rank and gather related records."""
    if not Company2.objects.filter(id=num):
        return render_to_response('error.html',)
    company = Company2.objects.get(id=num)
    company.search_rank = company.search_rank + 1
    company.save()
    context = {
        "company": company,
        "categories": Comcat1.objects.filter(company2__id=num),
        "addresses": Address3.objects.filter(address_of_company=company),
        "positions": Compos4.objects.filter(position_of_company=company),
        "persons": Person5.objects.filter(company_of_person=company),
    }
    return render_to_response('company.html', context)
def position(request,num):
    """Position detail page with its landlines, persons and e-mail records."""
    pos = Compos4.objects.filter(id=num)
    if not pos:
        return render_to_response('error.html',)
    context = {
        "position": Compos4.objects.get(id=num),
        "landlines": Landline5.objects.filter(landline_for_position=pos),
        "persons": Person5.objects.filter(person_for_position=pos),
        "emails": Email6.objects.filter(email_of_position=pos),
    }
    return render_to_response('position.html', context)
def person(request,num):
    """Person detail page: bump search rank, list mobiles and e-mails."""
    if not Person5.objects.filter(id=num):
        return render_to_response('error.html',)
    person = Person5.objects.get(id=num)
    person.search_rank = person.search_rank + 1
    person.save()
    context = {
        "person": person,
        "mobiles": Mobile6.objects.filter(mobile_of_person=person),
        "emails": Email6.objects.filter(email_of_person=person),
    }
    return render_to_response('person.html', context)
def address(request,num):
    """Address detail page with the positions and landlines located there."""
    if not Address3.objects.filter(id=num):
        return render_to_response('error.html',)
    address = Address3.objects.get(id=num)
    context = {
        "address": address,
        "positions": Compos4.objects.filter(address_of_position=address),
        "landlines": Landline5.objects.filter(landline_in_address=address),
    }
    return render_to_response('address.html', context)
def alternate(request,):
    """Alternate home page: all categories plus top-10 companies and persons."""
    coms_all = Company2.objects.all().order_by('-search_rank')
    pers_all = Person5.objects.all().order_by('-search_rank')
    context = {
        "categories": Comcat1.objects.all(),
        "top_10_companies": coms_all[:10],
        # Pass the bound .count method; the template calls it lazily.
        "number_of_companies": coms_all.count,
        "top_10_persons": pers_all[:10],
        "number_of_persons": pers_all.count,
    }
    return render_to_response('alternate.html', context)
def landing(request,):
    """Static landing page; no context required."""
    return render_to_response('landing-page.html',)
|
12,573 | 70c263591f0e76ce956d97a6d167a3ba364ae223 | # Time Conversion
def timeConversion(s):
    """Convert a 12-hour 'hh:mm:ssAM/PM' string to 24-hour 'hh:mm:ss'."""
    meridiem = s[-2:]
    hour = s[:2]
    rest = s[2:-2]
    if meridiem == 'PM' and hour != '12':
        # Afternoon hours shift forward by twelve.
        return str(int(hour) + 12) + rest
    if meridiem == 'AM' and hour == '12':
        # Midnight hour becomes 00.
        return '00' + rest
    # All other cases keep the hour; just drop the AM/PM suffix.
    return s[:-2]
# Quick smoke test: should print True.
print(timeConversion('07:05:45PM') == '19:05:45')
12,574 | 0d2ae5ab3e8af42f8cf8f67a98b961d0cb85515f | from django.contrib import admin
from api.models import Tag, Category, Post
# Django admin registrations for the blog API models.
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
    list_display = ['name']
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    list_display = ['name']
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    list_display = ['id', 'title']
    # The title column links to the change form (default would be 'id').
    list_display_links = ['title']
|
12,575 | f27cbf4448225edf3778c04537a7ceb4a65542ef | import numpy as np
# hyper parameter
learning_rate = 0.01
np.random.seed(777)  # for reproducibility
# Training Data (1-D linear-regression toy set).
train_X = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                      7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                      2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
m = train_X.shape[0]  # m denotes numbers of samples
# convert to column vector (transpose of a 1-D array is a no-op, kept as-is)
X_train = train_X.transpose()
Y_train = train_Y.transpose()
# forward pass
# initial parameter
# W = np.random.normal(0.0, 1.0)
# b = np.random.normal(0.0, 1.0)
# in order to check our model, we set the initial
# value as the version of tf
W = 2.2086694
b = -0.8204183
# define our model y = Wx + b
def hypothesis(W, b):
    """Evaluate the linear model on the module-level training inputs."""
    predictions = X_train * W + b
    return predictions
# Loss function: half mean-squared error on the training set.
# (The original comment said RMSE, but no square root is taken.)
def loss(W, b):
    """Return (1/2m) * sum of squared residuals for parameters W, b."""
    residual = W * X_train + b - Y_train
    return 1 / (2 * float(m)) * np.dot(residual, residual)
# for output
loss_val = []
# Batch gradient descent over the full training set.
for epoch in range(1000):
    # compute error term
    diff = hypothesis(W, b) - Y_train
    # compute gradient for weigths and bias
    G_w = 1 / float(m) * np.matmul(diff.transpose(), X_train)
    G_b = np.average(diff)
    # update paremeters
    b -= learning_rate * G_b
    W -= learning_rate * G_w
    if epoch % 200 == 0:
        print(epoch, loss(W, b), W, b)
        loss_val.append(loss(W, b))
print("W = ", W, " b =", b, " loss = ", loss(W, b))
# Mid-file import kept in place to preserve the script's execution order.
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, train_X * W + b, label='Fitted line')
plt.xlabel("x")
plt.ylabel("y")
plt.figure(2)
plt.plot(loss_val)
plt.show()
'''
200 0.20810790483923886 0.45697420361829144 -0.6569617006050175
400 0.1576287042242674 0.41269065587310166 -0.34301127883658994
600 0.1265744754215395 0.377957315951072 -0.09676753204618373
800 0.10747026785122075 0.35071458216919466 0.09637118868502578
W=: 0.3294414711711726 b= 0.24718797076818763 loss = 0.09576327293101893
'''
12,576 | 3ff8458fa1961cdcf1ec219d58d8bd46363e4fac | from django.db import models
from django.contrib.auth.models import User
import os
# Create your models here.
def get_image_path(instance, filename):
    """Upload path for club images: photos/<instance id>/<original filename>."""
    parts = ('photos', str(instance.id), filename)
    return os.path.join(*parts)
class Club(models.Model):
    """A club's standing: Elo-style rank score plus win/loss/goal record."""
    club_name = models.CharField( max_length = 100)
    player_name = models.CharField( max_length = 100)
    # Elo-style rating; also the default ordering key (see Meta).
    rank_score = models.FloatField(default = 1000.0)
    wins = models.IntegerField(default = 0)
    loss = models.IntegerField(default = 0)
    draws = models.IntegerField(default = 0)
    rank = models.IntegerField()
    last_rank = models.IntegerField()
    # Textual movement indicator; "-" presumably means unchanged -- confirm.
    rank_difference = models.CharField(max_length = 20, default = "-")
    goal_for = models.IntegerField(default = 0)
    goal_against = models.IntegerField( default = 0)
    goal_difference = models.IntegerField( default = 0)
    # NOTE(review): the default looks like a project file path, not a
    # media-relative path -- verify it resolves under MEDIA_ROOT.
    club_image = models.ImageField(upload_to=get_image_path, default = 'Elo_System/static/static_dirs/img/club_banners/deault.jpg', blank=True, null=True)
    # Python 2 style string representation (Django < 2 / Py2 era code).
    def __unicode__(self):
        return self.club_name
    class Meta:
        # Highest rating first.
        ordering = ['-rank_score']
|
12,577 | b91355ed62bfd34351d67eef560ba8a85baa82b0 | from importlib import import_module
import numpy as np
import pytest
from armory.data import datasets
from armory.utils import external_repo
from armory.utils.config_loading import load_dataset
from armory.data.utils import maybe_download_weights_from_s3
from armory import paths
from armory.utils.metrics import object_detection_AP_per_class, video_tracking_mean_iou
# Shared dataset cache location inside the Docker runtime.
DATASET_DIR = paths.DockerPaths().dataset_dir
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_pytorch_mnist():
    """Train the baseline PyTorch MNIST model one epoch; expect >90% accuracy."""
    classifier_module = import_module("armory.baseline_models.pytorch.mnist")
    classifier_fn = getattr(classifier_module, "get_art_model")
    classifier = classifier_fn(model_kwargs={}, wrapper_kwargs={})
    train_dataset = datasets.mnist(
        split="train", epochs=1, batch_size=600, dataset_dir=DATASET_DIR,
    )
    test_dataset = datasets.mnist(
        split="test", epochs=1, batch_size=100, dataset_dir=DATASET_DIR,
    )
    classifier.fit_generator(
        train_dataset, nb_epochs=1,
    )
    # Average of per-batch accuracies over one full test epoch.
    accuracy = 0
    for _ in range(test_dataset.batches_per_epoch):
        x, y = test_dataset.get_batch()
        predictions = classifier.predict(x)
        accuracy += np.sum(np.argmax(predictions, axis=1) == y) / len(y)
    assert (accuracy / test_dataset.batches_per_epoch) > 0.9
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_pytorch_mnist_pretrained():
    """Pretrained (5-epoch) MNIST weights should reach >98% test accuracy."""
    classifier_module = import_module("armory.baseline_models.pytorch.mnist")
    classifier_fn = getattr(classifier_module, "get_art_model")
    weights_path = maybe_download_weights_from_s3("undefended_mnist_5epochs.pth")
    classifier = classifier_fn(
        model_kwargs={}, wrapper_kwargs={}, weights_path=weights_path
    )
    test_dataset = datasets.mnist(
        split="test", epochs=1, batch_size=100, dataset_dir=DATASET_DIR,
    )
    # Average of per-batch accuracies over one full test epoch.
    accuracy = 0
    for _ in range(test_dataset.batches_per_epoch):
        x, y = test_dataset.get_batch()
        predictions = classifier.predict(x)
        accuracy += np.sum(np.argmax(predictions, axis=1) == y) / len(y)
    assert (accuracy / test_dataset.batches_per_epoch) > 0.98
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_keras_cifar():
    """Train the baseline CIFAR-10 model one epoch; expect >25% accuracy.

    NOTE(review): despite the name, this imports the *pytorch* cifar
    baseline, not a Keras model -- confirm whether the name is stale.
    """
    classifier_module = import_module("armory.baseline_models.pytorch.cifar")
    classifier_fn = getattr(classifier_module, "get_art_model")
    classifier = classifier_fn(model_kwargs={}, wrapper_kwargs={})
    train_dataset = datasets.cifar10(
        split="train", epochs=1, batch_size=500, dataset_dir=DATASET_DIR,
    )
    test_dataset = datasets.cifar10(
        split="test", epochs=1, batch_size=100, dataset_dir=DATASET_DIR,
    )
    classifier.fit_generator(
        train_dataset, nb_epochs=1,
    )
    # Average of per-batch accuracies over one full test epoch.
    accuracy = 0
    for _ in range(test_dataset.batches_per_epoch):
        x, y = test_dataset.get_batch()
        predictions = classifier.predict(x)
        accuracy += np.sum(np.argmax(predictions, axis=1) == y) / len(y)
    assert (accuracy / test_dataset.batches_per_epoch) > 0.25
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_pytorch_xview_pretrained():
    """Pretrained xView Faster R-CNN: spot-check per-class AP and overall mAP."""
    detector_module = import_module("armory.baseline_models.pytorch.xview_frcnn")
    detector_fn = getattr(detector_module, "get_art_model")
    weights_path = maybe_download_weights_from_s3(
        "xview_model_state_dict_epoch_99_loss_0p67"
    )
    detector = detector_fn(
        model_kwargs={}, wrapper_kwargs={}, weights_path=weights_path,
    )
    NUM_TEST_SAMPLES = 250
    dataset_config = {
        "batch_size": 1,
        "framework": "numpy",
        "module": "armory.data.datasets",
        "name": "xview",
    }
    test_dataset = load_dataset(
        dataset_config,
        epochs=1,
        split="test",
        num_batches=NUM_TEST_SAMPLES,
        shuffle_files=False,
    )
    list_of_ys = []
    list_of_ypreds = []
    for x, y in test_dataset:
        y_pred = detector.predict(x)
        list_of_ys.extend(y)
        list_of_ypreds.extend(y_pred)
    average_precision_by_class = object_detection_AP_per_class(
        list_of_ys, list_of_ypreds
    )
    mAP = np.fromiter(average_precision_by_class.values(), dtype=float).mean()
    # Classes expected to score highly with these particular weights.
    for class_id in [4, 23, 33, 39]:
        assert average_precision_by_class[class_id] > 0.9
    assert mAP > 0.25
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_pytorch_gtsrb():
    """Train MicronNet on GTSRB for five epochs; expect >80% test accuracy."""
    classifier_module = import_module("armory.baseline_models.pytorch.micronnet_gtsrb")
    classifier_fn = getattr(classifier_module, "get_art_model")
    # The GTSRB baseline ships its own preprocessing (resize/normalize).
    preprocessing_fn = getattr(classifier_module, "preprocessing_fn")
    classifier = classifier_fn(model_kwargs={}, wrapper_kwargs={})
    train_dataset = datasets.german_traffic_sign(
        split="train",
        epochs=5,
        batch_size=128,
        dataset_dir=DATASET_DIR,
        preprocessing_fn=preprocessing_fn,
    )
    test_dataset = datasets.german_traffic_sign(
        split="test",
        epochs=1,
        batch_size=128,
        dataset_dir=DATASET_DIR,
        preprocessing_fn=preprocessing_fn,
    )
    classifier.fit_generator(
        train_dataset, nb_epochs=5,
    )
    # Average of per-batch accuracies over one full test epoch.
    accuracy = 0
    for _ in range(test_dataset.batches_per_epoch):
        x, y = test_dataset.get_batch()
        predictions = classifier.predict(x)
        accuracy += np.sum(np.argmax(predictions, axis=1) == y) / len(y)
    assert (accuracy / test_dataset.batches_per_epoch) > 0.8
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_pytorch_carla_video_tracking():
    """Pretrained GOTURN tracker on CARLA dev videos: mean IoU > 0.45 per clip."""
    runtime_paths = paths.runtime_paths()
    external_repo_dir = runtime_paths.external_repo_dir
    # The GOTURN implementation lives in an external repo that must be
    # fetched before the baseline model module can load it.
    external_repo.download_and_extract_repos(
        "amoudgl/pygoturn", external_repo_dir=external_repo_dir,
    )
    tracker_module = import_module("armory.baseline_models.pytorch.carla_goturn")
    tracker_fn = getattr(tracker_module, "get_art_model")
    weights_path = maybe_download_weights_from_s3("pytorch_goturn.pth.tar")
    tracker = tracker_fn(model_kwargs={}, wrapper_kwargs={}, weights_path=weights_path,)
    NUM_TEST_SAMPLES = 10
    dataset_config = {
        "batch_size": 1,
        "framework": "numpy",
        "module": "armory.data.adversarial_datasets",
        "name": "carla_video_tracking_dev",
    }
    dev_dataset = load_dataset(
        dataset_config,
        epochs=1,
        split="dev",
        num_batches=NUM_TEST_SAMPLES,
        shuffle_files=False,
    )
    for x, y in dev_dataset:
        y_object, y_patch_metadata = y
        # Seed the tracker with the ground-truth box of the first frame.
        y_init = np.expand_dims(y_object[0]["boxes"][0], axis=0)
        y_pred = tracker.predict(x, y_init=y_init)
        mean_iou = video_tracking_mean_iou(y_object, y_pred)[0]
        assert mean_iou > 0.45
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_carla_od_rgb():
    """Pretrained single-modality (RGB) CARLA Faster R-CNN sanity check.

    Evaluates per-class AP on the benign train split and on the
    adversarial dev and test splits; each class 1..3 must exceed 0.35.
    """
    detector_module = import_module(
        "armory.baseline_models.pytorch.carla_single_modality_object_detection_frcnn"
    )
    detector_fn = getattr(detector_module, "get_art_model")
    weights_path = maybe_download_weights_from_s3("carla_rgb_weights.pt")
    detector = detector_fn(
        model_kwargs={"num_classes": 4}, wrapper_kwargs={}, weights_path=weights_path,
    )
    NUM_TEST_SAMPLES = 10

    def _check_ap(module, name, split, adversarial):
        # Run the detector over one split and assert per-class AP > 0.35.
        dataset = load_dataset(
            {
                "batch_size": 1,
                "framework": "numpy",
                "module": module,
                "name": name,
            },
            modality="rgb",
            epochs=1,
            split=split,
            num_batches=NUM_TEST_SAMPLES,
            shuffle_files=False,
        )
        ys = []
        y_preds = []
        for x, y in dataset:
            if adversarial:
                # Adversarial datasets yield (y, y_patch_metadata) pairs.
                y, _y_patch_metadata = y
                ys.append(y)
            else:
                ys.extend(y)
            y_preds.extend(detector.predict(x))
        ap_per_class = object_detection_AP_per_class(ys, y_preds)
        # Bug fix: the original `assert [expr for i in range(1, 4)]` asserted
        # a non-empty list, which is ALWAYS truthy; all() enforces thresholds.
        assert all(ap_per_class[i] > 0.35 for i in range(1, 4))

    _check_ap("armory.data.datasets", "carla_obj_det_train", "train", False)
    _check_ap("armory.data.adversarial_datasets", "carla_obj_det_dev", "dev", True)
    _check_ap("armory.data.adversarial_datasets", "carla_obj_det_test", "test", True)
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_carla_od_depth():
    """Pretrained single-modality (depth) CARLA Faster R-CNN sanity check.

    Evaluates per-class AP on the benign train split and on the
    adversarial dev and test splits; each class 1..3 must exceed 0.35.
    """
    detector_module = import_module(
        "armory.baseline_models.pytorch.carla_single_modality_object_detection_frcnn"
    )
    detector_fn = getattr(detector_module, "get_art_model")
    weights_path = maybe_download_weights_from_s3("carla_depth_weights.pt")
    detector = detector_fn(
        model_kwargs={"num_classes": 4}, wrapper_kwargs={}, weights_path=weights_path,
    )
    NUM_TEST_SAMPLES = 10

    def _check_ap(module, name, split, adversarial):
        # Run the detector over one split and assert per-class AP > 0.35.
        dataset = load_dataset(
            {
                "batch_size": 1,
                "framework": "numpy",
                "module": module,
                "name": name,
            },
            modality="depth",
            epochs=1,
            split=split,
            num_batches=NUM_TEST_SAMPLES,
            shuffle_files=False,
        )
        ys = []
        y_preds = []
        for x, y in dataset:
            if adversarial:
                # Adversarial datasets yield (y, y_patch_metadata) pairs.
                y, _y_patch_metadata = y
                ys.append(y)
            else:
                ys.extend(y)
            y_preds.extend(detector.predict(x))
        ap_per_class = object_detection_AP_per_class(ys, y_preds)
        # Bug fix: the original `assert [expr for i in range(1, 4)]` asserted
        # a non-empty list, which is ALWAYS truthy; all() enforces thresholds.
        assert all(ap_per_class[i] > 0.35 for i in range(1, 4))

    _check_ap("armory.data.datasets", "carla_obj_det_train", "train", False)
    _check_ap("armory.data.adversarial_datasets", "carla_obj_det_dev", "dev", True)
    _check_ap("armory.data.adversarial_datasets", "carla_obj_det_test", "test", True)
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_carla_od_multimodal():
    """Pretrained multimodal (RGB+depth) CARLA Faster R-CNN sanity check.

    Evaluates per-class AP on the benign train split and on the
    adversarial dev and test splits; each class 1..3 must exceed 0.35.
    """
    detector_module = import_module(
        "armory.baseline_models.pytorch.carla_multimodality_object_detection_frcnn"
    )
    detector_fn = getattr(detector_module, "get_art_model_mm")
    weights_path = maybe_download_weights_from_s3("carla_multimodal_naive_weights.pt")
    detector = detector_fn(
        model_kwargs={}, wrapper_kwargs={}, weights_path=weights_path,
    )
    NUM_TEST_SAMPLES = 10

    def _check_ap(module, name, split, adversarial):
        # Run the detector over one split and assert per-class AP > 0.35.
        dataset = load_dataset(
            {
                "batch_size": 1,
                "framework": "numpy",
                "module": module,
                "name": name,
            },
            modality="both",
            epochs=1,
            split=split,
            num_batches=NUM_TEST_SAMPLES,
            shuffle_files=False,
        )
        ys = []
        y_preds = []
        for x, y in dataset:
            if adversarial:
                # Adversarial datasets yield (y, y_patch_metadata) pairs.
                y, _y_patch_metadata = y
                ys.append(y)
            else:
                ys.extend(y)
            y_preds.extend(detector.predict(x))
        ap_per_class = object_detection_AP_per_class(ys, y_preds)
        # Bug fix: the original `assert [expr for i in range(1, 4)]` asserted
        # a non-empty list, which is ALWAYS truthy; all() enforces thresholds.
        assert all(ap_per_class[i] > 0.35 for i in range(1, 4))

    _check_ap("armory.data.datasets", "carla_obj_det_train", "train", False)
    _check_ap("armory.data.adversarial_datasets", "carla_obj_det_dev", "dev", True)
    _check_ap("armory.data.adversarial_datasets", "carla_obj_det_test", "test", True)
@pytest.mark.usefixtures("ensure_armory_dirs")
def test_carla_od_multimodal_robust_fusion():
    """Robust-fusion multimodal CARLA Faster R-CNN sanity check.

    Evaluates per-class AP on the benign train split only; each class
    1..3 must exceed 0.35.
    """
    detector_module = import_module(
        "armory.baseline_models.pytorch.carla_multimodality_object_detection_frcnn_robust_fusion"
    )
    detector_fn = getattr(detector_module, "get_art_model_mm_robust")
    weights_path = maybe_download_weights_from_s3(
        "carla_multimodal_robust_clw_1_weights.pt"
    )
    detector = detector_fn(
        model_kwargs={}, wrapper_kwargs={}, weights_path=weights_path,
    )
    NUM_TEST_SAMPLES = 10
    dataset_config = {
        "batch_size": 1,
        "framework": "numpy",
        "module": "armory.data.datasets",
        "name": "carla_obj_det_train",
    }
    train_dataset = load_dataset(
        dataset_config,
        modality="both",
        epochs=1,
        split="train",
        num_batches=NUM_TEST_SAMPLES,
        shuffle_files=False,
    )
    ys = []
    y_preds = []
    for x, y in train_dataset:
        ys.extend(y)
        y_preds.extend(detector.predict(x))
    ap_per_class = object_detection_AP_per_class(ys, y_preds)
    # Bug fix: the original `assert [expr for i in range(1, 4)]` asserted
    # a non-empty list, which is ALWAYS truthy; all() enforces thresholds.
    assert all(ap_per_class[i] > 0.35 for i in range(1, 4))
|
12,578 | 823080ee874427a5901558f707f4ca2bf927a869 |
import requests
from pprint import pprint
import config
import logging
from aiogram import Bot, Dispatcher, executor, types
from aiogram.dispatcher import FSMContext
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher.filters.state import State, StatesGroup
def func1(tag):
    """Print the tag list of every Codeforces problem carrying *tag*.

    Queries the public problemset.problems API endpoint.
    """
    r = requests.get(
        f"https://codeforces.com/api/problemset.problems?tags={tag}"
    )
    data = r.json()
    # Bug fix: the original indexed problems[0] inside the loop, printing the
    # FIRST problem's tags on every iteration; print each problem's own tags.
    for problem in data['result']['problems']:
        pprint(problem['tags'])
tag = "2-sat"
func1(tag) |
12,579 | 21f82801b5d324d0aac073dfc36a691a4d92e687 | import math
# Evaluate sin(60 deg) and tan(90 deg), rounded to two decimal places.
# Note: tan(90 deg) is finite here (~1.6e16) because pi/2 cannot be
# represented exactly as a float.
sin60Rounded = round(math.sin(math.radians(60)), 2)
tan90Rounded = round(math.tan(math.radians(90)), 2)
print("Sin60:{}".format(sin60Rounded))
print("tan90:{}".format(tan90Rounded))
12,580 | 929232be7b0eb0faff26c3b126b3e451dbb63afa | from django.db import models
#from django.contrib.auth.models import User
class Persona(models.Model):
    """Abstract base with the personal/contact fields shared by people models."""
    nombres = models.CharField(max_length=255)
    apellidos = models.CharField(max_length=255)
    cedula = models.CharField(max_length=255)
    sexo = models.CharField(max_length=1)
    telefono_convencional = models.CharField(max_length=10,null=True)
    telefono_celular = models.CharField(max_length=10,null=True)
    email = models.EmailField()
    class Meta:
        # No table of its own; fields are inherited by concrete subclasses.
        abstract = True
# Geographic hierarchy: Pais -> Provincia -> Canton -> Parroquia -> Direccion.
# NOTE(review): positional ForeignKey without on_delete implies Django < 2.0.
class Pais(models.Model):
    nombre = models.CharField(max_length=255)
class Provincia(models.Model):
    nombre = models.CharField(max_length=255)
    pais = models.ForeignKey(Pais)
class Canton(models.Model):
    nombre = models.CharField(max_length=255)
    provincia = models.ForeignKey(Provincia)
class Parroquia(models.Model):
    nombre = models.CharField(max_length=255)
    ciudad = models.ForeignKey(Canton)
class Direccion(models.Model):
    """Street address anchored to a parish (parroquia)."""
    calle_principal = models.CharField(max_length=255)
    calle_secundaria = models.CharField(max_length=255)
    numero_casa = models.IntegerField()
    parroquia=models.ForeignKey(Parroquia)
class TipoUsuario(models.Model):
    """User role/type (lookup table)."""
    nombre = models.CharField(max_length=100)
class Usuario(models.Model):
    """Login account. NOTE(review): stores the password as plain CharField --
    consider Django's auth framework / hashing."""
    usuario = models.CharField(max_length=25)
    clave = models.CharField(max_length=16)
    tipo_usuario = models.ForeignKey(TipoUsuario)
class Empleados(Persona):
    """Staff member; inherits personal fields from Persona."""
    direccion = models.ForeignKey(Direccion, null=True)
    usuario = models.ForeignKey(Usuario)
class Progenitores(Persona):
    """A student's parent."""
    nivel_educacion = models.CharField(max_length=50)
    profesion = models.CharField(max_length=150, null=True)
    ocupacion = models.CharField(max_length=150)
class Pariente(models.Model):
    """Kinship type (lookup table, e.g. uncle, grandmother)."""
    nombre = models.CharField(max_length=100)
class Representante(Persona):
    """A student's legal guardian/representative."""
    # NOTE(review): `ocupacion` is declared twice in this class body; the
    # second declaration (max_length=150) silently wins -- remove one.
    ocupacion = models.CharField(max_length=255)
    pariente = models.ForeignKey(Pariente, null=True)
    nivel_educacion = models.CharField(max_length=50)
    profesion = models.CharField(max_length=150, null=True)
    ocupacion = models.CharField(max_length=150)
    vive_estudiante = models.BooleanField(default=False)
    direccion = models.ForeignKey(Direccion)
class Cursos(models.Model):
    """Course/grade level."""
    descripcion = models.CharField(max_length=255)
    estado = models.BooleanField(default=True)
class Paralelos(models.Model):
    """Class section letter (A, B, ...)."""
    descripcion = models.CharField(max_length=1)
    estado = models.BooleanField(default=True)
class Especialidad(models.Model):
    """Academic specialization track."""
    nombre = models.CharField(max_length=150)
    estado = models.BooleanField(default=True)
class DetalleCursoParaleloEspecialidad(models.Model):
    """Concrete course offering (course + section + optional track) with seat counts."""
    cursos = models.ForeignKey(Cursos)
    especialidad = models.ForeignKey(Especialidad, null=True)
    paralelos = models.ForeignKey(Paralelos)
    cupos_disponibles = models.IntegerField()
    maximo_cupos = models.IntegerField()
class Estudiantes(Persona):
    """Student record; inherits personal fields from Persona."""
    direccion = models.ForeignKey(Direccion, null=True)
    fecha_nacimiento = models.DateField(null=True)
    nacionalidad = models.CharField(max_length=250, default="Ecuador")
    nombres_persona_emergencia = models.CharField(max_length=255, null=True)
    observaciones = models.CharField(max_length=10, null=True)
    progenitor = models.ForeignKey(Progenitores , null=True)
    # True until the student logs in for the first time.
    primer_acceso=models.BooleanField(default=True)
    representante = models.ForeignKey(Representante, null=True)
    telefono_convencional_e = models.CharField(max_length=10, null=True)
    telefono_celular_e = models.CharField(max_length=10, null=True)
    usuario=models.ForeignKey(Usuario)
class DetalleEstudiantesProgenitores(models.Model):
progenitores = models.ForeignKey(Progenitores)
estudiantes = models.ForeignKey(Estudiantes)
sexo = models.CharField(max_length=1)
es_representante=models.BooleanField(default=False)
es_huerfano=models.BooleanField(default=False)
vive_estudiante = models.BooleanField(default=False)
retira_carpeta_estudiantil = models.BooleanField(default=False)
class Matricula(models.Model):
    # Enrollment of a student into a concrete (course, section, specialty) offering.
    det_cur_par_es = models.ForeignKey(DetalleCursoParaleloEspecialidad)
    estudiantes = models.ForeignKey(Estudiantes)
    fecha_matricula = models.DateField()
    numero_matricula = models.IntegerField()
    modalidad = models.CharField(max_length=155)
    seccion = models.CharField(max_length=50)
    folio = models.IntegerField(null=True)  # registry book page, if assigned
    tipo_matricula = models.CharField(max_length=50,default='Ordinaria')
class Institucion(models.Model):
    # Institution profile (name, mission/vision, contact data, address).
    nombre = models.CharField(max_length=255)
    email = models.EmailField()
    mision = models.TextField()
    vision = models.TextField()
    convencional = models.CharField(max_length=10)  # landline phone
    celular = models.CharField(max_length=10)  # mobile phone
    direccion = models.ForeignKey(Direccion)
class Parametros(models.Model):
    # System parameters: the enrollment window.
    fecha_inicio_matriculas = models.DateField()
    fecha_fin_matriculas = models.DateField()
class EncuestasGraduados(models.Model):
    # Graduate survey with its active date range.
    tema = models.CharField(max_length=255)
    descripcion = models.CharField(max_length=255)
    #estudiante = models.ForeignKey(Estudiantes)
    fecha_inicio = models.DateField()
    fecha_fin = models.DateField()
class TipoPregunta(models.Model):
    # Question type (e.g. single choice, free text).
    nombre = models.CharField(max_length=255)
    descripcion = models.CharField(max_length=255)
class Preguntas(models.Model):
    # A survey question, belonging to one survey and one question type.
    descripcion = models.CharField(max_length=450)
    encuestas_graduados = models.ForeignKey(EncuestasGraduados)
    tipo_pregunta = models.ForeignKey(TipoPregunta)
class OpcionesPreguntas(models.Model):
    # A selectable answer option.
    descripcion = models.TextField(null=True)
    another_option = models.BooleanField(default=False)  # whether the answer is free text
class DetallePreguntasOpciones(models.Model):
    # Join model linking a question with one of its options and the given answer.
    preguntas = models.ForeignKey(Preguntas)
    opciones = models.ForeignKey(OpcionesPreguntas)
    descripcion = models.TextField(null=True)  # free-text answer, if any
    respuesta = models.BooleanField(default=False)  # option was selected
class DetalleEstudianteEncuesta(models.Model):
    # Records that a student participated in a survey.
    estudiante = models.ForeignKey(Estudiantes)
    encuesta = models.ForeignKey(EncuestasGraduados)
|
12,581 | 07aeea887f2193c4055c813b31446fbb733e8393 | from django.shortcuts import render
from ..models import Site
from django.core.paginator import Paginator, EmptyPage
from copy import deepcopy
from django.db.models import Avg
def embed_sites(request):
    """Render an embeddable, paginated, sortable table of editing Sites.

    Filters come either from the main search form (``chromosome_field`` etc.)
    or from a miRNA link (``mirna``/``gl``); any other request falls back to
    the empty search form.
    """
    # Copy the GET params so sort/page state can be tweaked without mutating request.GET.
    search_request_dict = deepcopy(request.GET)
    # Default to page 1 when no "page" parameter was given.
    if request.method == 'GET' and 'page' in request.GET:
        page = int(request.GET["page"])
    else:
        page = 1
    # Determine which fields the user filled in on the initial search form.
    if "chromosome_field" in request.GET:
        has_chromosome = (request.GET["chromosome_field"] != "0")
        has_gene_name = (len(request.GET["gene_name_field"]) != 0)
        has_genomic_region = (request.GET["genomic_region_field"] != "any")
        has_aa_change = (request.GET["aa_change_field"] != "any")
        has_rep = (request.GET["repeat_field"] != "any")
        # Base ORM filter kwargs; 'redi' is always present.
        criterias = {'redi': 1}
        if has_chromosome:
            chromosome = "chr" + request.GET["chromosome_field"]
            criterias['chromo'] = chromosome
        # Numeric range filters copied straight into the ORM filter kwargs.
        for f in ['gain__gte', 'gain__lte', 'loss__gte', 'loss__lte', 'loc__gte', 'loc__lte',
                  'siteanno__has_cox__gte', 'siteanno__has_cox__lte']:
            if request.GET[f] != '':
                criterias[f] = int(request.GET[f])
        if has_gene_name:
            gene_name = request.GET["gene_name_field"]
            criterias['gene'] = gene_name
        if has_genomic_region:
            criterias['region'] = request.GET["genomic_region_field"]
        if has_aa_change:
            criterias['aa'] = request.GET["aa_change_field"]
        if has_rep:
            criterias['location'] = request.GET["repeat_field"]
        # NOTE(review): any(criterias) iterates dict KEYS and 'redi' is always
        # present, so this guard is always true as written -- confirm intent.
        if any(criterias):
            editing_site_module_set = Site.objects.filter(**criterias)
    elif 'mirna' in request.GET:  # if search came from a miRNA link
        mir = request.GET['mirna']
        gl = request.GET['gl']
        criterias = {gl: mir}
        editing_site_module_set = Site.objects.filter(**criterias)
    else:
        # No recognised search parameters: show the empty search form.
        return render(request, "search_form.html")
    # Sorting state handling; data fetching works the same with or without a barcode.
    if 'current_sort' in request.GET:
        current_sort = request.GET["current_sort"]
    else:
        current_sort = "site"
    if 'sorted_direction' in request.GET:
        sorted_direction = request.GET["sorted_direction"]
    else:
        sorted_direction = "up"
    if "click_sort" in request.GET:
        if current_sort == request.GET["click_sort"]:
            # Clicking the already-active column toggles the direction.
            if sorted_direction == "up":
                if request.GET["click_sort"] == "site":
                    # "site" sorts by chromosome then position.
                    editing_site_module_set = editing_site_module_set.order_by("-chromo", "-loc")
                else:
                    editing_site_module_set = editing_site_module_set.order_by("-" + request.GET["click_sort"])
                search_request_dict["sorted_direction"] = "down"
            else:
                if request.GET["click_sort"] != "site":
                    editing_site_module_set = editing_site_module_set.order_by(request.GET["click_sort"])
                search_request_dict["sorted_direction"] = "up"
        else:
            # A different column was clicked: sort ascending by it.
            if request.GET["click_sort"] != "site":
                editing_site_module_set = editing_site_module_set.order_by(request.GET["click_sort"])
            search_request_dict["sorted_direction"] = "up"
            search_request_dict["current_sort"] = request.GET["click_sort"]
    else:
        if current_sort != "site":
            editing_site_module_set = editing_site_module_set.order_by(current_sort)
        search_request_dict["sorted_direction"] = "up"
        search_request_dict["current_sort"] = current_sort
    # Rows per page selected by the user; the code default is 10
    # (the original comment said 50 -- TODO confirm intended default).
    if 'datas_per_page' in request.GET:
        datas_per_page = int(request.GET["datas_per_page"])
    else:
        datas_per_page = 10
    # Cap the result set at 100k rows before paginating.
    paginator = Paginator(editing_site_module_set[:100000], datas_per_page)
    try:
        editing_site_modules = paginator.page(page)
        print('check 1')
    except EmptyPage:
        # Past-the-end page numbers clamp to the last page.
        editing_site_modules = paginator.page(paginator.num_pages)
    # Average REDI level per displayed site; 2 marks "no data".
    rediLevels = [x.redilevel_set.aggregate(Avg('level'))['level__avg'] for x in editing_site_modules]
    rediLevels = [2 if x == None else x for x in rediLevels]
    # Rebuild query strings: page_record keeps sort state (for page links),
    # search_record keeps the raw search (for re-sorting links).
    page_record = []
    search_record = []
    for key in search_request_dict:
        if key != "page" and key != "click_sort":
            page_record.append(key + "=" + search_request_dict[key])
        if key != "current_sort" and key != "sorted_direction":
            search_record.append(key + "=" + search_request_dict[key])
    page_record = "&".join(page_record)
    search_record = "&".join(search_record)
    return render(request, "embedtable.html", {"editing_modules": editing_site_modules,
                  "page_record": page_record, "search_record": search_record, "search_request_dict": search_request_dict,
                  "datas_per_page": datas_per_page, "sorted_direction": search_request_dict["sorted_direction"],
                  "current_sort": search_request_dict["current_sort"], 'rl': rediLevels,
                  'cri': criterias, 'target_frame': request.GET['frame']})
|
12,582 | ee29f01c92262a84a3860b224eceb9eed1cd4a94 | """
**********************************************************
* Main thread that runs the IoT Doorbell application *
**********************************************************
Main thread runs the Application and links all the features of the doorbell together
Switch face check On/Off
Run listen function listening to Firebase real time database changes
Controls all the features of IoT Doorbell
"""
from aws import AWS
from firebase import Firebase
from time import sleep
from new_face import NewFace
from lock import Solenoid
from bell_button import BellButton
import os
from check_face import FaceCheck
# Function to turn off facial recognition checking and update firebase value
def switch_face_check_off():
    """Disable facial-recognition checking and mirror the state to Firebase."""
    fc.perform_action = False
    fb.update_data({
        'doorbell/facial_recognition/is_active': 0
    })
    # Brief pause -- presumably to let clients observe the state change; confirm.
    sleep(2)
# Function to turn on facial recognition checking and update firebase value
def switch_face_check_on():
    """Enable facial-recognition checking and mirror the state to Firebase."""
    fc.perform_action = True
    fb.update_data({
        'doorbell/facial_recognition/is_active': 1
    })
    # Brief pause -- presumably to let clients observe the state change; confirm.
    sleep(2)
# Function to listen to changes from Firebase real time database
def listen():
    """Poll the Firebase realtime database forever and dispatch doorbell actions.

    NOTE(review): this is a tight busy-poll with no delay between iterations
    (beyond the sleeps inside the switch helpers) -- confirm that is intended.
    """
    while True:
        # Set data variable to the database root reference
        data = fb.get_data()
        # Get AWS started database status
        aws_start_requested = data['doorbell']['streaming']['start_requested']
        # Get AWS stopped database status
        aws_stop_requested = data['doorbell']['streaming']['stop_requested']
        # Get new face database status
        new_face_request = data['doorbell']['face']['start_new']
        # Get audio database status
        audio = data['doorbell']['audio']['state']
        # Get lock database status
        is_unlocked = data['doorbell']['lock']['state']
        # AWS start check
        if aws_start_requested == 1 and aws.is_active is False:
            # Turn face check off
            switch_face_check_off()
            # Start the AWS stream
            aws.start_stream()
        # AWS stop check
        if aws_stop_requested == 1 and aws.is_active is True:
            # Stop the AWS stream
            aws.stop_stream()
            # Turn face check on
            switch_face_check_on()
        # if new face requested and AWS stream is inactive
        if new_face_request == 1 and nf.is_active is False and aws.is_active is False:
            print("Adding Face")
            # Turn off face check
            switch_face_check_off()
            # Start adding new face
            nf.take_pictures()
            # Turn face check on
            switch_face_check_on()
        # if the lock is set to 1 in Firebase, unlock the door
        if is_unlocked == 1:
            print("Unlocking Door")
            lock.unlock_door()
            print("Door Locked")
        # if audio is set to new in Firebase, download the latest audio file and play it via the speaker
        if audio == "new":
            fb.get_storage()
            os.system("omxplayer audioVisitor.mp3")
            fb.update_data({
                'doorbell/audio/state': 'waiting'
            })
if __name__ == '__main__':
    # Wire up all doorbell components, start the worker threads, then block in listen().
    # Firebase class instance
    fb = Firebase()
    # AWS class instance
    aws = AWS(fb)
    # NewFace class instance
    nf = NewFace(fb)
    # Solenoid Lock class instance
    lock = Solenoid()
    # Face Check class instance
    fc = FaceCheck()
    # Doorbell button class instance
    bb = BellButton(aws, nf, switch_face_check_on, switch_face_check_off)
    # Start the BellButton Thread class
    bb.start()
    # Start the Face Check Thread class
    fc.start()
    # Turn on face check
    switch_face_check_on()
    # Run the listen function (never returns)
    listen()
|
12,583 | 8dfd02840706455f17a215deac6cda4db5747407 | import base64
# Read the Base64 payload from data.txt, decode it, and print the decoded text.
# Fix: the input file is now closed deterministically via a context manager
# (the original never called source_file.close()).
with open("data.txt", "r") as source_file:
    contents = source_file.read()
decoded = base64.b64decode(contents)
print(decoded.decode())
# NOTE(review): the previously commented-out persistence code wrote the *bytes*
# object to a text-mode file, which would raise TypeError; decode first if
# re-enabling, e.g.:
#     with open("data_decoded.txt", "w") as decoded_file:
#         decoded_file.write(decoded.decode())
12,584 | 4db98722a9901d1de190232c83f016ab555f59dc | # coding: utf- 8
def title(titl):
    """Print *titl* on a fresh line, framed by ten '>' and ten '<' characters."""
    banner = ">" * 10 + titl + "<" * 10
    print("\n" + banner)
# Demonstrate boolean literals, their type, and comparison results.
title( ' bool ')
a = True
b = False
print( a )
print( b )
print( type(a) )
print( type(b) )
print( 1 == 1)
print( 2 > 1 )
print( 2 < 1 )
12,585 | 15557d5638783a874a2733ec5998eaed808c0cd7 | '''
递归:一个函数在自己函数内部调用自己。
循环
'''
# Module-level countdown consumed by func() below.
num = 5


def func():
    """Print "12345" once per remaining unit of the global ``num`` counter.

    Mutates ``num`` down to 0 via recursion. Returns 0 when the counter is
    already exhausted on entry, otherwise None.
    """
    global num
    if num != 0:
        print("12345")
        num -= 1
        func()
    else:
        return 0


func()
# 复习递归遍历目录
|
12,586 | 423a29ca544ae7a41a362e7d80d1f5c61d7b3a69 | """
Tests for tasking exceptions
"""
import os
import zipfile
from django.test import TestCase, override_settings
from tasking.common_tags import (
MISSING_FILE,
NO_SHAPEFILE,
TARGET_DOES_NOT_EXIST,
UNNECESSARY_FILE,
)
from tasking.exceptions import (
MissingFiles,
ShapeFileNotFound,
TargetDoesNotExist,
UnnecessaryFiles,
)
from tasking.utils import get_shapefile, get_target
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class TestExceptions(TestCase):
    """
    Test class for tasking exceptions

    Each test feeds a crafted fixture (or bogus target) to the utils helpers
    and asserts the raised exception carries the expected message constant.
    """

    def test_target_does_not_exist(self):
        """
        Test TargetDoesNotExist error message is what we expect
        """
        with self.assertRaises(TargetDoesNotExist) as context:
            get_target(app_label="foo", target_type="bar")
        the_exception = context.exception
        self.assertEqual(TARGET_DOES_NOT_EXIST, the_exception.message)

    def test_shape_file_not_found(self):
        """
        Test ShapeFileNotFound error message is what we expect
        """
        # Fixture zip deliberately lacks the .shp member.
        path = os.path.join(BASE_DIR, "tests", "fixtures", "missing_shp.zip")
        zip_file = zipfile.ZipFile(path)
        with self.assertRaises(ShapeFileNotFound) as context:
            get_shapefile(zip_file)
        the_exception = context.exception
        self.assertEqual(NO_SHAPEFILE, the_exception.message)

    def test_missing_dbf(self):
        """
        Test missing .dbf file
        """
        path = os.path.join(BASE_DIR, "tests", "fixtures", "missing_dbf.zip")
        zip_file = zipfile.ZipFile(path)
        with self.assertRaises(MissingFiles) as context:
            get_shapefile(zip_file)
        the_exception = context.exception
        self.assertEqual(MISSING_FILE, the_exception.message)

    def test_missing_shx(self):
        """
        Test missing .shx file
        """
        path = os.path.join(BASE_DIR, "tests", "fixtures", "missing_shx.zip")
        zip_file = zipfile.ZipFile(path)
        with self.assertRaises(MissingFiles) as context:
            get_shapefile(zip_file)
        the_exception = context.exception
        self.assertEqual(MISSING_FILE, the_exception.message)

    # The file-count check only runs when this setting is enabled.
    @override_settings(TASKING_CHECK_NUMBER_OF_FILES_IN_SHAPEFILES_DIR=True)
    def test_missing_files(self):
        """
        Test MissingFiles error message is what we expect
        """
        path = os.path.join(BASE_DIR, "tests", "fixtures", "test_missing_files.zip")
        zip_file = zipfile.ZipFile(path)
        with self.assertRaises(MissingFiles) as context:
            get_shapefile(zip_file)
        the_exception = context.exception
        self.assertEqual(MISSING_FILE, the_exception.message)

    @override_settings(TASKING_CHECK_NUMBER_OF_FILES_IN_SHAPEFILES_DIR=True)
    def test_unnecessary_files(self):
        """
        Test UnnecessaryFiles error message is what we expect
        """
        path = os.path.join(BASE_DIR, "tests", "fixtures", "test_unnecessary_files.zip")
        zip_file = zipfile.ZipFile(path)
        with self.assertRaises(UnnecessaryFiles) as context:
            get_shapefile(zip_file)
        the_exception = context.exception
        self.assertEqual(UNNECESSARY_FILE, the_exception.message)
|
12,587 | 367d794fa185906b29d457bbe7f77ac9c17cf0ae | import numpy as np
# Small tour of NumPy reshaping APIs: transpose, ravel, reshape, resize.
a = np.array((
    [1,2,3],
    [4,5,6]
))
print('matrix a dengan ukuran', a.shape)
print('matrix a:')
print(a)
# three ways to transpose a matrix
print('transpose matrix dari a:')
print(a.transpose())
print(np.transpose(a))
print(a.T)
# flatten the array into a row vector
print('flatten matrix a:')
print(a.ravel())
print(np.ravel(a))
# reshape the matrix (returns a new view/array, does not modify a)
print('reshape matrix a:')
print(a.reshape(3,2))  # rows * cols must equal the original element count
print(a.reshape(6,1))
# resize the matrix:
# mutates a in place and returns None (unlike reshape)
print('resize matrix a:')
a.resize(3,2)
print(a)  # a itself has changed
print(a.resize(3,2))  # prints None, since resize returns None
print('matrix a dengan ukuran', a.shape)
|
12,588 | 620d0dbbdaec080e71a2207379320a65f4b2d64f | #!/usr/bin/env python3
from os import path
from networkx import DiGraph, topological_sort
def main():
    """Solve Advent of Code 2016 day 10 by propagating chips through a bot DAG."""
    with open(path.join(path.dirname(__file__), "input.txt")) as f:
        g = DiGraph()
        for line in f:
            line = line.strip()
            parts = line.split()
            if parts[0] == "value":
                # "value V goes to bot B": seed bot B with chip V.
                val = int(parts[1])
                bot = int(parts[5])
                if bot not in g:
                    g.add_node(bot, vals=[val])
                else:
                    g.nodes[bot]["vals"].append(val)
            else:
                # "bot S gives low to <type> L and high to <type> H"
                start = int(parts[1])
                low_type = parts[5]
                low_to = int(parts[6])
                high_type = parts[10]
                high_to = int(parts[11])
                if start not in g:
                    g.add_node(start, vals=[])
                if low_type == "bot":
                    if low_to not in g:
                        g.add_node(low_to, vals=[])
                    # Edge attribute "x" records which chip travels along it.
                    g.add_edge(start, low_to, x="low")
                else:
                    # Low chip goes to an output bin instead of another bot.
                    g.nodes[start]["output_low"] = low_to
                if high_type == "bot":
                    if high_to not in g:
                        g.add_node(high_to, vals=[])
                    g.add_edge(start, high_to, x="high")
                else:
                    g.nodes[start]["output_high"] = high_to
    outputs = {}
    # Topological order guarantees every bot holds both chips before handing off.
    for bot in topological_sort(g):
        low = min(g.nodes[bot]["vals"])
        high = max(g.nodes[bot]["vals"])
        if low == 17 and high == 61:
            print("Part 1:", bot)
        for n in g[bot]:
            if g[bot][n]["x"] == "low":
                g.nodes[n]["vals"].append(low)
            else:
                g.nodes[n]["vals"].append(high)
        if "output_low" in g.nodes[bot]:
            outputs[g.nodes[bot]["output_low"]] = low
        if "output_high" in g.nodes[bot]:
            outputs[g.nodes[bot]["output_high"]] = high
    print("Part 2:", outputs[0] * outputs[1] * outputs[2])


if __name__ == "__main__":
    main()
|
12,589 | d70497ea71613df12fd1b3434bc3111bc40678dd | from data_proc.DataGeneratorCelebA import DataGeneratorCelebA
from data_proc.DataGeneratorCelebASparse import create_map
from data_proc.DataGeneratorWiki import load_config_wiki
from data_proc.ConfigLoaderCelebA import load_attr_vals_txts, load_atributes_txts
import numpy as np
from keras.preprocessing import image
from data_proc.ImageHandler import get_image
from data_proc.ImagePreProcess import load_crop_boxes
# Dataset locations and split-config filenames.
# IMAGES_FOLDER_WIKI = "data_proc/data/"
IMAGES_FOLDER_CELEB = "data_proc/CelebA/img_align_celeba/"
CONF_FILE_WIKI = "wiki_cat_merged.txt"
CONF_FILE_IMDB = "imdb.txt"
IMAGES_FOLDER_IMDB = "data_proc/data/imdb/"
def create_map_m(attr_vals):
    """Build ``{image_name: attribute_list}`` from raw attribute-file lines.

    Each line looks like ``some/path/img.jpg v1 v2 ...``. The path is reduced
    to its basename, every integer value is shifted down by one, and a
    trailing ``-1`` placeholder is appended for the (unknown) age attribute.

    :param attr_vals: raw lines from the attribute file (list of strings)
    :return: dictionary {name_of_image: list_of_ints}
    """
    mapping = {}
    for line in attr_vals:
        tokens = line.split()
        image_name = tokens[0].split("/")[-1]
        shifted = [int(tok) - 1 for tok in tokens[1:]]
        # Placeholder label for age, which this attribute file does not carry.
        shifted.append(-1)
        mapping[image_name] = shifted
    return mapping
def load_config_merged(conf_file):
    """Parse a tab-separated IMDB split-config file.

    Column 6 is the age, column 7 the gender ('F' or not), and the last column
    selects the split (1=train, 2=val, 3=test). Only gender and an age bucket
    are known for IMDB rows; the other attribute slots are filled with -1.

    :param conf_file: file name under data_proc/config_files/
    :return: (train_ids, val_ids, test_ids, attr_map)
    """
    train = set()
    val = set()
    test = set()
    attr_map = {}
    with open("data_proc/config_files/" + conf_file, encoding="utf8") as f:
        lines = f.readlines()
    for line in lines:
        arr = line.split("\t")
        key = arr[0]
        # if invalid_img(IMAGES_FOLDER_IMDB + key):
        #     continue
        gender_i = 0
        if arr[7] == 'F':
            gender_i = 1
        # Bucket the age into six categories: <22, <30, <40, <50, <60, 60+.
        if int(arr[6]) < 22:
            age_cat = 0
        elif int(arr[6]) < 30:
            age_cat = 1
        elif int(arr[6]) < 40:
            age_cat = 2
        elif int(arr[6]) < 50:
            age_cat = 3
        elif int(arr[6]) < 60:
            age_cat = 4
        else:
            age_cat = 5
        # [-1, -1, gender, -1, -1, age]: only gender and age are known for IMDB.
        attr_map[key] = [-1, -1, gender_i, -1, -1, age_cat]
        # Last column (with its trailing newline) selects the split.
        if arr[-1] == "1\n":
            train.add(key)
        if arr[-1] == "2\n":
            val.add(key)
        if arr[-1] == "3\n":
            test.add(key)
    print("---Training set has len: ", str(len(train)))
    print("---Testing set has len: ", str(len(test)))
    print("---Validation set has len: ", str(len(val)))
    return list(train), list(val), list(test), attr_map
class DataGeneratorMerged(DataGeneratorCelebA):
    """Chunked data generator that serves both the IMDB and CelebA datasets."""

    def __init__(self, img_shape=(100, 100), chunk_size=1024):
        """
        :param img_shape: resolution of final image
        :param chunk_size: size of super batch
        :param rot_int: interval for image rotation
        """
        'Initialization'
        self.img_shape = img_shape
        self.chunk_size = chunk_size
        # CelebA attribute map: image name -> attribute vector.
        self.attr_map_celeb = create_map_m(load_atributes_txts())
        self.coord_dict = load_crop_boxes()
        # split data to training,testing,validation
        self.train_ids_imdb, self.validation_ids_imdb, self.test_ids_imdb, self.attr_map_imdb = load_config_merged(CONF_FILE_IMDB)
        self.train_ids = []
        self.test_ids = []
        self.validation_ids = []
        # Presumably populates the three CelebA id lists above (inherited from
        # DataGeneratorCelebA) -- confirm against the parent class.
        self.find_split_ids()
        self.img_source = IMAGES_FOLDER_CELEB

    def generate_data_m(self, pict_ids_imdb, pict_ids_c):
        """
        Generates chunks of (images, labels), first for IMDB then for CelebA.

        :param pict_ids_imdb: ids of IMDB pictures
        :param pict_ids_c: ids of CelebA pictures
        :return: generator yielding (stacked_images, per-attribute label arrays)

        NOTE(review): the ``while indx <= len(...)`` bound can run one extra
        iteration with an empty slice (e.g. when len is a multiple of
        chunk_size, or the id list is empty), which would crash in
        np.vstack / the error-percentage print -- confirm and guard upstream.
        """
        # Generate Wiki data
        print("Generating W")
        indx = 0
        to = indx + self.chunk_size
        while indx <= len(pict_ids_imdb):
            images, errs = self.get_images_online_imdb(pict_ids_imdb[indx: to])
            if len(errs) > 0:
                # get only labels for images which were correctly loaded
                img_labels = self.get_raw_labs_imdb(
                    [name for name in pict_ids_imdb[indx: to] if name not in errs])
            else:
                img_labels = self.get_raw_labs_imdb(pict_ids_imdb[indx: to])
            # get next boundaries
            to += self.chunk_size
            indx += self.chunk_size
            if to != len(pict_ids_imdb) and (indx + self.chunk_size) > len(pict_ids_imdb):
                # chunk increase overflow, we need to get the last chunk of data, which is smaller than defined
                to = len(pict_ids_imdb)
            yield images, img_labels
        print("Generating A")
        # Generate Celeb Data
        indx = 0
        to = indx + self.chunk_size
        while indx <= len(pict_ids_c):
            images, errs = self.get_images_online(pict_ids_c[indx: to],)
            if len(errs) > 0:
                # get only labels for images which were correctly loaded
                img_labels = self.get_raw_labs_c([name for name in pict_ids_c[indx: to] if name not in errs])
            else:
                img_labels = self.get_raw_labs_c(pict_ids_c[indx: to])
            # get next boundaries
            to += self.chunk_size
            indx += self.chunk_size
            if to != len(pict_ids_c) and (indx + self.chunk_size) > len(pict_ids_c):
                # chunk increase overflow, we need to get the last chunk of data, which is smaller than defined
                to = len(pict_ids_c)
            yield images, img_labels

    def get_raw_labs_c(self, keys):
        """
        Generate raw CelebA labels for a list of image names.
        :param keys: list of image names (strings)
        :return: labels for the batch, one numpy array per attribute
        """
        to_return = []
        for key in keys:
            to_return.append(self.attr_map_celeb[key])
        # need to transform to N arrays, as KERAS requires all labels for one output/attribute
        # in single array, so for 5 attributes and bulk 1024, it will be 5 arrays of length
        # 10240
        return [np.array(tmp_arr) for tmp_arr in zip(*to_return)]

    def get_raw_labs_imdb(self, keys):
        """
        Generate raw IMDB labels for a list of image names.
        :param keys: list of image names (strings)
        :return: labels for the batch, one numpy array per attribute
        """
        to_return = []
        for key in keys:
            to_return.append(self.attr_map_imdb[key])
        # need to transform to N arrays, as KERAS requires all labels for one output/attribute
        # in single array, so for 5 attributes and bulk 1024, it will be 5 arrays of length
        # 10240
        return [np.array(tmp_arr) for tmp_arr in zip(*to_return)]

    def get_images_online_imdb(self, img_names):
        """
        Reads a list of images from the IMDB folder.
        The images are resized to self.img_shape specified
        in the generator constructor.
        In case of error the image is skipped and its name is recorded.
        :param img_names: List of image names
        :return: (vstacked images in channel_last format, list of failed names)
        """
        images = []
        errs = []
        for img_name in img_names:
            try:
                path = IMAGES_FOLDER_IMDB + img_name
                # print(path)
                img = get_image(path, self.img_shape)
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                images.append(x)
            except Exception as e:
                # print(path, str(e))
                errs.append(img_name)
        print("Caught ", str(len(errs)), " errors, which is ", str(len(errs)/len(img_names)*100), "%")
        return np.vstack(images), errs

    def generate_training(self):
        # Chunk generator over the training split of both datasets.
        return self.generate_data_m(self.train_ids_imdb, self.train_ids)

    def generate_validation(self):
        # Chunk generator over the validation split of both datasets.
        return self.generate_data_m(self.validation_ids_imdb, self.validation_ids)

    def generate_testing(self):
        # Chunk generator over the testing split of both datasets.
        return self.generate_data_m(self.test_ids_imdb, self.test_ids)
|
12,590 | 418685850c39bd9a008ade082cac962a92704c94 | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import defaultdict
def check(vertex, size, seen, count=1, adj=None):
    """Depth-first search: can a Hamiltonian path be extended from *vertex*?

    :param vertex: current endpoint of the partial path
    :param size: total number of vertices that must be covered
    :param seen: per-vertex visited flags; seen[vertex] must already be True
    :param count: number of vertices on the partial path so far
    :param adj: adjacency mapping vertex -> neighbour list; defaults to the
        module-level ``graph`` so existing callers (check(i, n, visited))
        keep working unchanged
    :return: True iff a Hamiltonian path exists from this partial state

    Fix: the original never cleared ``seen[next_vertex]`` after a failed
    branch, so vertices explored in a dead end stayed marked as visited and
    valid paths through them were missed (false negatives).
    """
    if adj is None:
        adj = graph
    if count == size:
        return True
    for next_vertex in range(size):
        if next_vertex in adj[vertex] and not seen[next_vertex]:
            seen[next_vertex] = True
            if check(next_vertex, size, seen, count + 1, adj):
                return True
            seen[next_vertex] = False  # backtrack so other branches may use it
    return False
# Read t test cases: each gives n vertices, m undirected 1-based edges,
# and asks whether the graph contains a Hamiltonian path.
t = int(input())
for _ in range(t):
    n, m = map(int, input().strip().split())
    graph = defaultdict(list)
    for _ in range(m):
        # Shift endpoints to 0-based vertex ids.
        x, y = map(lambda z: int(z) - 1, input().strip().split())
        graph[x].append(y)
        graph[y].append(x)
    visited = [False] * n
    # Try every vertex as the starting point of a Hamiltonian path.
    for i in range(n):
        visited[i] = True
        if check(i, n, visited):
            print('Yes')
            break
        visited[i] = False
    else:
        # for-else: no starting vertex produced a full path.
        print('No')
|
12,591 | ab4162c6034ad51fbd2c4d93150bd9cded8f8d1b | # -*- coding: utf-8 -*-
#import json
#import urllib
import logging
import datetime
import traceback
from lib import error_codes
#from terminal_base import terminal_commands
import pymongo
#from tornado.web import asynchronous
from tornado import gen
from helper_handler import HelperHandler
from lib import utils
#from lib import sys_config
#from lib.sys_config import SysConfig
from pymongo import errors
from get_base_info import get_base_info
class ChoosePet(HelperHandler):
@gen.coroutine
def _deal_request(self):
logging.debug("ChoosePet, %s", self.dump_req())
self.set_header("Content-Type", "application/json; charset=utf-8")
pet_dao = self.settings["pet_dao"]
res = {"status": error_codes.EC_SUCCESS}
imei = None
try:
uid = int(self.get_argument("uid"))
pet_id = int(self.get_argument("pet_id", -1))
token = self.get_argument("token")
st = yield self.check_token("OnChoosePet", res, uid, token)
if not st:
return
except Exception, e:
logging.warning("ChoosePet, invalid args1, %s %s", self.dump_req(),
str(e))
res["status"] = error_codes.EC_INVALID_ARGS
self.res_and_fini(res)
return
if pet_id <= 0 :
res["status"] = error_codes.EC_PET_NOT_EXIST
self.res_and_fini(res)
return
# get imei
try:
pet_info = yield pet_dao.get_pet_info_by_petid(pet_id, ("device_imei",))
if pet_info is not None :
imei = pet_info.get("device_imei","")
if pet_info is None or imei == "" :
logging.error("ChoosePet fail, uid:%d, , pet_id:%d, req:%s",
uid,imei, pet_id, self.dump_req())
res["status"] = error_codes.EC_DEVICE_NOT_EXIST
self.res_and_fini(res)
return
except Exception, e:
logging.warning("ChoosePet, error, %s %s", self.dump_req(),
self.dump_exp(e))
res["status"] = error_codes.EC_SYS_ERROR
self.res_and_fini(res)
return
try:
#切换主控设备
pet_info_now = yield pet_dao.get_pet_info(("pet_id",),uid = uid, choice = 1)
if pet_info_now:
old_pet_id = pet_info_now.get("pet_id", -1)
if old_pet_id > 0:
yield pet_dao.update_pet_info(old_pet_id, choice = 0)
yield pet_dao.update_pet_info(pet_id, choice = 1)
user_dao = self.settings["user_dao"]
yield user_dao.update_user_info(uid, choice_petid = pet_id)
res = yield get_base_info(pet_dao, uid, pet_id)
except Exception, e:
logging.warning("ChoosePet,uid:%d imei:%s error, %s %s", uid, imei, self.dump_req(),
self.dump_exp(e))
res["status"] = error_codes.EC_SYS_ERROR
self.res_and_fini(res)
return
# 成功
logging.debug("ChoosePet,uid:%d pet_id:%d success %s", uid, pet_id, self.dump_req())
self.res_and_fini(res)
def post(self):
return self._deal_request()
def get(self):
return self._deal_request()
|
12,592 | 61686a9e3f6e6f9a0606cd66cc15679f079973f2 | """
All constants used for dialysis project
accumulated in one file for easier accessibility
and cleaner code.
"""
# Window settings and all colors used in project
WINDOW_TITLE = "Dialysis Nutrition Information By Yihan Ye"
MAIN_WINDOW_SIZE = "1000x1000"
MAIN_WINDOW_COLOR = "#bedddc"
MAIN_FRAME_COLOR = "#f4efeb"
GOOD_FOOD_COLOR = "#9be281"
BAD_FOOD_COLOR = "#f9a08b"
BTN_COLOR = "#e5c5c8"
# Guidelines Category
# => Daily Intake Content (LEFT holds the nutrient names, RIGHT the limits;
# the two lists are index-aligned)
DAILY_NUTR_LEFT = ["Calories", "Salt", "Protein", "Potassium", "Phosphorous", "Liquid"]
DAILY_NUTR_RIGHT = [
    "30cal/kg per day",
    "5-6g per day (including Sodium)",
    "1,2g/kg per day",
    "2000-2500mg per day",
    "1000-1400mg per day",
    "500ml + residual excretion/24h",
]
# => Recommended Foods List (two index-aligned display columns)
GOOD_LIST_LEFT = [
    "Zucchini, Cucumbers",
    "Lemons, Lime",
    "Blueberries",
    "Apple, Pears",
    "Salads",
    "Couscous",
    "Lean Meat",
    "Most Fish",
    "Cauliflower",
    "Olive Oil, Butter",
    "Mushrooms",
]
GOOD_LIST_RIGHT = [
    "Radish, Celery",
    "Green Pepper",
    "Strawberries",
    "Carrots, Green Beans",
    "Cream",
    "Mozzarella",
    "Onion, Garlic",
    "Honey, Jam",
    "Eggs",
    "Watermelon",
    "Cooked Rice, Pasta",
]
# => Foods to Avoid List (two index-aligned display columns)
BAD_LIST_LEFT = [
    "Potatoes",
    "Tee, Cola",
    "Tzatziki",
    "Avocados",
    "Olives, Pickles, Relish",
    "Canned Fish, Meat, Beans",
    "Smoked Fish or Meat",
    "Offal, Sausages, Salmon",
    "Processed Foods",
    "Ketchup, Mayonnaise",
    "Saturated Fat",
]
BAD_LIST_RIGHT = [
    "Chocolate",
    "Dried Fruits",
    "Marzipan",
    "Bananas, Kiwis",
    "Dates, Figs",
    "Canned Tomato Products/Juice",
    "Undiluted Fruit Juice",
    "Vegetable Juice",
    "Feta, Parmesan, Cheddar etc.",
    "Most Dairy Products",
    "Coconuts, Nuts",
]
# Tips and Tricks Category Content (embedded \n breaks lines in the UI labels)
SALT_CONTENT = [
    "- Season food after it's\n cooked for more control",
    "- Don't use salt substitute!\n Use alternatives instead",
    "- Alternatives are:\n Basil, Cilantro, Garlic\n Oregano, Mint, Chives\n Lemon, Parsley, Sage",
]
PHOSPHOROUS_CONTENT = [
    "- Throw out cooking water\n & change while cooking",
    "- Throw away canned\n vegetables & meat juice",
    "- Soak diced vegetables\n in water before cooking",
    "- Dice or shred vegetables\n with high phosphorous\n content",
]
ADDITIONAL_CONTENT = [
    "- Avoid eating animal skin\n (poultry)",
    # Fixed user-facing typo: "egss" -> "eggs".
    "- Try not to eat eggs more\n than 3x per week",
    "- Pre-fill your water bottle\n for the entire day",
    "- Remember food contains\n water as well!\n (fruits, soup, ice cream)",
]
# API
# NOTE(review): "DEMO_KEY" is a placeholder/demo key; supply a real key via
# configuration before release -- confirm.
API = "DEMO_KEY"
NUTRIENT_NAME = [
    "Protein",
    "Energy",
    "Phosphorus, P",
    "Potassium, K",
    "Sodium, Na",
]
|
12,593 | 94d8fcfa9e46a398ff225ad6f5dd5ecdc2f351ab | import OpenGL
# Disable PyOpenGL's per-call error checking (faster, but GL errors go unreported).
OpenGL.ERROR_CHECKING = False
from OpenGL.GL import *
def line_strip(vertex_list, color_list=None):
    """Draw an open polyline through *vertex_list*.

    When *color_list* is given (and non-empty), the i-th vertex is drawn with
    the i-th color; it must therefore be at least as long as *vertex_list*.
    """
    glBegin(GL_LINE_STRIP)
    idx = 0
    for vert in vertex_list:
        if color_list:
            glColor(color_list[idx])
        glVertex(vert)
        idx += 1
    glEnd()
def line( start, end ):
    """Draw a single line segment from *start* to *end*."""
    glBegin(GL_LINES)
    glVertex( start )
    glVertex( end )
    glEnd()
def color( c ):
    """Set the current drawing color to *c*."""
    glColor( c )
def points( *args ):
    """Draw each positional argument as a single point."""
    glBegin(GL_POINTS)
    for v in args:
        glVertex( v )
    glEnd()
def point_size( x ):
    """Set the rasterized point diameter in pixels."""
    glPointSize(x)
def line_width( x ):
    """Set the rasterized line width in pixels."""
    glLineWidth(x)
def enable(what, value):
    """Toggle an OpenGL capability: enable it when *value* is truthy, else disable."""
    toggle = glEnable if value else glDisable
    toggle(what)
def lighting( b ):
    """Turn GL lighting on or off."""
    enable(GL_LIGHTING, b)
def depth_test( b ):
    """Turn the depth (z-buffer) test on or off."""
    enable(GL_DEPTH_TEST, b)
def blend( b ):
    """Turn standard alpha blending (src-alpha / one-minus-src-alpha) on or off."""
    if not b:
        glDisable( GL_BLEND )
        return
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    glEnable( GL_BLEND )
12,594 | f92ae19bfe698db836b6a9907b2c1b8e47484032 | import discord
from discord.ext import commands, tasks
from discord.ext.commands import errors
from discord.utils import get
import os, requests, json
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv('TOKEN')
# Defining main variables
BOT_PREFIX = str(os.getenv('BOT_PREFIX'))
OwnerID = int(os.getenv('OWNER_ID'))
# NOTE(review): this Client instance is immediately overwritten by the Bot
# below and looks dead -- confirm before removing (constructing a Client can
# have side effects such as event-loop creation in older discord.py).
client = discord.Client()
client = commands.Bot(command_prefix=BOT_PREFIX)
# Drop the built-in help command so a custom one can be provided.
client.remove_command("help")
class bcolors:
    """ANSI escape sequences used to colour console log output."""
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    HEADER = '\033[95m'
@client.event
async def on_connect():
    """Log the initial gateway connection (fires before on_ready)."""
    print("Bot connected")
@client.event
async def on_ready():
    """Set the bot's watching presence and report readiness.

    Fix: removed an unused local ``import datetime`` that was never referenced.
    """
    await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"Swiss Skies"))
    print(f'Bot ready\nLogged on as {client.user}')
def load_cogs():
    """Load every cog module found in ./cogs, reporting each load/skip.

    Fix: the log lines printed the literal placeholder "(unknown)" instead of
    the cog actually processed; they now print the real name.
    """
    print(f"{bcolors.BOLD}{bcolors.HEADER}Loading extensions{bcolors.ENDC}")
    for filename in os.listdir('./cogs'):
        if filename.endswith('.py') and not "__" in filename:
            client.load_extension(f"cogs.{filename[:-3]}")
            print(f"{bcolors.OKGREEN}Loaded cog:{bcolors.ENDC} {filename[:-3]}")
        else:
            if not filename == "__pycache__":
                print(f"{bcolors.WARNING}Ignored cog:{bcolors.ENDC} {filename}")
    print(f"{bcolors.BOLD}{bcolors.OKBLUE}DONE.{bcolors.ENDC}")
@client.command()
async def load(ctx, cogname):
    """Owner-only command: load the named cog from ./cogs."""
    if ctx.author.id != OwnerID:
        await ctx.send(f"**Error:** you are not the owner of this bot")
        return
    if f"{cogname}.py" not in os.listdir("./cogs") or "__" in cogname:
        await ctx.send(f"**Error:** {cogname} does not exist")
        return
    try:
        client.load_extension(f"cogs.{cogname}")
    except commands.errors.ExtensionAlreadyLoaded:
        await ctx.send(f"**Error:** {cogname} is already loaded")
        return
    await ctx.send(f"**Loaded:** {cogname}")
    print(f"{bcolors.OKGREEN}Loaded cog:{bcolors.ENDC} {cogname}")
@client.command()
async def unload(ctx, cogname):
    """Owner-only command: unload the named cog."""
    if ctx.author.id == OwnerID:
        if f"{cogname}.py" in os.listdir("./cogs") and not "__" in cogname:
            try:
                client.unload_extension(f"cogs.{cogname}")
                await ctx.send(f"**Unloaded:** {cogname}")
                print(f"{bcolors.WARNING}Unloaded cog:{bcolors.ENDC} {cogname}")
                return
            except commands.errors.ExtensionNotLoaded:
                await ctx.send(f"**Error:** {cogname} is not yet loaded")
                return
        else:
            await ctx.send(f"**Error:** {cogname} does not exist")
    else:
        await ctx.send(f"**Error:** you are not the owner of this bot")
@client.command()
async def reload(ctx, cogname):
    """Owner-only command: unload (if loaded) then load the named cog.

    Fix: both try blocks used bare ``except:``, which also swallows
    SystemExit/KeyboardInterrupt; they now catch ``Exception`` only.
    """
    if ctx.author.id == OwnerID:
        if f"{cogname}.py" in os.listdir("./cogs") and not "__" in cogname:
            try:
                client.unload_extension(f"cogs.{cogname}")
            except Exception:
                # Not loaded yet (or unload failed) -- proceed to load anyway.
                pass
            try:
                client.load_extension(f"cogs.{cogname}")
            except Exception:
                await ctx.send("Error occured loading the cog")
                return
            await ctx.send(f"**Reloaded:** {cogname}")
            print(f"{bcolors.OKGREEN}Reloaded cog:{bcolors.ENDC} {cogname}")
        else:
            await ctx.send(f"**Error:** {cogname} does not exist")
    else:
        await ctx.send(f"**Error:** you are not the owner of this bot")
@client.command()
async def listcogs(ctx):
    """Owner-only command: list every cog file found in ./cogs."""
    if ctx.author.id != OwnerID:
        await ctx.send(f"**Error:** you are not the owner of this bot")
        return
    names = [entry[:-3] for entry in os.listdir("./cogs") if "__" not in entry]
    joined = "\n".join(names)
    await ctx.send(f"**Detected cogs are**:\n{joined}")
@client.command()
async def logout(ctx):
    """Owner-only command: disconnect the bot from Discord."""
    if ctx.author.id == OwnerID:
        # Non-owners get no response here (unlike the other owner-only commands).
        await client.logout()
# Load all cogs, then block on the Discord gateway connection.
load_cogs()
client.run(TOKEN)
12,595 | 62a84d70fd3e4c7b77bdb6f9290c9ef0752d4030 | # requires Python 3.7+
import sys
import asyncio
# Module-level event loop shared by main().
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# in newer Python (3.10+); asyncio.run() is the modern entry point -- confirm
# the targeted version before changing.
loop = asyncio.get_event_loop()
async def example_01():
    """Demo coroutine: announce start, idle one second, announce finish."""
    print("Start example_01 coroutine.")
    await asyncio.sleep(1)
    print("Finish example_01 coroutine.")
async def example_02():
    """Same as example_01; a second coroutine so concurrency is observable."""
    print("Start example_02 coroutine.")
    await asyncio.sleep(1)
    print("Finish example_02 coroutine.")
def main():
    """Run both example coroutines concurrently and block until they finish.

    Returns None, so ``sys.exit(main())`` exits with status 0.
    """
    # asyncio.run() (3.7+) creates, runs and closes its own event loop,
    # replacing the deprecated pattern of scheduling tasks on a non-running
    # module-level loop obtained via asyncio.get_event_loop().
    async def _run_all():
        await asyncio.gather(example_01(), example_02())
    asyncio.run(_run_all())
if __name__ == "__main__":
    sys.exit(main())  # main() returns None -> process exit status 0
|
12,596 | f82ab747a4dbd38beea6fca7a2cacaaf09fd2b3f | import xml.etree.cElementTree as ET
import re
import datetime
from collections import Counter
# Flat script: stream-parse a StackExchange Posts.xml dump with iterparse and
# tally several statistics without holding the whole tree in memory.
tmp = ''
prev = '' #if xml ordered by date, we can get the next result and keep track of max
total = 0 #running count of posts in the current YYYY-MM bucket
prevTotal = 0 #the previous dates maximum, for graph theory matches
maxGtDate = 0 #the most common date for graph theory posts
junePosts = 0
tags = ''
cCount = 0 #number of posts tagged combinatorics
noFibCount = 0 # same as above, no fibonacci-numbers tag
#noFibPattern = re.compile('^(?=.*combinatorics)(?!.*fibonacci-numbers).*') #used in regex to filter fib
gCount = 0 #number of graph-theory tagged posts
# get an iterable
context = ET.iterparse('../Posts.xml', events=("start", "end"))
# turn it into an iterator
context = iter(context)
# get the root element, so we can iterate and delete from root as we go
event, root = next(context)
for event, elem in context:
    if event == "end" and elem.tag == "row":
        if "CreationDate" in elem.attrib:
            #get the date of the post and tally if june 2016.
            if "2016-06" in elem.attrib["CreationDate"]:
                junePosts += 1
        if "Tags" in elem.attrib:
            if "combinatorics" in elem.attrib["Tags"]:
                cCount += 1
                if "fibonacci-numbers" not in elem.attrib["Tags"]:
                    noFibCount += 1
            if "graph-theory" in elem.attrib["Tags"]:
                gCount += 1
        #I search for the creation date
        if "CreationDate" in elem.attrib:
            tmp = elem.attrib["CreationDate"][0:7]
            #if this date is the same as last, increment total. If not,
            #the total is 1. Once we find new max, set it to prev, and
            #record the current max date.
            # NOTE(review): this bucket counter runs for EVERY row, not only for
            # graph-theory rows, so maxGtDate is the busiest month over ALL posts
            # -- confirm whether it was meant to be restricted to graph-theory.
            if prev == tmp:
                total += 1
            elif prev != tmp:
                total = 1
                prev = tmp #need to set prev again to catch the multiples
            if total > prevTotal or prevTotal == 0:
                prevTotal = total
                prev = maxGtDate = tmp
            #this code was to count months, which was misinterpreted
            #since all dates have the same format YYYY-MM... I can simply slice the 6th and 7th digits to get the month.
            #cast to int, remove the leading 0... 07 = 7.
            #tmp = int(elem.attrib["CreationDate"][5:7].strip("0"))
            #tmp -= 1 #convert month to index.. ie.) march 03 = index 2
            #monthList[tmp] += 1; #counting months.
            #print(tmp)
    root.clear() #delete as we go... save pc from blowing up via memory overutilization
date = datetime.datetime.strptime(maxGtDate, "%Y-%m")
formatted_date = datetime.datetime.strftime(date, "%B %Y")
print("Posts in June 2016: " , junePosts)
print("Posts tagged with combinatorics: " , cCount)
print("Posts tagged with combinatorics but not fibonacci-numbers: " , noFibCount)
print("Posts tagged wih graph-theory: " , gCount)
print("the month with most graph-theory tags ", formatted_date)
#print("the month with most graph-theory tags:", maxTotal)
12,597 | 721920e442b3dd07cc79bbdce9d65ba078634754 | # ------------------------------
# 670. Maximum Swap
#
# Description:
# Given a non-negative integer, you could swap two digits at most once to get the
# maximum valued number. Return the maximum valued number you could get.
#
# Example 1:
# Input: 2736
# Output: 7236
# Explanation: Swap the number 2 and the number 7.
#
# Example 2:
# Input: 9973
# Output: 9973
# Explanation: No swap.
#
# Note:
# The given number is in the range [0, 108]
#
# Version: 1.0
# 10/29/19 by Jianfa
# ------------------------------
class Solution:
    def maximumSwap(self, num: int) -> int:
        """Return the largest value obtainable by swapping at most two digits of num."""
        if num < 10:
            # a single digit cannot be improved
            return num
        digits = list(str(num))
        n = len(digits)
        # locate the first position that breaks the non-increasing order
        rise = 1
        while rise < n and digits[rise] <= digits[rise - 1]:
            rise += 1
        if rise == n:
            # digits already sorted descending: num is already maximal
            return num
        # pick the RIGHT-most occurrence of the biggest digit from `rise` on
        # (rightmost so the swap moves the smaller digit as far right as possible,
        # e.g. 1993 -> 9913)
        best = rise
        for idx in range(rise + 1, n):
            if digits[idx] >= digits[best]:
                best = idx
        # swap it with the left-most strictly-smaller digit in the prefix
        for idx in range(rise):
            if digits[idx] < digits[best]:
                digits[idx], digits[best] = digits[best], digits[idx]
                break
        return int("".join(digits))
# Used for testing
if __name__ == "__main__":
    test = Solution()  # smoke construction only; no cases are exercised here
# ------------------------------
# Summary:
# Find the first digit that is larger than its previous digit, let say num[i]
# Then find the largest digit in the rest digits
# Swap the largest digit with the first digit that is smaller than it in the num[:i]
# This is not a concise solution
12,598 | 0bd190452fe573cab9fd1bf678c1134cc91c7ec8 | import sys
import os
sys.path.insert(0, '../oscar.py')
import re
from oscar import Project
from oscar import Time_project_info as Proj
import subprocess
from time import time as current_time
start_time = current_time()
def bash(command):
    """Run *command* through the shell and return its captured stdout.

    shell=True is required: callers build pipelines ('... | egrep ...').
    """
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    stdout, _ = process.communicate()
    return stdout
def search(hash, type):
    """
    Method used to search for a specific blob, commit or tree.
    If a tree is searched for, the result is split into its components (blobs and
    directories), which are again split into their mode, hash and name.
    In the case of a commit, we split the information string and the tree hash and
    parent's commit hash are returned.

    Returns:
        tree   -> list of [mode, hash, name] lists, one per tree entry
        commit -> (tree_hash, parent_hashes, timestamp); parent_hashes is a
                  ':'-separated string, timestamp a Unix epoch int (timezone
                  suffix discarded)
        other  -> raw showCnt output
    """
    # NOTE: `hash` and `type` shadow builtins; kept for call-site compatibility
    out = bash('echo ' + hash + ' | ~/lookup/showCnt ' + type)
    if type == 'tree':
        return [blob.split(';') for blob in out.strip().split('\n')]
    if type == 'commit':
        splitted = out.split(';')
        # the tree and parent commit hashes are the second and third word, respectively
        # the commit time is the last word, from which we discard the timezone and cast it to int
        return splitted[1], splitted[2], int(splitted[-1].split()[0])
    return out
# files used in continuous integration
# (regex-escaped filename patterns; joined with '|' and fed to egrep by
# ci_lookup / calc_CI / calc_CI_diff)
ci_files = [
    '\.gitlab\-ci\.yml', '\.travis\.yml', 'Jenkinsfile', 'buddy\.yml', '\.drone\.yml',
    'circle\.yml', '\.circleci', 'bamboo\.yaml', 'codeship\-steps\.yml', '\.teamcity',
    'wercker\.yml', 'appveyor\.yml', 'bitrise\.yml', 'codefresh\.yml', 'solano\.yml',
    'shippable\.yml', 'phpci\.yml', 'cloudbuild\.yaml'
]
def ci_lookup(tree_hash):
    """
    Method used to check the usage of Continuous Integration in a tree, given its hash.

    Returns True when the tree listing matches at least one CI config pattern.
    """
    query = 'echo ' + tree_hash + ' | ~/lookup/showCnt tree | egrep "' + '|'.join(ci_files) + '"'
    out = bash(query)
    """
    # alternate method
    blobs = search(tree_hash, 'tree')
    index = {'mode':1, 'hash':1, 'name':2}
    ci = False
    for blob in blobs:
        name = blob[index['name']]
        hash = blob[index['hash']]
        if ((name in ci_files) or
            (name in ci_config_dir and ';'+ci_config_dir[name] in search(hash, 'tree'))):
            ci = True
            break
    """
    return bool(out)
def calc_CI_introductions(commits, author):
    """
    Alternative way to check_if_introduction, to compare performance.

    For each of the author's commits, appends a row to introductions.csv when
    the commit's tree contains a CI config file absent from every parent tree.
    """
    # using a dictionary that has the commits' hashes as keys,
    # so as to not search multiple times for the same commit
    CI_checked = {}
    # delete contents
    open('introductions.csv', 'w').close()
    # for every commit, we look up whether the author included a CI file,
    # that did not exist in the parent commit
    for count, commit in enumerate(commits):
        # status update
        if (count + 1) % 50 == 0:
            print count + 1, ' / ', len(commits)
        tree_hash, parent_commit_hash, time = search(commit, 'commit')
        if tree_hash not in CI_checked:
            CI_checked[tree_hash] = ci_lookup(tree_hash)
        # controlling for the case of multiple parent commits
        all_parent_CI = False
        for parent in parent_commit_hash.split(':'):
            # controlling for the case of no parent commits
            if parent == '':
                break
            parent_tree_hash = search(parent, 'commit')[0]
            if parent_tree_hash not in CI_checked:
                parent_CI = ci_lookup(parent_tree_hash)
                CI_checked[parent_tree_hash] = parent_CI
            else:
                parent_CI = CI_checked[parent_tree_hash]
            # checking all the parent commits for the usage of CI
            all_parent_CI = all_parent_CI or parent_CI
        # if the tree has a CI file, while the parent tree does not, increase the CI score
        if CI_checked[tree_hash] and not all_parent_CI:
            out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
            main_proj = out.strip().split(';')[1]
            f = open("introductions.csv", "a")
            f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
            f.close()
            print 'wrote'
    print (current_time()-start_time)/len(commits), 'seconds per commit'
def check_if_introduction(commit, result):
    """
    We check the parent commit to see if its child commit introduced or modified a CI config file.

    Returns True when no parent tree already contained a CI config file.
    NOTE(review): `result` is accepted but never used (kept for the call site in
    calc_CI); `tree_hash` and `time` from search() are likewise discarded.
    """
    tree_hash, parent_commit_hash, time = search(commit, 'commit')
    # controlling for the case of no parent commits
    if parent_commit_hash == '':
        return True
    # controlling for the case of multiple parent commits
    all_parent_CI = False
    for parent in parent_commit_hash.split(':'):
        parent_tree_hash = search(parent, 'commit')[0]
        parent_CI = ci_lookup(parent_tree_hash)
        # checking all the parent commits for the usage of CI
        all_parent_CI = all_parent_CI or parent_CI
    # if the tree has a CI file, while the parent tree does not, it is an introduction
    return not all_parent_CI
def calc_CI(commits, author):
    """
    Used to investigate how many commits, from a user, modified a CI configuration file.
    Unix commands are used for a better performance.

    Writes one CSV row per matching commit to introductions.csv (new CI file)
    or modifications.csv (existing CI file changed).
    """
    # delete contents
    open('modifications.csv', 'w').close()
    open('introductions.csv', 'w').close()
    for count, commit in enumerate(commits):
        # status update
        if (count + 1) % 50 == 0:
            print commit, '.. ..', count + 1, ' / ', len(commits)
        # c2f seems to result in a tie error, so c2b and b2f is used instead
        #getting the blobs
        query = ("for x in $(echo " + commit + " | ~/lookup/getValues c2b |" +
                 # splitting on the semicolon and discarding the newlines
                 " awk -v RS='[;\\n]' 1 |" +
                 # discarding the commit's hash (it appears before the blobs' hashes)
                 " tail -n+2); do" +
                 # for each blob, we look up it's filename
                 " echo $x | ~/lookup/getValues b2f;" +
                 " done |" +
                 # we discard the first field of the results (blobs' hash)
                 " cut -d ';' -f2 |" +
                 # we check whether one of the modified files is a CI configuration file
                 " egrep '" + "|".join(ci_files) + "'")
        result = bash(query)
        if result:
            out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
            main_proj = out.strip().split(';')[1]
            time = search(commit, 'commit')[2]
            if check_if_introduction(commit, result):
                f = open("introductions.csv", "a")
                print 'introduction'
            else:
                f = open("modifications.csv", "a")
                print 'modification'
            f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
            f.close()
            print 'wrote: -->', commit
def calc_CI_diff(commits, author):
    """
    Method written as a faster alternative to calc_CI. It seems to be 30 times faster.

    Uses the per-commit diff to classify each CI-file change as an introduction
    (no parent blob after the last semicolon) or a modification.
    """
    # delete contents
    open('modifications.csv', 'w').close()
    open('introductions.csv', 'w').close()
    for count, commit in enumerate(commits):
        #status update
        if (count + 1) % 50 == 0:
            print commit, '.. ..', count + 1, ' / ', len(commits)
        # cmputeDiff2.perl seems to produce junk to the stdout occasionally
        diff = bash("echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl")
        # if a CI configuration file is in the diff
        if re.search("|".join(ci_files), diff):
            out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
            main_proj = out.strip().split(';')[1]
            time = search(commit, 'commit')[2]
            # NOTE(review): if the regex matched the diff as a whole but no single
            # whitespace-split token, `f` below is unbound -- confirm this cannot happen.
            for blob in diff.split():
                # looking for the CI config blob and checking if parent blob exists
                if re.search("|".join(ci_files), blob):
                    # if we have both an introduction and a modification
                    # in the same commit, we count it as an introduction
                    if blob.endswith(';'):
                        # if we don't have the parent blob, after the last semicolon,
                        # it is an introduction
                        f = open("introductions.csv", "a")
                        print 'introduction'
                    else:
                        f = open("modifications.csv", "a")
                        print 'modification'
                    break
            f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
            f.close()
            print 'wrote: -->', commit
def find_links(author, end_time, method='sh'):
    """
    Method used to find the neighbours of a given author, i.e. the authors that
    affected the given author's use of good coding practices.
    A timestamp is also given to define the time till which we find the connections.

    NOTE(review): `end_time` is currently unused and only the 'pr_timeline'
    branch does anything with the projects -- looks unfinished.
    """
    out = bash('echo "'+ author + '" | ~/lookup/getValues a2P')
    # the author's projects; the first ';'-field is the author herself, skip it
    pr = [x for x in out.strip().split(';')[1:]]
    if method == 'pr_timeline':
        p = Proj()
        for project in pr:
            rows = p.project_timeline(['time','repo', 'author'], project)
            for row in rows:
                print row
#### Start building the regular expression that will be used to search for unit testing libraries,
#### in the commit's blobs ####
# Each per-language list is escaped and joined into one alternation; `final_reg`
# at the bottom is the combined pattern that calc_test() feeds to egrep.
# Java
java_lib = ['io.restassured', 'org.openqa.selenium', 'org.spockframework', 'jtest',
            'org.springframework.test', 'org.dbunit', 'org.jwalk', 'org.mockito', 'org.junit']
java_regex = (['import\s+'+s.replace('.', '\.') for s in java_lib])
java_all_reg = '|'.join(java_regex)
# Perl
perl_all_reg = 'use\s+Test::'
# Javascript
js = ['assert', 'mocha', 'jasmine', 'ava', 'jest', 'karma', 'storybook', 'tape',
      'cypress', 'puppeteer', 'chai', 'qunit', 'sinon', 'casper', 'buster']
js_regex = (["require\([\\\'\\\"]" + s + "[\\\'\\\"]\)" for s in js])
js_all_reg = '|'.join(js_regex)
# C#
c_sharp = ['NUnit', 'Microsoft\.VisualStudio\.TestTools\.UnitTesting',
           'Xunit', 'csUnit', 'MbUnit']
c_sharp_regex = (["using\s+" + s for s in c_sharp])
c_sharp_all_reg = '|'.join(c_sharp_regex)
# C and C++
c = ['cmocka', 'unity', 'CppuTest', 'embUnit', 'CUnit', 'CuTest', 'check',
     'gtest', 'uCUnit', 'munit', 'minunit', 'acutest', 'boost/test',
     'UnitTest\+\+', 'cpptest', 'cppunit', 'catch', 'bandit', 'tut']
c_regex = (['#include\s+[<\\\"]' + s + '\.h[>\\\"]'for s in c])
c_all_reg = '|'.join(c_regex)
# PHP
php = ['PHPUnit', 'Codeception', 'Behat', 'PhpSpec', 'Storyplayer', 'Peridot',
       'atoum', 'Kahlan', 'vendor/EnhanceTestFramework']
php_regex = (['(include|require|use).+' + s for s in php])
php_all_reg = '|'.join(php_regex)
# Python
python = ['pytest', 'unittest', 'doctest', 'testify', 'nose', 'hypothesis']
python_regex = (['import\s+'+lib+'|from\s+'+lib+'\s+import' for lib in python])
python_all_reg = '|'.join(python_regex)
all_reg = [java_all_reg, perl_all_reg, js_all_reg, c_sharp_all_reg, c_all_reg, php_all_reg, python_all_reg]
final_reg = '|'.join(all_reg)
#### End of regex building ####
def calc_test(commits, author):
    """
    Used to investigate how many commits, from a user, modified a unit testing file.
    Unix commands are used to achieve a better performance.
    The blobs are parsed, looking for unit testing library imports. An alternative would
    be using the thruMaps directories or the ClickHouse API, but those options seem slower.

    Appends one CSV row per matching commit to modifications.csv.
    """
    open('modifications.csv', 'w').close()
    for count, commit in enumerate(commits):
        # status update
        if (count + 1) % 5 == 0:
            print commit, '.. ..', count + 1, ' / ', len(commits)
        # getting every blob from a given commit
        query = ('for x in $(echo ' + commit + ' | ~/lookup/getValues c2b | ' +
                 # splitting it and discarding the newlines and the commit's hash
                 'awk -v RS="[;\\n]" 1 | tail -n+2); do ' +
                 # We look up the content's of each blob, and discard the STDERR,
                 # in the case of trying to look up a blob that does not exist in the database
                 'echo $x | ~/lookup/showCnt blob 2> /dev/null; done | ' +
                 # We search for the use of a unit testing library, using the above regex, and
                 # keeping the first result only, since that is enough to know that the commit contains
                 # a unit testing file, to make the execution faster
                 'egrep -m 1 "' + final_reg + '"')
        if bash(query): # if contains unit testing lib
            out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
            main_proj = out.strip().split(';')[1]
            time = search(commit, 'commit')[2]
            # at this point we could search the parent's tree for the existence of tests, but this
            # would require recursively looking at every directory and parsing every file in the tree, so, due
            # to the complexity, we skip it and consider it a modification instead of a possible introduction
            f = open("modifications.csv", "a")
            print 'modification'
            f.write(author + ', ' + 'TEST' + ', ' + str(time) + ', ' + main_proj + '\n')
            f.close()
            print 'wrote: -->', commit
def calc_lang_features(commits, author):
    """
    Method used to count the usage of certain languages' good practices and modern approaches.
    We parse the diff of a modified file and the content of an introduced file, in order to find those
    practices, and we count the extent of the usage. Then, we write to a file, for each commit that
    included these features.

    Appends rows of (author, LANG_F, time, project, count) to lang_features.csv.
    """
    lang_features = ['/\*\*', '\\"\\"\\"', '///', # documentation
                     '^\s*@', 'def.+:.+->', 'using\s+System\.ComponentModel\.DataAnnotations', # assertion
                     'assert', 'TODO', 'lambda']
    # delete contents
    open('lang_features.csv', 'w').close()
    for count, commit in enumerate(commits):
        # status update
        if (count + 1) % 5 == 0:
            print commit, '.. ..', count + 1, ' / ', len(commits)
        # for each blob modified
        query = ("for x in $(echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl); do " +
                 # get the child and parent blob
                 "diff_blobs=$(echo $x | awk -v RS=';' 1 | sed -n '3,4 p');" +
                 # if a parent blob does not exist, the author authored all of the content of the file
                 "if [ $(echo $diff_blobs|wc -w) -eq 1 ]; then " +
                 "echo $diff_blobs | ~/lookup/showCnt blob 2> /dev/null; " +
                 # if a parent blob exists, find the diff, in order to search only the modified lines
                 "elif [ $(echo $diff_blobs|wc -w) -eq 2 ]; then " +
                 "vars=( $diff_blobs );" +
                 # using bash instead of sh in order to use the process substitution,
                 # to get the modified lines
                 "/bin/bash -c \"diff <(echo ${vars[0]} | ~/lookup/showCnt blob)" +
                 " <(echo ${vars[1]} | ~/lookup/showCnt blob)\";" +
                 "fi;" +
                 # grep the above practices and discard the lines that were deleted from the parent blob
                 # (they start with ">" in diff)
                 "done | egrep \"" + "|".join(lang_features) + "\" | grep -v '^>' | wc -l ")
        count_uses = int(bash(query).strip())
        if count_uses > 0: # good practice feature is used
            out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
            main_proj = out.strip().split(';')[1]
            time = search(commit, 'commit')[2]
            f = open("lang_features.csv", "a")
            print 'lang_f'
            f.write(author + ', ' + 'LANG_F' + ', ' + str(time) + ', ' + main_proj + ', ' + str(count_uses) + '\n')
            f.close()
            print 'wrote: -->', commit
def calculate_metrics(author):
    """Fetch all of `author`'s commits and run the enabled metric pass(es)."""
    # getting the author's commits (first ';'-field is the author, skip it)
    out = bash('echo "'+ author + '" | ~/lookup/getValues a2c')
    commits = [x for x in out.strip().split(';')[1:]]
    #time1 = current_time()
    #calc_CI(commits, author)
    #time2 = current_time()
    #print 'without diff time is ' + str(time2 - time1)
    #calc_CI_diff(commits, author)
    #print 'with is ' + str(current_time() - time2)
    #calc_test(commits, author)
    calc_lang_features(commits, author)
# checking whether the user provided the author
# NOTE(review): no __main__ guard -- importing this module runs the analysis.
if len(sys.argv) == 1:
    sys.exit('No author provided')
calculate_metrics(sys.argv[1])
|
12,599 | 78c6a0052fb2b61e4a669966907011746862d4e3 | import datetime
import pytz
import web
import config
db = web.database(dbn='sqlite', db='dp.sqlite')
# =================================================================
# datetime
def current_time():
    """Now, as a timezone-aware UTC datetime."""
    return datetime.datetime.now(pytz.utc)
def local_time():
    """Now, in the configured local timezone."""
    return datetime.datetime.now(config.tz)
def input_date(s):
    """Parse a user-entered local date string into an aware UTC datetime."""
    return config.tz.localize(datetime.datetime.strptime(s, config.date_fmt)).astimezone(pytz.utc)
def display_date(dt):
    """Format a UTC datetime as a local date string for display."""
    return dt.astimezone(config.tz).strftime(config.date_fmt)
def input_datetime(s):
    """Parse a user-entered local date+time string into an aware UTC datetime."""
    return config.tz.localize(datetime.datetime.strptime(s, config.datetime_fmt)).astimezone(pytz.utc)
def display_datetime(dt):
    """Format a UTC datetime as a local date+time string for display."""
    return dt.astimezone(config.tz).strftime(config.datetime_fmt)
def store_datetime(dt):
    """Serialize a datetime into the db storage format (no tz conversion)."""
    return dt.strftime(config.db_fmt)
def load_datetime(s):
    """Deserialize a stored string back into an aware UTC datetime."""
    return pytz.utc.localize(datetime.datetime.strptime(s, config.db_fmt))
# datetime
# =================================================================
# pt
def pt_name(pt, first='firstname'):
    """Return the patient's display name, or '' when pt is None/falsy.

    `first` is accepted for call-site compatibility but is currently unused.
    """
    return pt.name if pt else ''
def pt_name_search(q):
    """Find patients by id (numeric query) or by name substrings (word query).

    Returns a (possibly empty) list of patient rows.
    """
    try:
        id = int(q)
        pt = get_pt(id)
        if pt:
            l = list()
            l.append(get_pt(id))
            return l
        else:
            return list()
    except ValueError:
        # non-numeric: AND together one LIKE clause per whitespace-separated word
        qs = q.split()
        l = list()
        for q in qs:
            if l:
                l.append(' and ')
            l.append('name like ')
            l.append(web.db.sqlquote('%%%s%%' % q))
        query = web.db.SQLQuery(l)
        return list(db.select('patient', where=query))
def get_pt(id):
    """Patient row by id, or None when it does not exist."""
    try:
        return db.where('patient', id=id)[0]
    except IndexError:
        return None
def get_family(resparty):
    """All patients sharing the given responsible party."""
    return db.where('patient', resparty=resparty)
def update_pt(f, resparty):
    """Insert or update a patient from form `f`; returns the patient id.

    A brand-new patient with no responsible party becomes their own.
    """
    d = dict([(k, f[k].get_value())
              for k in ('name', 'notes')])
    d['id'] = f.id.get_value() or None
    d['resparty'] = resparty
    d['gender'] = dict(f='female', m='male')[f.gender.get_value()[0]]
    # fix: these helpers live in THIS module; the old code called them through
    # an undefined `model.` prefix, which raised NameError at runtime
    d['birthday'] = display_date(input_date(f.birthday.get_value()))
    db.query('insert or replace into patient (id, name, resparty, birthday, gender, notes) values ($id, $name, $resparty, $birthday, $gender, $notes)', d)
    row = db.query('select last_insert_rowid() as id')[0]
    if d['id'] is None and d['resparty'] is None:
        db.update('patient', where='id=%d' % row.id, resparty=row.id)
    return row.id
def get_latest_address(patientid):
    """Most recent address journal entry for the patient.

    Falls back to the responsible party's address when the patient has none;
    returns None when nobody in the chain has one.
    """
    addresses = db.where('journal', kind='address', patientid=patientid, order='ts DESC')
    try:
        return addresses[0]
    except IndexError:
        pt = get_pt(patientid)
        # recurse up the resparty chain unless the patient is their own resparty
        # (assumes chains terminate -- a resparty cycle would recurse forever)
        if pt.resparty != pt.id:
            return get_latest_address(pt.resparty)
        else:
            return None
# pt
# =================================================================
# journal
class new_handlers (web.storage):
    """Dispatch table: one static handler per journal-entry kind.

    new_journal() inserts the base journal row, then calls
    getattr(new_handlers, kind)(journalid, form) to store kind-specific data.
    """
    @staticmethod
    def address(journalid, form):
        # address entries carry no extra table data
        pass
    @staticmethod
    def email(journalid, form):
        pass
    @staticmethod
    def phone(journalid, form):
        pass
    @staticmethod
    def contact(journalid, form):
        db.insert('contact', journalid=journalid, details=form.details.get_value())
    @staticmethod
    def progress(journalid, form):
        # SOAP progress note: subjective / objective / assessment / plan
        db.insert('progress',
                  journalid=journalid,
                  sub=form.sub.get_value(),
                  obj=form.obj.get_value(),
                  ass=form.ass.get_value(),
                  pln=form.pln.get_value())
    @staticmethod
    def plan(journalid, form):
        secondaryto = form.secondaryto.get_value()
        if secondaryto:
            secondaryto = int(secondaryto)
        else:
            secondaryto = None
        # we already know this names a unique patient after form validation
        insured = pt_name_search(form.insured.get_value())[0]
        db.insert('plan',
                  journalid=journalid,
                  secondaryto=secondaryto,
                  carrierid=int(form.carrier.get_value()),
                  insuredid=insured.id,
                  relationship=form.relationship.get_value(),
                  groupnum=form.groupnum.get_value(),
                  idnum=form.idnum.get_value(),
                  employer=form.employer.get_value(),
                  deductible=float(form.deductible.get_value()),
                  maximum=float(form.maximum.get_value()),
                  prevent=int(form.prevent.get_value()),
                  basic=int(form.basic.get_value()),
                  major=int(form.major.get_value()),
                  notes=form.notes.get_value())
    @staticmethod
    def claim(journalid, form):
        if form.planid.get_value():
            planid = form.planid.get_value()
        else:
            # no explicit plan: default to the patient's primary plan
            planid = get_primary_plan_for_pt(int(form.patientid.get_value())).journalid
        claimid = db.insert('claim',
                            journalid=journalid,
                            preauth=False,
                            planid=planid,
                            filed=store_datetime(current_time()),
                            closed=None,
                            notes=form.notes.get_value())
        # sweep posted-but-unfiled treatments onto this new claim
        db.update('tx',
                  where='journalid is not null and claimid is null',
                  claimid=claimid)
    @staticmethod
    def Rx(journalid, form):
        db.insert('rx',
                  journalid=journalid,
                  disp=form.disp.get_value(),
                  sig=form.sig.get_value(),
                  refills=form.refills.get_value())
    @staticmethod
    def doc(journalid, form):
        # NOTE(review): `magic` and `mimetypes` are not imported in this module
        # -- as written this handler raises NameError; confirm intended imports.
        filedir = 'upload'
        data = form.file.get_value()
        mime = magic.from_buffer(data, mime=True)
        ext = mimetypes.guess_extension(mime) #includes the leading dot
        fout = open('%s/%s%s' % (filedir, journalid, ext), 'wb')
        fout.write(data)
        fout.close()
    @staticmethod
    def appointment(journalid, form):
        # TODO should appointments in the past be legal? how to fail?
        # ... transactions!
        dt = input_datetime(form.ts.get_value())
        db.insert('appointment',
                  journalid=journalid,
                  duration=int(form.duration.get_value()),
                  kind=form.kind.get_value(),
                  status=form.status.get_value(),
                  notes=form.notes.get_value())
        # the journal row was stamped "now"; move it to the scheduled time
        db.update('journal', where=('id=%d' % journalid), ts=store_datetime(dt))
def new_journal(pt, kind, f):
    """Create a journal row for patient `pt` and dispatch to the kind-specific
    handler on new_handlers; returns the new journal id."""
    entry_id = db.insert('journal',
                         patientid=pt.id,
                         ts=store_datetime(current_time()),
                         kind=kind,
                         summary=f.summary.get_value())
    handler = getattr(new_handlers, kind)
    handler(entry_id, f)
    return entry_id
def get_journal(patientid, **kw):
    """Journal entries for a patient, newest first, each with a running balance.

    Accepts only `limit` and `offset` keyword arguments.
    """
    d = dict()
    if 'limit' in kw:
        d['limit'] = kw.pop('limit')
    if 'offset' in kw:
        d['offset'] = kw.pop('offset')
    if len(kw):
        raise ValueError('cannot handle keyword arguments other than limit and offset')
    # this query just smells expensive
    # NOTE(review): `d` (limit/offset) is built and validated but never applied
    # to the query -- pagination silently does nothing; confirm intent.
    return db.query('select *, (select sum(money) from journal where patientid=jj.patientid and ts <= jj.ts) as balance from journal as jj where patientid=%d order by ts desc' % patientid).list()
# Single-row getters: each raises IndexError when no matching row exists.
def get_journal_entry(journalid):
    return db.where('journal', id=journalid)[0]
def get_contact(journalid):
    return db.where('contact', journalid=journalid)[0]
def get_progress(journalid):
    return db.where('progress', journalid=journalid)[0]
def get_Rx(journalid):
    # rows are inserted into 'rx'; sqlite table names are case-insensitive,
    # so querying 'Rx' presumably still works -- confirm.
    return db.where('Rx', journalid=journalid)[0]
def get_appointment(journalid):
    return db.where('appointment', journalid=journalid)[0]
def get_posted_tx(journalid):
    # all treatment rows posted under the given journal entry
    return db.where('tx', journalid=journalid).list()
def post_appointment(appt, journal, txids):
    """Post a completed appointment: create a 'tx' journal entry charging the
    summed fee of the appointment's treatments, attach the given tx rows to
    it, and mark the appointment as posted."""
    fee = db.query('select sum(fee) as fee from tx where appointmentid=%d' % appt.journalid)[0].fee
    journalid = db.insert('journal',
                          patientid=journal.patientid,
                          ts=store_datetime(current_time()),
                          kind='tx',
                          summary=journal.summary,
                          money=fee)
    db.update('tx', where='id in (%s)' % (','.join(map(str, txids))), journalid=journalid)
    db.update('appointment', where='journalid=%d' % journal.id, status='posted')
# journal
# =================================================================
# txplan
def get_txplan(patientid):
    """Every treatment-plan row for the patient."""
    return db.where('tx', patientid=patientid)
def tx_status(tx):
    """Describe a treatment row's lifecycle as a comma-separated string
    built from its journal/appointment/claim links."""
    flags = ((tx.journalid, 'posted'),
             (tx.appointmentid, 'scheduled'),
             (tx.claimid, 'filed'))
    return ', '.join(label for value, label in flags if value)
def new_tx(patientid, **kw):
    """Insert a treatment-plan row; extra columns arrive as keyword args."""
    return db.insert('tx', patientid=patientid, **kw)
def get_tx_for_appointment(appointmentid):
    """Treatments attached to the appointment plus all unattached ones,
    attached rows first."""
    Q = web.db.SQLQuery
    P = web.db.SQLParam
    return db.select('tx',
                     where=Q(['appointmentid=',
                              P(appointmentid),
                              ' or appointmentid is null']),
                     order='appointmentid DESC, id')
# txplan
# =================================================================
# appointment
def update_appt(journalid, form):
    """Apply form edits to both the appointment row (duration, kind, notes)
    and its parent journal entry (timestamp, summary)."""
    db.update('appointment',
              where='journalid=%d' % journalid,
              duration=int(form.duration.get_value()),
              kind=form.kind.get_value(),
              notes=form.notes.get_value())
    db.update('journal',
              where='id=%d' % journalid,
              ts=store_datetime(input_datetime(form.ts.get_value())),
              summary=form.summary.get_value())
def appts_on_day(dt):
    """All appointments on the local calendar day of `dt`, newest first."""
    # local-midnight boundaries converted to UTC, matching stored timestamps
    start_day = dt.replace(hour=0, minute=0, second=0).astimezone(pytz.utc)
    end_day = (dt + datetime.timedelta(seconds=86400)).replace(hour=0, minute=0, second=0).astimezone(pytz.utc)
    Q = web.db.SQLQuery
    P = web.db.SQLParam
    print 'from', start_day
    print 'to', end_day
    return db.select(['journal','appointment'],
                     where=Q(['journal.kind=',P('appointment'),
                              'and ts>',P(store_datetime(start_day)),
                              'and ts<',P(store_datetime(end_day)),
                              'and journal.id=appointment.journalid']),
                     order='ts DESC').list()
def new_appt(patientid, dt, **kw):
    """Create an appointment (journal row + appointment row) at `dt`, rounded
    down to the previous 10-minute mark; returns the appointment row id."""
    at = dt.replace(second=0, microsecond=0, minute=(dt.minute - dt.minute%10)).astimezone(pytz.utc)
    journalid = db.insert('journal', patientid=patientid, ts=store_datetime(at), kind='appointment', summary=kw.get('summary','test'))
    # `summary` belongs to the journal row only; strip it before the appointment insert
    if 'summary' in kw:
        kw.pop('summary')
    return db.insert('appointment', journalid=journalid, **kw)
def appt_tx_set(appointmentid, txs):
    """Make `txs` exactly the set of treatments scheduled for the appointment.

    Detaches every tx currently pointing at the appointment, then attaches the
    given tx ids.
    """
    db.update('tx',
              where='appointmentid = %d' % appointmentid,
              appointmentid=None)
    if txs:
        # Build the IN (...) list by joining ids, matching the idiom used in
        # post_appointment.  The old `str(tuple(txs))` produced invalid SQL
        # for a single element, e.g. "id in (5,)".  int() also rejects any
        # non-numeric id before it reaches the SQL string.
        db.update('tx',
                  where='id in (%s)' % ','.join(str(int(t)) for t in txs),
                  appointmentid=appointmentid)
# appointment
# =================================================================
# carriers
def get_carriers():
    """All insurance carriers, alphabetical by name."""
    return db.select('carrier', order='name ASC')
def get_carrier(id):
    """One carrier by id; raises IndexError when missing."""
    return db.where('carrier', id=id)[0]
def new_carrier(form):
    """Insert a carrier from the form; returns the new row id."""
    return db.insert('carrier',
                     name=form.name.get_value(),
                     address=form.address.get_value(),
                     phone=form.phone.get_value(),
                     web=form.web.get_value(),
                     eclaim=form.eclaim.get_value())
# carriers
# =================================================================
# plans
def get_plan(id):
    """Insurance plan (joined with its journal row) by journal id."""
    return db.select(['journal', 'plan'], where='plan.journalid=journal.id and journalid=%d' % id)[0]
def get_primary_plan_for_pt(patientid):
    """Most recent primary (non-secondary) insurance plan for the patient.

    Raises IndexError when the patient has no primary plan.
    """
    # fix: the old query never used `patientid`, so it returned the most
    # recent primary plan of ANY patient in the database
    plan = db.select(['journal','plan'],
                     where='plan.journalid=journal.id and plan.secondaryto is null'
                           ' and journal.patientid=%d' % patientid,
                     order='ts DESC', limit=1)[0]
    return plan
# plans
# =================================================================
# claims
def get_claim(claimid):
    """Claim row keyed by its journal id; raises IndexError when missing."""
    return db.where('claim', journalid=claimid)[0]
def get_tx_for_claim(claimid):
    """All treatments filed under the claim."""
    return db.where('tx', claimid=claimid).list()
def new_payment_for_pt(pt, summary, amount):
    """Record a payment journal entry for the patient; returns the journal id."""
    journalid = db.insert('journal',
                          patientid=pt.id,
                          ts=store_datetime(current_time()),
                          kind='payment',
                          summary=summary,
                          money=amount)
    return journalid
# claims
# =================================================================
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.