index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
16,200 | e3cd7ef1d84c54b8373f0a4bdcbdee02d445cdb5 | import main
#alreadyImported = []
def compilefile(file, n=None):
main.main(file, n=n) |
16,201 | 0ea86680d5d4a980769e3b020326ecdc4053813f | import scrapy
class QuotesSpider(scrapy.Spider):
    """Spider that collects PDF download links for Bank of Korea (BOK)
    monetary policy minutes published between 2005-05-01 and 2017-12-31."""
    name = "bok"

    def start_requests(self):
        # Single entry point: the BOK bulletin-board (B0000245) search result
        # page restricted to the target date range.
        urls = [
            'https://www.bok.or.kr/portal/bbs/B0000245/list.do?menuNo=200761&sdate=2005-05-01&edate=2017-12-31',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # Extract the total page count from the "current / total" pagination
        # label (second span of the schTotal div).
        total_page_xpath = "//div[@class='schTotal']/span[2]/text()"
        total_page = int(response.xpath(total_page_xpath).extract()[1].split()[0].replace('/', ''))
        # Build one list-page URL per page index.
        # NOTE(review): range(1, total_page) stops at total_page - 1, so the
        # last result page is never requested -- confirm whether intended.
        urls = ('https://www.bok.or.kr/portal/bbs/B0000245/list.do?menuNo=200761&sdate=2005-05-01&edate=2017-12-31&pageIndex={}'.format(i) for i in range(1,total_page))
        for url in urls:
            yield scrapy.Request(url, callback=self.parse_page)

    def parse_page(self, response):
        # Minute titles, in page order.
        pdf_names_xpath = "//span[@class='titlesub']/text()"
        pdf_names = response.xpath(pdf_names_xpath).extract()
        # Attachment links whose anchor text contains 'pdf'.
        pdf_urls_xpath = "//div[@class='fileGoupBox']/ul/li/a[contains(., 'pdf')]/@href"
        pdf_urls = response.xpath(pdf_urls_xpath).extract()
        filename = 'bok_pdf_urls.tsv'
        # Append mode means re-runs accumulate duplicate rows; zip pairing
        # assumes names and links appear in the same order and count.
        with open(filename, 'a') as f: # if file exists, append
            for pdf_name, pdf_url in zip(pdf_names, pdf_urls):
                f.write(pdf_name + '\t' + 'https://www.bok.or.kr/' + pdf_url + '\n')
        self.log('Saved pdf paths to %s' % filename)
16,202 | 1af2843d8090214d0aa24f184fd7204a942a1e8e | from collections import deque
"""
题目一: 滑动窗口的最大值
"""
def max_in_windows(array, win_size):
    """Return the maximum of every contiguous window of length ``win_size``.

    A deque of candidate indices (values strictly decreasing front-to-back)
    is maintained so each element is pushed and popped at most once,
    giving O(n) total work. Returns ``None`` for invalid input: a non-list,
    an empty list, or a window size outside ``1..len(array)``.
    """
    valid = isinstance(array, list) and len(array) > 0 and 1 <= win_size <= len(array)
    if not valid:
        return None

    candidates = deque()  # indices into array; values strictly decreasing
    maxima = []

    def admit(idx):
        # Indices whose values are dominated by array[idx] can never be a
        # window maximum again; drop them from the back.
        while candidates and array[candidates[-1]] < array[idx]:
            candidates.pop()
        candidates.append(idx)

    for idx in range(len(array)):
        if idx >= win_size:
            # The window ending at idx - 1 is complete: record its maximum,
            # then evict indices that fall outside the window ending at idx.
            maxima.append(array[candidates[0]])
            while candidates and candidates[0] <= idx - win_size:
                candidates.popleft()
        admit(idx)
    # Maximum of the final window.
    maxima.append(array[candidates[0]])
    return maxima
"""
题目二: 队列的最大值
"""
class QueueWithMax(object):
    """FIFO queue with O(1)-amortized ``max()``.

    A second deque holds (index, value) candidates in non-increasing value
    order; its front is always the current maximum. Indices tie candidates
    back to the elements in ``data`` so eviction on pop is exact.
    """

    def __init__(self):
        self.data = deque()       # all live elements as (index, value)
        self.max_queue = deque()  # maximum candidates, values non-increasing
        self.current_index = 0    # monotone counter linking the two deques

    def push_back(self, num):
        """Append ``num`` and evict dominated candidates from the back."""
        while self.max_queue and num >= self.max_queue[-1][1]:
            self.max_queue.pop()
        entry = (self.current_index, num)
        self.max_queue.append(entry)
        self.data.append(entry)
        self.current_index += 1

    def pop_front(self):
        """Remove the oldest element; drop it from the candidate deque if it
        was the current maximum. Asserts the queue is non-empty."""
        assert len(self.data) != 0
        index, _value = self.data.popleft()
        if self.max_queue[0][0] == index:
            self.max_queue.popleft()

    def max(self):
        """Return (without removing) the current maximum. Asserts non-empty."""
        assert len(self.data) != 0
        return self.max_queue[0][1]
if __name__ == "__main__":
    # Demo 1: sliding-window maxima of an 8-element array, window size 3.
    print(max_in_windows([2, 3, 4, 2, 6, 2, 5, 1], 3))
    # Demo 2: exercise QueueWithMax, printing the internal candidate deque
    # after each operation to show how it evolves.
    q = QueueWithMax()
    q.push_back(3)
    print(q.max_queue)
    q.push_back(1)
    print(q.max_queue)
    q.push_back(2)
    print(q.max_queue)
    q.pop_front()
    print(q.max_queue)
    q.pop_front()
    print(q.max_queue)
|
16,203 | d740602a5a64a703b8c7a90cd912704e311d08af | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 15:31:00 2021
@author: 洪睿
"""
import numpy as np
import gym
#from utils import plotLearning
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
#tf.compat.v1.disable_eager_execution()
# Train a DQN hedging agent on a simulated trading environment, then
# evaluate delta / Bartlett / RL hedging strategies on a fresh environment.
# NOTE(review): TradingEnv, Agent, plot_learning_curve and test are not
# imported in this file -- presumably supplied by sibling modules; confirm.
num_simulation = 10000
env = TradingEnv(num_sim = num_simulation, continuous_action_flag=False,sabr_flag = True) # see tradingenv.py for more info
lr = 0.001
agent = Agent(gamma=1, epsilon=1.0, lr=lr, n_actions = 101,
              input_dims=env.num_state,env = env,
              mem_size=1000, batch_size=128,
              prioritized_replay= True)
agent.load_model()
scores = []
#eps_history = []
for i in range(num_simulation):
    done = False
    score = 0
    observation = env.reset() #[price, position, ttm], price=S, position=0, ttm=init_ttm
    j=0
    while not done:
        # Choose an action from the current observation (returned as a tensor).
        action = agent.choose_action(tf.convert_to_tensor(np.expand_dims(observation,-1))) #action is tensor
        #action = action.numpy()[0] #change to numpy
        observation_, reward, done, info,_ = env.step(action)
        score += reward
        agent.store_transition(observation, action, reward, observation_, done)
        observation = observation_
        agent.learn()
    #eps_history.append(agent.epsilon)
    scores.append(score) # score for every episode
    # Running average over the last 100 episodes for progress reporting.
    avg_score = np.mean(scores[-100:])
    if i % 100 == 0:
        print('episode %.2f' % i, 'score %.2f' % score, 'average_score %.2f' % avg_score)
        # 'epsilon %.2f' % agent.epsilon)
filename = 'dddqn_tf2_lstm_dv05_risk05.png'
x = [i+1 for i in range(num_simulation)]
plot_learning_curve(x, scores, filename)
agent.save_model()
# Evaluate the three hedging strategies on a differently-seeded environment.
total_episode_test = 3000
env_test2 = TradingEnv(continuous_action_flag=False, sabr_flag=True, dg_random_seed=2, num_sim=total_episode_test)
delta_u,delta_r,delta_pl,delta_cpl = test(total_episode_test = total_episode_test, env = env_test2, agent = agent, name='delta_dv05_risk05', delta_flag=True, bartlett_flag=False)
barlette_u,barlette_r,barlette_pl,barlette_cpl = test(total_episode_test = total_episode_test, env = env_test2, agent = agent, name='barlette_dv05_risk05', delta_flag=False, bartlett_flag=True)
rl_u,rl_r,rl_pl,rl_cpl = test(total_episode_test = total_episode_test, env = env_test2, agent = agent, name='rl_dv05_risk05', delta_flag=False, bartlett_flag=False)
#plot_obj(delta_u, figure_file='delta_u_dv0')
#plot_obj(rl_u, figure_file='rl_u_dv0')
#plot_obj(barlette_u, figure_file='barlette_u_dv0')
# Export per-time-step rewards, episode P&L and per-time-step P&L for the
# delta / RL / Bartlett strategies as one-row CSVs.
# NOTE(review): `pd` is used below but never imported in this file -- it is
# presumably `import pandas as pd` elsewhere; confirm before running.

def _flatten(per_episode_lists):
    """Flatten a list of per-episode value lists into one flat list."""
    return [value for episode in per_episode_lists for value in episode]

def _save_series(values, csv_name):
    """Write ``values`` keyed by the global time-step index ``epi`` as a
    single-row CSV (one column per time step; zip truncates to the shorter)."""
    history = dict(zip(epi, values))
    pd.DataFrame(history, index=[0]).to_csv(csv_name, index=False, encoding='utf-8')

# Column index: total_episode_test episodes x 25 time steps each.
upperbound = total_episode_test*25 + 1
epi = np.arange(1, upperbound, 1)

########### time step reward
_save_series(_flatten(delta_r), 'delta_dv05_r_risk05.csv')
_save_series(_flatten(rl_r), 'rl_dv05_r_risk05.csv')
_save_series(_flatten(barlette_r), 'barlette_dv05_r_risk05.csv')
############# episode p&l
_save_series(rl_cpl, 'rl_dv05_cpl_risk05.csv')
_save_series(delta_cpl, 'delta_dv05_cpl_risk05.csv')
_save_series(barlette_cpl, 'barlette_dv05_cpl_risk05.csv')
############# time step p&l
_save_series(_flatten(barlette_pl), 'barlette_dv05_pl_risk05.csv')
_save_series(_flatten(rl_pl), 'rl_dv05_pl_risk05.csv')
_save_series(_flatten(delta_pl), 'delta_dv05_pl_risk05.csv')
print('Finished')  # fixed typo: was 'Fishined'
|
16,204 | ef81c6a964bd51fbb6ec1eac2747c16ac612eaaa | import torch
import torch.nn as nn
from .Attention import Attention
from models.base_model import BaseModel
from .Encoders.MHA import MHA_User_Encoder,MHA_Encoder
class NRMS(BaseModel):
    """NRMS news-recommendation model: a pluggable news encoder plus a
    multi-head-attention user encoder; candidates are scored by dot product
    against the user representation."""

    def __init__(self, hparams, vocab, encoder):
        super().__init__(hparams)
        self.name = 'nrms' + encoder.name
        self.title_length = hparams['title_size']
        self.abs_length = hparams['abs_size']
        self.encoder = encoder
        self.user_encoder = MHA_User_Encoder(hparams)
        self.hidden_dim = self.encoder.hidden_dim

    def _click_predictor(self, cdd_repr, user_repr):
        """ calculate batch of click probability
        Args:
            cdd_repr: [batch_size, cdd_size, hidden_dim]
            user_repr: [batch_size, hidden_dim]
        Returns:
            score: tensor of [batch_size, cdd_size], which is normalized click probability
        """
        score = torch.matmul(cdd_repr,user_repr.unsqueeze(dim=-1)).squeeze(dim=-1)
        # Multiple candidates -> log-softmax over them (training with NLL);
        # single candidate -> sigmoid probability (pointwise evaluation).
        if self.cdd_size > 1:
            score = nn.functional.log_softmax(score, dim=1)
        else:
            score = torch.sigmoid(score)
        return score

    def forward(self, x):
        # Track the effective batch size (the final batch may be smaller).
        if x['candidate_title'].shape[0] != self.batch_size:
            self.batch_size = x['candidate_title'].shape[0]
        # Encode candidate news titles.
        cdd_news = x['candidate_title'].long().to(self.device)
        cdd_news_embedding, cdd_news_repr = self.encoder(
            cdd_news,
            user_index=x['user_index'].long().to(self.device),
            news_id=x['cdd_id'].long().to(self.device),
            attn_mask=x['candidate_title_pad'].to(self.device))
        # Encode clicked-history news titles with the same encoder.
        his_news = x['clicked_title'].long().to(self.device)
        his_news_embedding, his_news_repr = self.encoder(
            his_news,
            user_index=x['user_index'].long().to(self.device),
            news_id=x['his_id'].long().to(self.device),
            attn_mask=x['clicked_title_pad'].to(self.device))
        # User representation attends over the clicked-news representations.
        user_repr = self.user_encoder(his_news_repr)
        score = self._click_predictor(cdd_news_repr, user_repr)
        return score
class NRMS_MultiView(BaseModel):
    """Multi-view NRMS: each news item is represented by four views (title,
    abstract, category, sub-category) fused by scaled dot-product attention
    against a learned query; candidates are scored by dot product with the
    attention-derived user representation."""

    def __init__(self, hparams, vocab):
        super().__init__(hparams)
        self.vert_num = hparams['vert_num']
        self.subvert_num = hparams['subvert_num']
        self.encoder = MHA_Encoder(hparams, vocab)
        self.user_encoder = MHA_User_Encoder(hparams)
        self.hidden_dim = self.encoder.hidden_dim
        # Learned query attending over the stacked view representations.
        self.viewQuery = nn.Parameter(torch.randn(1,self.hidden_dim))
        # Project one-hot (sub)category vectors into the shared hidden space.
        self.vertProject = nn.Linear(self.vert_num, self.hidden_dim)
        self.subvertProject = nn.Linear(self.subvert_num, self.hidden_dim)
        self.name = 'nrms-multiview'

    def _click_predictor(self, cdd_repr, user_repr):
        """ calculate batch of click probability
        Args:
            cdd_repr: [batch_size, cdd_size, hidden_dim]
            user_repr: [batch_size, hidden_dim]
        Returns:
            score: tensor of [batch_size, cdd_size], unnormalized click score
        """
        score = torch.matmul(cdd_repr,user_repr.unsqueeze(dim=-1)).squeeze(dim=-1)
        return score

    def forward_(self, x):
        if x['candidate_title'].shape[0] != self.batch_size:
            self.batch_size = x['candidate_title'].shape[0]
        # --- candidate views ---
        cdd_title = x['candidate_title'].long().to(self.device)
        _, cdd_title_repr = self.encoder(cdd_title)
        cdd_abs = x['candidate_abs'].long().to(self.device)
        _, cdd_abs_repr = self.encoder(cdd_abs)
        cdd_vert = x['candidate_vert_onehot'].float().to(self.device)
        cdd_vert_repr = self.vertProject(cdd_vert)
        cdd_subvert = x['candidate_subvert_onehot'].float().to(self.device)
        cdd_subvert_repr = self.subvertProject(cdd_subvert)
        # Stack the four views and fuse them with the learned query.
        cdd_repr = torch.tanh(torch.stack([cdd_title_repr, cdd_abs_repr, cdd_vert_repr, cdd_subvert_repr], dim=-2))
        cdd_repr = Attention.ScaledDpAttention(self.viewQuery, cdd_repr, cdd_repr).squeeze(dim=-2)
        # --- clicked-history views (same treatment) ---
        his_title = x['clicked_title'].long().to(self.device)
        _, his_title_repr = self.encoder(his_title)
        his_abs = x['clicked_abs'].long().to(self.device)
        _, his_abs_repr = self.encoder(his_abs)
        his_vert = x['clicked_vert_onehot'].float().to(self.device)
        his_vert_repr = self.vertProject(his_vert)
        his_subvert = x['clicked_subvert_onehot'].float().to(self.device)
        his_subvert_repr = self.subvertProject(his_subvert)
        his_repr = torch.tanh(torch.stack([his_title_repr, his_abs_repr, his_vert_repr, his_subvert_repr], dim=-2))
        his_repr = Attention.ScaledDpAttention(self.viewQuery, his_repr, his_repr).squeeze(dim=-2)
        user_repr = self.user_encoder(his_repr)
        # BUGFIX: score the fused multi-view representation. Previously this
        # passed cdd_title_repr, silently discarding the abstract/category
        # views just computed for the candidates.
        return self._click_predictor(cdd_repr, user_repr)

    def forward(self, x):
        score = self.forward_(x)
        # Multiple candidates -> log-softmax (training); single -> sigmoid.
        if self.cdd_size > 1:
            score = nn.functional.log_softmax(score, dim=1)
        else:
            score = torch.sigmoid(score)
        return score
16,205 | de172374728efdb31b3fc4c18f23aff76e905d64 | from datetime import datetime
from dateutil import relativedelta
import sys
# Interactive age calculator: prompts for a birth date, validates it is not
# in the future, and prints the elapsed years/months/days.
now = datetime.now()
try:
    # Collect the birth date; any non-integer input aborts the program.
    iyears = int(input("Please write the year in which you were born: "))
    imonths = int(input("Please write the month in which you were born: "))
    idays = int(input("Please write the day in which you were born: "))
except ValueError:
    print("Sorry! That is not a valid input")
    sys.exit()

# NOTE(review): function name keeps the original 'brithdate' typo so any
# external callers are not broken.
def check_brithdate(year, month, day):
    """Return True if the birth date is not in the future relative to
    module-load time ``now``. Raises ValueError for impossible dates
    (e.g. month 13) via the datetime constructor."""
    birthdate = datetime(year, month, day, 00, 00)
    difference = relativedelta.relativedelta(now, birthdate)
    return not((difference.years < 0) or (difference.months < 0) or (difference.days < 0))

def calculate_age(year, month, day):
    """Print the age as whole years, months and days since the birth date."""
    birthdate = datetime(year, month, day, 00, 00)
    difference = relativedelta.relativedelta(now, birthdate)
    print("You are %s years, %s months and %s days old" % (difference.years, difference.months, difference.days))

if check_brithdate(iyears, imonths, idays) == True:
    calculate_age(iyears, imonths, idays)
else:
    print("Invalid birthdate. Seems to be in the future!!!")
    sys.exit()
|
16,206 | a3ebe00d848512030a29474536e6c462a0415902 | # Given a filename and a string, find the location of the file
# and where in the file the string is located
from os import walk, path
# Locate an exactly-named file anywhere under / and report the first line
# and column where an exact string occurs inside it.
fname = input("File: ")   # exact file name to locate
s = input("String: ")     # exact string to find in that file

for root, dirs, files in walk("/"):  # walk the whole filesystem
    if fname in files:
        filepath = path.join(root, fname)
        # BUGFIX: use a distinct handle name. The original reused `f` for the
        # open file object, clobbering the filename so every later
        # `if f in files` test compared a file object against strings and
        # silently never matched again.
        with open(filepath) as fh:
            data = [line.strip() for line in fh.readlines()]
        for linenum, line in enumerate(data):
            character = line.find(s)
            # Found the string; print location and stop.
            if character != -1:
                to_print = "String '" + s + "' found on line " + str(linenum)
                to_print += ", starting at character " + str(character)
                print(to_print)
                quit()
# Didn't find the string in any copy of the file
print("String not found")
|
16,207 | aa7a38b830756a312203b3d22f6f93b6216e5bac | from django.db import models
class CovidObservations(models.Model):
    """Django model for per-region COVID-19 observation rows, mapped onto
    the existing `covid_observations` table."""
    sno = models.IntegerField(primary_key=True, )      # serial number from the source data
    observation_date = models.DateField()              # date the counts refer to
    province_state = models.CharField(max_length=255)
    country_region = models.CharField(max_length=255)
    last_update = models.DateTimeField()               # timestamp of the source update
    confirmed = models.IntegerField()                  # confirmed case count
    deaths = models.IntegerField()
    recovered = models.IntegerField()

    class Meta:
        db_table = 'covid_observations'
16,208 | c6c817499dc30352110fb917e9cd20b3f63637c3 | import datetime
from django.db import transaction
from rest_framework import generics, status
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from contacts.models import Contact
from contacts.serializers import ContactSerializer, ContactNestedSerializer
class SearchContactsView(generics.ListAPIView):
    """GET: search contacts whose first/last name, email or phone contains
    the ``query`` query parameter (case-insensitive). 404 if no match."""
    serializer_class = ContactSerializer

    def get_queryset(self):
        query = self.request.query_params.get('query', '')
        by_first_name = Contact.objects.filter(first_name__icontains=query)
        by_last_name = Contact.objects.filter(last_name__icontains=query)
        by_email = Contact.objects.filter(emails__email__icontains=query)
        by_phone = Contact.objects.filter(phone_numbers__phone__icontains=query)
        # Union of the four querysets; distinct() removes duplicates that the
        # joins on emails/phone_numbers can introduce.
        queryset = by_first_name | by_last_name | by_email | by_phone
        # NOTE(review): truthiness evaluates the queryset here (extra DB hit);
        # .exists() would be cheaper.
        if queryset:
            return queryset.order_by('first_name', 'last_name').distinct()
        else:
            raise NotFound()

class BirthdaysView(generics.ListAPIView):
    """GET: contacts whose birthday falls in the current month. 404 if none."""
    serializer_class = ContactSerializer

    def get_queryset(self):
        today = datetime.datetime.now()
        queryset = Contact.objects.filter(date_of_birth__month=today.month)
        if queryset:
            return queryset.order_by('date_of_birth__day', 'first_name', 'last_name')
        else:
            raise NotFound()

class ListContactsView(generics.ListCreateAPIView):
    """
    Provides a GET and POST method handler
    """
    queryset = Contact.objects.all().order_by('first_name', 'last_name')
    serializer_class = ContactSerializer

    @transaction.atomic
    def post(self, request, *args, **kwargs):
        # Normalize flat lists of strings into the nested dict shape the
        # ContactNestedSerializer expects, then create everything atomically.
        request.data['phone_numbers'] = list(map(lambda x: {'phone': x}, request.data.get('phone_numbers', [])))
        request.data['emails'] = list(map(lambda x: {'email': x}, request.data.get('emails', [])))
        serializer = ContactNestedSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        created_contact = serializer.create(serializer.validated_data)
        return Response(data=self.serializer_class(created_contact).data, status=status.HTTP_201_CREATED)

class ContactDetailsView(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single contact addressed by ``contact_id``."""
    queryset = Contact.objects.all()
    serializer_class = ContactSerializer
    lookup_url_kwarg = 'contact_id'
|
16,209 | ceddedc4dc22d0ee66258b284c3a0b9a709a5847 | from ai.nn.layer import *
from ai.nn.helpers import *
from ai.nn.network import *
from ai.dqn import DQN
from ai.drqn import DRQN
def drqn_model1(session):
    """Build a DRQN Q-network (parallel convolutions -> LSTM -> FC) and its
    trainer for a 4x4x16 state with 4 actions.

    NOTE(review): the ``lambda (A,B,C,D,E): ...`` tuple-parameter syntax is
    Python 2 only -- this module will not parse under Python 3.
    """
    batch_size = 16
    time_steps = 16
    qnn = NeuralNetwork([
        # Fold the time axis into the batch axis so the conv layers see
        # (batch*time, H, W, C) frames.
        Reshape(lambda (A,B,C,D,E): (batch_size*time_steps,C,D,E)),
        # Three parallel conv branches (column, row, 2x2 patch), each
        # flattened and concatenated along the feature axis.
        Merge([
            Sequence([
                Conv2DLayer([4, 1], 16, 32, [4, 1], padding='VALID'),
                Reshape(lambda (AB,C,D,E): (AB, C*D*E))
            ]),
            Sequence([
                Conv2DLayer([1, 4], 16, 32, [1, 4], padding='VALID'),
                Reshape(lambda (AB,C,D,E): (AB, C*D*E))
            ]),
            Sequence([
                Conv2DLayer([2, 2], 16, 32, [1, 1], padding='VALID'),
                Reshape(lambda (AB,C,D,E): (AB, C*D*E))
            ])
        ], axis = 1),
        # Restore (batch, time, features) for the recurrent part.
        Reshape(lambda (AB, CDE): (batch_size, time_steps, CDE)),
        Unroll(axis=1, num=time_steps),
        LSTMCell(544, 256, minibatch_size=batch_size),
        Roll(axis=1),
        Reshape(lambda (A, B, CDE): (batch_size * time_steps, CDE)),
        FCLayer(256, 4)
    ])
    optimizer = tf.train.AdamOptimizer(0.001)
    trainer = DRQN(qnn, optimizer, session, [4, 4, 16], 4,
                   final_exploration_probability=0.05,
                   exploration_period=1000,
                   reply_memory_size=48,
                   target_freeze_period= 2500,
                   unrollings_num=time_steps,
                   minibatch_size=batch_size)
    return qnn, trainer, '4x4x16', 'log2Plain'

def drqn_model2(session):
    """Same network/trainer as drqn_model1, tagged with a different
    state-representation label."""
    qnn, trainier, state_representation, _ = drqn_model1(session)
    return qnn, trainier, state_representation, 'log2MaxTileEmptyDiff'
|
16,210 | 2162636cce37ef15d2cdf86c4c80fa1d2e81a105 | from forte_fives import card
from forte_fives import deck
import unittest
class Deck_init(unittest.TestCase):
    """Verify a freshly-constructed Deck holds exactly one of each of the
    52 rank/suit combinations."""

    def setUp(self):
        self.mydeck = deck.Deck()

    def tearDown(self):
        self.mydeck = None

    def test_correct_size(self):
        # assertEqual reports both values on failure, unlike assertTrue(a == b).
        self.assertEqual(len(self.mydeck.cards), 52)

    def test_unique_cards(self):
        # Can't use set/frozenset because cards are not hashable.
        # Well... they are but two cards of the same rank and
        # suit will have different hashable values.
        d = dict.fromkeys([str(c) for c in self.mydeck.cards])
        self.assertEqual(len(d), 52)

    def test_correct_suites(self):
        # Count cards per suit; each of the 4 suits must appear 13 times.
        suits = dict()
        for c in self.mydeck.cards:
            if c.suit not in suits.keys():
                suits[c.suit] = 0
            suits[c.suit] += 1
        for suit in card.SUITS:
            self.assertEqual(suits[suit], 13)

    def test_correct_ranks(self):
        # Count cards per rank; each of the 13 ranks must appear 4 times.
        ranks = dict()
        for c in self.mydeck.cards:
            if c.rank not in ranks.keys():
                ranks[c.rank] = 0
            ranks[c.rank] += 1
        for rank in card.RANKS:
            self.assertEqual(ranks[rank], 4)
class Deck_len(unittest.TestCase):
def setUp(self):
self.mydeck = deck.Deck()
def tearDown(self):
self.mydeck = None
def test_full_deck(self):
self.assertTrue(len(self.mydeck) == len(self.mydeck.cards) == 52)
def test_size51(self):
self.mydeck.cards.pop()
self.assertTrue(len(self.mydeck) == len(self.mydeck.cards) == 51)
def test_linked_to_cards_list(self):
self.mydeck.cards = []
self.assertTrue(len(self.mydeck) == len(self.mydeck.cards) == 0)
class Deck_contains(unittest.TestCase):
def setUp(self):
self.mydeck = deck.Deck()
def tearDown(self):
self.mydeck = None
def test_positive(self):
self.assertTrue(self.mydeck.cards[10] in self.mydeck)
def test_negative(self):
removed_card = self.mydeck.cards.pop()
self.assertFalse(removed_card in self.mydeck)
def test_invalid_card_type(self):
# Any kind of object.
any_obj = dict()
self.assertRaises(card.BadCardException, self.mydeck.__contains__,
any_obj)
class InsertCard(unittest.TestCase):
def setUp(self):
self.mydeck = deck.Deck()
def tearDown(self):
self.mydeck = None
def test_invalid_card_type(self):
any_obj = dict()
self.assertRaises(card.BadCardException, self.mydeck.insert_card,
any_obj)
def test_duplicate_card(self):
self.assertRaises(card.BadCardException, self.mydeck.insert_card,
self.mydeck.cards[0])
def test_valid_insert(self):
removed_card = self.mydeck.cards.pop()
self.assertTrue(removed_card not in self.mydeck)
self.mydeck.insert_card(removed_card)
self.assertTrue(removed_card in self.mydeck)
class PickCard(unittest.TestCase):
def setUp(self):
self.mydeck = deck.Deck()
def tearDown(self):
self.mydeck = None
def test_last_card(self):
last_card = self.mydeck.cards[-1]
picked_card = self.mydeck.pick_card()
self.assertTrue(last_card == picked_card)
def test_empty_deck(self):
self.mydeck.cards = []
picked_card = self.mydeck.pick_card()
self.assertTrue(picked_card is None)
class Shuffle(unittest.TestCase):
    """Verify Deck.shuffle() reorders the cards in place."""

    def test_different_order(self):
        d = deck.Deck()
        unshuffled_cards = list(d.cards)
        # Sanity check: the copy reflects the current order.
        self.assertEqual(unshuffled_cards, d.cards)
        d.shuffle()
        # assertNotEqual gives a diagnosable message on failure.
        # NOTE: a shuffle can (with probability 1/52! -- effectively never)
        # reproduce the original order, so this is technically flaky.
        self.assertNotEqual(unshuffled_cards, d.cards)
if __name__ == '__main__':
unittest.main()
|
def sol(datas):
    """Print ``datas`` with every zero moved to the end, preserving the
    relative order of the non-zero elements."""
    non_zero = [value for value in datas if value != 0]
    zero_count = len(datas) - len(non_zero)
    print(non_zero + [0] * zero_count)
if __name__ == "__main__":
sol([6, 0, 8, 2, 3, 0, 4, 0, 1])
|
16,212 | 859057efd3966e6e53db7cd18f65cee796009624 | import os, json, unittest, time, shutil, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_util, h2o_log
class Basic(unittest.TestCase):
    """Sanity-check H2O log download/view on a 3-node cloud."""

    def tearDown(self):
        # Fail the test if any node wrote errors into its sandbox logs.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Build a local 3-node cloud, or use the configured remote hosts.
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(3)
        else:
            h2o_hosts.build_cloud_with_hosts()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_log_download_view(self):
        # Expect one log (name + line count) per node in the 3-node cloud.
        (logNameList, lineCountList) = h2o_log.checkH2OLogs()
        self.assertEqual(len(logNameList), 3, "Should be 3 logs")
        self.assertEqual(len(lineCountList), 3, "Should be 3 logs")
        # line counts seem to vary..check for "too small"
        # variance in polling (cloud building and status)?
        self.assertGreater(lineCountList[0], 12, "node 0 log is too small")
        self.assertGreater(lineCountList[1], 12, "node 1 log is too small")
        self.assertGreater(lineCountList[2], 12, "node 2 log is too small")

if __name__ == '__main__':
    h2o.unit_main()
|
16,213 | 184928431ec5ea2c26f2a1b15a02240ef5d9cc96 | # coding: utf-8
'''
File: tracker.py
Project: AlphaPose
File Created: Thursday, 1st March 2018 6:12:23 pm
Author: Yuliang Xiu (yuliangxiu@sjtu.edu.cn)
-----
Last Modified: Monday, 1st October 2018 12:53:12 pm
Modified By: Yuliang Xiu (yuliangxiu@sjtu.edu.cn>)
-----
Copyright 2018 - 2018 Shanghai Jiao Tong University, Machine Vision and Intelligence Group
'''
import cv2
from utils import *
import os.path
import copy
import numpy as np
class Tracker:
def __init__(self, link=100, drop=2.0, num=7, mag=30, match=0.2, orb=1):
# super parameters
# 1. look-ahead LINK_LEN frames to find tracked human bbox
# 2. bbox_IoU(deepmatching), bbox_IoU(general), pose_IoU(deepmatching), pose_IoU(general), box1_score, box2_score
# 3. bbox_IoU(deepmatching), bbox_IoU(general), pose_IoU(deepmatching), pose_IoU(general), box1_score, box2_score(Non DeepMatching)
# 4. drop low-score(<DROP) keypoints
# 5. pick high-score(top NUM) keypoints when computing pose_IOU
# 6. box width/height around keypoint for computing pose IoU
# 7. match threshold in Hungarian Matching
# 9. use orb matching or not
self.lastframe = None
self.link_len = link
self.weights = [1,2,1,2,0,0]
self.weights_fff = [0,1,0,1,0,0]
self.drop = drop
self.num = num
self.mag = mag
self.match_thres = match
self.use_orb = orb
self.max_pid_id = 0
self.total_track = {}
def track(self, frame, notrack):
track = {}
# json file without tracking information
# Note: time is a little long, so it is better to uncomment the following save operation at first time
for imgpath in sorted(notrack.keys()):
vname, fname = os.path.split(imgpath)
track[vname] = {}
if not vname in self.total_track:
self.total_track[vname]= {}
track[vname][fname] = {'num_boxes':len(notrack[imgpath])}
for bid in range(len(notrack[imgpath])):
track[vname][fname][bid+1] = {}
track[vname][fname][bid+1]['box_score'] = notrack[imgpath][bid]['score']
track[vname][fname][bid+1]['box_pos'] = self.get_box(notrack[imgpath][bid]['keypoints'], frame)
track[vname][fname][bid+1]['box_pose_pos'] = np.array(notrack[imgpath][bid]['keypoints']).reshape(-1,3)[:,0:2]
track[vname][fname][bid+1]['box_pose_score'] = np.array(notrack[imgpath][bid]['keypoints']).reshape(-1,3)[:,-1]
self.total_track[vname][fname] = {}
self.total_track[vname][fname] = track[vname][fname]
track_single, track = track, self.total_track
if self.lastframe is None:
self.lastframe = frame
return None, None
# tracking process
for video_name in track_single.keys():
frame_list = sorted(list(track[video_name].keys()))
t_idx = len(frame_list) - 2
frame_name = frame_list[t_idx]
for idx, frame_name in [[t_idx, frame_name]]:
next_frame_name = frame_list[idx+1]
# init tracking info of the first frame in one video
if idx == 0:
for pid in range(1, track[video_name][frame_name]['num_boxes']+1):
track[video_name][frame_name][pid]['new_pid'] = pid
track[video_name][frame_name][pid]['match_score'] = 0
max_pid_id = max(self.max_pid_id, track[video_name][frame_name]['num_boxes'])
img1 = self.lastframe
img2 = frame
self.lastframe = frame
ret = []
# Initiate ORB detector
#todo neatures=10000
orb = cv2.ORB_create(nfeatures=500, scoreType=cv2.ORB_FAST_SCORE)
# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
if len(kp1) * len(kp2) < 400:
height, width, channels = img1.shape
for x in range(width):
for y in range(height):
ret.append([x, y, x, y, 1.0])
return np.array(ret)
# FLANN parameters
FLANN_INDEX_LSH = 6
index_params = dict(algorithm=FLANN_INDEX_LSH,
table_number=12, # 12
key_size=12, # 20
multi_probe_level=2) # 2
#todo checks=100
search_params = dict(checks=100) # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# ratio test as per Lowe's paper
for i, m_n in enumerate(matches):
if len(m_n) != 2:
continue
elif m_n[0].distance < 0.80 * m_n[1].distance:
ret.append([kp1[m_n[0].queryIdx].pt[0], kp1[m_n[0].queryIdx].pt[0], kp2[m_n[0].trainIdx].pt[0],
kp2[m_n[0].trainIdx].pt[1], m_n[0].distance])
if len(ret) < 50:
height, width, channels = img1.shape
ret = []
for x in range(width):
for y in range(height):
ret.append([x, y, x, y, 1.0])
all_cors = np.array(ret)
# if there is no people in this frame, then copy the info from former frame
if track[video_name][next_frame_name]['num_boxes'] == 0:
track[video_name][next_frame_name] = copy.deepcopy(track[video_name][frame_name])
continue
cur_all_pids, cur_all_pids_fff = stack_all_pids(track[video_name], frame_list[:-1], idx, max_pid_id, self.link_len)
match_indexes, match_scores = best_matching_hungarian(
all_cors, cur_all_pids, cur_all_pids_fff, track[video_name][next_frame_name], self.weights, self.weights_fff, self.num, self.mag)
for pid1, pid2 in match_indexes:
if match_scores[pid1][pid2] > self.match_thres:
track[video_name][next_frame_name][pid2+1]['new_pid'] = cur_all_pids[pid1]['new_pid']
max_pid_id = max(max_pid_id, track[video_name][next_frame_name][pid2+1]['new_pid'])
track[video_name][next_frame_name][pid2+1]['match_score'] = match_scores[pid1][pid2]
# add the untracked new person
for next_pid in range(1, track[video_name][next_frame_name]['num_boxes'] + 1):
if 'new_pid' not in track[video_name][next_frame_name][next_pid]:
max_pid_id += 1
track[video_name][next_frame_name][next_pid]['new_pid'] = max_pid_id
track[video_name][next_frame_name][next_pid]['match_score'] = 0
self.max_pid_id = max_pid_id
if (len(frame_list)) > self.link_len:
del track[video_name][frame_list[0]]
# return track
return copy.deepcopy(track[video_name][frame_name]), img1
    def get_box(self, pose, img):
        """Compute an expanded bounding box around a pose's keypoints.

        Args:
            pose: flat keypoint list, reshaped to (-1, 3) as (x, y, score).
            img: frame whose height/width bound the expanded box.
        Returns:
            the result of expand_bbox(...) for the keypoint extent.
        """
        img_height, img_width, _ = img.shape
        pose = np.array(pose).reshape(-1, 3)
        x = pose[:, 0]
        y = pose[:, 1]
        # Minima ignore non-positive coordinates (missing joints).
        # NOTE(review): the maxima do NOT apply the same filter -- confirm
        # whether negative/zero placeholder joints can inflate the box.
        xmin = np.min(x[x > 0])
        xmax = np.max(pose[:, 0])
        ymin = np.min(y[y > 0])
        ymax = np.max(pose[:, 1])
        return expand_bbox(xmin, xmax, ymin, ymax, img_width, img_height)
def wrapped_track(self, frame, keypoints, scores, video_name, ordinal):
notrack = {}
imgpath = "%s/%08d.jpg" % (video_name, ordinal)
notrack[imgpath] = []
for i in range(len(keypoints)):
notrack[imgpath].append({"keypoints": keypoints[i], "score": scores[i]})
# track, last_frame = self.track(frame, notrack)
# if track is not None:
# frame_info = copy.deepcopy(track[video_name][list(track[video_name].keys())[0]])
# del frame_info["num_boxes"]
frame_info, last_frame = self.track(frame, notrack)
if frame_info is not None:
del frame_info["num_boxes"]
return sorted(list(frame_info.values()), key=lambda x: x["new_pid"]), last_frame
else:
return None,None
|
16,214 | ce6fbc081120f35ed8f7d122bfbf1ef8a9e0d78f | from rest_framework import serializers
from .models import Comment
from django.contrib.auth.models import User
class OwnerSerializer(serializers.RelatedField):
def to_representation(self, value):
return {'id':value.id, 'username': value.username}
class CommentSerializer(serializers.HyperlinkedModelSerializer):
owner = OwnerSerializer(read_only=True)
class Meta:
model = Comment
fields = ('id', 'text', 'owner', 'target')
read_only_fields = ('target',) |
16,215 | cebf6d0793ef564fd8647f8917b6cc278e0ff2bb | import multiprocessing
import os
import sys
import time
import webbrowser
from flask import Flask
client_id = 'c895de4e2dde4f32886ec383d6f39bd8'
redirect_uri = 'http://localhost:8642/'
config = {'client_id': client_id,
'redirect_uri': redirect_uri}
app = Flask(__name__)
@app.route('/', methods=['GET'])
def oauth_redirect():
# shoutout to http://stackoverflow.com/a/7866932/213000
return (
'<script type="text/javascript">'
'var access_token = window.location.href.split("access_token=")[1];'
'window.location = "/" + access_token;'
'</script>')
@app.route('/<access_token>/', methods=['GET'])
def get_access_token_from_response(access_token):
access_token_file = 'access_token.txt'
with open(access_token_file, 'w') as f:
f.write(access_token)
return ('Your Instagram access token is now stored within %s. You can '
'return to the shell now.' % os.path.abspath(access_token_file))
def handle_oauth_flow():
app_kwargs = {'port': 8642, 'debug': True}
server = multiprocessing.Process(target=app.run, kwargs=app_kwargs)
server.start()
browser = webbrowser.get()
instagram_auth_url = ('https://instagram.com/oauth/authorize/'
'?client_id=%(client_id)s&'
'redirect_uri=%(redirect_uri)s&'
'response_type=token' % config)
browser.open_new_tab(instagram_auth_url)
while True:
if os.path.exists('./access_token.txt'):
server.terminate()
print 'restart please :)'
sys.exit()
time.sleep(1)
def get_access_token():
access_token_file = './access_token.txt'
try:
with open(access_token_file, 'r') as f:
access_token = f.readline()
except IOError:
handle_oauth_flow()
else:
return access_token
|
16,216 | 83e486e38d2e3808685f1c245671bf3603c45254 | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
rows = [21, 7, 9, 12, 2, 10, 3, 27]
cols = [1, 4, 17, 20, 22, 16, 8, 25]
heart = [
[1, 0, 0, 1, 1, 0, 0, 1],
[0, 1, 1, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[1, 0, 1, 1, 1, 1, 0, 1],
[1, 1, 0, 1, 1, 0, 1, 1],
[1, 1, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1]
]
R = [
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1]
]
S = [
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 1, 1, 0, 1, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1]
]
P = [
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1]
]
def init():
    # Reset the matrix: all row pins low (row deselected), all column pins
    # high (LEDs off -- a 0 in a pattern drives the LED on).
    for pin in rows:
        GPIO.setup(pin, GPIO.OUT, initial=0)
    for pin in cols:
        GPIO.setup(pin, GPIO.OUT, initial=1)
init()
# Choose which letter pattern to scan out: 1 -> R, 2 -> S, 3 -> P.
num = int(input('enter number: '))
try:
    # Row-scan multiplexing: light one row at a time, fast enough that
    # persistence of vision shows the whole letter.  Ctrl-C exits cleanly.
    # NOTE(review): any number outside 1-3 leaves this loop spinning with
    # no output at 100% CPU.
    while True:
        if num == 1:
            for row in range(8):
                init()
                GPIO.output(rows[row], 1)
                for col in range(8):
                    GPIO.output(cols[col], R[row][col])
        elif num == 2:
            for row in range(8):
                init()
                GPIO.output(rows[row], 1)
                for col in range(8):
                    GPIO.output(cols[col], S[row][col])
        elif num == 3:
            for row in range(8):
                init()
                GPIO.output(rows[row], 1)
                for col in range(8):
                    GPIO.output(cols[col], P[row][col])
except KeyboardInterrupt:
    GPIO.cleanup()
|
16,217 | c2baa8dc5f47ce63a143b65cf864399a8db890b3 | from django.shortcuts import render
from django.http import HttpResponse,JsonResponse,FileResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from colcon.models import Profile,UserDetail,ProfileSerializer,Post,Channel,PostSerializer,Comments,ChannelRequests,Report
from django.core.exceptions import ObjectDoesNotExist
import math, random
from django.core.mail import send_mail
from django.conf import settings
import datetime
#utilities
# In-memory session state: auth token -> User, and username -> auth token.
# NOTE(review): process-local and unbounded; lost on restart, grows forever.
keys_dict = {}
logins = {}
import pusher
# SECURITY(review): pusher credentials are hardcoded in source; move the
# key/secret into settings or environment variables.
pusher_client = pusher.Pusher(
    app_id='933316',
    key='f259e37a3a90ae0ee98e',
    secret='74b90e129d3ab0cafec1',
    cluster='ap2',
    ssl=True
)
# pusher_client.trigger('my-channel', 'my-event', {'message': 'hello rishab'})
def get_token(user):
    """Mint a fresh session token for `user` and record it in the
    module-level token maps (keys_dict, logins)."""
    from uuid import uuid4
    token = str(uuid4())
    keys_dict[token] = user
    logins[user.username] = token
    return token
def generateOTP():
    """Return a 4-digit one-time password as a string.

    Uses the OS CSPRNG (random.SystemRandom) instead of the default
    Mersenne Twister: OTPs are security tokens and must be unpredictable.
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice("0123456789") for _ in range(4))
def forgot_email(receiver,msg = ''):
    """Email a password-reset OTP (in `msg`) to the single address `receiver`."""
    subject = 'Otp to change password'
    email_from = settings.EMAIL_HOST_USER
    recipient_list = [receiver,]
    send_mail( subject, msg, email_from, recipient_list )
def activate_email(receiver,msg = ''):
    """Email an account-activation link (in `msg`) to the address `receiver`."""
    subject = 'Link to Activate Account'
    email_from = settings.EMAIL_HOST_USER
    recipient_list = [receiver,]
    send_mail( subject, msg, email_from, recipient_list )
def email(receiver,msg = '',sub = ''):
    # NOTE(review): unlike forgot_email/activate_email, `receiver` here is
    # passed straight through, so callers supply a *list* of addresses
    # (see add_people below).
    email_from = settings.EMAIL_HOST_USER
    send_mail( sub, msg, email_from, receiver )
def encrypt(msg):
    """Obfuscate `msg`: shift every character up 3 code points, then reverse.

    Not real encryption -- a reversible Caesar-style scramble used for the
    activation-link token (see decrypt()).
    """
    shifted = [chr(ord(ch) + 3) for ch in str(msg)]
    return ''.join(reversed(shifted))
def decrypt(msg):
    """Inverse of encrypt(): undo the reversal, then shift down 3 code points."""
    restored = msg[::-1]
    return ''.join(chr(ord(ch) - 3) for ch in restored)
####################################################################################################################################################################################################################################################################################################################################
#Views
@csrf_exempt
def login(req):
    """POST id/pwd -> session token + profile summary.

    403 if the account is not activated yet (re-sends the activation mail);
    401 on bad credentials; an existing session for the user is invalidated
    before a new token is issued.
    """
    try:
        if req.method != 'POST':
            return HttpResponse(status = 405)
        id = req.POST.get("id")
        pwd = req.POST.get("pwd")
        if not (id and pwd):
            return HttpResponse(status= 400)
        user = authenticate(username=req.POST.get("id"), password=req.POST.get("pwd"))
        if user:
            temp = Profile.objects.get(user = user)
            if not temp.activated:
                # Not activated: mail a fresh activation link built from this
                # host's LAN IP.  NOTE(review): hardcoded port 8000 and local
                # IP lookup only work in a dev setup -- confirm for deploy.
                import socket
                hostname = socket.gethostname()
                IPAddr = socket.gethostbyname(hostname)
                print("Your Computer Name is:" + hostname)
                print("Your Computer IP Address is:" + IPAddr)
                token = encrypt(user.username)
                print(temp.email)
                activate_email(str(temp.email),'http://'+IPAddr+':8000/colcon/activate/'+token)
                return HttpResponse(status = 403)
            # Single-session policy: drop any previous token for this user.
            if user.username in logins:
                del keys_dict[logins[user.username]]
            auth = get_token(user)
            userdetails = UserDetail.objects.get(idno = user.username)
            profile = ProfileSerializer(temp)
            data = {'id':userdetails.idno,'name':userdetails.name,'accounttype':userdetails.type,'email':userdetails.email,'image':profile.data['profilePicture']}
            return JsonResponse({"msg":"login successful","auth":auth,"data":data},status=200)
        else:
            return HttpResponse(status = 401)
    except Exception as e:
        print(e)
        return HttpResponse(status=500)
@csrf_exempt
def activate(req,id):
    """Activate an account from the emailed link.

    The path token `id` is the encrypt()-ed username and doubles as the
    auto-generated new password shown to the user.
    """
    id1 = decrypt(id)
    user = User.objects.get(username = id1)
    temp = Profile.objects.get(user = user)
    temp.activated = True
    user.set_password(id)
    # BUG FIX: the new password was set on `user` but only the profile was
    # saved, so the password change was silently discarded.  Persist both.
    user.save()
    temp.save()
    return HttpResponse("<div><h1>Account Activated</h1><p>Your new password is : <b>"+id+"</b></p></div>")
@csrf_exempt
def forgot_password(req,id):
    """Generate an OTP for user `id` and email it to the account's address.

    SECURITY(review): the OTP is also returned in the JSON response, so any
    caller can reset any account without reading the email -- verify intent.
    """
    try:
        if req.method != 'GET':
            return HttpResponse(status = 405)
        if not id :
            return HttpResponse(status= 400)
        user = User.objects.get(username=id)
        email = Profile.objects.get(user = user).email
        otp = generateOTP()
        forgot_email(email,'OTP:'+otp)
        return JsonResponse({'otp':otp},status=200)
    except ObjectDoesNotExist :
        # Unknown username: empty 204 rather than a 404.
        return HttpResponse(status=204)
    except Exception as e:
        print(e)
        return HttpResponse(status=500)
@csrf_exempt
def reset_password(req,id,pwd):
    """Set user `id`'s password to `pwd` (both taken from the URL).

    SECURITY(review): no authentication or OTP check happens server-side,
    and the new password travels in the URL -- confirm this is gated
    elsewhere.
    """
    try:
        if req.method != 'GET':
            return HttpResponse(status = 405)
        if not (id and pwd):
            return HttpResponse(status= 400)
        user=User.objects.get(username=id)
        user.set_password(pwd)
        user.save()
        return HttpResponse(status=200)
    except ObjectDoesNotExist :
        return HttpResponse(status=204)
    except Exception:
        return HttpResponse(status=500)
@csrf_exempt
def logout(req):
    """Invalidate the caller's session token (both token maps)."""
    try:
        # KeyError (missing/unknown Authorization header) -> 404.
        del logins[keys_dict[req.headers['Authorization']].username]
        del keys_dict[req.headers['Authorization']]
        return HttpResponse(status=200)
    except KeyError:
        return HttpResponse(status=404)
@csrf_exempt
def profile_picture_upload(req):
    """Replace the caller's profile picture; returns the new image URL."""
    try:
        user = keys_dict[req.headers['Authorization']]
        profile = Profile.objects.get(user=user)
        # Delete the old file first so storage does not accumulate orphans.
        profile.profilePicture.delete()
        profile.profilePicture.save(user.username+'_'+str(datetime.datetime.now())+'_'+req.FILES['my_photo'].name,req.FILES['my_photo'])
        return JsonResponse({'image':ProfileSerializer(profile).data['profilePicture']},status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e),e)
        return HttpResponse(status=500)
@csrf_exempt
def channel_list(req):
    """Return the channels the authenticated caller belongs to."""
    try:
        if req.method != 'GET':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        profile = Profile.objects.get(user=user)
        data = [{'title': ch.channel_name} for ch in profile.channels.all()]
        return JsonResponse({'data': data}, status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e), e)
        return HttpResponse(status=500)
@csrf_exempt
def invitation_list(req):
    """List the caller's pending channel invitations."""
    try:
        if req.method != 'GET':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        profile = Profile.objects.get(user=user)
        data = [{'title': ch.channel_name, 'description': ch.description}
                for ch in profile.invitations.all()]
        return JsonResponse({'data': data}, status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e), e)
        return HttpResponse(status=500)
@csrf_exempt
def process_invitation(req):
    """Resolve a pending invitation: always removed; joined only if
    POST 'accepted' == 'y'."""
    try:
        if req.method != 'POST':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        profile = Profile.objects.get(user = user)
        channel = Channel.objects.get(channel_name=req.POST.get('channel'))
        profile.invitations.remove(channel)
        if req.POST.get('accepted') == 'y':
            profile.channels.add(channel)
        return HttpResponse(status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e),e)
        return HttpResponse(status=500)
@csrf_exempt
def add_post(req):
    """Create a post in one of the caller's channels.

    Optional photo/file attachments are saved *after* the initial save()
    because the FileFields need the row's primary key.
    """
    try:
        if req.method != 'POST':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        profile = Profile.objects.get(user=user)
        # Lookup through profile.channels so users can only post where
        # they are members.
        channel = profile.channels.get(channel_name = req.POST.get('channelName'))
        post = Post()
        post.posted_in = channel
        post.posted_by = user
        post.title = req.POST.get('title')
        post.description = req.POST.get('description')
        post.save()
        if 'my_photo' in req.FILES:
            post.image.save(user.username+'_'+str(datetime.datetime.now())+'_'+req.FILES['my_photo'].name,req.FILES['my_photo'])
        if 'my_file' in req.FILES:
            post.files.save(user.username + '_' + str(datetime.datetime.now()) + '_' + req.FILES['my_file'].name,req.FILES['my_file'])
        return HttpResponse(status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e),e)
        return HttpResponse(status=500)
@csrf_exempt
def get_posts(req,channel_name):
    """List every post in a channel, annotated with the author's name."""
    try:
        if req.method != 'GET':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        channel = Channel.objects.get(channel_name=channel_name)
        data = []
        for post in Post.objects.filter(posted_in=channel):
            entry = PostSerializer(post).data
            entry['by'] = UserDetail.objects.get(idno=post.posted_by.username).name
            data.append(entry)
        return JsonResponse({'data': data}, status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e), e)
        return HttpResponse(status=500)
@csrf_exempt
def add_comment(req):
    """Attach a comment (POST 'comment') to the post with POST 'id'."""
    try:
        if req.method != 'POST':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        post = Post.objects.get(id = req.POST.get('id'))
        comment = Comments()
        comment.comment = req.POST.get('comment')
        comment.commented_by = user
        comment.commented_post = post
        comment.save()
        return HttpResponse(status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e),e)
        return HttpResponse(status=500)
@csrf_exempt
def get_comments(req,postid):
    """Return all comments on a post with each commenter's name and avatar."""
    try:
        if req.method != 'GET':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        post = Post.objects.get(id=postid)
        data = []
        for cm in Comments.objects.filter(commented_post=post):
            commenter = Profile.objects.get(user=cm.commented_by)
            data.append({
                'title': UserDetail.objects.get(idno=cm.commented_by.username).name,
                'description': cm.comment,
                'image': ProfileSerializer(commenter).data['profilePicture'],
            })
        return JsonResponse({'data': data}, status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e), e)
        return HttpResponse(status=500)
@csrf_exempt
def add_channel_request(req):
    """Record a request to create a new channel (name/description/type)."""
    try:
        if req.method != 'POST':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        cr = ChannelRequests()
        cr.by = user
        cr.name = req.POST.get('name')
        cr.description = req.POST.get('description')
        cr.type = req.POST.get('type')
        cr.save()
        return HttpResponse(status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e),e)
        return HttpResponse(status=500)
@csrf_exempt
def add_complaint(req):
    """File a report against a channel ('name') or person ('idp')."""
    try:
        if req.method != 'POST':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        cr = Report()
        cr.reported_by = user
        cr.channel = req.POST.get('name')
        cr.person = req.POST.get('idp')
        cr.complaint = req.POST.get('issue')
        cr.save()
        return HttpResponse(status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e),e)
        return HttpResponse(status=500)
@csrf_exempt
def add_people(req):
    """Invite a list of users (POST 'users') to a channel and notify them.

    Addresses are collected into a set so each mailbox gets one email even
    if a user appears twice in the list.
    """
    try:
        if req.method != 'POST':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        users_list = req.POST.getlist('users')
        channel = Channel.objects.get(channel_name = req.POST.get('channel'))
        email_set = set()
        for x in users_list:
            temp_user = User.objects.get(username = x)
            temp_profile = Profile.objects.get(user = temp_user)
            temp_profile.invitations.add(channel)
            email_set.add(temp_profile.email)
        email(list(email_set), 'you are invited to join ' + channel.channel_name, 'Invitation')
        return HttpResponse(status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e),e)
        return HttpResponse(status=500)
@csrf_exempt
def get_people(req,type,dept,year,sec):
    """List faculty (type == 0) or students of dept/year/sec as
    checkbox-ready rows for the invite dialog.

    NOTE: the `type` parameter (name kept for URLconf compatibility)
    shadows the builtin, so the builtin `type()` must not be used here.
    """
    try:
        if req.method != 'GET':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        if type == 0:
            temp = UserDetail.objects.filter(type = 'F',dept = dept)
        else:
            temp = UserDetail.objects.filter(type = 'S',dept = dept,year = year,sec = sec)
        data = []
        for x in temp:
            obj = {
                'name' : x.name,
                'id':x.idno,
                'checked':False,
            }
            data.append(obj)
        return JsonResponse({'data':data},status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        # BUG FIX: `print(type(e), e)` called the shadowing `type` argument
        # (an int), raising TypeError inside the error handler itself.
        print(e.__class__, e)
        return HttpResponse(status=500)
@csrf_exempt
def delete_channel(req,channel):
    """Delete a channel by name.

    The three built-in channels are protected: deleting them raises and is
    reported as a 500 (not a 403) by the generic handler below.
    """
    try:
        if req.method != 'GET':
            return HttpResponse(status=405)
        if channel in {'College','Library','Placement'}:
            raise Exception
        temp = Channel.objects.get(channel_name= channel)
        temp.delete()
        return HttpResponse(status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e),e)
        return HttpResponse(status=500)
@csrf_exempt
def change_pwd(req):
    """Change the authenticated caller's password to POST 'pwd'.

    The existing session token stays valid after the change.
    """
    try:
        if req.method != 'POST':
            return HttpResponse(status=405)
        user = keys_dict[req.headers['Authorization']]
        pwd = req.POST.get('pwd')
        user.set_password(pwd)
        user.save()
        return HttpResponse(status=200)
    except KeyError:
        return HttpResponse(status=404)
    except Exception as e:
        print(type(e),e)
        return HttpResponse(status=500)
|
16,218 | 3f88a8b0692d939e2c7a715b5a65d53593dd9c83 |
class Solution:
    def numDecodings(self, s: str) -> int:
        """
        https://leetcode.com/problems/decode-ways-ii/
        Count decodings of a digit/'*' string under the A=1..Z=26 mapping,
        where '*' stands for any digit 1-9.  Result is taken mod 10**9 + 7.

        Standard two-variable DP: `curr` counts decodings of s[:i+1],
        `prev` of s[:i]; each step combines a single-char decode of s[i]
        with a two-char decode of s[i-1:i+1].

        Examples
        --------
        >>> sol = Solution()
        >>> sol.numDecodings('*')
        9
        >>> sol.numDecodings('1*')
        18

        References
        ---------
        .. [1] https://leetcode.com/problems/decode-ways-ii/discuss/105262/Python-6-lines-DP-solution
        """
        MOD = 10 ** 9 + 7
        # Ways to decode one character on its own.
        single = {'0': 0, '*': 9}
        for d in '123456789':
            single[d] = 1
        # Ways to decode a two-character window as one letter (10..26).
        double = {str(v): 1 for v in range(10, 27)}
        for d in range(10):
            # '*d': '*' can be 1 or 2, but '2d' only works for d <= 6.
            double['*' + str(d)] = 2 if d <= 6 else 1
        double['1*'] = 9
        double['2*'] = 6
        double['**'] = 15
        prev, curr = 1, single.get(s[:1], 0)
        for i in range(1, len(s)):
            combined = single.get(s[i], 0) * curr + double.get(s[i - 1:i + 1], 0) * prev
            prev, curr = curr, combined % MOD
        return curr
|
16,219 | c28cba14dc1355f203b609bcae7a4bd291fdedad | execfile('')
from boto.s3.connection import S3Connection
s3 = S3Connection()
bucket = s3.get_bucket('sgcs15spproj6tokyo')
from boto.dynamodb2.table import Table
ngram = Table('prjsixresult') # using table name
# Copy every MapReduce output partition (keys containing 'output' and
# 'part-') from S3 into the DynamoDB table, one tab-separated row at a time.
for s3object in bucket.list():
    if 'output' in s3object.key and 'part-' in s3object.key: # select input file
        content = s3object.get_contents_as_string()
        for each_line in content.split('\n'): # read content
            if each_line.strip() != "": # do not read empty content
                words, counts = each_line.split('\t')
                ngram.put_item(data={'words':words, 'counts': counts}) # put the data
|
16,220 | 9240bd828ae23621d704952283e4f35148e21815 | from django.conf.urls.defaults import *
from django.views.generic import ListView, DetailView
from stet.models import Article
# Old-style (pre-Django 1.8) patterns() URLconf: list and detail views for
# Article, plus the contrib comments app.
urlpatterns = patterns('',
    (r'^$', ListView.as_view(
        model=Article,
    )),
    (r'^(?P<pk>\d+)$', DetailView.as_view(
        model=Article,
    )),
    (r'^comments/', include('django.contrib.comments.urls')),
)
|
def f(a):
    """Drop the first character of `a` (i.e. return a[1:])."""
    return a[1:]
def g(a, b):
    """Left-rotate `a` by one position; b == 0 means 'leave unchanged'.

    Note: any non-zero `b` rotates by exactly one, regardless of magnitude.
    """
    if b == 0:
        return a
    return a[1:] + a[0]
s = "0123456789"
# 7 iterations x 3 single rotations each = 21 left rotations in total.
for _ in range(7):
    for _ in range(2):
        s = g(s, 1)
    s = g(s, 9)
print(s)
|
16,222 | 82b78b238c59e785018d03db5b02ee511c4b9e40 | from quiz.models import Quiz
from rest_framework import serializers
from .create_quiz import QuestionSerializer
class QuizRetrieveSerializer(serializers.ModelSerializer):
    """Serialize a Quiz with its nested questions (reverse FK
    `question_set`, exposed under the key 'questions')."""
    questions = QuestionSerializer(many=True, source='question_set')
    class Meta:
        model = Quiz
        fields = ('id', 'questions')
|
16,223 | 6eb27d2be931c57bf459d26e1d01cb94db065892 | # import the necessary packages
from __future__ import print_function
import argparse
import app_logic as logic
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--display", type=int, default=-1,
    help="Whether or not frames should be displayed")
args = vars(ap.parse_args())
# Only launch the display loop when -d/--display is explicitly positive
# (default -1 keeps it off).
if args["display"] > 0:
    logic.display()
|
16,224 | 4a9ea518153ff2c678c62da0e60893ec2df0b723 | #!/usr/bin/python3
from flask import Flask
from datetime import datetime, timedelta
from threading import Thread
from os import system
from random import randint
last_load = datetime.now()
def grim_reaper():
    '''
    If site not loaded for 10s reboot host
    reboot can be prevented by calling life_line()
    '''
    from time import sleep  # local import: module top only pulls in datetime
    s10 = timedelta(seconds=10)
    while True:
        if (last_load + s10) < datetime.now():
            system("reboot")
        # BUG FIX: the original loop busy-waited at 100% CPU; a 1-second
        # poll interval is ample resolution for a 10-second deadline.
        sleep(1)
app = Flask(__name__)
@app.route("/")
def life_line():
    '''
    Save the site from grim_reaper() for 10s
    '''
    # Each page load pushes the reboot deadline 10 seconds into the future.
    global last_load
    last_load = datetime.now()
    return("thank you for saving me for another 10 sec")
if __name__ == "__main__":
    # Start the watchdog thread, then serve on a random high port.
    # NOTE(review): the thread is non-daemon, so it keeps the process alive
    # even after app.run() returns.
    t = Thread(target=grim_reaper)
    t.start()
    app.run(host="0.0.0.0", port=randint(1024,50000), debug=False)
16,225 | 81d0448232a293483e6a2966b8a84e05e0be8db0 | import discord
from discord.ext import commands
import json
class Ping:
    """Trivial ping/pong command cog (per-user 30 s cooldown, guild-only)."""
    conf = {}
    def __init__(self, bot, config):
        self.bot = bot
        self.config = config
        # NOTE(review): also stashes the config in a module-level global;
        # self.config already holds it.
        global conf
        conf = config
    @commands.command(pass_context=True)
    @commands.guild_only()
    @commands.cooldown(1, 30, commands.BucketType.user)
    async def ping(self, ctx):
        return await ctx.send("Pong !!")
    @commands.command(pass_context=True)
    @commands.guild_only()
    @commands.cooldown(1, 30, commands.BucketType.user)
    async def pong(self, ctx):
        return await ctx.send("Ping")
def setup(bot):
    # discord.py extension entry point: register the cog on load.
    bot.add_cog(Ping(bot, bot.config))
|
16,226 | 8eea6b90d7f523ab13fb6b3e64049caba87205e9 | from django.contrib import admin
from cinema.models import Hall
from cinema.models import Movie
from cinema.models import Session
from cinema.models import TicketStatus
from cinema.models import Ticket
# Expose all cinema models in the Django admin with the default ModelAdmin.
admin.site.register(Hall)
admin.site.register(Movie)
admin.site.register(Session)
admin.site.register(TicketStatus)
admin.site.register(Ticket)
|
16,227 | ada2d6893864df2465083eb31616f240061a7590 | # -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy import Column, Integer, String, ForeignKey, Table
from sqlalchemy import Boolean, DateTime, Date
from sqlalchemy import Table, UniqueConstraint
from sqlalchemy.orm import relation
from sqlalchemy.schema import MetaData
from sqlalchemy.ext.declarative import declarative_base
from mage.sqla import *
from insanities.ext.auth import encrypt_password, check_password
# Shared SQLAlchemy MetaData and the declarative base all models extend.
metadata = MetaData()
MapedObject = declarative_base(metadata=metadata, name='MapedObject')
class User(MapedObject):
    """Account record with hashed password and project memberships."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False, default='')
    login = Column(String(255), nullable=False, unique=True)
    password = Column(String(255), nullable=False)  # stored encrypted, see set_password
    email = Column(String(255), nullable=False, unique=True)
    projects = relation('Project', secondary='users_projects', backref='users')
    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.login)
    def __repr__(self):
        # NOTE(review): Python-2 style; returns UTF-8 bytes, not a str.
        return self.email.encode('utf-8')
    def set_password(self, password):
        # Never store the plaintext; hash via insanities' helper.
        self.password = encrypt_password(password)
    def check_password(self, password):
        return check_password(password, self.password)
    @classmethod
    def by_credential(cls, env, login, password):
        # Returns (user_id, None) on success or (None, error message).
        user = env.db.get(cls, login=login)
        if user and check_password(password, user.password):
            return user.id, None
        return None, u'Неправильный логин или пароль'
    @classmethod
    def by_id(cls, env, user_id):
        return env.db.get(cls, id=user_id)
class Project(MapedObject):
    """A project that users join (M2M via users_projects) and file issues in."""
    __tablename__ = 'project'
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False, unique=True)
    description = Column(String(1000))
# Association table for the User <-> Project many-to-many relationship.
users_projects = Table('users_projects', metadata,
    Column('user_id', ForeignKey(User.id), nullable=False),
    Column('proj_id', ForeignKey(Project.id), nullable=False),
    UniqueConstraint('user_id', 'proj_id')
    )
class Issue(MapedObject):
    """A tracked task in a project, with separate author and executor users."""
    __tablename__ = 'issue'
    id = Column(Integer, primary_key=True)
    done = Column(Boolean, nullable=False, default=False)
    created = Column(DateTime, nullable=False, default=datetime.now)
    deadline = Column(Date)
    title = Column(String(500), nullable=False)
    proj_id = Column(Integer, ForeignKey(Project.id), nullable=False)
    proj = relation(Project, backref='issues')
    # Two FKs to User require explicit primaryjoins to disambiguate.
    author_id = Column(Integer, ForeignKey(User.id), nullable=False)
    author = relation(User, primaryjoin=User.id==author_id)
    executor_id = Column(Integer, ForeignKey(User.id), nullable=False)
    executor = relation(User, primaryjoin=User.id==executor_id)
class Comment(MapedObject):
    """A comment on an issue; raw source text plus pre-rendered HTML."""
    __tablename__ = 'comment'
    id = Column(Integer, primary_key=True)
    issue_id = Column(Integer, ForeignKey(Issue.id), nullable=False)
    issue = relation(Issue, backref='comments')
    raw = Column(String(1000), nullable=False)
    html = Column(String(1000), nullable=False)
    author_id = Column(Integer, ForeignKey(User.id), nullable=False)
    author = relation(User)
|
16,228 | 740dd474cf8405d11f9f95f608c2f329d0c04bd7 | import os
import urllib
import re
import random
#import md5
import hashlib
from amilib.useful import Singleton
from amilib.amiweb.amiweb import session
from amilib.amiweb.amidb import IntegrityError
from amilib.template import render
from amilib.amiweb.amigration import AMigrationControl, Not_Created
from amilib import json
from skeletonz.modules.plugin import GenericPlugin
from skeletonz.mylib import html_helpers, converters
from skeletonz.server import getConfig, getRSSManager, plugin_configurator, getRootController, getFormatManager, getMailManager
from skeletonz.model import CMSModel
from model import BlogModel, BlogLabelModel, BlogCommentModel
import model
from skeletonz.Site import adminPermission, editPermission
PLUGINS_FOR_EXPORT = ['Blog']
#--- Constants ----------------------------------------------
# URL templates; each '%s' placeholder is filled with a post/label/blog/
# comment id by the render helpers below.
GENERIC_POST_LINK = 'blog/viewEntry/%s'
GENERIC_LABEL_LINK = 'blog/viewLabelPosts/%s'
GENERIC_ARCHIVE_LINK = 'blog/viewArchive/%s-%s'
GENERIC_DELETE_LINK = 'blog/deleteComment?id=%s'
class Blog(GenericPlugin):
    """skeletonz CMS plugin embedding a blog via the 'blog*' template
    handlers registered with the format manager."""
    NAME = "Blog plugin"
    DESCRIPTION = "Adds a blog...!"
    # Template-language syntax the format manager advertises for this plugin.
    SYNTAX = [
        {'handler': 'blog',
         'required_arguments': {'ident': 'The identification'},
         'optional_arguments': {'with_blog_info': {'type': 'option', 'help': 'Shows labels and archive'}}
        },
        {'handler': 'blog_labels',
         'required_arguments': {'ident': 'The identification'},
         'optional_arguments': {}
        },
        {'handler': 'blog_archive',
         'required_arguments': {'ident': 'The identification'},
         'optional_arguments': {}
        },
        {'handler': 'blog_rsslink',
         'required_arguments': {'ident': 'The identification'},
         'optional_arguments': {}
        },
    ]
    def __init__(self):
        # Schema migrations for the plugin's tables, oldest to newest.
        self.mi_control = AMigrationControl("Blog",
            [model.Blog_Initial, model.Blog_MoveBlogOut,
             model.Blog_AddComments, model.Blog_RewriteLabels,
             model.Blog_MoveHostPage], plugin_configurator)
        format_man = getFormatManager()
        format_man.registerSLPlugin('blog', self.handleBlog)
        format_man.registerSLPlugin('blog_labels', self.handleBlogLabels)
        format_man.registerSLPlugin('blog_archive', self.handleBlogArchive)
        format_man.registerSLPlugin('blog_rsslink', self.handleBlogRssLink)
    def createStructure(self):
        # Create/upgrade the plugin's DB schema.
        self.mi_control.upgradeToLatest()
    def dropStructure(self):
        self.mi_control.downgradeTo(Not_Created)
    def addToController(self, rc):
        # Mount static assets and the BlogController at /blog and /plugin/blog.
        path = "%s/site_plugins/blog/static" % os.getcwd()
        rc.addStaticPath("/static_plugin/blog/", path)
        rc.root_obj.blog = BlogController
        rc.root_obj.plugin.blog = BlogController
    def _addTemplate(self, template):
        # NOTE(review): both reads are currently unused because the append
        # calls below are commented out.
        s = open("site_plugins/blog/static/style.css").read()
        #template.getHeader().appendStyleData('<style type="text/css">%s</style>' % s)
        s = open("site_plugins/blog/static/script_public.js").read()
        #template.getHeader().appendScriptData('<script type="text/javascript">%s</script>' % s)
    def addToSiteTemplate(self, template, on_init):
        self._addTemplate(template)
    def addToSiteEditTemplate(self, template, on_init):
        # Edit mode additionally gets the admin-side script.
        s = open("site_plugins/blog/static/script.js").read()
        template.getHeader().appendStaticScriptData(s)
        self._addTemplate(template)
    #--- Handlers ----------------------------------------------
    # Each handler returns (replace_flag, html) or None when 'ident' is missing.
    def handleBlog(self, args, edit_mode, page_id):
        ident = args.get('blog')
        if ident:
            result = []
            if edit_mode:
                result.append(self.renderManage(ident))
            if args.get('with_blog_info', None) != None:
                result.append(self.renderBlogInfo(ident, edit_mode))
            result.append(self.renderContent(ident, edit_mode))
            return True, "".join(result)
    def handleBlogLabels(self, args, edit_mode, page_id):
        ident = args.get('blog_labels')
        if ident:
            return True, BlogInfo().getLabelList(ident, '')
    def handleBlogArchive(self, args, edit_mode, page_id):
        ident = args.get('blog_archive')
        if ident:
            return True, BlogInfo().getArchiveList(ident, '')
    def handleBlogRssLink(self, args, edit_mode, page_id):
        ident = args.get('blog_rsslink')
        if ident:
            return False, BlogInfo().getRSSLink(ident)
    #--- Renders ----------------------------------------------
    def renderManage(self, ident):
        # Edit-mode toolbar: add post / manage labels / manage comments.
        btn_add_post = html_helpers.createActionLink("Add post", "static_plugin/blog/add.png", "return Blog.viewAdd('%s', this);" % ident)
        btn_manage_label = html_helpers.createActionLink("Labels", "static_plugin/blog/label_manage.png", "return Blog.viewLabelManager('%s', this);" % ident)
        btn_manage_comments = html_helpers.createActionLink("Comments", "static_plugin/blog/comment_manage.png", "return Blog.viewCommentManager('%s', this);" % ident)
        ns = {
            'btn_add_post': btn_add_post,
            'btn_manage_label': btn_manage_label,
            'btn_manage_comments': btn_manage_comments,
            'ident': ident
        }
        return render("site_plugins/blog/view/manage.tmpl", ns)
    def renderBlogInfo(self, ident, edit_mode):
        return BlogInfo().render(ident, edit_mode)
    def renderContent(self, ident, edit_mode):
        # Front page: the 15 most recent posts.
        posts = BlogModel.getAllPosts(ident, 15)
        ns = {
            'ident': ident,
            'posts': posts,
            'edit_mode': edit_mode,
            'renderPost': renderPost
        }
        return render("site_plugins/blog/view/items.tmpl", ns)
#--- Render for blog info ----------------------------------------------
class BlogInfo(Singleton):
    """Renders the blog sidebar: label list, archive years and RSS link.

    Each get* method returns None (or '') when the blog ident is unknown.
    """
    def getLabelList(self, ident, cls):
        blog = BlogModel.getBlogByIdent(ident)
        if blog:
            # The label id placeholder inside GENERIC_LABEL_LINK is filled
            # in the second formatting pass below.
            link = '<a href="%s">%s</a>' % (GENERIC_LABEL_LINK, '%s')
            labels = ['<li>%s</li>' % (link % (l.id, l.name)) for l in blog.getLabels()]
            return '<ul class="%s">\n%s\n</ul>' % (cls, '\n'.join(labels))
        else:
            return None
    def getArchiveList(self, ident, cls):
        blog = BlogModel.getBlogByIdent(ident)
        if blog:
            link = '<a href="%s">%s</a>' % (GENERIC_ARCHIVE_LINK % (blog.id, '%s'), '%s')
            years = ['<li>Posts in %s</li>' % (link % (y, y)) for y in blog.getArchiveYears()]
            return '<ul class="%s">\n%s\n</ul>' % (cls, '\n'.join(years))
        else:
            return None
    def getRSSLink(self, ident):
        rss_link = "%s/rss/blog_%s.xml" % (getConfig().BASE_URL, converters.makeLinkAble(ident))
        return """<a href="%s"><img src="static_core/images/rss.gif" alt="RSS blog feed" /></a>""" %\
               (rss_link)
    def render(self, ident, edit_mode):
        labels = self.getLabelList(ident, 'CMS_BlogInfo_Labels')
        archive = self.getArchiveList(ident, 'CMS_BlogInfo_Archive')
        rss_link = self.getRSSLink(ident)
        if labels:
            return """
            <div class="CMS_BlogInfo">
                <div class="head">Blog info</div>
                <div class="caption">Labels:</div>
                <div> %s </div>
                <div class="caption">Archive:</div>
                <div> %s </div>
                <div class="caption"> %s </div>
            </div>
            """ % (labels, archive, rss_link)
        else:
            return ''
#--- Renders for comment and post ----------------------------------------------
def renderPostComment(post_id):
    """Render the new-comment form for the given post id."""
    return render("site_plugins/blog/view/post_comment.tmpl", {'post_id': post_id})
def renderComment(comment, is_last, edit_mode):
    """Render one blog comment; in edit mode, attach edit/delete actions.

    Comment bodies are user-supplied, so script tags are neutralized before
    the CMS formats them.
    """
    def postAuthor(author, website):
        # Link the author's name when they supplied a website.
        if website:
            return '<a href="%s" target="_blank">%s</a>' % (website, author)
        return author
    if edit_mode:
        # NOTE(review): the '%s' in these JS handlers is never interpolated
        # here; presumably the template substitutes the comment id -- verify.
        btn_edit_comment = html_helpers.createActionLink("Edit comment", "static_core/images/edit.png", "return Blog.editComment('%s', this);", tooltip_inner=True)
        btn_del_comment = html_helpers.createActionLink("Delete comment", "static_core/images/trash.png", "return Blog.deleteComment('%s', this);", tooltip_inner=True, confirm='Are you sure you want to delete?')
    else:
        btn_del_comment = ''
        btn_edit_comment = ''
    def amiformat(cnt, var):
        # BUG FIX: the original substitution r'<\1>' rewrote <script> /
        # </script> to themselves (a no-op), leaving the XSS filter
        # ineffective.  Escape the angle brackets instead.
        script = re.compile('<(/?script)>', re.IGNORECASE)
        cnt = script.sub(r'&lt;\1&gt;', cnt)
        return getFormatManager().noPluginFormat(cnt, var)
    ns = {'comment': comment,
          'postAuthor': postAuthor,
          'btn_edit_comment': btn_edit_comment,
          'btn_del_comment': btn_del_comment,
          'amiformat': amiformat,
          'is_last': is_last,
          'edit_mode': edit_mode}
    return render("site_plugins/blog/view/comment.tmpl", ns)
def renderPost(post, edit_mode, is_permanent=False):
    """Render one blog post (with its comments and comment form).

    Unpublished posts render as 'No post found.' unless in edit mode.
    A site template may override rendering via plugin_blog_renderPost.
    """
    page_obj = CMSModel.Pages.getPageById(post.getHostPage())
    def cmsRender(text):
        return getFormatManager().htmlFormat(text, edit_mode, False, page_obj)
    #Buttons
    if edit_mode:
        btn_del = html_helpers.createActionLink("Delete post", "static_core/images/trash.png", "return Blog.del(this, '%s');" % post.getIdent(), tooltip_inner=True, confirm='Are you sure you want to delete?')
        btn_edit = html_helpers.createActionLink("Edit post", "static_core/images/edit.png", "return Blog.viewEdit('%s', this);" % post.getIdent(), tooltip_inner=True)
        btn_published = html_helpers.createActionLink("Published", "static_core/images/%(image)s", "return Blog.publish(this, '%s');" % post.getIdent(), tooltip_inner=True)
        # Toggle icon reflects current published state (old and/or idiom).
        d = post.published and {'image': 'on.png'} or {'image': 'off.png'}
        btn_published = btn_published % d
    else:
        btn_del = ''
        btn_edit = ''
        btn_published = ''
    ns = {
        'post': post,
        'btn_delete': btn_del,
        'btn_edit': btn_edit,
        'btn_published': btn_published,
        'cmsRender': cmsRender,
        'comments': BlogCommentModel.getAll(post.id),
        'edit_mode': edit_mode,
        'is_permanent': is_permanent,
        'post_comment_div': renderPostComment(post.id),
        'GENERIC_POST_LINK': GENERIC_POST_LINK,
        'GENERIC_LABEL_LINK': GENERIC_LABEL_LINK,
        'renderComment': renderComment
    }
    if post.published or edit_mode:
        #Check and see if the template has a inject method
        site_obj = getRootController().root_obj
        if getattr(site_obj.template, 'plugin_blog_renderPost', False):
            return site_obj.template.plugin_blog_renderPost(ns)
        else:
            return render("site_plugins/blog/view/item.tmpl", ns)
    else:
        return 'No post found.'
#--- Controller ----------------------------------------------
from skeletonz import Site
from skeletonz.modules.template import PluginTemplate
from amilib.amiweb import amiweb
from skeletonz.mylib.amicache import AmiCache
class LimitedDictionary(dict):
    """A dict keeping at most `size` entries; oldest-inserted key evicted first.

    BUG FIX: `key_list` used to be a *class* attribute, so every instance
    shared one insertion log -- stale keys from other instances caused
    spurious evictions and KeyErrors.  It is now per-instance state, and
    re-setting an existing key refreshes its position instead of logging a
    duplicate (duplicates made eviction try to delete the same key twice).
    """
    size = 50
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self.key_list = list(self)
    def __setitem__(self, name, value):
        if name in self.key_list:
            # Refresh insertion position rather than recording a duplicate.
            self.key_list.remove(name)
        self.key_list.append(name)
        self._clean_up()
        dict.__setitem__(self, name, value)
    def _clean_up(self):
        # Evict oldest keys until we are back within the size budget.
        while len(self.key_list) > self.size:
            oldest = self.key_list.pop(0)
            if oldest in self:
                dict.__delitem__(self, oldest)
class BlogController:
    def __init__(self):
        template = PluginTemplate("blog")
        self.template = template
        self.obj_blog = Blog()
        # Bounded cache (LimitedDictionary) of recently served URLs.
        self.url_cache = LimitedDictionary()
    @amiweb.expose
    def previewComment(self):
        """Serve the comment-preview page."""
        ns = {
            'template': self.template
        }
        return render("site_plugins/blog/view/preview_post.tmpl", ns)
    @amiweb.expose
    def renderComment(self, content):
        """AJAX endpoint: format raw comment text to HTML for live preview."""
        return getFormatManager().noPluginFormat(content, True)
    @amiweb.expose
    @editPermission
    def viewAdd(self, ident):
        """Serve an empty add-post form for blog `ident`."""
        ns = {'template': self.template,
              'ident': ident,
              'title': '',
              'content': '',
              'hasLabel': lambda id: False,
              'action': 'add',
              'submit_value': 'Add post',
              'labels': BlogLabelModel.getAllByIdent(ident)
        }
        return render("site_plugins/blog/view/manage_post.tmpl", ns)
    @amiweb.expose
    @amiweb.customHandler
    def viewEntry(self, path_info, formvars):
        """Permalink page for a single post.

        The post id comes from the last path segment, or from the query
        string when one is present.  Unknown ids get a 'Not found' page.
        """
        post_id = path_info.split("/")[-1]
        if amiweb.request()['QUERY_STRING']:
            post_id = amiweb.request()['QUERY_STRING'].split('=')[-1]
        post = BlogModel.getPostById(post_id)
        edit_mode = False
        if Site.Users.isLoggedIn():
            edit_mode = True
        kw = {}
        if post:
            blog = BlogModel.getBlogById(post.blog_id)
            page = CMSModel.Pages.getPageById(blog.host_page)
            content = '<div id="Blog_%s" post_id="%s">%s</div>' %\
                      (post.getIdent(), post.id, renderPost(post, edit_mode, True))
            kw['content'] = content
            kw['title'] = post.title
            kw['host_page'] = blog.host_page
            # Cache key for this permalink (see _expireCache).
            kw['id'] = "blogpost_%s" % post_id
            kw['hidden'] = page.hidden
            kw['premission_type'] = page.premission_type
        else:
            kw['title'] = 'Not found'
            kw['content'] = 'Not found'
            kw['host_page'] = 1
            kw['id'] = None
            kw['hidden'] = False
            kw['premission_type'] = 'Everyone'
        kw['edit_mode'] = edit_mode
        page_obj = Site.PageDeco(kw)
        return page_obj.servePage()
@amiweb.expose
@editPermission
def viewEdit(self, id):
post = BlogModel.getPostById(id)
ns = {'template': self.template,
'ident': id,
'title': post.title,
'content': post.content,
'hasLabel': post.hasLabel,
'action': 'update',
'submit_value': 'Save changes',
'labels': BlogLabelModel.getAllByIdent(post.getIdent())
}
return render("site_plugins/blog/view/manage_post.tmpl", ns)
@amiweb.expose
@editPermission
def getHTMLData(self, ident):
return self.obj_blog.renderContent(ident, True)
@amiweb.expose
@editPermission
def getPostHTMLData(self, id, is_permanent):
if is_permanent == 'True':
is_permanent = True
else:
is_permanent = False
post = BlogModel.getPostById(id)
return renderPost(post, True, is_permanent)
@amiweb.expose
@editPermission
def add(self, ident, title, content, labels):
AmiCache.expireCurrentPage()
BlogModel.add(ident, title, content, labels)
return ident
@amiweb.expose
@editPermission
def update(self, ident, title, content, labels):
id = ident
self._expireCache(id)
BlogModel.update(id, title, content, labels)
p = BlogModel.getPostById(id)
return '%s' % p.id
@amiweb.expose
@editPermission
def delete(self, id):
self._expireCache(id)
BlogModel.delete(id)
return 'ok'
@amiweb.expose
@editPermission
def flipPublish(self, id):
self._expireCache(id)
return str(BlogModel.flipPublish(id))
def _expireCache(self, post_id):
post = BlogModel.getPostById(post_id)
blog = post.getBlog()
AmiCache.expireCache(post.getHostPage())
AmiCache.expireCache('blogpost_%s' % post_id)
for l in post.getLabels():
AmiCache.expireCache('bloglabel_%s' % l.id)
for y in blog.getArchiveYears():
AmiCache.expireCache('blogarchive_%s-%s' % (blog.id, y))
##
# Labels
#
@amiweb.expose
@amiweb.customHandler
def viewLabelPosts(self, path_info, formvars):
label_id = path_info.split('/')[-1]
kw = {}
label = BlogLabelModel.getById(label_id)
posts = label.getPosts()
blog = BlogModel.getBlogById(label.blog_id)
title = '%s posts posted under label "%s"' % (len(posts), label.name)
ns = {
'posts': posts,
'amiformat': getFormatManager().noPluginFormat,
'title': title,
'GENERIC_POST_LINK': GENERIC_POST_LINK
}
kw['content'] = render('site_plugins/blog/view/viewPosts.tmpl', ns)
kw['title'] = title
kw['host_page'] = blog.host_page
kw['id'] = "bloglabel_%s" % label_id
kw['hidden'] = False
kw['edit_mode'] = False
kw['premission_type'] = 'Everyone'
page_obj = Site.PageDeco(kw)
return page_obj.servePage()
@amiweb.expose
@editPermission
def viewLabelManager(self, ident):
ns = {
'ident': ident,
'template': self.template,
'labels': BlogLabelModel.getAllByIdent(ident)
}
return render("site_plugins/blog/view/label_manager.tmpl", ns)
@amiweb.expose
@editPermission
def viewCommentManager(self, ident):
ns = {
'ident': ident,
'template': self.template,
'comments': BlogCommentModel.getAllByIdent(ident)
}
return render("site_plugins/blog/view/comment_manager.tmpl", ns)
@amiweb.expose
@editPermission
def labelAdd(self, ident, name):
blog = BlogModel.getBlogByIdent(ident)
if not blog:
blog = BlogModel.addBlog(ident)
try:
return BlogLabelModel.add(blog.id, name).toJSON()
except IntegrityError:
raise amiweb.AppError('A label is already found with the same name.')
@amiweb.expose
@editPermission
def labelDelete(self, id, ident):
return BlogLabelModel.delete(id)
@amiweb.expose
@editPermission
def labelUpdate(self, id, name):
AmiCache.expireCurrentPage()
try:
return BlogLabelModel.update(id, name).toJSON()
except IntegrityError:
raise amiweb.AppError('A label is already found with the same name.')
#--- Archive ----------------------------------------------
@amiweb.expose
@amiweb.customHandler
def viewArchive(self, path_info, formvars):
blogid_year = path_info.split('/')[-1]
sp = blogid_year.split('-')
blog_id = sp[0]
year = int(sp[1])
kw = {}
blog = BlogModel.getBlogById(blog_id)
posts = blog.getPostsByYear(year)
title = 'Post archive for year %s' % year
ns = {
'posts': posts,
'amiformat': getFormatManager().noPluginFormat,
'title': title,
'GENERIC_POST_LINK': GENERIC_POST_LINK
}
kw['content'] = render('site_plugins/blog/view/viewPosts.tmpl', ns)
kw['title'] = title
kw['host_page'] = blog.host_page
kw['id'] = "blogarchive_%s" % blogid_year
kw['hidden'] = False
kw['edit_mode'] = False
kw['premission_type'] = 'Everyone'
page_obj = Site.PageDeco(kw)
return page_obj.servePage()
#--- Comments ----------------------------------------------
def _expireCommentCache(self, id):
comment = BlogCommentModel.getById(id)
self._expireCache(comment.post_id)
def _getCommentURL(self, post_id, cmnt_id):
url = "%s/%s#%s" % (getConfig().BASE_URL, GENERIC_POST_LINK % post_id, cmnt_id)
return url
def _getDeleteURL(self, cmnt_id):
url = "%s/%s" % (getConfig().BASE_URL, GENERIC_DELETE_LINK % cmnt_id)
return url
@amiweb.expose
def viewComment(self, id):
comment = BlogCommentModel.getById(id)
post = comment.getPost()
url = self._getCommentURL(post.id, id)
raise amiweb.HTTPFound(url)
#--- Captcha ----------------------------------------------
re_all_img = re.compile('<img.+?>')
re_img_src = re.compile('src="(.+?)"')
def _getPictures(self, type, count):
page = random.randint(0, 35)
if type == 'birds':
url = 'http://www.flickr.com/groups/beautifulbirdscom/pool/page%s' % page
elif type == 'cats':
url = 'http://www.flickr.com/photos/tags/kitty/clusters/cat-kitten-cute/page%s' % page
else:
raise Exception('Invalid type arugment, should be "birds" or "cats", you gave %s' % type)
result = []
html = urllib.urlopen(url).read()
img_tags = list(self.re_all_img.finditer(html))
random.shuffle(img_tags)
for img_tag in img_tags:
img_tag = img_tag.group(0)
if 'class="pc_img"' in img_tag:
result.append( self.re_img_src.search(img_tag).group(1) )
if len(result) >= count:
break
return result
@amiweb.expose
def getCaptchaHTML(self):
matches = self._getPictures('birds', 5)
cats = self._getPictures('cats', 1)
session()['captcha_current_url'] = cats[0]
matches.extend(cats)
random.shuffle(matches)
form_html = []
form_html.append('<form><table>')
li_item = '<td><input type="radio" name="c_match" value="%s" id="%s" />'\
'<label for="%s"><img src="%s" /></label></td>'
for i in range(0, len(matches), 2):
img_1 = matches[i]
img_2 = matches[i+1]
form_html.append('<tr>')
id = 'item_%s' % i
id2 = 'item_%s' % (i+1)
form_html.append(li_item % (img_1, id, id, img_1))
form_html.append(li_item % (img_2, id2, id2, img_2))
form_html.append('</tr>')
form_html.append('</table></form>')
return ' '.join(form_html)
@amiweb.expose
def validateCaptcha(self, url, content):
cur_url = session().get('captcha_current_url')
if url == cur_url:
m = md5.new()
m.update(content)
session()['captcha_ok_for'] = m.hexdigest()
return 'ok'
#Only one guess
if cur_url:
del session()['captcha_current_url']
return 'error'
@amiweb.expose
def showCaptcha(self):
ns = {
'template': self.template
}
return render("site_plugins/blog/view/show_captcha.tmpl", ns)
urls = re.compile('https?://[^\s?]+')
def checkURLSpam(self, author, content):
for m in self.urls.finditer(content):
url = m.group(0)
url_key = 'af_url_%s_%s' % (hash(author), hash(url))
count = self.url_cache.get(url_key, 0)
if count >= 2:
raise SpamComment()
if url_key not in self.url_cache:
self.url_cache[url_key] = 0
self.url_cache[url_key] += 1
#--- Post and edit ----------------------------------------------
@amiweb.expose
def addComment(self, author, email, website, content, post_id):
#Check captcha
m = md5.new()
m.update(content)
hash_val = m.hexdigest()
if hash_val != session().get('captcha_ok_for'):
raise Exception('Wrong captcha check')
else:
del session()['captcha_ok_for']
if website != '' and website.find("http://") != 0:
website = "http://%s" % (website)
#Strip scripts, style and meta
content = re.sub('<(script|meta|style)(.|\s)*?>(.|\s)*?</(script|meta|style)>', '', content)
#Check for same URL's posted
try:
self.checkURLSpam(author, content)
except SpamComment:
return '<p><b style="background-color: #ffffcc; color: red">Your comment was marked as spam.</b>, but will be readded if it isn\'t.</p>'
self._expireCache(post_id)
id = BlogCommentModel.add(author, email, website, content, post_id)
email_data = {
'title': 'An comment has been posted',
'author': author,
'email': email,
'website': website,
'content': content,
'delete_link': self._getDeleteURL(id),
'post_id': self._getCommentURL(post_id, id)
}
#Send a notification email
if hasattr(getConfig(), 'DEFAULT_EMAIL'):
text = """%(title)s
Author: %(author)s
Email: %(email)s
Website: %(website)s
Post link: %(post_id)s
Content:
%(content)s
Delete link: %(delete_link)s
""" % email_data
mail = getConfig().DEFAULT_EMAIL
getMailManager().sendEmail(mail, [mail], '[Skeletonz] %s' % email_data['title'], text)
if id:
return renderComment(BlogCommentModel.getById(id), True, False)
else:
return '<p><b style="background-color: #ffffcc; color: red">Your comment was marked as spam.</b>, but will be readded if it isn\'t.</p>'
@amiweb.expose
@editPermission
def deleteComment(self, id, ident=""):
c = BlogCommentModel.getById(id)
self._expireCommentCache(id)
#BlogCommentModel.delete(id)
return c.post_id
@amiweb.expose
@editPermission
def deleteComments(self, ids, ident=""):
ids = json.read(ids)
if len(ids) > 0:
first_id = ids[0]
c = BlogCommentModel.getById(first_id)
self._expireCommentCache(first_id)
for id in ids:
BlogCommentModel.delete(id)
return 'ok'
@amiweb.expose
@editPermission
def fetchMore(self, last_id, ident=''):
last_id = long(last_id)
list = BlogCommentModel.getAllByIdent(ident, after_id=last_id)
jsons = [ i.toJSON() for i in list ]
return '[%s]' % (','.join(jsons))
@amiweb.expose
def getCommentContent(self, id):
comment = BlogCommentModel.getById(id)
return comment.content
@amiweb.expose
@editPermission
def updateComment(self, id, content):
BlogCommentModel.update(id, content)
self._expireCommentCache(id)
return getFormatManager().noPluginFormat(content, True)
# Replace the class with a module-level singleton instance; the routing
# layer dispatches to this object's exposed methods.
BlogController = BlogController()
class SpamComment(Exception):
    """Raised by checkURLSpam() when a comment looks like link spam."""
|
16,229 | 91aea36cb3ae003842d923d03cb5c68149c74474 | # Las Diccionary son con llaves y separados los valores por comas
# Permite modificar
# Permite ordenado
# No permite duplicados
# Build the same mapping with the dict() constructor instead of a
# brace literal: still mutable, ordered (insertion order), unique keys.
thisdict = dict(
    brand="Ford",
    model="Mustang",
    year=1964,
)
print(thisdict)           # the whole mapping
print(thisdict["brand"])  # one value looked up by key
16,230 | e890e13019c4038d8777a8bc8a876fd07ca8ec0d | import sys
# Sum of digits
def sum_digits(number):
    """Return the sum of the decimal digits of *number*."""
    return sum(int(ch) for ch in str(number))
# main
if __name__ == '__main__':
    # First line: declared digit count (read but unused — int() below
    # parses the whole second line regardless).
    N = int(sys.stdin.readline().rstrip())
    number = int(sys.stdin.readline().rstrip())
    answer = sum_digits(number)
    print(answer)
16,231 | 01f6588a8ba17abb5346d5738cae3ef960106c19 | import mysql.connector
# MySQL Connector/Python connection settings for the local fingerprint DB.
# NOTE(review): credentials are hard-coded in source — move them to a
# config file / environment variable before sharing this code.
config = {
    'user': 'root',
    'password': 'eabsen.kalselprov.go.id',
    'host': 'localhost',
    'database': 'data_finger',
    'raise_on_warnings': True,
}
#TEST ACCOUNT
# config = {
# 'user': 'root',
# 'password': '123456',
# 'host': 'localhost',
# 'database': 'data_finger',
# 'raise_on_warnings': True,
# }
# Prepared-statement catalogue used by Localhost; all values are passed
# through cursor.execute() placeholders (%s), not string formatting.
# Tables: pegawai (employees), pegawaiAdmin (admins), macaddress
# (registered devices), attendance (punch log), version.
SQL_SYNTAX = {
    'ADDPEGAWAI' : 'INSERT INTO pegawai (user_pin2, user_name, mac_) VALUES (%s, %s, %s)',
    'ADDADMIN' : 'INSERT INTO pegawaiAdmin (user_pin2, user_name, mac_) VALUES (%s, %s, %s)',
    'ADDMAC' : 'INSERT INTO macaddress (mac_) VALUES (%s)',
    'ADDATTENDANCE' : 'INSERT INTO attendance (user_pin, mac_) VALUES (%s, %s)',
    'ADDVERSION' : 'INSERT INTO version (version) VALUES (%s)',
    'CHECKATTENDANCE' : 'SELECT COUNT(*) FROM attendance WHERE mac_ = (%s)',
    'CHECKPEGAWAI' : 'SELECT COUNT(*) FROM pegawai WHERE mac_ = (%s)',
    'CHECKADMIN' : 'SELECT COUNT(*) FROM pegawaiAdmin WHERE mac_ = (%s)',
    'CHECKMAC' : 'SELECT COUNT(*) FROM macaddress',
    'CHECKALLATTENDANCE' : 'SELECT COUNT(*) FROM attendance',
    'CHECKALLADMIN' : 'SELECT COUNT(*) FROM pegawaiAdmin',
    'CHECKALLPEGAWAI' : 'SELECT COUNT(*) FROM pegawai',
    'CHECKVERSION' : 'SELECT version FROM version',
    'DELETEMAC' : 'DELETE FROM macaddress WHERE mac_ = (%s)',
    'DELETEPEGAWAI' : 'DELETE FROM pegawai WHERE user_pin2 = (%s) AND mac_ = (%s)',
    'DELETEPEGAWAIID' : 'DELETE FROM pegawai WHERE id = (%s) AND mac_ = (%s)',
    'DELETEADMIN' : 'DELETE FROM pegawaiAdmin WHERE user_pin2 = (%s) AND mac_ = (%s)',
    'DELETEADMINID' : 'DELETE FROM pegawaiAdmin WHERE id = (%s) AND mac_ = (%s)',
    'DELETEATTENDANCE' : 'DELETE FROM attendance WHERE mac_ = (%s)',
    'FINDMAC' : 'SELECT mac_ FROM macaddress WHERE mac_ = (%s)',
    'FINDALLMAC' : 'SELECT mac_ FROM macaddress',
    'FINDALLADMIN' : 'SELECT user_pin2 FROM pegawaiAdmin WHERE mac_ = (%s)',
    'FINDADMIN' : 'SELECT user_pin2 FROM pegawaiAdmin WHERE user_pin2 = (%s) AND mac_ = (%s)',
    'FINDPEGAWAI' : 'SELECT user_pin2 FROM pegawai WHERE user_pin2 = (%s) AND mac_ = (%s)',
    'FINDPEGAWAIALL' : 'SELECT * FROM pegawai WHERE user_pin2 = (%s) AND mac_ = (%s)',
    'FINDADMINALL' : 'SELECT * FROM pegawaiAdmin WHERE user_pin2 = (%s) AND mac_ = (%s)',
    'FINDALLPEGAWAI' : 'SELECT user_pin2 FROM pegawai WHERE mac_ = (%s)',
    'UPDATEVERSION' : 'UPDATE version SET version = %s',
    'TRUNCATE' : 'TRUNCATE TABLE attendance'
}
class Localhost:
    """Data-access wrapper around the local ``data_finger`` MySQL DB.

    Each method runs one of the prepared statements in SQL_SYNTAX on the
    connection opened in __init__.  Fixes over the original:

    * count checks used ``is 0`` (identity on ints — only works by
      accident through CPython's small-int cache); now ``== 0``;
    * several methods opened a throw-away ``Localhost()`` connection to
      re-check their own work; they now reuse ``self``;
    * ``hapusadmin`` verified the delete against the *pegawai* table
      instead of the admin table;
    * dead ``while True`` wrappers removed from the register methods.
    """

    def __init__(self):
        self.cnx = mysql.connector.connect(**config)
        self.cursor = self.cnx.cursor(buffered=True)
        self.cnx.commit()

    def hapussemua(self):
        """Truncate every table; True when all are empty afterwards."""
        self.cursor.execute('TRUNCATE TABLE attendance')
        self.cursor.execute('TRUNCATE TABLE macaddress')
        self.cursor.execute('TRUNCATE TABLE pegawai')
        self.cursor.execute('TRUNCATE TABLE pegawaiAdmin')
        self.cnx.commit()
        # BUG FIX: was ``is 0`` (int identity) — use equality.
        return (self.ceksemuaabsensi() == 0 and self.ceksemuaadmin() == 0
                and self.ceksemuapegawai() == 0 and self.cekjumlahmac() == 0)

    # --- version -------------------------------------------------------

    def cekversion(self, version):
        """True when the stored version differs from *version* or no
        version row exists (i.e. an update is needed)."""
        self.cursor.execute(SQL_SYNTAX['CHECKVERSION'])
        try:
            versionsekarang = self.cursor.fetchone()[0]
        except TypeError:
            return True
        return versionsekarang != version

    def ambilversion(self):
        """Return the stored version, or None when there is none."""
        self.cursor.execute(SQL_SYNTAX['CHECKVERSION'])
        try:
            return self.cursor.fetchone()[0]
        except (IndexError, TypeError):
            return None

    def updateversion(self, version):
        self.cursor.execute(SQL_SYNTAX['UPDATEVERSION'], (version,))
        self.cnx.commit()

    def tambahversion(self, version):
        self.cursor.execute(SQL_SYNTAX['ADDVERSION'], (version,))
        self.cnx.commit()

    # --- MAC addresses (devices) ---------------------------------------

    def hapusmac(self, mac):
        self.cursor.execute(SQL_SYNTAX['DELETEMAC'], (mac,))
        self.cnx.commit()

    def cekkesemuamac(self):
        """All registered MAC addresses as a list of rows."""
        self.cursor.execute(SQL_SYNTAX['FINDALLMAC'])
        return self.cursor.fetchall()

    def macterdaftar(self, mac):
        """True when *mac* is already registered."""
        self.cursor.execute(SQL_SYNTAX['FINDMAC'], (mac,))
        try:
            return self.cursor.fetchone()[0] == mac
        except (TypeError, IndexError):
            return False

    def daftarmac(self, mac):
        """Register *mac*; False when it was already registered."""
        if self.macterdaftar(mac):
            return False
        self.cursor.execute(SQL_SYNTAX['ADDMAC'], (mac,))
        self.cnx.commit()
        return True

    def cekjumlahmac(self):
        """Number of registered MAC addresses (None on fetch failure)."""
        self.cursor.execute(SQL_SYNTAX['CHECKMAC'])
        try:
            return self.cursor.fetchone()[0]
        except (TypeError, IndexError, ValueError):
            return None

    # --- employees (pegawai) -------------------------------------------

    def ceksemuapegawai(self):
        self.cursor.execute(SQL_SYNTAX['CHECKALLPEGAWAI'])
        try:
            return self.cursor.fetchone()[0]
        except TypeError:
            return None

    def cekjumlahpegawai(self, mac):
        self.cursor.execute(SQL_SYNTAX['CHECKPEGAWAI'], (mac,))
        try:
            return self.cursor.fetchone()[0]
        except TypeError:
            return None

    def cekpegawai(self, pegawaiid, mac):
        """True when employee *pegawaiid* is registered on device *mac*."""
        self.cursor.execute(SQL_SYNTAX['FINDPEGAWAI'], (pegawaiid, mac,))
        try:
            pegawai = self.cursor.fetchone()
            return str(pegawai[0]) == str(pegawaiid)
        except TypeError:
            return False

    def carisemuapegawai(self, mac):
        self.cursor.execute(SQL_SYNTAX['FINDALLPEGAWAI'], (mac,))
        return self.cursor.fetchall()

    def hapuspegawai(self, pegawaiid, mac):
        """Delete the employee row; True when it is gone afterwards."""
        self.cursor.execute(SQL_SYNTAX['DELETEPEGAWAI'], (pegawaiid, mac,))
        self.cnx.commit()
        # Re-check on this connection (original opened a new Localhost()).
        return not self.cekpegawai(pegawaiid, mac)

    def hapuspegawaiid(self, idlocal, mac):
        self.cursor.execute(SQL_SYNTAX['DELETEPEGAWAIID'], (idlocal, mac,))
        self.cnx.commit()

    def normalizelocalhostpegawai(self, pegawaiid, mac):
        """All columns of the matching employee rows (for syncing)."""
        self.cursor.execute(SQL_SYNTAX['FINDPEGAWAIALL'], (pegawaiid, mac,))
        return self.cursor.fetchall()

    def daftarpegawai(self, pegawaiid, nama, mac):
        """Register an employee; True only when the row really exists
        afterwards.  Skips duplicates."""
        if self.cekpegawai(pegawaiid, mac):
            return False
        self.cursor.execute(SQL_SYNTAX['ADDPEGAWAI'], (pegawaiid, nama, mac,))
        self.cnx.commit()
        return self.cekpegawai(pegawaiid, mac)

    # --- admins --------------------------------------------------------

    def ceksemuaadmin(self):
        self.cursor.execute(SQL_SYNTAX['CHECKALLADMIN'])
        try:
            return self.cursor.fetchone()[0]
        except TypeError:
            return None

    def cekjumlahadmin(self, mac):
        self.cursor.execute(SQL_SYNTAX['CHECKADMIN'], (mac,))
        try:
            return self.cursor.fetchone()[0]
        except TypeError:
            return None

    def cekadmin(self, pegawaiid, mac):
        """True when admin *pegawaiid* is registered on device *mac*."""
        self.cursor.execute(SQL_SYNTAX['FINDADMIN'], (pegawaiid, mac,))
        try:
            admin = self.cursor.fetchone()
            return str(admin[0]) == str(pegawaiid)
        except TypeError:
            return False

    def hapusadmin(self, pegawaiid, mac):
        """Delete the admin row; True when it is gone afterwards."""
        self.cursor.execute(SQL_SYNTAX['DELETEADMIN'], (pegawaiid, mac,))
        self.cnx.commit()
        # BUG FIX: the original verified against the *pegawai* table
        # (cekpegawai), so the result was meaningless for admins.
        return not self.cekadmin(pegawaiid, mac)

    def hapusadminid(self, idlocal, mac):
        self.cursor.execute(SQL_SYNTAX['DELETEADMINID'], (idlocal, mac,))
        self.cnx.commit()

    def normalizelocalhostadmin(self, pegawaiid, mac):
        self.cursor.execute(SQL_SYNTAX['FINDADMINALL'], (pegawaiid, mac,))
        return self.cursor.fetchall()

    def daftaradmin(self, pegawaiid, nama, mac):
        """Register an admin; skips duplicates (returns False then)."""
        if self.cekadmin(pegawaiid, mac):
            return False
        self.cursor.execute(SQL_SYNTAX['ADDADMIN'], (pegawaiid, nama, mac,))
        self.cnx.commit()
        return True

    def carisemuaadmin(self, mac):
        self.cursor.execute(SQL_SYNTAX['FINDALLADMIN'], (mac,))
        return self.cursor.fetchall()

    # --- attendance ----------------------------------------------------

    def ceksemuaabsensi(self):
        self.cursor.execute(SQL_SYNTAX['CHECKALLATTENDANCE'])
        try:
            return self.cursor.fetchone()[0]
        except (TypeError, IndexError):
            return None

    def cekjumlahabsensi(self, mac):
        self.cursor.execute(SQL_SYNTAX['CHECKATTENDANCE'], (mac,))
        try:
            return self.cursor.fetchone()[0]
        except (TypeError, IndexError):
            return None

    def inputdataabsensi(self, pegawaiid, mac):
        self.cursor.execute(SQL_SYNTAX['ADDATTENDANCE'], (pegawaiid, mac,))
        self.cnx.commit()

    def cleardataabsensi(self):
        self.cursor.execute(SQL_SYNTAX['TRUNCATE'])
        self.cnx.commit()

    def hapusdataabsensi(self, mac):
        self.cursor.execute(SQL_SYNTAX['DELETEATTENDANCE'], (mac,))
        self.cnx.commit()
# print Localhost().daftarpegawai(1, 'qwerty', '00:17:61:11:6A:C2')
# print Localhost().cekjumlahmac()
# for x in range (0, 1500):
# print Localhost().normalizelocalhost(x,'00:17:61:11:6A:C2')
# print Localhost().cekkesemuamac('00:17:61:11:6a:c3')
# print Localhost().hapussemua()
|
16,232 | 44170d18478edcff93a806f619e8967c1939b2d8 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import RequestHandler,template
from google.appengine.api import channel
from google.appengine.ext import db
import uuid
import simplejson
import urllib2
import OpenTokSDK
# OpenTok credentials used to mint sessions/tokens in WatchHandler.
# NOTE(review): the API secret is hard-coded in source — move to app
# configuration before publishing this code anywhere.
OPENTOK_API_KEY = '4383481'
OPENTOK_API_SECRET = '2359bc5bdd56f90b4b6b24cd3442ffccddaad6c4'
class SessionEntry(db.Model):
    """Datastore row mapping a short room token to its OpenTok session,
    so everyone opening /w/<token> joins the same video session."""
    token = db.StringProperty(required=True)       # 8-char room token from the URL
    opentok_session_id = db.StringProperty()       # session created on first visit
class MainHandler(webapp.RequestHandler):
    """Entry point: mint a short random room token and redirect to it."""

    def get(self):
        room = str(uuid.uuid4())[:8]
        self.redirect("/w/%s" % room)
class WatchHandler(webapp.RequestHandler):
    """Room page: look up (or lazily create) the OpenTok session for the
    URL token, mint a fresh OpenTok token and an App Engine channel
    token, and render the client page with all three."""

    def get(self, token):
        if not token:
            self.redirect("/")
            return
        opentok_sdk = OpenTokSDK.OpenTokSDK(OPENTOK_API_KEY, OPENTOK_API_SECRET)
        session_address = "yontage.appspot.com"
        # One SessionEntry per room token; first visitor creates it.
        session_entry = db.GqlQuery("SELECT * FROM SessionEntry WHERE token = :1", token).fetch(1)
        if session_entry == []:
            opentok_session = opentok_sdk.create_session(session_address)
            opentok_token = opentok_sdk.generate_token(opentok_session.session_id)
            opentok_session_id = opentok_session.session_id
            session_entry = SessionEntry(token=token, opentok_session_id=opentok_session_id)
            session_entry.put()
        else:
            # Existing room: reuse its session, issue a fresh token.
            opentok_token = opentok_sdk.generate_token(session_entry[0].opentok_session_id)
            opentok_session_id = session_entry[0].opentok_session_id
        # Channel API client id == room token, so QueryHandler can push
        # messages to this page.
        channel_token = channel.create_channel(token)
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write(template.render('templates/index.html',
            {
                'channel_token': channel_token,
                'token': token,
                'opentok_session_id' :opentok_session_id,
                'opentok_token' : opentok_token
            }))
class QueryHandler(webapp.RequestHandler):
    """Push a URL-decoded query string to the room's channel client."""

    def get(self, channel_token, query):
        # Both path segments are required; otherwise do nothing.
        if not (channel_token and query):
            return
        payload = simplejson.dumps({
            'query': urllib2.unquote(query)
        })
        channel.send_message(channel_token, payload)
        self.response.out.write("%s - %s" % (channel_token, payload))
def main():
    """Wire the three routes into a WSGI app and hand it to the runner."""
    routes = [
        ('/', MainHandler),
        ('/w/([^/]+)?', WatchHandler),
        ('/q/([^/]+)?/([^/]+)?', QueryHandler),
    ]
    util.run_wsgi_app(webapp.WSGIApplication(routes, debug=True))


if __name__ == '__main__':
    main()
|
16,233 | 28d2af41cce5704fd7a838ad93a927903f46a745 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import common
from bs4 import BeautifulSoup
import mysqlop
class AnHui():
    """Scraper for Anhui province government-procurement notices
    (www.ccgp-anhui.gov.cn): POSTs the site's search endpoint, parses
    the result list, and optionally stores rows in MySQL."""
    _url = 'www.ccgp-anhui.gov.cn'
    _baseurl = 'http://www.ccgp-anhui.gov.cn/'
    _posturl = '/mhxt/MhxtSearchBulletinController.zc?method=bulletinChannelRightDown'
    _title = u'安徽省政府采购'
    # DDL for the target table (idempotent).
    _createtable = 'create table if not exists anhui(' \
                   'id int not null PRIMARY KEY auto_increment,' \
                   'time date,' \
                   'title text,' \
                   'url text,' \
                   'reserved1 int,' \
                   'reserved2 text) DEFAULT CHARSET=utf8;'
    # NOTE(review): values are interpolated into the INSERT via
    # str.format — a scraped title containing a quote will break the
    # statement (and is an injection vector); prefer parameterized
    # queries if mysqlop supports them.
    _insertor = u"insert into anhui(time,title,url) " \
                u" VALUES('{0}','{1}','{2}');"
    # Browser-like headers required by the site's XHR endpoint.
    _header = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
               'Accept': 'text/html, */*; q=0.01',
               'X-Requested-With': 'XMLHttpRequest',
               'Referer': 'http://www.ccgp-anhui.gov.cn/',
               'Accept-Language': 'zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3',
               'Accept-Encoding': 'gzip, deflate',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
               'Content-Length': '145',
               'Host': 'www.ccgp-anhui.gov.cn',
               'Connection': 'Keep-Alive',
               'Pragma': 'no-cache'}
    # Search-form parameters (area 340000 = Anhui, first result page).
    _para = {'channelCode': 'sjcg',
             'bType': '01',
             'areaCode': '340000',
             'type': '00',
             'key': '',
             'bStartDate': '',
             'bEndDate': '',
             'proType': '00',
             'category': '',
             'areaCodeName': '%E7%9C%81%E6%9C%AC%E7%BA%A7',
             'pageNo': '1'}
    # _refer = ''
    # _host = ''

    def __init__(self):
        print 'Get Data from http://www.ccgp-anhui.gov.cn'

    def title(self):
        return self._title

    def getContent(self):
        """Fetch one result page; return a list of
        {'time', 'title', 'url'} dicts (empty on any error)."""
        cn = common.Common()
        ret = []
        try:
            data = cn.post(self._url,self._posturl,self._para,self._header)
            soup = BeautifulSoup(data, 'html5lib')
            lis = soup.select(".column.infoLink.noBox.addState.addStateL.unitWidth_x6 li")
            # print len(lis)
            for li in lis:
                title = li.a['title']
                href = li.a['href']
                time = li.span.text
                url = self._baseurl + href
                ret.append({'time': time, 'title': title, 'url': url})
        except Exception as e:
            # Best-effort scrape: log and return whatever was collected.
            print str(e)
        return ret

    def format(self, ret):
        '''
        Format scraped entries as HTML.
        :param ret: list of scraped entries from getContent()
        :return: one HTML string, one anchor line per entry
        '''
        str = ""
        for i in ret:
            str += u'时间:{0} <a href="{1}">{2}</a><br>'.format(i['time'], i['url'], i['title'])
        return str

    def save2mysql(self, data, dbname):
        """Insert entries into table ``anhui`` of *dbname*; returns True
        on success or the error message string on failure."""
        ret = True
        try:
            mo = mysqlop.MysqlOp()
            mo.loadDefault()
            mo.createdb(dbname)
            mo.runNotQuery(self._createtable)
            for dat in data:
                sql = self._insertor.format(dat['time'], dat['title'], dat['url'])
                mo.runNotQuery(sql)
        except Exception as e:
            ret = str(e)
        return ret
if __name__ == '__main__':
    # Smoke test: scrape one page, print it, persist into DB 'reptiles'.
    ah = AnHui()
    ret = ah.getContent()
    print ah.format(ret)
    ah.save2mysql(ret,"reptiles")
16,234 | 76483ced2e9552445261a04ae68b50741ef069a6 | from __future__ import print_function
import ROOT,uproot
# SFrame/UHH2 job-description XMLs whose listed ROOT files should be
# integrity-checked (QCD pT 1000-1400 2017v2 jobs).
xml_files=[
    'MC_QCD_Pt_1000to1400_2017v2_46.xml',
    'MC_QCD_Pt_1000to1400_2017v2_48.xml',
    'MC_QCD_Pt_1000to1400_2017v2_51.xml',
    'MC_QCD_Pt_1000to1400_2017v2_52.xml',
    'MC_QCD_Pt_1000to1400_2017v2_54.xml',
    'MC_QCD_Pt_1000to1400_2017v2_57.xml',
    'MC_QCD_Pt_1000to1400_2017v2_59.xml'
]
# Work directory holding the XML files.
path = '/nfs/dust/cms/user/albrechs/UHH2/10_2_afs_installation/DazsleWorkdir/workdir_WMassDDTMaps/'
# For every XML, collect the /pnfs .root file paths it references and
# verify each one opens and serves the PFParticles.m_pt branch.
for xml in xml_files:
    print('processing xml:', xml)
    root_files = []
    # The file paths sit inside quoted attributes on lines mentioning
    # both ".root" and "pnfs".
    with open(path + xml) as xml_file:
        for l in xml_file:
            if ".root" in l and 'pnfs' in l:
                root_files.append(l.split('"')[1])
    # (the explicit close() inside the with-block was redundant and removed)
    for root_file in root_files:
        f = uproot.open(root_file)
        tree = f['AnalysisTree']
        try:
            branch = tree['PFParticles.m_pt']
            array = branch.array()
        except Exception:
            # BUG FIX: the bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit; only real read errors mean
            # the file is broken.
            print('root file', root_file, 'is broken')
16,235 | 53ec0da22e1b9577a6751337e9937e2c82db165c | '''
Created on 2014/3/20
@author: Robert
'''
import wx
import wx.lib.newevent
import sys
import os
import threading
import logging
import time
####Thread event
myEVT_ThreadDone = wx.NewEventType()
EVT_ThreadDone = wx.PyEventBinder(myEVT_ThreadDone, 1)
myEVT_ThreadStart = wx.NewEventType()
EVT_ThreadStart = wx.PyEventBinder(myEVT_ThreadStart, 2)
class ThreadingManagement(object):
    """Mixin for wx windows that run one background worker thread.

    The worker signals start/end via custom wx events; ``thrd_stop`` is
    the cooperative stop request flag the worker should poll, and
    ``thrd_stop_done`` is set by the worker when it has finished.
    """
    thrd = None            # the current worker Thread (None when idle)
    thrd_stop = True       # set True to ask the worker to stop
    thrd_stop_done = True  # set True by the worker once it has stopped

    def __init__(self):
        self.Bind(EVT_ThreadDone , self.OnThreadDone)
        self.Bind(EVT_ThreadStart , self.OnThreadStart)

    def ThreadStart( self , target = None, args = ()):
        """Launch *target(*args)* on a fresh daemon-less thread and
        broadcast the start event to the top-level window."""
        logging.debug("%s:ThreadStart" % self.__class__.__name__)
        wx.PostEvent(wx.GetTopLevelParent(self), wx.PyCommandEvent(myEVT_ThreadStart, -1))
        self.thrd_stop = False
        self.thrd_stop_done = False
        # BUG FIX: Thread.start() returns None, so the original
        # ``self.thrd = threading.Thread(...).start()`` never kept the
        # thread object.  Create, keep, then start.
        self.thrd = threading.Thread(target = target, args = args)
        self.thrd.start()

    def OnThreadStart( self , event ):
        logging.debug("%s:OnThreadStart" % self.__class__.__name__)
        self.thrd_stop = False
        self.thrd_stop_done = False
        if(event):
            event.Skip()

    def ThreadDone( self ):
        """Called by the worker itself when it finishes; notifies the UI."""
        logging.debug("%s:ThreadDone" % self.__class__.__name__)
        self.thrd_stop_done = True
        wx.PostEvent(wx.GetTopLevelParent(self), wx.PyCommandEvent(myEVT_ThreadDone, -1))
        logging.info("Done")

    def OnThreadDone( self , event ):
        self.thrd_stop_done = True
        logging.debug("%s:OnThreadDone" % self.__class__.__name__)
        if(event):
            # Re-enable child widgets disabled while the worker ran.
            for child in self.GetChildren():
                child.Enable(True)
            #logging.info("Done")
            event.Skip()

    def ThreadStop( self ):
        """Request a stop and busy-wait (with UI pumping) until the
        worker acknowledges via thrd_stop_done."""
        logging.debug("%s:ThreadStop" % self.__class__.__name__)
        self.thrd_stop = True
        # Keep a reference: the BusyInfo dialog lives as long as ``busy``.
        busy = wx.BusyInfo("One moment please, waiting for stopping")
        i = 0
        while(not self.thrd_stop_done):
            time.sleep(0.1)
            i+=1
            # Pump pending UI events every 16 iterations (~1.6 s).
            if(not i&0xF):
                wx.Yield()
if __name__ == '__main__':
    # Mixin module; nothing to run stand-alone.
    pass
16,236 | 3492b074ba8cf0e0ec693763649b1c1884716f25 | from Tkinter import *
from threading import Thread
#from threading import Timer
from os.path import expanduser
import os
import time
import datetime
import tkFont
def recThread():
    """Start ffmpeg in the background ('&') recording the whole X11
    desktop (size auto-detected via xdpyinfo) plus PulseAudio into
    ~/Videos/<timestamp>.avi.  Returns as soon as the shell forks."""
    # os.system("sleep 1s;ffmpeg -f x11grab -s $(xdpyinfo | grep 'dimensions:' | awk '{print $2}' | cut -dx -f1)x$(xdpyinfo | grep 'dimensions:' | awk '{print $2}' | cut -dx -f2) -b:v 350k -r 15 -i :0.0 -q:v 3 -r 15 -y ~/Videos/$(date +%d%b_%Hh%Mm).avi &")
    os.system("ffmpeg -y -f alsa -i pulse -f x11grab -framerate 30 -video_size $(xdpyinfo | grep 'dimensions:' | awk '{print $2}' | cut -dx -f1)x$(xdpyinfo | grep 'dimensions:' | awk '{print $2}' | cut -dx -f2) -i :0.0+0,0 -c:v libx264 -pix_fmt yuv420p -qp 0 -preset ultrafast ~/Videos/$(date +%d%b_%Hh%Mm).avi &")
#def rec_voice_Thread():
# os.system("arecord -D plughw:2 -r 15 ~/Videos/$(date +%d%b_%Hh%Mm).wav &")
# os.system("ffmpeg -f alsa -ac 2 -i pulse -acodec aac -strict experimental -ab 399k -y ~/Videos/$(date +%d%b_%Hh%Mm).aac &")
def rec():
    """Record button handler: start the ffmpeg worker thread, switch the
    button states, and run a blocking mm:ss counter loop that pumps the
    Tk event loop via root.update() until stop() clears count_flag."""
    global videoFile
    mydate = datetime.datetime.now()
    # Shown in the UI; the actual ffmpeg output name is generated by the
    # shell's own $(date ...) and should normally match this minute.
    videoFile = mydate.strftime("%d%b_%Hh%Mm.avi")
    # NOTE(review): pathSt is computed but never used; the label shows
    # the expanduser path on the next line instead.
    pathSt=os.getcwd()+"/Videos/"
    l['text']=os.path.expanduser('~')+"/Videos/"
    l1['text']=videoFile
    b.config(state=DISABLED)
    b1.config(state=ACTIVE)
    t = Thread(target=recThread)
    # t1 = Thread(target=rec_voice_Thread)
    t.start()
    # t1.start()
    global count_flag, secs, mins
    count_flag = True
    secs=0
    mins=0
    # Timer loop: runs on the Tk main thread; root.update() keeps the
    # GUI responsive so the Stop button can flip count_flag.
    while True:
        if count_flag == False:
            break
        label['text'] = str("%02dm:%02ds" % (mins,secs))
        if secs == 0:
            # First tick immediately, then once per second.
            time.sleep(0)
        else:
            time.sleep(1)
        if(mins==0 and secs==1):
            # After the first second, colour the buttons "recording".
            b1.config(bg="red")
            b.config(fg="white")
            b.config(bg="white")
        if secs==60:
            secs=0
            mins+=1
        label['text'] = str("%02dm:%02ds" % (mins,secs))
        root.update()
        secs = secs+1
def stop():
    """Stop button handler: restore button states, end the counter loop
    and kill the newest ffmpeg process."""
    b.config(state=ACTIVE)
    b1.config(state=DISABLED)
    b1.config(fg="white")
    b1.config(bg="white")
    b.config(fg="white")
    b.config(bg="green")
    global count_flag
    count_flag = False
    # Kill the most recently started ffmpeg (-n = newest match).
    os.system("pkill -n ffmpeg")
    # os.system("pkill -n ffmpeg")
    try:
        # t1.stop()
        # NOTE(review): ``t`` is a *local* of rec(), so it is not in
        # scope here (NameError), and threading.Thread has no stop()
        # method anyway — this call can never succeed; the except arm
        # below swallows the error and prints an empty line.
        t.stop()
    except:
        print("")
# --- GUI construction: a small always-on-top recorder window ----------
root = Tk()
fontTime = tkFont.Font(family="Helvetica", size=12)
fontButton = tkFont.Font(family="Monospace", size=11,weight="bold")
# NOTE(review): font="fontTime"/"fontButton" passes the *string* name,
# not the Font objects created above — presumably font=fontTime was
# intended; confirm before changing.
label = Label(root, text="00m:00s",fg="blue",font="fontTime")
b = Button(root,text="Record",command=rec,state=ACTIVE,bg="green",font="fontButton")
b1 = Button(root,text=" Stop ",command=stop,state=DISABLED,bg="white",font="fontButton")
l = Label(root, text="")    # output directory (filled in by rec())
l1 = Label(root, text="")   # output file name  (filled in by rec())
label.grid(row=0, column=0, columnspan=2)
b.grid(row=1, column=0, padx=1, pady=5)
b1.grid(row=1, column=1, padx=1)
l.grid(row=2, column=0,columnspan=2)
l1.grid(row=3, column=0,columnspan=2)
# Fixed-size, always-on-top window.
root.minsize(160,105)
root.maxsize(160,105)
root.title("Desktop REC")
root.attributes("-topmost", 1)
root.mainloop()
'''
ffmpeg -y -f alsa -i #hw:2# pulse -f x11grab -framerate 30 -video_size 2560x1440 -i :0.0+0,0 -c:v libx264 -pix_fmt yuv420p -qp 0 -preset ultrafast ~/Videos/$(date +%d%b_%Hh%Mm).avi ## ok for single screen
'''
|
16,237 | 0cfaeada5b6c464e833fd34f80b00d4a27ae26bf | # sample usage
# python zip_file_password_cracker.py -f archive.zip -d dictionary.txt
import zipfile
import optparse
import os
from threading import Thread
def extract_file(archive, password):
    """Try to extract *archive* (a zipfile.ZipFile) with *password*.

    Returns the password on success, None on failure.
    BUG FIX: the original returned None unconditionally (bare ``return``
    after the print), so callers could never detect a hit; it also used
    a bare ``except``.
    """
    try:
        archive.extractall(pwd=password)
        print('[+] Password = %s\n' % password)
        return password
    except Exception:
        # Wrong password (or unreadable archive) — caller keeps trying.
        return None
def main():
    """Parse CLI options and try every dictionary word against the
    archive, one extraction thread per candidate password."""
    parser = optparse.OptionParser("usage " + os.path.basename(__file__) + " -f <zipfile> -d <dictionary")
    parser.add_option('-f', dest='archive', type='string', help='specify zip file')
    parser.add_option('-d', dest='dictionary', type='string', help='specify dictionary file')
    (options, args) = parser.parse_args()
    if options.archive is None or options.dictionary is None:
        print(parser.usage)
        exit(0)
    archive_file = zipfile.ZipFile(options.archive)
    # BUG FIX: the original additionally called
    # ``extract_file(archive, password)`` synchronously with the file
    # *name* instead of the ZipFile object, so that call always failed
    # silently; one (threaded) attempt per password is enough.
    with open(options.dictionary) as dictionary_file:
        for line in dictionary_file:
            password = line.strip('\n')
            t = Thread(target=extract_file, args=(archive_file, password))
            t.start()


if __name__ == '__main__':
    main()
|
16,238 | a1fce043bc1950020d778777d38e04691225e0ba | """
Description: Get GPS coordinates from labels (classification output)
Author: Iva
Date: 11/2/2016
Python version: 2.7.10 (venv2)
"""
from __future__ import division
import math
import numpy as np
from get_data.map_coverage import MercatorProjection, G_Point, G_LatLng
# Geometry of the classified map tiles.  ``koef`` scales everything at
# once (tile zoom, image size, patch size).
koef=1
MERCATOR_RANGE = 256                 # Mercator world size in pixels at zoom 0
ZOOM_LEVEL = 16+koef                 # Google static-map zoom of the tiles
PIXELperLABEL = 4                    # pixel stride between classifier outputs
IMAGE_SIZE= 300*koef*2               # side of the full image, pixels
SINGLE_SIZE = 24*koef                # side of one classified patch, pixels
LABEL_SIZE = 1+ (IMAGE_SIZE-SINGLE_SIZE)/PIXELperLABEL # = 1+69*koef
def labels_suspect(labels, pixels = PIXELperLABEL, treshold=.9):
    """Return (x, y) pixel offsets of label-map local maxima above
    *treshold*.

    *labels* is a flat array reshaping to an n-by-n score grid; the
    returned offsets are relative to the image centre and spaced
    *pixels* apart.  NOTE(review): local_max() perturbs *labels* in
    place with tiny random noise for tie breaking, so repeated calls are
    not strictly deterministic.
    """
    n = np.sqrt(np.prod(labels.shape)).astype('int')
    ind = local_max(labels, n) > treshold
    # Regular grid of candidate offsets, centred on 0.
    grid = np.round((np.array(range(n-2))-(n-3)/2.) * pixels)
    xc = np.array([grid for i in range(n-2)])
    yc = np.transpose(xc)
    return xc[ind].ravel(), yc[ind].ravel()
def local_max(labels, n):
    """3x3 local-maximum detector over the n-by-n score grid.

    For the (n-2)-by-(n-2) interior, returns the 3x3 box-smoothed score
    where the (noise-perturbed) centre equals the 3x3 maximum, else 0.
    WARNING: mutates *labels* in place — adds random noise of order
    score/1e6 to break ties between equal neighbours.
    """
    x = labels.reshape(n,n)
    # 3x3 box smoothing as two separable 1-D averages.
    z = (x[2:n,:] + x[1:(n-1),:] + x[0:(n-2),:])/3
    z = (z[:,2:n] + z[:,1:(n-1)] + z[:,0:(n-2)])/3
    # Tiny random perturbation so exact ties pick a single maximum.
    x[1:(n-1),1:(n-1)] += np.random.randn(n-2,n-2)*z/1000000
    # Running 3x3 maximum of the perturbed scores.
    y = np.amax([(x[2:n,:]), (x[1:(n-1),:]), (x[0:(n-2),:])], axis=0)
    y = np.amax([(y[:,2:n]), (y[:,1:(n-1)]), (y[:,0:(n-2)])], axis=0)
    res = (y==x[1:(n-1),1:(n-1)]).astype('int')*z # (z+x[1:(n-1),1:(n-1)])/2
    return res
def labels_GPS(labels, center_gps, pixels = PIXELperLABEL, zoom = ZOOM_LEVEL):
    """Convert label-map peak offsets around *center_gps* into [lng, lat] rows."""
    world_scale = 2.**zoom
    projection = MercatorProjection()
    # center_gps is [lng, lat]; the projection wants (lat, lng)
    center_px = projection.fromLatLngToPoint(G_LatLng(center_gps[1], center_gps[0]))
    offsets_x, offsets_y = labels_suspect(labels, pixels)
    coords = np.zeros((len(offsets_x), 2))
    for idx, (dx, dy) in enumerate(zip(offsets_x, offsets_y)):
        point = G_Point(center_px.x + dx / world_scale,
                        center_px.y + dy / world_scale)
        latlng = projection.fromPointToLatLng(point)
        coords[idx,] = [latlng.lng, latlng.lat]
    return coords
def labels_GPS_list(labels, coords, pixels = PIXELperLABEL, zoom = ZOOM_LEVEL):
    """Collect suspect GPS coordinates from a batch of label maps."""
    collected = np.zeros((0, 2))
    for row in range(coords.shape[0]):
        tile_labels = labels[row,].reshape(LABEL_SIZE, LABEL_SIZE)
        tile_gps = labels_GPS(labels=tile_labels,
                              center_gps=coords[row, :].ravel(),
                              pixels=pixels, zoom=zoom)
        if tile_gps.shape[0] > 0:
            collected = np.concatenate([collected, tile_gps], axis=0)
    return collected
def testrun(i=3):
    """Ad-hoc evaluation: compare detected site counts against click data.

    Reads CSV fixtures from disk, sweeps thresholds, prints an RMSE table
    and returns the GPS suspects for the last processed image.

    Bug fix: ``listdir``, ``isfile`` and ``join`` were used but never
    imported anywhere in this module (NameError at runtime); import them
    locally here.
    NOTE(review): the final ``labels[i,:]`` relies on the loop variable ``i``
    leaking out of the loops above (shadowing the parameter) -- preserved.
    """
    from os import listdir
    from os.path import isfile, join
    labels = np.genfromtxt('tmp_images/assemble_y.csv', delimiter=',', skip_header= False).astype(dtype='float32')
    est1 = np.genfromtxt('tmp_images/assemble1.csv', delimiter=',', skip_header= False).astype(dtype='float32')
    est4 = np.genfromtxt('tmp_images/assemble4.csv', delimiter=',', skip_header= False).astype(dtype='float32')
    ids = np.genfromtxt('tmp_images/assemble_ids.csv', delimiter=',', skip_header= False).astype(dtype='float32')
    coords = np.genfromtxt('RGBprofiles/RGB_coords.csv', delimiter=',', skip_header= False)
    mypath='coord_lists/click/'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    onlyind = [int(f[2:4])-1 for f in onlyfiles]  # NOTE(review): unused
    # column 0: ground-truth click counts; 1-3: detections per estimator
    num_sites = np.zeros((61, 4))
    for i in range(len(onlyfiles)):
        click = np.genfromtxt(mypath+onlyfiles[i], delimiter=',', skip_header= True, dtype='uint16')
        num_sites[i,0] = click.shape[0]
    # threshold sweep: tr[:,0] holds the threshold, tr[:,1:] the RMSEs
    tr=np.zeros((10,4))
    tr[:,0] = 1 - (.02*np.array(range(10)))
    for k in range(10):
        t=tr[k,0]
        for i in range(61):
            x1,x2 = labels_suspect(labels[i,], treshold=t)
            num_sites[i,1] = x1.shape[0]
            x1,x2 = labels_suspect(est1[i,], treshold=t)
            num_sites[i,2] = x1.shape[0]
            x1,x2 = labels_suspect(est4[i,], treshold=t)
            num_sites[i,3] = x1.shape[0]
        for j in range(1,4):
            tr[k,j] = np.sqrt(np.sum((num_sites[:,j] - num_sites[:,0])**2)/61)
    print(tr)
    # gps array = [longitude is greenwitch, latitude is around 50]
    suspect_gps = labels_GPS(labels=labels[i,:], center_gps= ids[i,1:3].ravel())
    return(suspect_gps)
def snap2grid(obj_gps, zoom = ZOOM_LEVEL, img_size = IMAGE_SIZE, overlap=.1):
    """Snap one [lng, lat] pair onto the overlapping image-tile grid."""
    # effective tile pitch after removing the overlap margin
    pitch = img_size - img_size * overlap
    world_scale = 2.**zoom
    projection = MercatorProjection()
    obj_px = projection.fromLatLngToPoint(G_LatLng(obj_gps[1], obj_gps[0]))

    def snap(value):
        # round to the nearest multiple of pitch in world-pixel units
        return np.round(value * world_scale / pitch) * pitch / world_scale

    center = projection.fromPointToLatLng(G_Point(snap(obj_px.x), snap(obj_px.y)))
    return [np.round(center.lng, 6), np.round(center.lat, 6)]
def unique_rows(a):
    """Return the unique rows of a 2-D array, sorted lexicographically."""
    contiguous = np.ascontiguousarray(a)
    # view each row as one structured record so np.unique dedupes whole rows
    row_dtype = [('', contiguous.dtype)] * contiguous.shape[1]
    distinct = np.unique(contiguous.view(row_dtype))
    return distinct.view(contiguous.dtype).reshape(
        (distinct.shape[0], contiguous.shape[1]))
def snap_list(inlist, zoom=ZOOM_LEVEL, img_size = IMAGE_SIZE, overlap=.1):
    """Snap every [lng, lat] row of *inlist* to the tile grid and dedupe.

    Bug fix: the original bound ``outlist = inlist`` and therefore
    overwrote the caller's array row by row; work on a copy instead so the
    input is left untouched.  The returned rows are unchanged.
    """
    outlist = np.array(inlist, copy=True)
    for i in range(len(outlist)):
        outlist[i,] = snap2grid(outlist[i,], zoom=zoom,
                                img_size=img_size, overlap=overlap)
    return unique_rows(outlist)
def snap_square(long_range = [-3.3,-3], lat_range= [51,51.3], zoom = ZOOM_LEVEL, img_size = IMAGE_SIZE, overlap=.1):
    """Cover a lng/lat bounding box with snapped, de-duplicated tile centres."""
    lat_step = 0.0008   # default lat spacing at zoom 19 (was 0.0010)
    long_step = 0.0010  # default lng spacing at zoom 19 (was 0.0015)
    longs = np.arange(long_range[0], long_range[1], long_step)
    lats = np.arange(lat_range[0], lat_range[1], lat_step)
    dense = np.array([[lng, lat] for lng in longs for lat in lats])
    return snap_list(dense, zoom=zoom, img_size=img_size, overlap=overlap)
#t=snap_square()
|
16,239 | 2871e79970f9ea9ff018e905144f19a5331e0356 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-09-27 18:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required ``incidents`` integer column to wally_trips.Colors.
    dependencies = [
        ('wally_trips', '0002_colors'),
    ]
    operations = [
        migrations.AddField(
            model_name='colors',
            name='incidents',
            # default=30 only backfills existing rows;
            # preserve_default=False drops it from the model state afterwards.
            field=models.IntegerField(default=30),
            preserve_default=False,
        ),
    ]
|
16,240 | 26372433b841211fd74ff5a95b44550bd23b7714 | from django.urls import path
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required, permission_required
from .views import ItemView, CategoriaView, EncargadosListView, ItemDetailView, EncargadoDetailView
app_name = 'inventario'
# URL routes for the inventory app.  Access control is per-route:
# 'items' requires login, 'categorias' requires the add_categoria
# permission; the remaining routes are declared open here (checks, if any,
# presumably live on the views themselves -- TODO confirm).
urlpatterns = [
    # static landing page
    path(
        '',
        TemplateView.as_view(template_name='inventario/index.html'),
        name='index'
    ),
    path(
        'items/',
        login_required(ItemView.as_view()),
        name='items'
    ),
    path(
        'categorias/',
        permission_required('inventario.add_categoria')(CategoriaView.as_view()),
        name='categorias'
    ),
    path(
        'encargados/',
        EncargadosListView.as_view(),
        name='encargados'
    ),
    # detail views are addressed by slug
    path(
        'items/detalle/<slug:slug>/',
        ItemDetailView.as_view(),
        name='detalle_item'
    ),
    path(
        'encargados/detalle/<slug:slug>/',
        EncargadoDetailView.as_view(),
        name='detalle_encargado'
    ),
]
|
16,241 | 4614e4b3457d613cbdbbdeec1dce6fb39acf90de | from Tests.test_crud import test_add_cheltuiala,test_delete_cheltuiala
from Tests.test_operatiuni import test_stergere_cheltuieli,test_adaugare_valoare_pt_o_data
from Tests.test_undo_redo import test_undo
def run_all_tests():
    """Run every test suite in sequence (undo tests currently disabled)."""
    suites = (
        test_add_cheltuiala,
        test_delete_cheltuiala,
        test_stergere_cheltuieli,
        test_adaugare_valoare_pt_o_data,
    )
    for suite in suites:
        suite()
    #test_undo()
16,242 | 8ccc9a4806393d28e22d48f5dd1d46edb7404645 | class Solution(object):
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
res = ""
carry = 0
if len(num1) <= len(num2):
for i in range(1,len(num1)+1):
carry, num = divmod(int(num1[-i])+int(num2[-i])+carry,10)
num = str(num)
res = num + res
for j in range(len(num1)+1,len(num2)+1):
carry, num = divmod(int(num2[-j])+carry,10)
num = str(num)
res = num + res
if carry != 0:
res = str(carry) + res
return res
else:
for i in range(1,len(num2)+1):
carry, num = divmod(int(num1[-i])+int(num2[-i])+carry,10)
num = str(num)
res = num + res
for j in range(len(num2)+1,len(num1)+1):
carry, num = divmod(int(num1[-j])+carry,10)
num = str(num)
res = num + res
if carry != 0:
res = str(carry) + res
return res |
16,243 | 877e36467e440186f8891279de63d1c5093d011e | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Unit test dummies.
"""
from Acquisition import Implicit
from Acquisition import aq_base
from Acquisition import aq_inner
from Acquisition import aq_parent
from OFS.event import ObjectWillBeAddedEvent
from OFS.event import ObjectWillBeRemovedEvent
from OFS.interfaces import IObjectManager
from OFS.SimpleItem import Item
from zope.component.factory import Factory
from zope.container.contained import ObjectAddedEvent
from zope.container.contained import ObjectRemovedEvent
from zope.container.contained import notifyContainerModified
from zope.datetime import rfc1123_date
from zope.event import notify
from zope.interface import implementer
from ...ActionProviderBase import ActionProviderBase
from ...interfaces import IContentish
from ...interfaces import ISiteRoot
from ...interfaces import ITypeInformation
from ...PortalContent import PortalContent
from ..base.security import DummyUser
from ..base.security import OmnipotentUser
class DummyObject(Implicit):
    """
    A dummy callable object.

    Comes with getIconURL and restrictedTraverse
    methods.
    """

    def __init__(self, id='dummy', **kw):
        self._id = id
        self.__dict__.update(kw)

    def __str__(self):
        return self._id

    def __call__(self):
        return self._id

    def restrictedTraverse(self, path):
        # Walk attribute-by-attribute along a '/'-separated path;
        # an empty path resolves to the object itself.
        target = self
        segments = path.split('/') if path else []
        for segment in segments:
            target = getattr(target, segment)
        return target

    def icon(self):
        return f'{self._id} ICON'

    def getIconURL(self):
        return f'{self._id} ICON'

    def getId(self):
        return self._id
@implementer(ITypeInformation)
class DummyType(DummyObject):
    """ A Dummy Type object """

    def __init__(self, id='Dummy Content', title='Dummy Content', actions=()):
        """ To fake out some actions, pass in a sequence of tuples where the
            first element represents the ID or alias of the action and the
            second element is the path to the object to be invoked, such as
            a page template.
        """
        self.id = self._id = id
        self.title = title
        self._actions = {}
        self._setActions(actions)

    def _setActions(self, actions=()):
        # Merge (alias, path) pairs into the action lookup table.
        self._actions.update(dict(actions))

    def Title(self):
        return self.title

    def allowType(self, contentType):
        return True

    def allowDiscussion(self):
        return False

    def queryMethodID(self, alias, default=None, context=None):
        return self._actions.get(alias, default)

    def isConstructionAllowed(self, container):
        return True
@implementer(IContentish)
class DummyContent(PortalContent, Item):
    """
    A Dummy piece of PortalContent
    """
    meta_type = 'Dummy'
    portal_type = 'Dummy Content'
    url = 'foo_url'
    # flags flipped by the lifecycle hooks so tests can assert they ran
    after_add_called = before_delete_called = 0
    def __init__(self, id='dummy', *args, **kw):
        # keyword args become instance state; 'catalog' toggles lenient
        # attribute access in _safe_get below
        self.id = id
        self._args = args
        self._kw = {}
        self._kw.update(kw)
        self.reset()
        self.catalog = kw.get('catalog', 0)
        self.url = kw.get('url', None)
        self.view_id = kw.get('view_id', None)
    def manage_afterAdd(self, item, container):
        # lifecycle hook: record that the add notification fired
        self.after_add_called = 1
    def manage_beforeDelete(self, item, container):
        # lifecycle hook: record that the delete notification fired
        self.before_delete_called = 1
    def absolute_url(self):
        return self.url
    def reset(self):
        # clear the lifecycle flags between test phases
        self.after_add_called = self.before_delete_called = 0
    # Make sure normal Database export/import stuff doesn't trip us up.
    def _getCopy(self, container):
        return DummyContent(self.id, catalog=self.catalog)
    def _safe_get(self, attr):
        # in catalog mode missing attributes read as ''; otherwise they raise
        if self.catalog:
            return getattr(self, attr, '')
        else:
            return getattr(self, attr)
    def Title(self):
        # NOTE(review): relies on a 'title' attribute supplied via **kw --
        # raises AttributeError otherwise
        return self.title
    def listCreators(self):
        return self._safe_get('creators')
    def Subject(self):
        return self._safe_get('subject')
    def Description(self):
        return self._safe_get('description')
    def created(self):
        return self._safe_get('created_date')
    def modified(self):
        return self._safe_get('modified_date')
    def Type(self):
        return 'Dummy Content Title'
    def __call__(self):
        # view_id lets tests force rendering through a specific template
        if self.view_id is None:
            return DummyContent.inheritedAttribute('__call__')(self)
        else:
            # view_id control for testing
            template = getattr(self, self.view_id)
            if getattr(aq_base(template), 'isDocTemp', 0):
                # DTML-style templates expect (client, REQUEST, RESPONSE)
                return template(self, self.REQUEST, self.REQUEST['RESPONSE'])
            else:
                return template()
# zope.component factory wrapping the DummyContent constructor
DummyFactory = Factory(DummyContent)
class DummyFactoryDispatcher:
    """
    Dummy Product Factory Dispatcher
    """
    def __init__(self, folder):
        self._folder = folder
    def getId(self):
        return 'DummyFactoryDispatcher'
    def addFoo(self, id, *args, **kw):
        # optionally namespace the id with the folder's prefix, then create
        # the DummyContent and register it without firing container events
        if getattr(self._folder, '_prefix', None):
            id = f'{self._folder._prefix}_{id}'
        foo = DummyContent(id, *args, **kw)
        self._folder._setObject(id, foo, suppress_events=True)
        # prefixed folders get the (possibly rewritten) id back;
        # otherwise the method returns None
        if getattr(self._folder, '_prefix', None):
            return id
    # Zope security declarations: only the FooAdder role may call addFoo
    # from restricted code
    __roles__ = ('FooAdder',)
    __allow_access_to_unprotected_subobjects__ = {'addFoo': 1}
@implementer(IObjectManager)
class DummyFolder(DummyObject):
    """Dummy Container for testing.
    """
    def __init__(self, id='dummy', fake_product=0, prefix=''):
        self._prefix = prefix
        self._id = id
        if fake_product:
            # emulate Zope's manage_addProduct product dispatcher mapping
            self.manage_addProduct = {
                'FooProduct': DummyFactoryDispatcher(self)}
    def _setOb(self, id, object):
        setattr(self, id, object)
    def _delOb(self, id):
        delattr(self, id)
    def _getOb(self, id):
        return getattr(self, id)
    def _setObject(self, id, object, suppress_events=False):
        # mimics OFS.ObjectManager: will-be-added event, store, afterAdd
        # hook, then added/container-modified events (unless suppressed)
        if not suppress_events:
            notify(ObjectWillBeAddedEvent(object, self, id))
        self._setOb(id, object)
        object = self._getOb(id)
        if hasattr(aq_base(object), 'manage_afterAdd'):
            object.manage_afterAdd(object, self)
        if not suppress_events:
            notify(ObjectAddedEvent(object, self, id))
            notifyContainerModified(self)
        return object
    def _delObject(self, id):
        # mirror image of _setObject: will-be-removed, beforeDelete hook,
        # delete, then removed/container-modified events
        object = self._getOb(id)
        notify(ObjectWillBeRemovedEvent(object, self, id))
        if hasattr(aq_base(object), 'manage_beforeDelete'):
            object.manage_beforeDelete(object, self)
        self._delOb(id)
        notify(ObjectRemovedEvent(object, self, id))
        notifyContainerModified(self)
    def getPhysicalPath(self):
        # build the path by walking up the acquisition chain
        p = aq_parent(aq_inner(self))
        path = (self._id,)
        if p is not None:
            path = p.getPhysicalPath() + path
        return path
    def getId(self):
        return self._id
    def reindexObjectSecurity(self):
        pass
    def contentIds(self):
        return ('user_bar',)
    def all_meta_types(self):
        return ({'name': 'Dummy', 'permission': 'addFoo'},)
    def getTypeInfo(self):
        return self.portal_types.getTypeInfo(self)  # Can return None.
@implementer(ISiteRoot)
class DummySite(DummyFolder):
    """ A dummy portal folder.
    """
    _domain = 'http://www.foobar.com'
    _path = 'bar'
    def absolute_url(self, relative=0):
        # 'relative' is accepted for interface compatibility but ignored
        return '/'.join((self._domain, self._path, self._id))
    def getPhysicalPath(self):
        return ('', self._path, self._id)
    def getPhysicalRoot(self):
        return self
    def unrestrictedTraverse(self, path, default=None, restricted=0):
        if path == ['acl_users']:
            return self.acl_users
        else:
            # skip the first three segments: they mirror getPhysicalPath()
            obj = self
            for id in path[3:]:
                obj = getattr(obj, id)
            return obj
    def userdefined_roles(self):
        return ('Member', 'Reviewer')
    def getProperty(self, id, default=None):
        return getattr(self, id, default)
class DummyUserFolder(Implicit):
    """ A dummy User Folder with 2 dummy Users.
    """

    id = 'acl_users'

    def __init__(self):
        # users live directly as attributes so getUser can use getattr
        self.user_foo = DummyUser(id='user_foo')
        self.user_bar = DummyUser(id='user_bar')
        self.all_powerful_Oz = OmnipotentUser()

    def getUsers(self):
        pass

    def getUser(self, name):
        return getattr(self, name, None)

    def getUserById(self, id, default=None):
        return self.getUser(id)

    def userFolderDelUsers(self, names):
        for name in names:
            delattr(self, name)
class DummyTool(Implicit, ActionProviderBase):
    """
    This is a Dummy Tool that behaves as a
    a MemberShipTool, a URLTool and an
    Action Provider
    """
    def __init__(self, anon=1):
        # anon=1 makes isAnonymousUser() report an anonymous visitor
        self.anon = anon
    # IMembershipTool
    def getAuthenticatedMember(self):
        return DummyUser()
    def isAnonymousUser(self):
        return self.anon
    def checkPermission(self, permissionName, object, subobjectName=None):
        # permissive stub: every permission check passes
        return True
    # ITypesTool
    _type_id = 'Dummy Content'
    _type_actions = (('', 'dummy_view'),
                     ('view', 'dummy_view'),
                     ('(Default)', 'dummy_view'))
    def getTypeInfo(self, contentType):
        # always answers with the same DummyType regardless of contentType
        return DummyType(self._type_id, title=self._type_id,
                         actions=self._type_actions)
    def listTypeInfo(self, container=None):
        return (DummyType(self._type_id, title=self._type_id,
                          actions=self._type_actions),)
    def listContentTypes(self, container=None, by_metatype=0):
        return (self._type_id,)
    # IURLTool
    def __call__(self, relative=0):
        return self.getPortalObject().absolute_url()
    def getPortalObject(self):
        # the portal is the acquisition parent of the tool
        return aq_parent(aq_inner(self))
    getPortalPath = __call__
    # IWorkflowTool
    test_notified = None
    def notifyCreated(self, ob):
        # remember the last object we were notified about, for assertions
        self.test_notified = ob
    def getCatalogVariablesFor(self, obj):
        return {}
class DummyCachingManager:
    """Caching-manager stub: fixed headers, never caches by mod-time/ETag."""

    def getHTTPCachingHeaders(self, content, view_name, keywords, time=None):
        content_path = '/'.join(content.getPhysicalPath())
        return (
            ('foo', 'Foo'), ('bar', 'Bar'),
            ('test_path', content_path),
        )

    def getModTimeAndETag(self, content, view_method, keywords, time=None):
        # no modification time, no ETag, don't set Last-Modified
        return (None, None, False)

    def getPhysicalPath(self):
        return ('baz',)
FAKE_ETAG = None  # '--FAKE ETAG--'
class DummyCachingManagerWithPolicy(DummyCachingManager):
    # dummy fixture implementing a single policy:
    # - always set the last-modified date if available
    # - calculate the date using the modified method on content
    def getHTTPCachingHeaders(self, content, view_name, keywords, time=None):
        # if the object has a modified method, add it as last-modified
        # NOTE(review): when content has no 'modified' attribute this
        # raises on 'headers' / returns nothing, unlike the base class --
        # confirm the fixture is only ever used with modifiable content.
        if hasattr(content, 'modified'):
            headers = (('Last-modified', rfc1123_date(content.modified())),)
        return headers
    def getModTimeAndETag(self, content, view_method, keywords, time=None):
        modified_date = None
        if hasattr(content, 'modified'):
            modified_date = content.modified()
        # only ask for Last-Modified when a date actually exists
        set_last_modified = (modified_date is not None)
        return (modified_date, FAKE_ETAG, set_last_modified)
16,244 | 92b30cb80f3f5ad6f0eea3a66f3b48b32b24877c | from .tboard import write_images, write_losses
from .weights import save_weights, load_weights |
16,245 | bc386b6c658dd34e9d6ecd85b255292d32f7d0f8 | #!/usr/bin/python
from sphero_driver import sphero_driver
from time import sleep
# if you know your Sphero's address
# sphero = sphero_driver.Sphero("Sphero", "FF:CD:AA:99:45:00")
# auto-discover and connect to the Sphero, retrying once per second
sphero = sphero_driver.Sphero()
while not sphero.is_connected:
    sphero.connect()
    sleep(1)
# cycle the main LED red -> green -> blue, one second per colour
# NOTE(review): trailing (0, False) args presumably mean persist-flag and
# response-requested -- confirm against the sphero_driver API
sphero.set_rgb_led(255, 0, 0, 0, False)
sleep(1)
sphero.set_rgb_led(0, 255, 0, 0, False)
sleep(1)
sphero.set_rgb_led(0, 0, 255, 0, False)
sleep(1)
|
16,246 | b670b672ebf48715b179dae439cebbf20c018169 | '''
Created on Jun 9, 2017
@author: student
'''
# A program to average a set of numbers
# Illustrates interactive loop with two accumulators
def main():
    # Running total and count of the numbers entered so far.
    my_sum = 0.0
    count = 0
    moredata = "yes"
    # Keep reading while the answer starts with "y".  Python 2 semantics:
    # input() evaluates the typed expression, so numeric entry yields a
    # number; raw_input() returns the raw string.
    while moredata[0] == "y":
        x = input("Enter a number >> ")
        my_sum = my_sum + x
        count = count + 1
        moredata = raw_input("Do you have more numbers (yes or no)? ")
    # NOTE(review): answering "no" immediately leaves count == 0 and this
    # division raises ZeroDivisionError.
    print "\nThe average of the numbers is", my_sum / count
main()
|
16,247 | 37b1e2252f4d12565024f2d1f5c1ba66ebcb9b57 | from django.shortcuts import render,HttpResponse,redirect
import json
from weibo import models
from django.core.cache import cache
from io import BytesIO
import uuid
def userLogin(request):
    """Password login with captcha verification.

    Checks the captcha cached under ``imgId``, then the credentials; on
    success a session token is cached and returned to the client.

    Bug fix: the original called ``uuid.uuid1()`` twice -- it cached one
    token but returned a *different* one, so the token handed to the client
    could never be found in the cache again.  Generate the token once and
    reuse it for both.  Also narrowed the bare ``except:`` to ``Exception``.
    """
    if request.method == 'POST':
        email = request.POST.get('email')
        passwd = request.POST.get('passwd')
        accessLogin = request.POST.get('accessLogin')
        verify = request.POST.get('verify').upper()
        imgId = request.POST.get('imgId')
        cached_code = cache.get(imgId)
        if cached_code is None or cached_code.upper() != verify:
            return HttpResponse(json.dumps({'status': 'error', 'msg': '验证码错误'}),
                                content_type='application/json; charset=utf-8')
        try:
            userInfo = models.UserInfo.objects.filter(email=email)[0]
        except Exception:
            # no matching user (IndexError) or a database failure
            print("用户名不存在")
            return HttpResponse(json.dumps({'status': 'error', 'msg': '用户名或密码错误'}),
                                content_type='application/json; charset=utf-8')
        if userInfo.passwd == passwd:
            # single token, cached and returned
            token = str(uuid.uuid1()) + email
            cache.set(token, {'isLogin': True, 'email': email})
            # cache.delete(imgId)
            return HttpResponse(json.dumps({'status': 'success', 'msg':
                                            {'userName': userInfo.username,
                                             'token': token}}),
                                content_type='application/json; charset=utf-8')
        else:
            print("密码错误")
            return HttpResponse(json.dumps({'status': 'error', 'msg': '用户名或密码错误'}),
                                content_type='application/json; charset=utf-8')
from untils.check_code import create_validate_code
from django.core.handlers.wsgi import WSGIRequest
def VerifyCode(request):
    """Generate a captcha PNG and cache its code for 5 minutes under ``id``.

    Cleanup: removed the leftover debug ``print`` statements and the dead
    client-IP extraction whose result was only printed, never used.
    """
    auuid = request.GET.get('id')
    stream = BytesIO()
    img, code = create_validate_code()
    img.save(stream, 'PNG')
    # userLogin compares the submitted captcha against this cached value
    cache.set(auuid, code, 60*5)
    return HttpResponse(stream.getvalue())
16,248 | 8e154c51612fb65234071e6454a0d6f5bff12a1e | import os
import pickle
import tarfile
import time
from .utils import download_dataset
import numpy as np
labels_list = [
"apple",
"aquarium_fish",
"baby",
"bear",
"beaver",
"bed",
"bee",
"beetle",
"bicycle",
"bottle",
"bowl",
"boy",
"bridge",
"bus",
"butterfly",
"camel",
"can",
"castle",
"caterpillar",
"cattle",
"chair",
"chimpanzee",
"clock",
"cloud",
"cockroach",
"couch",
"crab",
"crocodile",
"cup",
"dinosaur",
"dolphin",
"elephant",
"flatfish",
"forest",
"fox",
"girl",
"hamster",
"house",
"kangaroo",
"keyboard",
"lamp",
"lawn_mower",
"leopard",
"lion",
"lizard",
"lobster",
"man",
"maple_tree",
"motorcycle",
"mountain",
"mouse",
"mushroom",
"oak_tree",
"orange",
"orchid",
"otter",
"palm_tree",
"pear",
"pickup_truck",
"pine_tree",
"plain",
"plate",
"poppy",
"porcupine",
"possum",
"rabbit",
"raccoon",
"ray",
"road",
"rocket",
"rose",
"sea",
"seal",
"shark",
"shrew",
"skunk",
"skyscraper",
"snail",
"snake",
"spider",
"squirrel",
"streetcar",
"sunflower",
"sweet_pepper",
"table",
"tank",
"telephone",
"television",
"tiger",
"tractor",
"train",
"trout",
"tulip",
"turtle",
"wardrobe",
"whale",
"willow_tree",
"wolf",
"woman",
"worm",
]
_dataset = "cifar100"
_urls = {"https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz": "cifar100.tar.gz"}
def load(path=None):
    """Image classification.

    The `CIFAR-100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ dataset is
    just like the CIFAR-10, except it has 100 classes containing 600 images
    each. There are 500 training images and 100 testing images per class.
    The 100 classes in the CIFAR-100 are grouped into 20 superclasses. Each
    image comes with a "fine" label (the class to which it belongs) and a
    "coarse" label (the superclass to which it belongs).

    :param path: dataset root directory; defaults to ``$DATASET_PATH``
    :return: dict mapping ``train_set``/``test_set`` images, fine labels
        and coarse labels

    Bug fix: the tar archive handle was opened but never closed; use the
    context manager so it is released even on error.
    """
    if path is None:
        path = os.environ["DATASET_PATH"]
    download_dataset(path, _dataset, _urls)

    t0 = time.time()

    with tarfile.open(os.path.join(path, _dataset, "cifar100.tar.gz"), "r:gz") as tar:
        # Loading training set
        f = tar.extractfile("cifar-100-python/train").read()
        raw = pickle.loads(f, encoding="latin1")
        train_images = raw["data"].reshape((-1, 3, 32, 32)).astype("float32")
        train_fine = np.array(raw["fine_labels"])
        train_coarse = np.array(raw["coarse_labels"])

        # Loading test set
        f = tar.extractfile("cifar-100-python/test").read()
        raw = pickle.loads(f, encoding="latin1")
        test_images = raw["data"].reshape((-1, 3, 32, 32)).astype("float32")
        test_fine = np.array(raw["fine_labels"])
        test_coarse = np.array(raw["coarse_labels"])

    data = {
        "train_set/images": train_images,
        "train_set/labels": train_fine,
        "train_set/coarse_labels": train_coarse,
        "test_set/images": test_images,
        "test_set/labels": test_fine,
        "test_set/coarse_labels": test_coarse,
    }

    print("Dataset cifar100 loaded in {0:.2f}s.".format(time.time() - t0))
    return data
16,249 | d7620ca25598775b75483a2f8f6311f65a4ba3a3 | '''
Module of Linux API for plyer.cpu.
'''
from os.path import join
from os import environ, listdir
from subprocess import Popen, PIPE
from plyer.facades import CPU
from plyer.utils import whereis_exe
class LinuxCPU(CPU):
    '''
    Implementation of Linux CPU API.
    '''

    def _sockets(self):
        '''
        Count physical CPU sockets via the unique "physical id" entries
        in /proc/cpuinfo.
        '''
        sockets = []  # physical CPU ids reported by the kernel
        with open('/proc/cpuinfo', 'rb') as fle:
            lines = fle.readlines()
        for line in lines:
            line = line.decode('utf-8')
            if 'physical id' not in line:
                continue
            cpuid = line.split(':')[1].strip()
            sockets.append(cpuid)
        # total sockets is the number of unique physical ids
        return len(set(sockets))

    def _physical(self):
        '''
        Count physical cores via the unique "core id" entries in
        /proc/cpuinfo.
        NOTE(review): core ids repeat across sockets on multi-socket
        machines, so this may undercount there -- confirm intended.
        '''
        physical = []  # core ids reported by the kernel
        with open('/proc/cpuinfo', 'rb') as fle:
            lines = fle.readlines()
        for line in lines:
            line = line.decode('utf-8')
            if 'core id' not in line:
                continue
            cpuid = line.split(':')[1].strip()
            physical.append(cpuid)
        return len(set(physical))

    def _logical(self):
        '''
        Count logical CPUs (cores * threads) via ``nproc --all``.
        '''
        logical = None
        # force the C locale so nproc's output format is predictable
        old_lang = environ.get('LANG', '')
        environ['LANG'] = 'C'
        proc = Popen(['nproc', '--all'], stdout=PIPE)
        output = proc.communicate()[0].decode('utf-8').strip()
        if output:
            logical = int(output)
        environ['LANG'] = old_lang
        return logical

    def _cache(self):
        '''
        Count cache indices per level (L1/L2/L3), summed over all present
        cores.
        '''
        values = {key: 0 for key in ('L1', 'L2', 'L3')}
        cpu_path = join('/sys', 'devices', 'system', 'cpu')

        # get present cores from kernel device.
        # Bug fix: this file was opened in text mode and .decode() was then
        # called on the resulting str, which raises AttributeError on
        # Python 3; open in binary mode like the other sysfs reads.
        with open(join(cpu_path, 'present'), 'rb') as fle:
            present = fle.read().decode('utf-8')

        # 'present' is either a range "0-N" or a single id
        present = present.strip().split('-')
        if len(present) == 2:
            present = range(int(present[1]) + 1)
        else:
            present = [present[0]]
        cores = ['cpu{}'.format(i) for i in present]

        for core in cores:
            indices = [
                # get 'indexN' files from 'cache' folder assuming
                # the filename is in range index0 to index99
                # in case a wild 'index_whatevercontent' file appears
                fle for fle in listdir(join(cpu_path, core, 'cache'))
                if fle.startswith('index') and len(fle) <= len('index') + 2
            ]
            for index in indices:
                index_type = join(cpu_path, core, 'cache', index, 'level')
                with open(index_type, 'rb') as fle:
                    cache_level = fle.read().decode('utf-8').strip()
                values['L{}'.format(cache_level)] += 1
        return values

    @staticmethod
    def _numa():
        return
def instance():
    '''
    Instance for facade proxy.
    '''
    import sys

    # fall back to the no-op facade when nproc is unavailable
    if not whereis_exe('nproc'):
        sys.stderr.write("nproc not found.")
        return CPU()
    return LinuxCPU()
|
16,250 | 99cb7f09045362559513b195d8446cf52f19dfd7 | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, Deque
from collections import deque
@dataclass
class Node:
    """Binary-tree node; ``left``/``right`` default to missing children."""
    data: int
    # Bug fix: the annotations were misspelled ``Optiona[Node]``; only this
    # file's ``from __future__ import annotations`` (lazy annotations,
    # PEP 563) kept that typo from raising NameError at import time.
    left: Optional[Node] = None
    right: Optional[Node] = None
def inorder(root: Node):
    """Print the inorder traversal of *root* as a list."""
    visited: list = []

    def walk(node: Node):
        if node.left:
            walk(node.left)
        visited.append(node.data)
        if node.right:
            walk(node.right)

    walk(root)
    print(visited)
def invert_binary_tree_dfs(root: Node):
    """Build the mirror image of *root* recursively, then print it."""
    if not root:
        return None
    mirrored: Node = Node(root.data)

    def mirror(src: Node, dst: Node):
        # left child of the source becomes right child of the mirror
        if src.left:
            dst.right = Node(src.left.data)
            mirror(src.left, dst.right)
        if src.right:
            dst.left = Node(src.right.data)
            mirror(src.right, dst.left)

    mirror(root, mirrored)
    inorder(mirrored)
    print(mirrored)
    print("****************************")
def invert_binary_tree_bfs(root: Node):
    """Build the mirror image of *root* iteratively (BFS), then print it.

    Cleanup: removed the unused ``level_nodes`` list and the per-level
    ``level_len`` chunking loop -- the queue already yields nodes in the
    same order, so the output is unchanged.
    """
    if not root:
        return None
    mirrored_root: Node = Node(root.data)
    queue: Deque = deque()
    queue.append((mirrored_root, root))
    while queue:
        mirror_node, node = queue.popleft()
        # children swap sides in the mirrored tree
        if node.left:
            mirror_node.right = Node(node.left.data)
            queue.append((mirror_node.right, node.left))
        if node.right:
            mirror_node.left = Node(node.right.data)
            queue.append((mirror_node.left, node.right))
    print(mirrored_root)
# Demo: build a small BST rooted at 4 and mirror it both ways.
root: Node = Node(4)
root.left = Node(2)
root.left.left = Node(1)
root.left.right = Node(3)
root.right = Node(7)
root.right.left = Node(6)
root.right.right = Node(9)
invert_binary_tree_dfs(root)
invert_binary_tree_bfs(root)
16,251 | 12d9c71cb32afd0f5ba3ebbcf578980389d966cf | # pylint: disable=all
import sys
class StationGraph(object):
    """
    A stationgraph allows a graph data structure to be built by
    passing in information one station at a time. From the graph's
    perspective, each station would be considered a node in the graph. The
    stationgraph is made of a nested dictionary arranged by stops rather than
    by trains. Each stop will contain info about where the passenger can go
    next and how much it will cost. This dictionary will then be used to
    determine the lowest cost path from start to D. Since there is no limit
    on how many times a passenger can switch trains, that information will be
    eliminated when building the graph.
    """
    def __init__(self, numLines, finalStop):
        # numLines/finalStop arrive as strings read from stdin; coerce once
        self.final_stop = int(finalStop)
        self.num_train_lines = int(numLines)
        ##station_graph is a nested dict organized as follows:
        ### {station: {station: cost, station: cost, station: cost}}
        ##initialize it to our final stop (which goes nowhere)
        self.station_graph ={int(self.final_stop):{}}
    def build_graph(self, key1, key2, value):
        """Record edge key1 -> key2 with the given cost, keeping the minimum."""
        #the graph will be a nested dictionary, so we need 2 keys to get
        #to a value. The value will be the cost of traveling from
        #key1 to key2
        if not key1 in self.station_graph:
            self.station_graph[key1]={key2:value}
        elif not key2 in self.station_graph[key1]:
            self.station_graph[key1][key2]=value
        else:
            # duplicate edge: keep only the cheaper cost
            current = self.station_graph[key1][key2]
            if value < current:
                self.station_graph[key1][key2]=value
    def print_station_graph(self):
        # debug helper: prints the adjacency list as "station-> nbr:cost, ..."
        for key1 in self.station_graph:
            print str(key1)+"->",
            for value in self.station_graph[key1]:
                print str(value)+':'+str(self.station_graph[key1][value])+",",
            print ""
    def shortest_path(self):
        """
        Using Djikstra's shortest path algorithm, determine optimal costs
        from the start station to each subsequent station, then return the
        value of the cost to travel from the starting station to the
        ending station.
        Passengers always start at location 0, so that is set to be the
        source station.
        Since trains only move forward, from start (0) to end (numstops),
        the graph is treated as directed and acyclic, which is great because
        it allows the shortest path calculation to be done in linear time
        O(nodes + edges)
        """
        #dict that will hold the cost of traveling to each station
        #add the initial cost of the starting station, which is 0
        D = {0:0}
        #add all of our dict keys (stations) to our queue
        station_queue = self.station_graph.keys()
        #sort the keys! since the graph is directed and acyclic, the stations
        #can be explored one at a time, in order, without having to adjust
        #for the lowest distance value via priority queue.
        #
        #sort them with reverse=True so that they can be popped from the
        #end of the list instead of from the beginning. This should save
        #some cpu time.
        station_queue.sort(reverse=True)
        while len(station_queue) > 0:
            station = station_queue.pop() #grab the next node in the queue
            for next_st, next_cost in self.station_graph[station].iteritems():
                #loops through the current station's neighbors, and calculates
                #their costs from the starting node, making sure to store
                #the lowest cost in our D dict
                alt = D[station] + next_cost #sum the costs
                if not D.has_key(next_st) or alt < D[next_st]:
                    #if there is no cost on record, or if the newly calculated
                    #cost is lower than the currently recorded one, then
                    #record the newly calculated cost as the lowest
                    D[next_st] = alt #set the cost to get to next_st
        return D[self.final_stop]
if __name__ == '__main__':
"""
The traingraph is initialized from standard input as follows:
first line is two integers separated by a space.
first integer is represented by "K" in description
but will be numLines here.
The second integer is the position of the destination D which
will be called finalStop here
"""
##instantiate our object to set up its internals
numLines, finalStop = sys.stdin.readline().strip().split()
tripPlanner = StationGraph(numLines, finalStop)
"""
The next few lines will be read in groups of 3,
1st line is the number of stops for that train
2nd line is list of stops
3rd line is list of costs per stop
"""
for i in xrange (0,tripPlanner.num_train_lines):
numstops = sys.stdin.readline().strip()
stopline = sys.stdin.readline().strip().split()
costline = sys.stdin.readline().strip().split()
##pass in each station, what it points to, and its travel cost
for j in xrange(0, int(numstops)-1):
#build the graph, one node at a time
tripPlanner.build_graph(
int(stopline[j]),
int(stopline[j+1]),
int(costline[j])
)
"""
Now that the graph is built, call shortest_path to determine
the lowest cost from beginning to end.
"""
print tripPlanner.shortest_path()
|
16,252 | 02659ff5f9daaaa47d7530a5cde0788b2f3ba314 | # -*- coding: utf-8 -*-
import xml.etree.ElementTree as etree
import os.path, shutil
import gamelist, utils
import fav, dat
class BasicSorter :
    """Builds a curated ROM set from a source set using favorites lists.

    Copies favorite games (grouped by genre) plus BIOS files into an
    export directory, and produces per-set CSV, gamelist and .dat files.
    Depends on the project modules fav, dat, utils and gamelist.
    """
    def __init__(self,configuration,scriptDir,logger,bioses,setKey) :
        # configuration: dict-like with paths/flags (exportDir, dryRun, ...)
        # setKey: name of the source set; also keys into configuration
        self.configuration = configuration
        self.scriptDir = scriptDir
        self.logger = logger
        self.bioses = bioses
        self.setKey = setKey
    def process(self) :
        """Run the full pipeline: prepare, create sets, check for errors."""
        self.prepare()
        # create bestarcade romsets
        self.logger.log('\n<--------- Create Sets --------->')
        self.createSets(self.dats)
        self.logger.log("\n<--------- Detecting errors ----------->")
        self.checkErrors()
        self.logger.log('\n<--------- Process finished ----------->')
        # input('\n (Press Enter) ')
    def prepare(self) :
        """Load favorites ini files and parse the set's .dat file."""
        self.usingSystems = self.useSystems(self.configuration)
        # create favorites containing fav games
        self.logger.log('\n<--------- Load Favorites Ini Files --------->')
        self.favorites = fav.loadFavs(self.scriptDir,self.setKey+'.ini',self.bioses,self.logger)
        # parse dat files
        self.logger.log('\n<--------- Load '+self.setKey+' dat --------->')
        # Prefer an explicitly configured dat path; fall back to <setKey>.dat
        if 'dat' in self.configuration and os.path.exists(self.configuration['dat']) :
            datsDict = dict(zip([self.setKey],[self.configuration['dat']]))
        else :
            datsDict = dict(zip([self.setKey],[self.setKey+'.dat']))
        self.dats = dat.parseDats(self.scriptDir,utils.dataDir,datsDict,self.usingSystems,self.logger)
    def useSystems(self,configuration) :
        """Return [setKey] if its source directory exists, else []."""
        systems = []
        systems.append(self.setKey) if os.path.exists(configuration[self.setKey]) else None
        self.logger.logList('Using systems',systems)
        return systems
    def createSets(self,dats) :
        """Create/clean the export dir and copy roms, BIOSes and metadata."""
        self.logger.log('Creating or cleaning output directory '+ self.configuration['exportDir'])
        if os.path.exists(self.configuration['exportDir']) :
            # Wipe any previous export so the result is reproducible
            for file in os.listdir(os.path.join(self.configuration['exportDir'])) :
                fullPath = os.path.join(self.configuration['exportDir'],file)
                shutil.rmtree(fullPath) if os.path.isdir(fullPath) else os.remove(fullPath)
        else :
            os.makedirs(self.configuration['exportDir'])
        dryRun = True if self.configuration['dryRun'] == '1' else False
        useGenreSubFolder = True if self.configuration['genreSubFolders'] == '1' else False
        scrapeImages = True if self.configuration['useImages'] == '1' and self.configuration['images'] else False
        CSVs, gamelists, roots = dict(), dict(), dict()
        header="Genre;Name (mame description);Rom name;Year;Manufacturer;Hardware;Comments;Notes\n"
        # init CSVS
        CSVs[self.setKey] = open(os.path.join(self.configuration['exportDir'],self.setKey+".csv"),"w",encoding="utf-8")
        CSVs[self.setKey].write(header)
        # init gamelists
        roots[self.setKey] = etree.Element("datafile")
        roots[self.setKey].append(dats[self.setKey+"Header"])
        os.makedirs(os.path.join(self.configuration['exportDir'],self.setKey))
        os.makedirs(os.path.join(self.configuration['exportDir'],self.setKey,'downloaded_images')) if scrapeImages else None
        gamelists[self.setKey] = gamelist.initWrite(os.path.join(self.configuration['exportDir'],self.setKey))
        for genre in self.favorites.keys() :
            self.logger.log("Handling genre "+ genre)
            if useGenreSubFolder :
                os.makedirs(os.path.join(self.configuration['exportDir'],self.setKey,genre))
                if scrapeImages :
                    gamelist.writeGamelistFolder(gamelists[self.setKey],genre,genre+'.png')
                    utils.setImageCopy(self.configuration['exportDir'],os.path.join(self.scriptDir,'data','images'),genre+'.png',self.setKey,dryRun)
            # copy bios in each subdirectory
            for bios in self.bioses :
                setBios = os.path.join(self.configuration[self.setKey],bios+".zip")
                utils.setFileCopy(self.configuration['exportDir'],setBios,genre,bios,self.setKey,useGenreSubFolder,dryRun)
                if os.path.exists(setBios) :
                    utils.writeGamelistHiddenEntry(gamelists[self.setKey],bios,genre,useGenreSubFolder)
            for game in sorted(self.favorites[genre]) :
                setRom = os.path.join(self.configuration[self.setKey],game+".zip")
                setCHD = os.path.join(self.configuration[self.setKey],game)
                image = self.configuration['imgNameFormat'].replace('{rom}',game)
                # TODO aliases should be handled here
                utils.setFileCopy(self.configuration['exportDir'],setRom,genre,game,self.setKey,useGenreSubFolder,dryRun)
                utils.setCHDCopy(self.configuration['exportDir'],setCHD,genre,game,self.setKey,useGenreSubFolder,dryRun)
                utils.writeCSV(CSVs[self.setKey],game,None,genre,dats[self.setKey],None,self.setKey)
                utils.writeGamelistEntry(gamelists[self.setKey],game,image,dats[self.setKey],genre,useGenreSubFolder,None,self.setKey,None)
                roots[self.setKey].append(dats[self.setKey][game].node) if game in dats[self.setKey] else None
                if scrapeImages :
                    utils.setImageCopy(self.configuration['exportDir'],self.configuration['images'],image,self.setKey,dryRun)
                self.logger.log(setRom)
        # writing and closing everything
        treeSet = etree.ElementTree(roots[self.setKey])
        treeSet.write(os.path.join(self.configuration['exportDir'],self.setKey+".dat"), xml_declaration=True, encoding="utf-8")
        CSVs[self.setKey].close()
        gamelist.closeWrite(gamelists[self.setKey])
    def checkErrors(self) :
        """Verify that every favorite rom actually landed in the export dir."""
        foundErrors = False
        useGenreSubFolder = True if self.configuration['genreSubFolders'] == '1' else False
        dryRun = True if self.configuration['dryRun'] == '1' else False
        if not dryRun :
            for genre in self.favorites :
                for name in self.favorites[genre] :
                    if useGenreSubFolder :
                        setRom = os.path.join(self.configuration['exportDir'],self.setKey,genre,name+".zip")
                    else :
                        setRom = os.path.join(self.configuration['exportDir'],self.setKey,name+".zip")
                    if not os.path.exists(setRom) :
                        if foundErrors is False :
                            self.logger.log("Possible errors")
                            foundErrors = True
                        self.logger.log(setRom +' is missing in output dir')
            if foundErrors is False :
                self.logger.log("S'all good man")
# TODOS
# if name from dat is empty, take one from test file
16,253 | 101750258189e874cf0ee54f5301954893c6bdcf | import argparse
import os
import random
import shutil
import time
import warnings
import numpy as np
from torchvision import datasets
from functions import *
from imagepreprocess import *
from model_init import *
from src.representation import *
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
# Command-line interface for ImageNet-style training (based on the official
# PyTorch ImageNet example, extended with global representation options).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
                    help='model architecture: ')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--lr-method', default='step', type=str,
                    help='method of learning rate')
parser.add_argument('--lr-params', default=[], dest='lr_params',
                    action='append', help='params of lr method')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
# Distributed-training knobs (world size > 1 enables torch.distributed).
parser.add_argument('--world-size', default=1, type=int,
                    help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
# Project-specific options (representation layer, fine-tuning behaviour).
parser.add_argument('--modeldir', default=None, type=str,
                    help='director of checkpoint')
parser.add_argument('--representation', default=None, type=str,
                    help='define the representation method')
parser.add_argument('--num-classes', default=None, type=int,
                    help='define the number of classes')
parser.add_argument('--freezed-layer', default=None, type=int,
                    help='define the end of freezed layer')
parser.add_argument('--store-model-everyepoch', dest='store_model_everyepoch', action='store_true',
                    help='store checkpoint in every epoch')
parser.add_argument('--classifier-factor', default=None, type=int,
                    help='define the multiply factor of classifier')
parser.add_argument('--benchmark', default=None, type=str,
                    help='name of dataset')
# Best top-1 accuracy seen so far; updated as a global inside main().
best_prec1 = 0
def main():
    """Entry point: build the model, data loaders and run the train loop.

    Reads the global argparse ``parser``; mutates globals ``args`` and
    ``best_prec1``. Side effects: CUDA initialization, checkpoint files
    and stats written under ``args.modeldir``.
    """
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    args.distributed = args.world_size > 1
    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size)
    # create model
    # GAvP / MPNCOV / BCNN are imported via `from src.representation import *`
    if args.representation == 'GAvP':
        representation = {'function':GAvP,
                          'input_dim':2048}
    elif args.representation == 'MPNCOV':
        representation = {'function':MPNCOV,
                          'iterNum':5,
                          'is_sqrt':True,
                          'is_vec':True,
                          'input_dim':2048,
                          'dimension_reduction':None if args.pretrained else 256}
    elif args.representation == 'BCNN':
        representation = {'function':BCNN,
                          'is_vec':True,
                          'input_dim':2048}
    else:
        warnings.warn('=> You did not choose a global image representation method!')
        representation = None # which for original vgg or alexnet
    model = get_model(args.arch,
                      representation,
                      args.num_classes,
                      args.freezed_layer,
                      pretrained=args.pretrained)
    # plot network
    vizNet(model, args.modeldir)
    # obtain learning rate
    LR = Learning_rate_generater(args.lr_method, args.lr_params, args.epochs)
    # Per-parameter-group learning rates: the classifier gets
    # lr * classifier_factor; for pretrained vgg the classifier skips decay.
    if args.pretrained:
        params_list = [{'params': model.features.parameters(), 'lr': args.lr,
                        'weight_decay': args.weight_decay},]
        params_list.append({'params': model.representation.parameters(), 'lr': args.lr,
                            'weight_decay': args.weight_decay})
        params_list.append({'params': model.classifier.parameters(),
                            'lr': args.lr*args.classifier_factor,
                            'weight_decay': 0. if args.arch.startswith('vgg') else args.weight_decay})
    else:
        params_list = [{'params': model.features.parameters(), 'lr': args.lr,
                        'weight_decay': args.weight_decay},]
        params_list.append({'params': model.representation.parameters(), 'lr': args.lr,
                            'weight_decay': args.weight_decay})
        params_list.append({'params': model.classifier.parameters(),
                            'lr': args.lr*args.classifier_factor,
                            'weight_decay':args.weight_decay})
    optimizer = torch.optim.SGD(params_list, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Device placement: single GPU, distributed, or DataParallel fallback.
    if args.gpu is not None:
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    train_transforms, val_transforms = preprocess_strategy(args.benchmark)
    train_dataset = datasets.ImageFolder(
        traindir,
        train_transforms)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, val_transforms),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    # make directory for storing checkpoint files
    if os.path.exists(args.modeldir) is not True:
        os.mkdir(args.modeldir)
    # `stats` comes from the star-imported `functions` module — TODO confirm
    stats_ = stats(args.modeldir, args.start_epoch)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, LR.lr_factor, epoch)
        # train for one epoch
        trainObj, top1, top5 = train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        valObj, prec1, prec5 = validate(val_loader, model, criterion)
        # update stats
        stats_._update(trainObj, top1, top5, valObj, prec1, prec5)
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        filename = []
        if args.store_model_everyepoch:
            filename.append(os.path.join(args.modeldir, 'net-epoch-%s.pth.tar' % (epoch + 1)))
        else:
            filename.append(os.path.join(args.modeldir, 'checkpoint.pth.tar'))
        filename.append(os.path.join(args.modeldir, 'model_best.pth.tar'))
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best, filename)
    plot_curve(stats_, args.modeldir, True)
    data = stats_
    # `sio` presumably is scipy.io from the star imports — TODO confirm
    sio.savemat(os.path.join(args.modeldir,'stats.mat'), {'data':data})
def train(train_loader, model, criterion, optimizer, epoch):
    """Train *model* for one epoch; return (avg loss, avg top1, avg top5).

    Reads the global ``args`` for gpu placement and print frequency.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
    return losses.avg, top1.avg, top5.avg
def validate(val_loader, model, criterion):
    """Evaluate *model* on *val_loader*; return (avg loss, avg top1, avg top5).

    Runs under ``torch.no_grad()``; reads the global ``args`` for gpu
    placement and print frequency.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                # BUGFIX: the Prec@5 column previously printed top1.val
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))
        print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return losses.avg, top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist *state* to disk and keep a copy of the best model.

    Args:
        state: serializable dict (epoch, state_dict, optimizer, ...).
        is_best: when True, also copy the checkpoint to the best-model path.
        filename: either a 2-element sequence ``[checkpoint_path, best_path]``
            (as used by ``main``) or a single path string; a bare string is
            paired with ``model_best.pth.tar`` in the same directory.

    BUGFIX: the original indexed ``filename[0]``/``filename[1]`` even for the
    string default, which would have written to a file named 'c'.
    """
    if isinstance(filename, str):
        filename = [filename,
                    os.path.join(os.path.dirname(filename), 'model_best.pth.tar')]
    torch.save(state, filename[0])
    if is_best:
        shutil.copyfile(filename[0], filename[1])
class AverageMeter(object):
    """Running-average tracker: remembers the latest value, the running
    sum, the sample count, and the resulting mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Drop all accumulated statistics and start from zero."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Fold in *val* observed *n* times and refresh the mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
class Learning_rate_generater(object):
    """Generates a list of learning rate for each training epoch.

    Supports two schedules:
      * 'step' — multiply the base lr by 0.1 at each milestone epoch;
      * 'log'  — logarithmically spaced lrs between two exponents.
    Exposes ``lr_factor`` (per-epoch multipliers) and ``lr`` (absolute lrs).
    """
    def __init__(self, method, params, total_epoch):
        if method == 'step':
            lr_factor, lr = self.step(params, total_epoch)
        elif method == 'log':
            lr_factor, lr = self.log(params, total_epoch)
        else:
            raise KeyError("=> undefined learning rate method '{}'" .format(method))
        self.lr_factor = lr_factor
        self.lr = lr
    def step(self, params, total_epoch):
        """Step decay: factor 0.1**k after the k-th milestone in *params*."""
        # `decode_params` presumably comes from `from functions import *`
        # — TODO confirm; not defined in this file.
        decrease_until = decode_params(params)
        decrease_num = len(decrease_until)
        base_factor = 0.1
        lr_factor = []
        lr = []
        flag = 0
        for epoch in range(total_epoch):
            # `flag` counts how many milestones have been passed so far
            if flag < decrease_num:
                if epoch >= decrease_until[flag]:
                    flag+=1
            lr_factor.append(np.power(base_factor,flag))
            lr.append(args.lr*lr_factor[epoch])
        return lr_factor, lr
    def log(self, params, total_epoch):
        """Log-spaced schedule between 10**params[0] and 10**params[1]."""
        # `decode` presumably comes from a star import — TODO confirm.
        params = decode(params)
        left_range = params[0]
        right_range = params[1]
        np_lr = np.logspace(left_range, right_range, total_epoch)
        lr_factor = []
        lr = []
        for epoch in range(total_epoch):
            lr.append(np_lr[epoch])
            lr_factor.append(np_lr[epoch]/np_lr[0])
        # NOTE(review): mutates the global args.lr so downstream factor
        # multiplication yields the intended absolute lr.
        if lr[0] != args.lr:
            args.lr = lr[0]
        return lr_factor, lr
def adjust_learning_rate(optimizer, lr_factor, epoch):
    """Apply this epoch's pre-computed factor to the base learning rate.

    ``lr_factor`` is the per-epoch multiplier list produced by
    Learning_rate_generater; reads the global ``args.lr``.
    """
    new_lr = lr_factor[epoch] * args.lr
    print('the learning rate is set to {0:.5f}'.format(new_lr))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, each holding the top-k accuracy in
        percent, in the same order as *topk*.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # BUGFIX: use reshape() instead of view() — the sliced mask is
            # not guaranteed to be contiguous, and view() raises on
            # non-contiguous tensors in modern PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
# Script entry point: parse args and launch training/evaluation.
if __name__ == '__main__':
    main()
|
16,254 | 66e69404e9019e0714709abb34ba90576a62df1c | """
Checagens / Questionamentos
Usamos para checar se expressoes no codigo são validas ou não.
Podemos personalizar mensagens de erro.
SE UM PROGRAMA PYTHON FOR EXECUTADO COM O PARAMETRO -O, NENHUMA ASSERTION
SERÁ VALIDADA. OU SEJA, TODAS AS VALIDAÇÕES JÁ ERAM...
"""
#USANDO O ASSERT QUASE COMO SE FOSSE UM IF:
#Using assert almost as if it were an if:
def soma_num_pos(a, b):
    """Return a + b, asserting both operands are strictly positive.

    NOTE: asserts are stripped when Python runs with -O; this is a
    tutorial example — real input validation should raise ValueError.
    """
    assert a > 0 and b > 0, 'Numeros Precisam Ser Positivos'
    return a + b
oBJETO = soma_num_pos(2, 6)
print(oBJETO)
#Obj2 = soma_num_pos(2, -3) #Shows the assert message in the error -> 'Numeros Precisam Ser Positivos'
#print(Obj2)
def comer(comida):
    """Return an "I am eating <comida>" message, asserting the food is
    one of the allowed fast-food options (stripped under -O)."""
    assert comida in (
        'pizza',
        'hamburguer',
        'batata_frita'
    ), \
        "A Comida tem que ser Fast Food" #Message shown with the error if the input is not in the tuple.
    return f'Eu estou comendo {comida}'
Comida = input("Comida: ")
Obj3 = comer(Comida)
print(Obj3)
"""
Se você executar no terminal o Programa Python com -O (Menos Ó Maiusculo antes do nome do
arquivo .PY ) -> Todos os ASSERTS não vão funcionar, e nenhuma mensagem de erro vai
ser mostrada na tela. Tudo vai rodar como se tudo estivesse OK.
"""
|
16,255 | c1f5e583593854b1086bd394fce9ea3ff1b05ba5 | from scanners import SimpleScanner, TextScanner
from tokens import eof_token, null_token
# A tokenizer, the purpose of this class is to transform a markdown string
# into a list of "tokens". In this case, each token has a type and a value.
#
# Example:
# "_Hi!_" => [{type: UNDERSCORE, value: '_'}, {type: TEXT, value: 'Hi!'},
# {type: UNDERSCORE, value: '_'}]
#
TOKEN_SCANNERS = (SimpleScanner, # Recognizes simple one-char tokens like `_` and `*`
TextScanner) # Recognizes everything but a simple token
class Tokenizer(object):
    """Transforms a markdown string into a flat list of tokens by trying
    each scanner in TOKEN_SCANNERS on the remaining input."""

    def tokenize(self, plain_md):
        """Public entry point: return the token list for *plain_md*."""
        return self.tokens_as_array(plain_md)

    def tokens_as_array(self, plain_md):
        """Scan *plain_md* left to right, one token at a time.

        Returns the tokens terminated by ``eof_token``; deliberately
        returns ``[]`` (best-effort) when no scanner matches some suffix.
        """
        tokens = []
        if plain_md:
            i = 0
            while i < len(plain_md):
                try:
                    token = self.scan_one_token(plain_md[i:])
                except Exception:
                    # force return [] when meet exception
                    return []
                i += token.length
                tokens.append(token)
        tokens.append(eof_token)
        return tokens

    def scan_one_token(self, plain_md):
        """Return the first token any scanner produces for *plain_md*.

        Raises Exception when no scanner matches.

        BUGFIX: removed leftover debug code
        (``if plain_md == 'HI': assert token == null_token``) that made
        tokenization crash on any input starting with "HI".
        """
        for scanner in TOKEN_SCANNERS:
            token = scanner.from_string(plain_md)
            if token != null_token:
                return token
        raise Exception("The scanners could not match the given input: " + plain_md)
|
16,256 | e724e4bff7fa61e6348e2decb6b1348c038c793e | class Solution(object):
def partition(self, head, x):
if head is None:
return None
result=l1=ListNode(0)
result1=l2=ListNode(0)
while head:
if head.val<x:
l1.next=head
l1=l1.next
else:
l2.next=head
l2=l2.next
head=head.next
l2.next=None
l1.next=result1.next
return result.next
|
16,257 | fb11d158ba2ced33bf302cd930f2deeafd2c7485 | from threading import local
_LOCAL = local()
class ThreadLocalCredentials:
    """Holds credentials in thread-local storage and proxies
    ``authorize`` calls to the current thread's credentials."""

    def add(self, credentials):
        """Bind *credentials* to the calling thread."""
        _LOCAL.credentials = credentials

    def authorize(self, http):
        """Authorize *http* with the calling thread's credentials.

        Raises AttributeError if ``add`` was never called on this thread.
        """
        creds = _LOCAL.credentials
        return creds.authorize(http)
16,258 | 8085436ef991b07174269c03b674035df39c613d | ''' ===================== HYBRID SELECTION STRATEGY (HSS) ======================
Universidade Federal de Sao Carlos - UFSCar, Sorocaba - SP - Brazil
Master of Science Project Artificial Intelligence
Prof. Tiemi Christine Sakata (tiemi@ufscar.br)
Author: Vanessa Antunes (tunes.vanessa@gmail.com)
============================================================================ '''
import matplotlib.pyplot as plt
from collections import OrderedDict
import numpy as np
def plottingPartitions(plt, numFigure, ari, nameTP, f1, f2, maxx, Name, title, X, Y, figureName):
    """Scatter-plot candidate partitions colored by their best ARI score,
    with hover annotations, the true partitions, and a Pareto front line.

    Args (as used below; exact shapes inferred — TODO confirm with caller):
        plt: matplotlib.pyplot module (passed in, also mutated globally).
        ari: dict with key 'ari' -> per-partition list of ARI strings.
        nameTP: names of the true partitions.
        f1, f2: x (variance) / y (connectivity) coordinates; entries past
            *maxx* are the true partitions.
        X, Y: polyline coordinates (presumably the Pareto front).
        figureName: output .png path.
    Returns the pyplot module so the caller can ``show()``.
    """
    fig = plt.figure(numFigure)
    #plt.scatter(f1, f2)
    ax = plt.axes()
    points_with_annotation = []
    for i in range(0, maxx):
        # Marker style encodes the best ARI achieved by this partition.
        maxAri = float(max(ari['ari'][i]))
        if maxAri == 1:
            point, = plt.plot(f1[i], f2[i], 's', markersize=12, color='k', label='ARI = 1')
        elif maxAri >= 0.85:
            point, = plt.plot(f1[i], f2[i], '^', markersize=11, color='b', label='0.85 <= ARI < 1')
        elif maxAri >= 0.70:
            point, = plt.plot(f1[i], f2[i], 'v', markersize=11, color='m', label='0.70 <= ARI < 0.85')
        else:
            point, = plt.plot(f1[i], f2[i], 'o', markersize=9, color='#848482', label='0 <= ARI < 0.70')
        # Build the hover tooltip: one "<true partition> <ari>" line each.
        textAnnotation = []
        ari_med = 0
        for auxi, auxj in zip(nameTP, ari['ari'][i]):
            textAnnotation.append(auxi + ' '+ str(auxj))
            ari_med = ari_med + float(auxj)
        annotation = annotationPlot(ax, Name[i] + '\n' + '\n'.join(textAnnotation) + '\nARI medio: ' + str(ari_med), f1[i], f2[i])
        # by default, disable the annotation visibility
        annotation.set_visible(False)
        points_with_annotation.append([point, annotation])
    points_with_annotation = points_with_annotation + plot_true_partitions(ax, f1[maxx:], f2[maxx:], nameTP)
    def on_move(event):
        # Show an annotation only while the cursor is over its point.
        visibility_changed = False
        for point, annotation in points_with_annotation:
            should_be_visible = (point.contains(event)[0] == True)
            if should_be_visible != annotation.get_visible():
                visibility_changed = True
                annotation.set_visible(should_be_visible)
        if visibility_changed:
            plt.draw()
    #x1,x2,y1,y2 = plt.axis()
    #plt.axis((x1-10,x2+10,y1-10,y2+10))
    plt.plot(X, Y, linewidth=1.5, color='g')
    plt.xlabel('Variance')
    plt.ylabel('Connectivity')
    plt.title(title)
    #plt.grid(True)
    # Deduplicate legend entries (one per marker style, not per point).
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    plt.legend(by_label.values(), by_label.keys(), loc='upper right', shadow=True, fontsize='medium', numpoints=1)
    #legend = plt.legend(loc='upper right', shadow=True, fontsize='small')
    on_move_id = fig.canvas.mpl_connect('motion_notify_event', on_move)
    plt.savefig(figureName, format='png', dpi=1000)
    return plt
def plot_true_partitions(ax, f1TP, f2TP, nameTP):
    """Plot the true partitions as red diamonds with hidden hover
    annotations; return [point, annotation] pairs for the hover handler.

    NOTE(review): uses the module-level ``plt`` import for plotting while
    attaching annotations to *ax* — consistent with plottingPartitions.
    """
    points_with_annotation = []
    for i in range(len(f1TP)):
        point, = plt.plot(f1TP[i], f2TP[i], 'd', markersize=11, color='r', label='TP')
        annotation = annotationPlot(ax, nameTP[i], f1TP[i], f2TP[i])
        # by default, disable the annotation visibility
        annotation.set_visible(False)
        points_with_annotation.append([point, annotation])
    return points_with_annotation
def annotationPlot(ax, text, f1, f2):
    """Create a boxed annotation for point (f1, f2) on *ax* and return it.

    The label is offset slightly left and above the point and connected
    to it with a curved arrow; visibility is managed by the caller.
    """
    bbox_annotation = dict(boxstyle="round", facecolor="w", edgecolor="0.5", alpha=0.9)
    arrowprops_style = dict(arrowstyle="simple", connectionstyle="arc3,rad=-0.2")
    annotation = ax.annotate("%s" % (text),
            xy=(f1, f2), xycoords='data',
            xytext=(float(f1) - float(f1) / 100, float(f2) + float(f2) / 10), textcoords='data',
            horizontalalignment="left",
            arrowprops=arrowprops_style,
            size = 10,
            bbox=bbox_annotation
            )
    return annotation
def plotRegionDivision(plt, numFigure, ari, nameTP, reduced_p_frontX, reduced_p_frontY, reduced_p_frontName, y_pred, title):
    """Plot the reduced Pareto front colored by cluster assignment.

    Each point (variance, connectivity) is drawn in the color of its
    predicted region ``y_pred[i]`` with an Omega_k legend label.
    Returns the pyplot module. NOTE(review): *ari*, *nameTP* and
    *reduced_p_frontName* are currently unused except in commented code.
    """
    fig = plt.figure(numFigure)
    ax = plt.axes()
    # Fixed palette, tiled so any number of clusters gets a color.
    colors = np.array(['#808080', '#FF0000', '#0000FF', '#808000', '#008000', '#00FF00', '#008080', '#FF00FF', '#800080', '#FFFF00', '#00FFFF'])
    colors = np.hstack([colors] * 10)
    maxx = len(reduced_p_frontX)
    array = []
    for i in range(0, maxx):
        aux = y_pred[i]
        # LaTeX-style cluster label, e.g. $\Omega_{3}$
        aux1 = '$\Omega_{' + str(aux) + '}$'
        plt.plot(reduced_p_frontX[i], reduced_p_frontY[i], 'o', markersize=9, color=colors[y_pred][i], label=aux1)
        # Plot para artigo, solucoes selecionadas em vermelho
        '''if reduced_p_frontName[i] in namePartitionsSelected:
            array.append(i)
        else:
            plt.plot(reduced_p_frontX[i], reduced_p_frontY[i], 'o', markersize=5, color='b')
    for _ in array:
        plt.plot(reduced_p_frontX[_], reduced_p_frontY[_], 'o', markersize=5, color='r')
    '''
    #plt.scatter(reduced_p_frontX, reduced_p_frontY, color=colors[y_pred].tolist(), s=45)
    plt.xlabel('Variance')
    plt.ylabel('Connectivity')
    plt.title(title)
    # Deduplicate legend entries (one per cluster, not per point).
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    plt.legend(by_label.values(), by_label.keys(), loc='upper right', shadow=True, fontsize='large', numpoints=1)
    return plt
|
16,259 | 94fd09e4ba6fbd0048cf5bb465bb7194ad3d9c1b | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import typing as _typing
import folly.iobuf as _fbthrift_iobuf
import thrift.py3.builder
import module.types as _module_types
class InitialResponse_Builder(thrift.py3.builder.StructBuilder):
content: _typing.Optional[str]
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
class FinalResponse_Builder(thrift.py3.builder.StructBuilder):
content: _typing.Optional[str]
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
class SinkPayload_Builder(thrift.py3.builder.StructBuilder):
content: _typing.Optional[str]
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
class CompatibleWithKeywordSink_Builder(thrift.py3.builder.StructBuilder):
sink: _typing.Optional[str]
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
class InitialException_Builder(thrift.py3.builder.StructBuilder):
reason: _typing.Optional[str]
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
class SinkException1_Builder(thrift.py3.builder.StructBuilder):
reason: _typing.Optional[str]
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
class SinkException2_Builder(thrift.py3.builder.StructBuilder):
reason: _typing.Optional[int]
def __iter__(self) -> _typing.Iterator[_typing.Tuple[str, _typing.Any]]: ...
|
16,260 | 85764ed6e00b138f67402af449d0e61aa3ff4724 | class CondaLockError(Exception):
"""
Generic conda-lock error.
"""
class PlatformValidationError(CondaLockError):
    """
    Error that is thrown when trying to install a lockfile that was built
    for a different platform.
    """
class MissingEnvVarError(CondaLockError):
    """
    Error thrown when environment variables referenced in channel URLs
    are not set in the current environment.
    """
class ChannelAggregationError(CondaLockError):
    """
    Error thrown when lists of channels cannot be combined.
    """
|
16,261 | 4a36130e91f4559047c640f99f96d3743a3bd647 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Alexey Sanko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for pybuilder_semver_git_tag module
"""
from random import shuffle
from unittest import TestCase
from mock import Mock, patch
from pybuilder.core import Project
from pybuilder.errors import BuildFailedException
from pybuilder_semver_git_tag import (
_add_dev,
_TagInfo,
initialize_semver_git_tag,
_seek_last_semver_tag,
set_version_from_git_tag,
force_semver_git_tag_plugin,
update_version_from_git_tag,
_get_repo_name,
_get_repo_info
)
class SemVerGitPluginInitializationTests(TestCase):
    """Tests for initialize_semver_git_tag: default and user-set properties."""
    def setUp(self):
        # Fresh pybuilder Project per test so property state is isolated.
        self.project = Project("basedir")
    def test_should_set_default_properties(self):  # pylint: disable=invalid-name
        """Plugin initialization must install its default properties."""
        initialize_semver_git_tag(self.project)
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(
            self.project.get_property('semver_git_tag_increment_part'), 'patch')
        self.assertEquals(
            self.project.get_property('semver_git_tag_version_prefix'), '')
    def test_should_leave_user_specified_properties(self):  # pylint: disable=invalid-name
        """Plugin initialization must not override user-defined properties."""
        self.project.set_property('semver_git_tag_increment_part', 'minor')
        self.project.set_property('semver_git_tag_repo_dir', '/some/dir')
        self.project.set_property('semver_git_tag_changelog',
                                  'dir/CHANGELOG.md')
        self.project.set_property('semver_git_tag_version_prefix', 'v')
        initialize_semver_git_tag(self.project)
        self.assertEquals(
            self.project.get_property('semver_git_tag_increment_part'), 'minor')
        self.assertEquals(
            self.project.get_property('semver_git_tag_repo_dir'), '/some/dir')
        self.assertEquals(
            self.project.get_property('semver_git_tag_changelog'),
            'dir/CHANGELOG.md')
        self.assertEquals(
            self.project.get_property('semver_git_tag_version_prefix'), 'v')
class SeekLastTagTests(TestCase):
    """Tests for _seek_last_semver_tag, with and without a version prefix.

    Tag lists are shuffled repeatedly to verify the result does not
    depend on input order.
    """
    def test_basic_seek(self):
        """Highest SemVer tag wins; exclusion picks the next highest."""
        tags = []
        for i in range(15):
            tags.append(_TagInfo(
                '1.0.' + str(i),
                'commit' + str(i),
                ''))
        for i in range(15):
            shuffle(tags)
            self.assertEqual(_seek_last_semver_tag(tags).name, '1.0.14')
            self.assertEqual(_seek_last_semver_tag(tags, '1.0.14').name,
                             '1.0.13')
    def test_none_return(self):
        """Returns None when no tag parses as SemVer (no prefix declared)."""
        tags = []
        for i in range(15):
            tags.append(_TagInfo('v1.0.' + str(i), 'commit' + str(i), ''))
        for i in range(15):
            shuffle(tags)
            self.assertEqual(_seek_last_semver_tag(tags), None)
    def test_none_return_if_all_excluded(self):  # pylint: disable=invalid-name
        """Returns None when the only SemVer tag is excluded."""
        tags = [_TagInfo('1.0.1', 'commit1', ''),
                _TagInfo('notsemver', 'commit2', '')]
        self.assertEqual(_seek_last_semver_tag(tags, '1.0.1'), None)
    def test_basic_version_seek(self):
        """Prefixed tags resolve the same way, keeping the prefix in .name."""
        version_prefix = 'v'
        tags = []
        for i in range(15):
            tags.append(_TagInfo('%s1.0.%s' % (version_prefix, i),
                                 'commit%s' % i,
                                 version_prefix))
        for i in range(15):
            shuffle(tags)
            self.assertEqual(_seek_last_semver_tag(tags).name, 'v1.0.14')
            self.assertEqual(_seek_last_semver_tag(tags, '1.0.14').name,
                             'v1.0.13')
    def test_none_version_return(self):
        """Returns None when tags lack the declared prefix."""
        version_prefix = 'v'
        tags = []
        for i in range(15):
            tags.append(_TagInfo('1.0.' + str(i),
                                 'commit' + str(i),
                                 version_prefix))
        for i in range(15):
            shuffle(tags)
            # NOTE(review): the prefix is passed as the second positional
            # argument (elsewhere used for the excluded tag) — confirm the
            # signature of _seek_last_semver_tag.
            self.assertEqual(_seek_last_semver_tag(tags, version_prefix), None)
    def test_none_version_return_if_all_excluded(self):  # pylint: disable=invalid-name
        """Returns None when the only valid prefixed SemVer tag is excluded."""
        version_prefix = 'v'
        tags = [_TagInfo('v1.0.1', 'commit1', version_prefix),
                _TagInfo('notsemver', 'commit2', version_prefix),
                _TagInfo('v1.0.v2', 'commit2', version_prefix)]
        self.assertEqual(_seek_last_semver_tag(tags, '1.0.1'), None)
class VersionFromGitTests(TestCase):
    """ Test plugin functionality (set_version_from_git_tag).

    Note: `assertEquals` (a deprecated alias) was replaced with
    `assertEqual` — the alias was removed in Python 3.12.
    """
    def setUp(self):
        self.project = Project("basedir")
        self.logger = Mock()
    def test__add_dev(self):
        """ Test _add_dev appends the `.dev` suffix"""
        self.assertEqual(_add_dev('1.2.3'), '1.2.3.dev')
    def test_should_raise_if_git_repo_not_exists(self):  # pylint: disable=invalid-name
        """ Plugin should raise exception if cannot find git root directory"""
        with self.assertRaises(BuildFailedException) as context:
            set_version_from_git_tag(self.project, self.logger)
        err_msg = str(context.exception)
        self.assertTrue(
            "Directory `basedir` isn't git repository root." in err_msg)
    @patch("pybuilder_semver_git_tag._get_repo_info",
           return_value=([_TagInfo('not_semver2', 'commit2', ''),
                          _TagInfo('not_semver1', 'commit1', '')],
                         'last_commit', False))
    def test_should_warning_if_semver_tag_not_found(self, mock_git_info):  # pylint: disable=invalid-name, unused-argument
        """ Plugin should warn if SemVer tag wasn't found and return"""
        set_version_from_git_tag(self.project, self.logger)
        self.logger.warn.assert_called_once_with(
            "No SemVer git tag found. "
            "Consider removing plugin pybuilder_semver_git_tag.")
        self.logger.info.assert_not_called()
    @patch("pybuilder_semver_git_tag._get_repo_info",
           return_value=([_TagInfo('1.2.3', 'last_commit', ''),
                          _TagInfo('not_semver1', 'commit1', '')],
                         'last_commit', False))
    def test_release_version_found(self, mock_git_info):  # pylint: disable=invalid-name, unused-argument
        """ Plugin should use tag as release version when it points at HEAD"""
        set_version_from_git_tag(self.project, self.logger)
        self.assertEqual(self.logger.info.call_count, 2)
        self.assertEqual(self.project.version, '1.2.3')
    def get_dev_version(self, increment_part):
        """ Util method which calls set_version_from_git_tag
        with a particular level of version increment part
        """
        self.project.set_property(
            'semver_git_tag_increment_part', increment_part)
        set_version_from_git_tag(self.project, self.logger)
    @patch("pybuilder_semver_git_tag._get_repo_info",
           return_value=([_TagInfo('1.2.3', 'last_commit', ''),
                          _TagInfo('not_semver1', 'commit1', '')],
                         'last_commit', True))
    def test_dev_version_if_dirty(self, mock_git_info):  # pylint: disable=invalid-name, unused-argument
        """ Plugin should generate dev version if repo is dirty"""
        # Test `patch` part
        self.get_dev_version('patch')
        self.assertEqual(self.project.version, '1.2.4.dev')
        # Test `minor` part
        self.get_dev_version('minor')
        self.assertEqual(self.project.version, '1.3.0.dev')
        # Test `major` part
        self.get_dev_version('major')
        self.assertEqual(self.project.version, '2.0.0.dev')
        # Test incorrect part
        self.project.set_property('semver_git_tag_increment_part', 'incorrect')
        with self.assertRaises(BuildFailedException) as context:
            set_version_from_git_tag(self.project, self.logger)
        err_msg = str(context.exception)
        self.assertTrue(
            ("Incorrect value for `semver_git_tag_increment_part` property. "
             "Has to be in (`major`, `minor`, `patch`), "
             "but `incorrect` passed.") in err_msg)
    @patch("pybuilder_semver_git_tag._get_repo_info",
           return_value=([_TagInfo('1.2.3', 'commit2', ''),
                          _TagInfo('not_semver1', 'commit1', '')],
                         'last_commit', False))
    def test_dev_version_if_tagged_not_last_commit(self, mock_git_info):  # pylint: disable=invalid-name, unused-argument
        """ Plugin should generate dev version
            if repo had commit(s) after SemVer tagged commit
        """
        # Test `patch` part
        self.get_dev_version('patch')
        self.assertEqual(self.project.version, '1.2.4.dev')
        # Test `minor` part
        self.get_dev_version('minor')
        self.assertEqual(self.project.version, '1.3.0.dev')
        # Test `major` part
        self.get_dev_version('major')
        self.assertEqual(self.project.version, '2.0.0.dev')
        # Test incorrect part
        self.project.set_property('semver_git_tag_increment_part', 'incorrect')
        with self.assertRaises(BuildFailedException) as context:
            set_version_from_git_tag(self.project, self.logger)
        err_msg = str(context.exception)
        self.assertTrue(
            ("Incorrect value for `semver_git_tag_increment_part` property. "
             "Has to be in (`major`, `minor`, `patch`), "
             "but `incorrect` passed.") in err_msg)
class UpdateVersionTests(TestCase):
    """ Test update_version_from_git_tag function"""
    def setUp(self):
        self.project = Project("basedir")
        self.logger = Mock()
    # Decorators apply bottom-up: `_get_repo_name` is the innermost patch,
    # so it arrives as the first mock argument.
    @patch("pybuilder_semver_git_tag.set_version_from_git_tag")
    @patch("pybuilder_semver_git_tag._get_repo_name")
    def test_force_and_update(self, _get_repo_name,  # pylint: disable=unused-argument
                              set_version_from_git_tag_mock):
        """ Test force set and update after that.

        Changing `semver_git_tag_increment_part` between the forced set and
        the update must trigger a compatibility warning.
        """
        force_semver_git_tag_plugin(self.project, self.logger)
        self.project.set_property('semver_git_tag_increment_part', 'minor')
        update_version_from_git_tag(self.project, self.logger)
        # Version computation runs once per call: force + update = 2.
        self.assertEqual(set_version_from_git_tag_mock.call_count, 2)
        self.assertEqual(self.logger.info.call_count, 2)
        self.logger.warn.assert_called_once_with(
            "Property `semver_git_tag_increment_part` was changed. "
            "For better compatibility recommended to use "
            "command line `pyb ... -P semver_git_tag_increment_part=...`, "
            "otherwise some version-related properties could "
            "be spoiled."
        )
class _Remotes(object): # pylint: disable=too-few-public-methods
def __init__(self, name, url):
self.name = name
self.url = url
class _Commit(object): # pylint: disable=too-few-public-methods
def __init__(self, hexsha):
self.hexsha = hexsha
class _Tag(object): # pylint: disable=too-few-public-methods
def __init__(self, name, commit):
self.name = name
self.commit = commit
class _Head(object): # pylint: disable=too-few-public-methods
def __init__(self, last_commit, prev_commits):
self.commit = last_commit
self.commits_list = prev_commits + [last_commit]
class _Repo(object): # pylint: disable=too-few-public-methods
def __init__(self, remotes=None, head=None, is_dirty=False, tags=None):
self.remotes = remotes if remotes else []
self.dirty = is_dirty
self.head = head
self.tags = tags if tags else []
def is_dirty(self):
""" Stub for is_dirty flag"""
return self.dirty
def iter_commits(self, rev=None): # pylint: disable=no-self-use
""" Stub for iter_commits"""
return iter(rev.commits_list)
class GetRepoNameTests(TestCase):
    """ Test _get_repo_name function (derives project name from remotes)"""
    def setUp(self):
        self.project = Project("basedir")
        self.logger = Mock()
    @patch("pybuilder_semver_git_tag._get_repo",
           return_value=(_Repo(remotes=[
               _Remotes(
                   'someremote',
                   'https://github.com/AlexeySanko/some_incorrect.git'),
               _Remotes(
                   'origin',
                   'https://github.com/AlexeySanko/pybuilder_semver_git_tag.git'),  # pylint: disable=line-too-long
               _Remotes(
                   'someotherremote',
                   'https://github.com/AlexeySanko/some_other_incorrect.git')
           ])))
    def test_get_name_from_origin(self, mock_get_repo):  # pylint: disable=unused-argument
        """Check that the `origin` remote wins over other remotes."""
        self.assertEqual(_get_repo_name(self.project, ''), 'pybuilder_semver_git_tag')
    @patch("pybuilder_semver_git_tag._get_repo",
           return_value=(_Repo(remotes=[
               _Remotes(
                   'myremote',
                   'https://github.com/AlexeySanko/pybuilder_semver_git_tag.git'),  # pylint: disable=line-too-long
               _Remotes(
                   'someotherremote',
                   'https://github.com/AlexeySanko/some_other_incorrect.git')
           ])))
    def test_get_name_from_any_remote(self, mock_get_repo):  # pylint: disable=unused-argument
        """Check that the function correctly works with repositories without
        an origin remote (any remote is used)"""
        self.assertEqual(_get_repo_name(self.project, ''), 'pybuilder_semver_git_tag')
    @patch("pybuilder_semver_git_tag._get_repo",
           return_value=(_Repo()))
    def test_get_name_from_no_remotes(self, mock_get_repo):  # pylint: disable=unused-argument
        """Check that with no remotes at all the function falls back
        to the base directory name"""
        self.assertEqual(_get_repo_name(self.project, ''), 'basedir')
class GetRepoInfoTests(TestCase):
    """ Test _get_repo_info function"""
    def setUp(self):
        self.project = Project("basedir")
        self.logger = Mock()
    # Fake repo: three historical commits plus HEAD; one tag points at a
    # commit from another branch and must be filtered out.
    @patch("pybuilder_semver_git_tag._get_repo",
           return_value=_Repo(
               head=_Head(
                   last_commit=_Commit("shaforlastcommit"),
                   prev_commits=[_Commit("shaforfirstcommit"),
                                 _Commit("shaforsecondcommit"),
                                 _Commit("shaforthirdcommit")]),
               tags=[_Tag('tag1', _Commit("shaforfirstcommit")),
                     _Tag('tag4', _Commit("shaforlastcommit")),
                     _Tag('tagotherbranch',
                          _Commit("shaforcommitfromotherbranch"))],
               is_dirty=True
           ))
    def test_get_info_for_active_branch(self, mock_get_repo):  # pylint: disable=unused-argument
        """Check that function returns only the tags reachable from HEAD,
        together with the last commit and the dirty flag."""
        tags, last_commit, repo_is_dirty = _get_repo_info('', None)
        self.assertEqual(repo_is_dirty, True)
        self.assertEqual(last_commit.hexsha, 'shaforlastcommit')
        # 'tagotherbranch' is not on the active branch and is dropped.
        self.assertEqual(len(tags), 2)
        for tag in tags:
            self.assertTrue(tag.name in ['tag1', 'tag4'])
|
16,262 | b5a2f3a5c79ea59288df07e2be478eb46b3c368a | import allure
from common.basic_handler import Basic
@allure.feature("客户端用户相关接口")
class TestIndexContentController:
    """Smoke tests for the client home-page content endpoints.

    Each test resolves an API definition by name via
    ``Basic.get_api_by_name`` and only asserts an HTTP 200 response.
    """
    @allure.story("获取首页展示内容1")
    def test_case1_get_home_page_content(self):
        """The `get_home_page_content` endpoint should respond with 200."""
        api_info = Basic().get_api_by_name("get_home_page_content")
        content = Basic().send_request(**api_info)
        assert content.status_code == 200
    @allure.story("获取首页展示内容2")
    def test_case2_get_home_page_content(self):
        """The `get_home_page_content2` endpoint should respond with 200."""
        api_info = Basic().get_api_by_name("get_home_page_content2")
        content = Basic().send_request(**api_info)
        # print(content.text)
        assert content.status_code == 200
16,263 | adfaf82ae077bab189d433374528e333960b816e | # https://leetcode.com/problems/binary-tree-zigzag-level-order-traversal/
from collections import deque
class Node:
    """A binary-tree node holding a value and two child links."""
    def __init__(self, key):
        self.val = key
        self.left = self.right = None
def levelOrder(root):
    """Zigzag (spiral) level-order traversal of a binary tree.

    Returns a list with one ``deque`` per level.  Even levels are read
    left-to-right; odd levels are filled via ``appendleft`` so they come
    out right-to-left without a reversal pass.
    """
    if root is None:
        return []
    levels = []
    frontier = deque([root])
    left_to_right = True
    while frontier:
        current = deque()
        # Process exactly the nodes that make up the current level.
        for _ in range(len(frontier)):
            node = frontier.popleft()
            if left_to_right:
                current.append(node.val)
            else:
                current.appendleft(node.val)
            if node.left:
                frontier.append(node.left)
            if node.right:
                frontier.append(node.right)
        levels.append(current)
        left_to_right = not left_to_right
    return levels
# Build the LeetCode sample tree [3, 9, 20, null, null, 15, 7] and print
# its zigzag level-order traversal.
root = Node(3)
root.left, root.right = Node(9), Node(20)
root.right.left, root.right.right = Node(15), Node(7)
print("DFS traversal of binary tree is -")
print(levelOrder(root))
|
16,264 | 7a45b95d7c1327cabc07cb644416e41b4a296a4a | class GameState():
def tick(self, delta):
raise NotImplementedError("tick() is not implemented by its child")
def get_name(self):
raise NotImplementedError("get_name() is not implemented by its child")
|
16,265 | 66a8706a8a6a011ebfad9c709eb9eac9d315a7fe | #!/usr/bin/python3
# coding=utf-8
# pylint: disable=I0011,W1401,E0401,R0914,R0915,R0912,C0103
# Copyright 2019 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nmap XML parser
Original author: patriknordlen
Modified for Dusty 1.0 by: arozumenko
Ported to Dusty 2.0 by: LifeDJIK
"""
import elementpath
from defusedxml.cElementTree import parse
from dusty.tools import log, url, markdown
from dusty.models.finding import DastFinding
from dusty.models.error import Error
from dusty.constants import SEVERITIES
def parse_findings(output_file, scanner):
    """ Parse Nmap XML findings (code from dusty 1.0)

    Args:
        output_file: path to the Nmap XML report to parse.
        scanner: scanner instance; findings are appended to
            ``scanner.findings`` and parse failures to ``scanner.errors``.
    """
    log.debug("Parsing findings")
    nscan = parse(output_file)
    root = nscan.getroot()
    # Check validity
    if "nmaprun" not in root.tag:
        log.error("Exception during Nmap findings processing: invalid XML file")
        error = Error(
            tool=scanner.get_name(),
            # Plain strings: the originals were f-strings without placeholders
            error="Exception during Nmap findings processing",
            details="Output file doesn't seem to be a valid Nmap xml file."
        )
        scanner.errors.append(error)
        return
    dupes = dict()
    # NOTE(review): hostInfo is never reset between hosts, so host info
    # accumulates across all hosts in the report — confirm this is intended.
    hostInfo = ""
    for host in root.iter("host"):
        ip = host.find("address[@addrtype='ipv4']").attrib["addr"]
        fqdn = None
        if host.find("hostnames/hostname[@type='PTR']") is not None:
            fqdn = host.find("hostnames/hostname[@type='PTR']").attrib["name"]
        #
        # NOTE(review): this iterates `os` elements of the whole report
        # (root.iter), not only the current host — confirm before changing.
        # Loop variable renamed `os` -> `os_elem` to avoid shadowing the
        # `os` module name.
        for os_elem in root.iter("os"):
            if ip is not None:
                hostInfo += "IP Address: %s\n" % ip
            if fqdn is not None:
                # Bugfix: was `fqdn += "FQDN: %s\n" % ip`, which interpolated
                # the IP instead of the FQDN and discarded the result.
                hostInfo += "FQDN: %s\n" % fqdn
            for osv in os_elem.iter("osmatch"):
                if "name" in osv.attrib:
                    hostInfo += "Host OS: %s\n" % osv.attrib["name"]
                if "accuracy" in osv.attrib:
                    hostInfo += "Accuracy: {0}%\n".format(osv.attrib["accuracy"])
            hostInfo += "\n"
        #
        # Select open ports; optionally include "unfiltered" ones too
        xpath_port_selector = "ports/port[state/@state='open']"
        if scanner.config.get("include_unfiltered", False):
            xpath_port_selector = "ports/port[state/@state=('open','unfiltered')]"
        #
        for portelem in elementpath.select(host, xpath_port_selector):
            port = portelem.attrib["portid"]
            protocol = portelem.attrib["protocol"]
            #
            title = f"Open port: {ip}:{port}/{protocol}"
            description = hostInfo
            description += f"Port: {port}\n"
            serviceinfo = ""
            #
            # Append product/version/extra info when the service was identified
            if portelem.find("service") is not None:
                if "product" in portelem.find("service").attrib:
                    serviceinfo += "Product: %s\n" % portelem.find("service").attrib["product"]
                #
                if "version" in portelem.find("service").attrib:
                    serviceinfo += "Version: %s\n" % portelem.find("service").attrib["version"]
                #
                if "extrainfo" in portelem.find("service").attrib:
                    serviceinfo += "Extra Info: %s\n" % portelem.find("service").attrib["extrainfo"]
                #
            description += serviceinfo
            #
            description += "\n\n"
            #
            # De-duplicate findings per (port, protocol, ip) triple
            dupe_key = f"{port}_{protocol}_{ip}"
            if dupe_key in dupes:
                find = dupes[dupe_key]
                if description is not None:
                    find["description"] += description
            else:
                find = {
                    "title": title,
                    "description": description,
                    "endpoints": list()
                }
                find["endpoints"].append(f"{ip}:{port}/{protocol}")
                dupes[dupe_key] = find
    # Create finding objects
    for item in dupes.values():
        finding = DastFinding(
            title=item["title"],
            description=markdown.markdown_escape(item["description"])
        )
        finding.set_meta("tool", scanner.get_name())
        # Open-port findings get the lowest severity level
        finding.set_meta("severity", SEVERITIES[-1])
        # Endpoints (for backwards compatibility)
        endpoints = list()
        for entry in item["endpoints"]:
            endpoint = url.parse_url(entry)
            if endpoint in endpoints:
                continue
            endpoints.append(endpoint)
        finding.set_meta("endpoints", endpoints)
        log.debug(f"Endpoints: {finding.get_meta('endpoints')}")
        # Done
        scanner.findings.append(finding)
|
16,266 | 9e8ecbcddcb360db5a9abd407762ddf04e8b7c2f | import string_ques
# Sample inputs exercised by every check below.
str1 = "aa bb cc"
str2 = "asodf"
str_func_obj = string_ques.StringFunctions(str1, str2)
# Run each check with its label; output is identical to the original script.
for label, check in (
        ("Test for unique Characters in a string:", str_func_obj.hasUniqueChars),
        ("Test for permutation in two strings:", str_func_obj.stringPermutation),
        ("urilifying a string:", str_func_obj.urlifyString),
        ("PermutationPalindrome:", str_func_obj.isPermutationPalindrome),
):
    print(label)
    print(check())
|
16,267 | da9d55487c82ff81edfc2288df32b71f38a29b7c | #!/usr/bin/python3
#coding: utf-8
# Este código pode ser otimizado
# Foi desenvolvido para correr em Linux
# Esta versão utilizando queues e threadpoolsexecutor ainda está em desenvolvimento
# Um dos desafios é fazer a barra de progresso para as threads em execução versus as terminadas
# Importar bibliotecas
import random, string, sys, os, time, datetime, zipfile, threading, concurrent.futures, psutil, logging, random
from tqdm import tqdm
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
#logging.basicConfig(level=logging.DEBUG,
# format='(%(threadName)-9s) %(message)s',)
#Função para extrair ficheiro
def extrair_ficheiro(ficheiro_zip,senha_cod,dirExtrair):
    """Try one password against the zip; on success report and terminate.

    Args:
        ficheiro_zip: open ``zipfile.ZipFile`` under attack.
        senha_cod: candidate password as ``bytes``.
        dirExtrair: directory to extract the archive into on success.

    Relies on module-level globals ``tinicio``/``tinicioStr`` for timing.
    On success the whole process (all worker threads) is terminated.
    """
    try:
        # Attempt extraction of the zip file with this password
        ficheiro_zip.extractall(dirExtrair, pwd=senha_cod)
    except Exception as e:
        # Wrong password / corrupt stream: swallow and let the caller try the
        # next candidate.  NOTE(review): all three branches are `pass`, so the
        # message checks are currently redundant.
        if "Bad password for file" in str(e):
            pass
        elif "Erro -3 enquanto unzip" in str(e):
            pass
        else:
            pass
    except KeyboardInterrupt:
        # Ctrl+C: KeyboardInterrupt is not a subclass of Exception, so this
        # branch is reachable despite the broad handler above.
        print("Program terminado a pedido! Bye...")
        sys.exit()
    except:
        # Catch-all for remaining BaseExceptions (e.g. SystemExit in a worker).
        pass
    else:
        # Extraction succeeded: report the password, print timing stats and
        # kill the whole process so sibling worker threads stop too.
        print("[+] A senha encontrada é: " + senha_cod.decode())
        print("[+] O conteúdo do ficheiro zip foi extraído para: " + dirExtrair)
        tfinal = time.time()
        tfinalStr = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(tfinal))
        # Compute total elapsed brute-force time
        ttotal = tfinal - tinicio
        print("Inicio do brutal force ao ficheiro zip " + str(tinicioStr))
        print("Fim do brutal force ao ficheiro zip: " + str(tfinalStr))
        print(f"O brutal force foi executado em: {ttotal} seg")
        current_system_pid = os.getpid()
        ThisSystem = psutil.Process(current_system_pid)
        ThisSystem.terminate()
        exit(0)
# Definição do worker para multi threading
def worker():
    """Drain password candidates from the shared queue until it is empty."""
    while not queue.empty():
        candidate = queue.get()
        extrair_ficheiro(ficheiro_zip, candidate, dirExtrair)
# Definição da função de threading
def super_thread():
    """Create one worker thread and register it in the shared thread list."""
    thread_list.append(threading.Thread(target=worker))
if __name__ == '__main__':
    # Declare variables
    msg_ajuda = "\nUtilize: python3 -help para obter ajuda!"
    queue = Queue()
    thread_list = []
    senhaencontrada = False  # NOTE(review): never read or updated below
    # Clear the screen
    os.system('clear')
    # ficheiro = "-help"
    #if len(sys.argv) > 1 and sys.argv[1] == "-help":
    #    print("\nPara executar este programa digite: python3 " + sys.argv[0] + " <nome do ficheiro zip> <nome do ficheiro de senha>")
    #    print("\nExemplo: python3 " + sys.argv[0] +" ficheiro.zip dicionario.txt\n")
    #    sys.exit(0) # Encerra a execução do programa
    #elif len(sys.argv) < 3:
    #    print("\nNúmero incorreto de argumentos!")
    #    print(msg_ajuda)
    #    sys.exit(0) # Encerra a execução do programa
    # Assign arguments to variables (command-line handling is disabled;
    # the file names are currently hard-coded below)
    #ficheiro = sys.argv[1]
    #dicionario = sys.argv[2]
    ficheiro = "private.zip"
    dicionario = "rockyou2.txt"
    #dicionario = "dicionario.txt"
    # Check that the input files exist
    if os.path.exists(ficheiro) == False:
        print("\nFicheiro zip " + ficheiro + " não encontrado")
        print(msg_ajuda)
        sys.exit(0) # Terminate program execution
    if os.path.exists(dicionario) == False:
        print("\nFicheiro de senha " + dicionario + " não encontrado")
        print(msg_ajuda)
        os._exit(1)
        sys.exit(0) # Terminate program execution -- NOTE(review): unreachable after os._exit(1)
    # Manage the directory where the zip will be extracted:
    # create a directory to unpack the zip file; if it already exists,
    # rename the old one (timestamp suffix) and create a fresh one
    dirExtrair = '/tmp/extrair'
    direxiste = os.path.isdir(dirExtrair)
    if direxiste == True:
        modTimeDir = os.path.getmtime(dirExtrair)
        newTime = time.strftime('%Y%m%d%H%M%S', time.localtime(modTimeDir))
        newDir = dirExtrair + "-" + newTime
        os.rename(dirExtrair,newDir)
    os.mkdir(dirExtrair)
    ficheiro_zip = zipfile.ZipFile(os.getcwd()+"/"+ficheiro)
    n_palavras = len(list(open(dicionario, "rb")))
    # Start the brute force (record the start time)
    tinicio = time.time()
    tinicioStr = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(tinicio))
    senha = None
    print("\nInicio do brutal force ao ficheiro zip " + str(tinicioStr))
    print("\nA preparar execução em threads")
    print("Total de senhas a testar e incluir na queue: " + str(n_palavras))
    thread_lock = threading.Lock()
    t_workers = 100
    t_threads = 5000
    # Fill the shared queue with every stripped password candidate
    with open(dicionario, "rb") as dicionario:
        for senha in tqdm(dicionario, total=n_palavras, unit=" senha "):
            senha_cod = senha.strip()
            queue.put(senha_cod)
    # Each submitted task only *creates* a thread (see super_thread);
    # the threads are started in the loop below
    with concurrent.futures.ThreadPoolExecutor(max_workers=t_workers) as executor:
        for t in range(t_threads):
            executor.submit(super_thread)
    # Thread management
    #for thread in thread_list:
    #    thread.start()
    print("\nTotal de threads a executar: " + str(len(thread_list)))
    #    for thread in thread_list:
    #        thread.daemon = True
    #        thread.start()
    for thread in tqdm(thread_list, total=len(thread_list), unit=" threads "):
        thread.daemon = True
        thread.start()
    #    main_thread = threading.current_thread()
    #    for t in threading.enumerate():
    #        if t is main_thread:
    #            continue
    #        logging.debug('joining %s', t.getName())
    #        t.join()
    # Wait for every worker to finish before reporting failure
    for thread in thread_list:
        thread.join()
    # End of the brute force
    tfinal = time.time()
    tfinalStr = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(tfinal))
    # Compute the elapsed time
    ttotal = tfinal - tinicio
    #print("Threads em execucação: ")
    #for thread in tqdm(thread_list, total=len(thread_list), unit=" threads "):
    #    thread.join()
    print("[!] Senha não encontrada!")
    print("\nFim do brutal force ao ficheiro zip: " + str(tfinalStr))
    print(f"\nO brutal force foi executado em: {ttotal} seg")
|
16,268 | 3d80ea434da7042df04895f184ac513508491ed8 | # Generated by Django 3.0.3 on 2020-02-17 10:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename ``Company.contact_email`` to
    ``contact_person_email`` (column data is preserved by RenameField)."""

    dependencies = [
        ('job', '0005_auto_20200215_0638'),
    ]

    operations = [
        migrations.RenameField(
            model_name='company',
            old_name='contact_email',
            new_name='contact_person_email',
        ),
    ]
|
16,269 | 1c2b3c8250f90ce9875b1a063ed6315b0f28b31a | import wx
from ObjectListView import ObjectListView, ColumnDefn
from openpyxl import load_workbook
from app.controller.load_data_dialog import load_and_convert
class LoadDataDialog(wx.Dialog):
    """Dialog that previews the first rows of an Excel worksheet.

    If *pathName* is given, the user is immediately prompted to pick a
    worksheet, which is loaded via ``load_and_convert`` and previewed in
    an ``ObjectListView``.
    """
    def __init__(self, parent, pathName='', **kwargs):
        super(LoadDataDialog, self).__init__(parent, **kwargs)
        self.pathName = pathName
        self.records = []
        self.columns = []
        vsizer = wx.BoxSizer(wx.VERTICAL)
        buttons = self.CreateButtonSizer(wx.CANCEL | wx.OK)
        self.dataOlv = ObjectListView(self, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
        vsizer.Add(self.dataOlv, 1, wx.EXPAND)
        vsizer.Add(buttons)
        self.SetSizer(vsizer)
        self.Layout()
        if self.pathName:
            self.choose_worksheet()

    def setData(self, data=None, columns=()):
        """Update the ObjectListView widget's contents.

        Args:
            data: sequence of row objects (only the first 20 are shown);
                ``None`` is treated as an empty sequence.
            columns: column names, used for both header titles and the
                attribute looked up on each row object.
        """
        # Bugfix: the default was a mutable `[]` (shared across calls);
        # an immutable empty tuple avoids the mutable-default pitfall
        # without changing any caller.
        if data is None:
            # Previously `len(data)` crashed on the `data=None` default.
            data = []
        olv_columns = [
            ColumnDefn(column.title(), "left", 120, column.lower())
            for column in columns
        ]
        self.dataOlv.SetColumns(olv_columns)
        # Cap the preview at 20 records (slicing handles both cases).
        self.dataOlv.SetObjects(data[:20])

    def choose_worksheet(self):
        """Ask the user for a worksheet and preview its converted rows."""
        wb = load_workbook(self.pathName)
        with wx.SingleChoiceDialog(self, '', 'Choose column', [ws.title for ws in wb.worksheets]) as dialog:
            if dialog.ShowModal() == wx.ID_OK:
                sheet = dialog.GetStringSelection()
                self.records, self.columns = load_and_convert(self.pathName, sheet)
                self.setData(self.records, self.columns)
|
16,270 | a8d8f308c3e52a8fc828db2e6806d8b10a965e5b | import wx
class MainFrame(wx.Frame):
    """Demo frame: a caption label next to a multi-select city list box."""
    def __init__(self, p, t):
        wx.Frame.__init__(self, id=-1, parent=p, size=(260, 300), title=t)
        container = wx.Panel(self, -1)
        # Static caption to the left of the list box.
        self.label1 = wx.StaticText(parent=container,
                                    id=-1,
                                    label=u"地点:",
                                    pos=(10, 10),
                                    size=(40, 58))
        # Multi-selection (LB_EXTENDED) list of cities.
        self.list1 = wx.ListBox(parent=container,
                                id=-1,
                                choices=[u"北京", u"上海", u"广州", u"深圳"],
                                pos=(60, 10),
                                size=(100, 68),
                                style=wx.LB_EXTENDED)
if __name__=='__main__':
    # Create the application, show the demo frame and enter the event loop.
    application = wx.App(False)
    window = MainFrame(None, 'CheckBox复选框')
    window.Show()
    application.MainLoop()
|
16,271 | d6222732471966971044d991601167b2f68f4340 | import torch
import numpy as np
import torch.nn as nn
import math
from ...utils import center_utils, box_utils, common_utils
from ..model_utils import model_nms_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
def draw_heatmap(heatmap, boxes):
    """Placeholder for heatmap drawing; intentionally not implemented."""
    raise NotImplementedError
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
# gather的用法??!!
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
    """Flatten a (B, H, W, C) map to (B, H*W, C) and gather rows at *ind*.

    *ind* holds flat spatial indices (y * W + x) of shape (B, K).
    """
    flattened = feat.view(feat.size(0), -1, feat.size(3))
    return _gather_feat(flattened, ind)
def _nms(heat, kernel=3):
pad = (kernel - 1)//2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad
)
keep = (hmax == heat).float()
return heat*keep
def _topk(scores, K=40):
    """Select the top-K scoring heatmap cells per batch across all classes.

    Args:
        scores: (B, C, H, W) class heatmaps.
        K: number of detections to keep per batch element.

    Returns:
        Tuple of five (B, K) tensors: scores, flat spatial indices
        (y * W + x), class ids, y coordinates and x coordinates.
    """
    batch_size, cat, height, width = scores.size()
    # Stage 1 -- per-class top-K over the flattened H*W plane:
    # (batch_size, cat, K)
    topk_scores, topk_inds = torch.topk(scores.view(batch_size, cat, -1), K)
    topk_inds = topk_inds % (height * width)
    # Recover 2-D coordinates from the flat spatial index.
    topk_ys = (topk_inds / width).int().float()
    topk_xs = (topk_inds % width).int().float()
    # Stage 2 -- global top-K across all classes: (batch_size, K)
    topk_score, topk_ind = torch.topk(topk_scores.view(batch_size, -1), K)
    # Each class contributed K candidates, so index // K is the class id.
    topk_clses = (topk_ind / K).int()  # class indices start from 0
    # Re-gather the stage-1 results in the global top-K order.
    topk_inds = topk_inds.view(batch_size, -1)
    topk_inds = topk_inds.gather(1, topk_ind).view(batch_size, K)
    topk_ys = topk_ys.view(batch_size, -1)
    topk_ys = topk_ys.gather(1, topk_ind).view(batch_size, K)
    topk_xs = topk_xs.view(batch_size, -1)
    topk_xs = topk_xs.gather(1, topk_ind).view(batch_size, K)
    return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
def _circle_nms(boxes, min_radius, post_max_size=83):
    """NMS by center distance; returns at most *post_max_size* kept indices."""
    kept = np.array(
        center_utils.circle_nms(boxes.cpu().numpy(), thresh=min_radius)
    )[:post_max_size]
    return torch.from_numpy(kept).long().to(boxes.device)
def _sigmoid(x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)
return y
def gaussian_radius(det_size, min_overlap=0.5):
    """Largest Gaussian radius keeping box IoU >= *min_overlap*.

    Follows the CornerNet formulation: solve three quadratic cases for
    how the corners may shift and take the smallest admissible root.
    (The roots are computed exactly as in the reference implementation.)
    """
    height, width = det_size
    # (a, b, c) coefficients for the three corner-shift cases.
    quadratics = (
        (1,
         height + width,
         width * height * (1 - min_overlap) / (1 + min_overlap)),
        (4,
         2 * (height + width),
         (1 - min_overlap) * width * height),
        (4 * min_overlap,
         -2 * min_overlap * (height + width),
         (min_overlap - 1) * width * height),
    )
    candidate_radii = []
    for a, b, c in quadratics:
        discriminant = np.sqrt(b ** 2 - 4 * a * c)
        candidate_radii.append((b + discriminant) / 2)
    return min(candidate_radii)
def gaussian2D(shape, sigma=1):
    """Return an un-normalised 2-D Gaussian kernel of the given shape.

    The peak value at the center is 1; numerically negligible tail
    values are zeroed out.
    """
    half_rows, half_cols = [(edge - 1.) / 2. for edge in shape]
    ys, xs = np.ogrid[-half_rows:half_rows + 1, -half_cols:half_cols + 1]
    kernel = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    return kernel
# Splat an isotropic Gaussian blob (UMich/CenterNet style) onto a heatmap.
def draw_umich_gaussian(heatmap, center, radius, k=1):
    """Draw a Gaussian of the given radius at *center*, scaled by *k*."""
    size = 2 * radius + 1
    kernel = gaussian2D((size, size), sigma=size / 6)
    radius_xy = np.array([radius, radius], dtype=np.float32)
    return fusion_heatmap(heatmap, kernel, center, radius_xy, k)
def fusion_heatmap(heatmap, part_heatmap, center, radius, k=1):
    """Blend a numpy patch into a torch heatmap via element-wise max.

    The patch is clipped at the heatmap borders.  Writing happens in
    place through a view of *heatmap*, and the heatmap is returned.
    """
    cx, cy = int(center[0]), int(center[1])
    rx, ry = int(radius[0]), int(radius[1])
    rows, cols = heatmap.shape[0:2]
    # Clip the patch extent so it stays inside the heatmap bounds.
    left, right = min(cx, rx), min(cols - cx, rx + 1)
    top, bottom = min(cy, ry), min(rows - cy, ry + 1)
    target = heatmap[cy - top:cy + bottom, cx - left:cx + right]
    patch = part_heatmap[ry - top:ry + bottom, rx - left:rx + right]
    if min(patch.shape) > 0 and min(target.shape) > 0:  # TODO debug
        # In-place max into the view updates `heatmap` directly.
        torch.max(target, torch.from_numpy(patch * k).type_as(target),
                  out=target)
    return heatmap
def get_points_in_boxes3d(points, boxes3d):
    """
    Keep only the points that fall inside at least one of the 3-D boxes.

    Args:
        points: (num_points, 3 + C)
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps

    Returns:
        Filtered points with their trailing channels preserved; returned
        as numpy iff *points* was passed as numpy.
    """
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
    # NOTE: is_numpy is overwritten here, so the output type follows
    # the type of `points` (not `boxes3d`).
    points, is_numpy = common_utils.check_numpy_to_torch(points)
    point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes3d)
    # A point survives if any box's mask marks it (sum over boxes != 0).
    points = points[point_masks.sum(dim=0) != 0]
    return points.numpy() if is_numpy else points
class CenterHeadTemplate(nn.Module):
    def __init__(self, model_cfg, num_class, class_names, grid_size, point_cloud_range,
                 predict_boxes_when_training, voxel_size):
        """Store detection-head configuration.

        Args:
            model_cfg: head config node; read elsewhere via ``.get``.
            num_class: number of detection classes.
            class_names: class name strings, aligned with class indices.
            grid_size: voxel-grid dimensions.
            point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max].
            predict_boxes_when_training: whether to decode boxes while training.
            voxel_size: voxel edge lengths.
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.class_names = class_names
        self.grid_size = grid_size
        self.point_cloud_range = point_cloud_range
        self.predict_boxes_when_training = predict_boxes_when_training
        self.voxel_size = voxel_size
        self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False)
        # Filled during forward(); consumed later (e.g. by loss computation).
        self.forward_ret_dict = {}
def AssignLabel(self, gt_boxes_classes, spatial_points=None):
"""
Args:
gt_boxes_classes: (B, M, 8)
spatial_points: (B, 1, W, H)
Returns:
target_heatmap: (B, class_num, W, H)
anno_box: (B, max_obj, 8/..) # (offset_2, height_1, size_3, orientation_2/8)
{
mask = (batch_size, max_object)
ind = (batch_size, max_object)
cat = (batch_size, max_object)
}
"""
# some param need to read from yaml
feater_map_stride = int(self.model_cfg.TARGET_ASSIGNER_CONFIG.get('MAP_STRIDE', 1))
# Optional ['2sin_cos', '8...'] -> 8... not implement
encode_orientation_type = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('ORIENTATION_ENCODING_TYPE', '2sin_cos')
# Optional ['umich_gaussian', 'car_shape', 'points_count']
heatmap_type = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('HEATMAP_ENCODING_TYPE', 'umich_gaussian')
gaussian_overlap = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('GAUSS_OVERLAP', 0.1)
min_radius = int(self.model_cfg.TARGET_ASSIGNER_CONFIG.get('GAUSS_MIN_RADIUS', 2))
auxiliary_reg = self.model_cfg.get('USE_AUXILIARY_REG', None)
# here x->H, y->W
mapW = int(self.grid_size[1]/feater_map_stride)
mapH = int(self.grid_size[0]/feater_map_stride)
# print('mapH: %d, mapW:%d' % (mapH, mapW))
gt_boxes = gt_boxes_classes[:, :, :-1]
gt_classes = gt_boxes_classes[:, :, -1]
batch_size, max_object, obj_encode_num = gt_boxes.shape
# Target here
target_heatmap = torch.zeros(batch_size, self.num_class, mapW, mapH)
if auxiliary_reg == 'point_counts':
point_counts = torch.zeros(batch_size, self.num_class, mapW, mapH)
elif auxiliary_reg == 'corner_cls':
corner_cls = torch.zeros(batch_size, self.num_class, mapW, mapH)
if encode_orientation_type == '2sin_cos':
anno_box = torch.zeros(batch_size, max_object, 8) # (offset:2, height:1, size:3, orientation:2 = 8)
else:
raise NotImplementedError('NOT REALIZE ALGORITHM!!')
# torch.int64 is necessary for torch index !!
mask = torch.zeros(batch_size, max_object, dtype=torch.int64)
ind = torch.zeros(batch_size, max_object, dtype=torch.int64)
cat = torch.zeros(batch_size, max_object, dtype=torch.int)
example = {}
for k in range(batch_size):
cur_gt = gt_boxes[k]
cnt = cur_gt.shape[0] - 1
while cnt > 0 and cur_gt[cnt].sum() == 0:
cnt -= 1
cur_gt = cur_gt[:cnt + 1]
cur_gt_classes = gt_classes[k][:cnt + 1].int()
if spatial_points is not None:
cur_spatial_points = spatial_points[k:k+1, 0:1, :, :] # (W, H)
avg_m = nn.AvgPool2d(feater_map_stride, stride=feater_map_stride)
avg_out = avg_m(cur_spatial_points)
cur_spatial_points = avg_out[0, 0, :, :].cpu()
for i in range(cnt + 1):
obj_box = cur_gt[i]
obj_class = cur_gt_classes[i] - 1
centerx, centery, centerz, dx, dy, dz, rot = obj_box.cpu().tolist()
centerw = (centery - self.point_cloud_range[1]) / self.voxel_size[1] / feater_map_stride
centerh = (centerx - self.point_cloud_range[0]) / self.voxel_size[0] / feater_map_stride
centerw_int = int(centerw)
centerh_int = int(centerh)
# throw out not in range objects to avoid out of array area when gather
if not (0 <= centerw_int < mapW and 0 <= centerh_int < mapH):
continue
if heatmap_type == 'car_shape':
###########################
# just like AFDet
# code need to optimize ...
##########################
car_shape_w = int(dy / self.voxel_size[1] / feater_map_stride)
car_shape_h = int(dx / self.voxel_size[0] / feater_map_stride)
obj_heatmap = torch.zeros(2*car_shape_w + 1, 2*car_shape_h + 1)
for w in range(-car_shape_w, car_shape_w + 1):
for h in range(-car_shape_h, car_shape_h + 1):
distance = math.sqrt(math.pow(w, 2) + math.pow(h, 2))
temp_w = centerw_int + w
temp_h = centerh_int + h
if not (0 <= temp_w < mapW and 0 <= temp_h < mapH):
continue
if distance == 0:
obj_heatmap[w + car_shape_w, h + car_shape_h] = 1.0
elif distance == 1:
obj_heatmap[w + car_shape_w, h + car_shape_h] = 0.8
else:
obj_heatmap[w + car_shape_w, h + car_shape_h] = 1 / distance
ct = np.array([centerh_int, centerw_int], dtype=np.float32)
radius = np.array([car_shape_h, car_shape_w], dtype=np.float32)
fusion_heatmap(target_heatmap[k][obj_class], obj_heatmap.numpy(), ct, radius)
elif heatmap_type == 'car_shape_real':
car_shape_w = int(dy / self.voxel_size[1] / feater_map_stride)
car_shape_h = int(dx / self.voxel_size[0] / feater_map_stride)
project_box = np.array([[centerh_int, centerw_int, 0, car_shape_h, car_shape_w, 0, rot]])
max_radius = math.ceil(math.sqrt(car_shape_h*car_shape_h + car_shape_w*car_shape_w)/2)
project_points = []
for hh in range(-max_radius, max_radius + 1):
for ww in range(-max_radius, max_radius + 1):
project_points.append([hh + centerh_int, ww + centerw_int, 0])
project_points = np.array(project_points)
project_points = get_points_in_boxes3d(project_points, project_box)
for nnn in range(project_points.shape[0]):
temp_h = int(project_points[nnn, 0])
temp_w = int(project_points[nnn, 1])
distance = math.sqrt(math.pow(temp_h - centerh_int, 2) + math.pow(temp_w - centerw_int, 2))
if not (0 <= temp_w < mapW and 0 <= temp_h < mapH):
continue
if distance == 0:
target_heatmap[k][obj_class][temp_w, temp_h] = max(target_heatmap[k][obj_class][temp_w, temp_h], 1.0)
elif distance == 1:
target_heatmap[k][obj_class][temp_w, temp_h] = max(target_heatmap[k][obj_class][temp_w, temp_h], 0.8)
else:
target_heatmap[k][obj_class][temp_w, temp_h] = max(target_heatmap[k][obj_class][temp_w, temp_h], 1/distance)
elif heatmap_type == 'umich_gaussian':
###########################
# just like CenterPoint
# 计算高斯半径这是用的栅格化的l和w
##########################
radius = gaussian_radius((dy / self.voxel_size[1] / feater_map_stride, dx / self.voxel_size[0] / feater_map_stride),
min_overlap=gaussian_overlap)
radius = max(min_radius, int(radius))
ct = np.array([centerh_int, centerw_int], dtype=np.float32)
draw_umich_gaussian(target_heatmap[k][obj_class], ct, radius)
elif heatmap_type == 'points_count':
###########################
# I think .... (as OneNet, just want to verify)
# 根据点云的特征,如果回归的“中心点”是box所包含的栅格里点数最多的那个栅格,“特征中心点”
##########################
if spatial_points is not None:
car_shape_w = int(dy / self.voxel_size[1] / feater_map_stride)
car_shape_h = int(dx / self.voxel_size[0] / feater_map_stride)
left, right = min(centerh_int, car_shape_h), min(mapH - centerh_int, car_shape_h + 1)
top, bottom = min(centerw_int, car_shape_w), min(mapW - centerw_int, car_shape_w + 1)
masked_heatmap = target_heatmap[k][obj_class][centerw_int-top:centerw_int+bottom, centerh_int-left:centerh_int+right]
masked_gaussian = cur_spatial_points[centerw_int-top:centerw_int+bottom, centerh_int-left:centerh_int+right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0 and masked_gaussian.max() > 0: # TODO debug
# np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
top1_point, top1_ind = torch.topk(masked_gaussian.reshape(-1), 1)
centerw_int_temp = (top1_ind / (right + left) + centerw_int - top).int()
centerh_int_temp = (top1_ind % (bottom + top) + centerh_int - left).int()
centerw_int = centerw_int_temp
centerh_int = centerh_int_temp
masked_gaussian = masked_gaussian / masked_gaussian.max()
torch.max(masked_heatmap, masked_gaussian.type_as(masked_heatmap),
out=masked_heatmap)
else:
continue
else:
raise Exception
else:
raise NotImplementedError('NOT REALIZE ALGORITHM!!')
if auxiliary_reg == 'point_counts_v1':
car_shape_w = int(dy / self.voxel_size[1] / feater_map_stride)
car_shape_h = int(dx / self.voxel_size[0] / feater_map_stride)
project_box = np.array([[centerh_int, centerw_int, 0, car_shape_h, car_shape_w, 0, rot]])
max_radius = math.ceil(math.sqrt(car_shape_h * car_shape_h + car_shape_w * car_shape_w) / 2)
project_points = []
for hh in range(-max_radius, max_radius + 1):
for ww in range(-max_radius, max_radius + 1):
if not (0 <= ww + centerw_int < mapW and 0 <= hh + centerh_int < mapH):
continue
project_points.append([hh + centerh_int, ww + centerw_int, 0,
cur_spatial_points[ww + centerw_int, hh + centerh_int]])
project_points = np.array(project_points)
project_points = get_points_in_boxes3d(project_points, project_box)
cur_max_count = max(project_points[:, 3])
if cur_max_count == 0:
# point_counts[k][obj_class][centerw_int, centerh_int] = 1.0
continue
else:
for nnn in range(project_points.shape[0]):
temp_h = int(project_points[nnn, 0])
temp_w = int(project_points[nnn, 1])
point_count_soft = project_points[nnn, 3] / cur_max_count
if not (0 <= temp_w < mapW and 0 <= temp_h < mapH):
continue
point_counts[k][obj_class][temp_w, temp_h] = float(point_count_soft)
if auxiliary_reg == 'point_counts':
car_shape_w = int(dy / self.voxel_size[1] / feater_map_stride)
car_shape_h = int(dx / self.voxel_size[0] / feater_map_stride)
left, right = min(centerh_int, car_shape_h), min(mapH - centerh_int, car_shape_h + 1)
top, bottom = min(centerw_int, car_shape_w), min(mapW - centerw_int, car_shape_w + 1)
masked_pointcount = cur_spatial_points[centerw_int - top:centerw_int + bottom, centerh_int - left:centerh_int + right]
top1_point, top1_ind = torch.topk(masked_pointcount.reshape(-1), 1)
centerw_int_temp = (top1_ind / (right + left) + centerw_int - top).int()
centerh_int_temp = (top1_ind % (bottom + top) + centerh_int - left).int()
radius = gaussian_radius((dy / self.voxel_size[1] / feater_map_stride, dx / self.voxel_size[0] / feater_map_stride), min_overlap=gaussian_overlap)
radius = max(min_radius, int(radius))
ct = np.array([centerh_int_temp, centerw_int_temp], dtype=np.float32)
draw_umich_gaussian(point_counts[k][obj_class], ct, radius)
if auxiliary_reg == 'corner_cls':
car_shape_w = dy / self.voxel_size[1] / feater_map_stride
car_shape_h = dx / self.voxel_size[0] / feater_map_stride
project_box = torch.tensor([centerh, centerw, 0, car_shape_h, car_shape_w, 0, rot]).float()
corner_points = box_utils.boxes_to_corners_3d(project_box.unsqueeze(0))
corner_points = corner_points[0, 0:4, 0:2]
radius = gaussian_radius((dy / self.voxel_size[1] / feater_map_stride, dx / self.voxel_size[0] / feater_map_stride),
min_overlap=gaussian_overlap)
radius = max(min_radius, int(radius))
for co in range(4):
ct = np.array([corner_points[co, 0].int(), corner_points[co, 1].int()], dtype=np.float32)
draw_umich_gaussian(corner_cls[k][obj_class], ct, radius)
# here cls is start from 0, and our predict && gt is start from 1
cat[k][i] = obj_class
mask[k][i] = 1
# check is error 为了匹配后面的WxH,需要仔细对应的维度
ind[k][i] = centerw_int*mapH + centerh_int # 后面gather用
if encode_orientation_type == '2sin_cos':
anno_box[k][i] = anno_box.new_tensor([centerh - centerh_int, centerw - centerw_int, centerz,
dx, dy, dz, math.sin(rot), math.cos(rot)])
else:
raise NotImplementedError
example.update({'hm': target_heatmap.cuda(), 'anno_box': anno_box.cuda(), 'ind': ind.cuda(), 'mask': mask.cuda(), 'cat': cat.cuda()})
if auxiliary_reg == 'point_counts':
example.update({'point_counts': point_counts.cuda()})
if auxiliary_reg == 'corner_cls':
example.update({'corner_cls': corner_cls.cuda()})
return example
def get_hm_loss(self):
# 需要sigmoid, 不过这里clamp ?
# pred_hm = _sigmoid(self.forward_ret_dict['heatmap']) # (B, W, H, class_num)
pred_hm = self.forward_ret_dict['heatmap'] # (B, W, H, class_num)
gt_hm = self.forward_ret_dict['hm'] # (B, class_num, W, H)
gt_hm = gt_hm.permute(0, 2, 3, 1).contiguous()
hm_loss = center_utils.Center_FocalLoss(pred_hm, gt_hm)
# 对batch和obj_num的归一化在Center_FocalLoss里
hm_loss = hm_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['hm_weight']
tb_dict = {
'hm_loss': hm_loss.item()
}
return hm_loss, tb_dict
def get_offset_loss(self):
off_radius = self.model_cfg.TARGET_ASSIGNER_CONFIG.OFFSET_RADIUS
pred_offset = self.forward_ret_dict['offset'] # (B, W, H, 2)
gt_offset = self.forward_ret_dict['anno_box'][:, :, 0:2] # (B, max_obj, 2)
mask = self.forward_ret_dict['mask'] # (batch_size, max_object)
ind = self.forward_ret_dict['ind'] # (batch_size, max_object)
# print(ind.shape)
# print(ind.max())
# print(ind.min())
if off_radius == 0:
pred_offset = pred_offset.view(pred_offset.size(0), -1, pred_offset.size(-1))
dim = pred_offset.size(-1)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
pred_offset = pred_offset.gather(1, ind) # !!!!
# mask = mask.unsqueeze(2).expand_as(pred_offset)
# pred_offset = pred_offset[mask]
# pred_offset = pred_offset.view(-1, dim)
offset_loss = center_utils.Center_RegLoss(pred_offset, gt_offset, mask)
else:
raise NotImplementedError('should like afdet paper -> have radius')
offset_loss = offset_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['offset_weight']
# print(offset_loss)
tb_dict = {
'offset_loss': offset_loss.item()
}
return offset_loss, tb_dict
def get_height_loss(self, LOSS_FUNC=None):
pred_height = self.forward_ret_dict['height'] # (B, W, H, 2)
gt_height = self.forward_ret_dict['anno_box'][:, :, 2:3] # (B, max_obj, 2)
mask = self.forward_ret_dict['mask'] # (batch_size, max_object)
ind = self.forward_ret_dict['ind'] # (batch_size, max_object)
pred_height = pred_height.view(pred_height.size(0), -1, pred_height.size(-1))
dim = pred_height.size(-1)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
pred_height = pred_height.gather(1, ind) # !!!!
if LOSS_FUNC is None:
height_loss = center_utils.Center_RegLoss(pred_height, gt_height, mask)
else:
height_loss = LOSS_FUNC(pred_height, gt_height, mask)
height_loss = height_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['height_weight']
tb_dict = {
'height_loss': height_loss.item()
}
return height_loss, tb_dict
def get_size_loss(self, LOSS_FUNC=None):
pred_size = self.forward_ret_dict['size'] # (B, W, H, 2)
gt_size = self.forward_ret_dict['anno_box'][:, :, 3:6] # (B, max_obj, 2)
mask = self.forward_ret_dict['mask'] # (batch_size, max_object)
ind = self.forward_ret_dict['ind'] # (batch_size, max_object)
pred_size = pred_size.view(pred_size.size(0), -1, pred_size.size(-1))
dim = pred_size.size(-1)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
pred_size = pred_size.gather(1, ind) # !!!!
if LOSS_FUNC is None:
size_loss = center_utils.Center_RegLoss(pred_size, gt_size, mask)
else:
size_loss = LOSS_FUNC(pred_size, gt_size, mask)
size_loss = size_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['size_weight']
tb_dict = {
'size_loss': size_loss.item()
}
return size_loss, tb_dict
def get_orientation_loss(self):
orientation_encode_type = self.model_cfg.get('ORIENTATION_ENCODING_TYPE', '2sin_cos')
pred_orientation = self.forward_ret_dict['orientation'] # (B, W, H, 2)
mask = self.forward_ret_dict['mask'] # (batch_size, max_object)
ind = self.forward_ret_dict['ind'] # (batch_size, max_object)
pred_orientation = pred_orientation.view(pred_orientation.size(0), -1, pred_orientation.size(-1))
dim = pred_orientation.size(-1)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
pred_orientation = pred_orientation.gather(1, ind) # !!!!
if orientation_encode_type == '2sin_cos':
gt_orientation = self.forward_ret_dict['anno_box'][:, :, 6:8] # (B, max_obj, 2)
orientation_loss = center_utils.Center_RegLoss(pred_orientation, gt_orientation, mask)
else:
raise NotImplementedError('NOT REALIZE ALGORITHM!!')
orientation_loss = orientation_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['ori_weight']
tb_dict = {
'orientation_loss': orientation_loss.item()
}
return orientation_loss, tb_dict
def get_loss(self):
hm_loss, tb_dict = self.get_hm_loss()
offset_loss, tb_dict_offset = self.get_offset_loss()
if self.model_cfg['LOSS_CONFIG'].get('USE_BalancedL1Loss', False):
BLoss = center_utils.BalancedL1Loss(alpha=0.5, gamma=1.5)
height_loss, tb_dict_height = self.get_height_loss(LOSS_FUNC=BLoss)
size_loss, tb_dict_size = self.get_size_loss(LOSS_FUNC=BLoss)
else:
height_loss, tb_dict_height = self.get_height_loss()
size_loss, tb_dict_size = self.get_size_loss()
orientation_loss, tb_dict_orientation = self.get_orientation_loss()
tb_dict.update(tb_dict_offset)
tb_dict.update(tb_dict_height)
tb_dict.update(tb_dict_size)
tb_dict.update(tb_dict_orientation)
rpn_loss = hm_loss + offset_loss + height_loss + size_loss + orientation_loss
tb_dict['rpn_loss'] = rpn_loss.item()
return rpn_loss, tb_dict
def generate_predicted_boxes(self, pred_hm, pred_offset, pred_height, pred_size, pred_ori):
"""
Args:
Returns:
pred_dicts: (B, num_boxes, num_classes)
recall_dict: (B, num_boxes, 7+C)
"""
# 这里一般不会和上面计算loss同时运行(除非roi头),所以可以不考虑上面loss时对hm offset...的操作
# (就是tensor那些操作涉及的共享内存/梯度传播 问题)
orientation_encode_type = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('ORIENTATION_ENCODING_TYPE', '2sin_cos')
feater_map_stride = int(self.model_cfg.TARGET_ASSIGNER_CONFIG.get('MAP_STRIDE', 1))
use_maxpool = self.model_cfg.POST_CONFIG.get('USE_MAXPOOL', False)
use_circle_nms = self.model_cfg.POST_CONFIG.get('USE_CIRCLE_NMS', False)
use_iou_nms = self.model_cfg.POST_CONFIG.get('USE_IOU_NMS', False)
circle_nms_min_radius = self.model_cfg.POST_CONFIG.get('MIN_RADIUS', None)
max_per_img = self.model_cfg.POST_CONFIG.get('MAX_PRE_IMG', 500)
post_max_size = self.model_cfg.POST_CONFIG.get('MAX_POST', 83)
score_threshold = self.model_cfg.POST_CONFIG.get('SCORE_THRESHOLD', 0)
if not isinstance(pred_hm, list):
pred_hm = [pred_hm]
pred_offset = [pred_offset]
pred_height = [pred_height]
pred_size = [pred_size]
pred_ori = [pred_ori]
recall_dict = {}
pred_dicts = []
for idx in range(len(pred_hm)):
cur_pred_hm = pred_hm[idx]
cur_pred_offset = pred_offset[idx]
cur_pred_height = pred_height[idx]
cur_pred_size = pred_size[idx]
cur_pred_ori = pred_ori[idx]
batch_size = cur_pred_hm.size(0)
cur_pred_hm = cur_pred_hm.permute(0, 3, 1, 2).contiguous() # (B, class_num, W, H)
# maxpool_2D input size (N,C,H,W) , output (N, C, H_{out}, W_{out})
if use_maxpool:
cur_pred_hm = _nms(heat=cur_pred_hm, kernel=3)
topk_score, topk_inds, topk_clses, topk_ys, topk_xs = _topk(cur_pred_hm, max_per_img)
xs_key = topk_xs.view(batch_size, max_per_img, 1) * self.voxel_size[0] * feater_map_stride + self.point_cloud_range[0]
ys_key = topk_ys.view(batch_size, max_per_img, 1) * self.voxel_size[1] * feater_map_stride + self.point_cloud_range[1]
if cur_pred_offset is not None:
cur_pred_offset = _transpose_and_gather_feat(cur_pred_offset, topk_inds)
cur_pred_offset = cur_pred_offset.view(batch_size, max_per_img, 2)
# offset是直接加在这的,就是加在栅格坐标上
xs = topk_xs.view(batch_size, max_per_img, 1) + cur_pred_offset[:, :, 0:1]
ys = topk_ys.view(batch_size, max_per_img, 1) + cur_pred_offset[:, :, 1:2]
else:
xs = topk_xs.view(batch_size, max_per_img, 1) + 0.5
ys = topk_ys.view(batch_size, max_per_img, 1) + 0.5
if orientation_encode_type == '2sin_cos':
rots = _transpose_and_gather_feat(cur_pred_ori[:, :, :, 0:1], topk_inds)
rots = rots.view(batch_size, max_per_img, 1)
rotc = _transpose_and_gather_feat(cur_pred_ori[:, :, :, 1:2], topk_inds)
rotc = rotc.view(batch_size, max_per_img, 1)
rot = torch.atan2(rots, rotc)
else:
raise Exception('not code!')
height = _transpose_and_gather_feat(cur_pred_height, topk_inds)
height = height.view(batch_size, max_per_img, 1)
dim = _transpose_and_gather_feat(cur_pred_size, topk_inds)
dim = dim.view(batch_size, max_per_img, 3)
clses = topk_clses.view(batch_size, max_per_img)
scores = topk_score.view(batch_size, max_per_img)
xs = xs.view(batch_size, max_per_img, 1) * self.voxel_size[0] * feater_map_stride + self.point_cloud_range[0]
ys = ys.view(batch_size, max_per_img, 1) * self.voxel_size[1] * feater_map_stride + self.point_cloud_range[1]
# get final !!
final_box_preds = torch.cat(
[xs, ys, height, dim, rot], dim=2
)
final_keypoint_preds = torch.cat([xs_key, ys_key, height], dim=2)
final_scores = scores
final_class = clses
# max_pool_nms是在topk之前用;;;circle_nms是在topk后用
if score_threshold is not None:
thresh_mask = final_scores > score_threshold
for i in range(batch_size):
if score_threshold:
boxes3d = final_box_preds[i, thresh_mask[i]]
scores = final_scores[i, thresh_mask[i]]
labels = final_class[i, thresh_mask[i]]
keypoints = final_keypoint_preds[i, thresh_mask[i]]
else:
boxes3d = final_box_preds[i]
scores = final_scores[i]
labels = final_class[i]
keypoints = final_keypoint_preds[i]
if self.use_multihead:
cur_label_mapping = self.rpn_heads[idx].head_label_indices
labels = cur_label_mapping[labels.long()]
else:
labels = labels + 1
if use_circle_nms:
centers = boxes3d[:, [0, 1]]
boxes = torch.cat([centers, scores.view(-1, 1)], dim=1).detach()
keep = _circle_nms(boxes, min_radius=circle_nms_min_radius[idx], post_max_size=post_max_size)
boxes3d = boxes3d[keep]
scores = scores[keep]
labels = labels[keep]
keypoints = keypoints[keep]
elif use_iou_nms and self.model_cfg.POST_CONFIG.NMS_CONFIG.get('MULTI_CLASSES_NMS', False):
# use normal nms
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=scores, box_preds=boxes3d,
nms_config=self.model_cfg.POST_CONFIG.NMS_CONFIG,
score_thresh=None
)
boxes3d = boxes3d[selected]
scores = scores[selected]
labels = labels[selected]
keypoints = keypoints[selected]
record_dict = {
'pred_boxes': boxes3d,
'pred_scores': scores,
'pred_labels': labels,
'pred_keypoints': keypoints
}
if idx == 0:
pred_dicts.append(record_dict)
else:
pred_dicts[i]['pred_boxes'] = torch.cat([pred_dicts[i]['pred_boxes'], record_dict['pred_boxes']], dim=0)
pred_dicts[i]['pred_scores'] = torch.cat([pred_dicts[i]['pred_scores'], record_dict['pred_scores']], dim=0)
pred_dicts[i]['pred_labels'] = torch.cat([pred_dicts[i]['pred_labels'], record_dict['pred_labels']], dim=0)
pred_dicts[i]['pred_keypoints'] = torch.cat([pred_dicts[i]['pred_keypoints'], record_dict['pred_keypoints']], dim=0)
if use_iou_nms and not self.model_cfg.POST_CONFIG.NMS_CONFIG.get('MULTI_CLASSES_NMS', False):
# use normal nms
batch_size = pred_hm[0].size(0)
for i in range(batch_size):
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=pred_dicts[i]['pred_scores'], box_preds=pred_dicts[i]['pred_boxes'],
nms_config=self.model_cfg.POST_CONFIG.NMS_CONFIG,
score_thresh=None
)
pred_dicts[i]['pred_boxes'] = pred_dicts[i]['pred_boxes'][selected]
pred_dicts[i]['pred_scores'] = pred_dicts[i]['pred_scores'][selected]
pred_dicts[i]['pred_labels'] = pred_dicts[i]['pred_labels'][selected]
pred_dicts[i]['pred_keypoints'] = pred_dicts[i]['pred_keypoints'][selected]
return pred_dicts, recall_dict
def forward(self, **kwargs):
raise NotImplementedError
|
16,272 | 6af1c993dd32a788c625047f63f24905cecaf276 |
def rodCutting(price, totallength):
    """Return the maximum revenue from cutting a rod of length `totallength`.

    Classic unbounded rod-cutting DP: `price[i-1]` is the price of a piece of
    length `i`; pieces of any available length may be used any number of times.

    Fixes two defects in the original: `totallength` was ignored (the DP ran
    over len(price) instead), and the result was printed rather than returned.

    Args:
        price: list of piece prices, indexed by piece length - 1.
        totallength: total rod length to cut (non-positive lengths yield 0).
    Returns:
        The maximum obtainable revenue (0 for empty price list or length <= 0).
    """
    if totallength <= 0 or not price:
        return 0
    max_piece = len(price)
    # best[j] = max revenue obtainable from a rod of length j
    best = [0] * (totallength + 1)
    for length in range(1, totallength + 1):
        for piece in range(1, min(length, max_piece) + 1):
            best[length] = max(best[length], best[length - piece] + price[piece - 1])
    return best[totallength]
print(rodCutting([2, 5, 7, 8], 5))
16,273 | 685c427319b57eb546cffdc29f16430849249288 | from app import app
from flask import render_template, redirect, url_for, flash, request
from flask_login import current_user, login_user, logout_user, login_required
from app.models import User, Post, Borda
from app import db
from app.forms import RegistrationForm, LoginForm, PostForm, BoardForm
from werkzeug.urls import url_parse
@app.route('/', methods=['GET', 'POST'])
def index():
    """Front page: list every board and handle creation of a new one."""
    board_form = BoardForm()
    if board_form.validate_on_submit():
        # Reject a duplicate board name and bounce back to the front page.
        if board_form.validate_board(board_form.bord_name) is True:
            flash(f"sorry, but {board_form.bord_name.data} already exists")
            return redirect(url_for('index'))
        db.session.add(Borda(bord_name=board_form.bord_name.data,
                             creator_name=current_user.username))
        db.session.commit()
        return redirect(f'/{board_form.bord_name.data}')
    boards = Borda.query.order_by(Borda.timestamp.desc())
    return render_template("index.html", title="Borda", all_boards=boards, form=board_form)
@app.route('/<bord_name>', methods=['GET', 'POST'])
def bord_page(bord_name):
    """Show a single board with its paginated posts; handle new-post submits."""
    board_form = BoardForm()
    post_form = PostForm()
    if post_form.validate_on_submit():
        db.session.add(Post(body=post_form.postBody.data,
                            author=current_user.username,
                            bord_id=bord_name))
        db.session.commit()
        return redirect(f"/{bord_name}")
    boards = Borda.query.order_by(Borda.timestamp.desc())
    page = request.args.get('page', 1, type=int)
    posts_on_page = (Post.query.order_by(Post.timestamp.desc())
                     .filter_by(bord_id=bord_name)
                     .paginate(page, app.config['POSTS_PER_PAGE'], False))
    borda = Borda.query.filter_by(bord_name=bord_name).first()
    next_url = (url_for("bord_page", bord_name=bord_name, page=posts_on_page.next_num)
                if posts_on_page.has_next else None)
    prev_url = (url_for("bord_page", bord_name=bord_name, page=posts_on_page.prev_num)
                if posts_on_page.has_prev else None)
    return render_template('bordPage.html', borda=borda, posts=posts_on_page.items,
                           bord_name=bord_name, form=board_form, form_board=post_form,
                           all_boards=boards, next_url=next_url, prev_url=prev_url,
                           title=bord_name
                           )
@app.route('/boards/delete/<int:id>')
def delete_boards(id):
    """Delete a board together with every post that belongs to it.

    The original committed the board deletion first and then committed once
    per post inside the loop; a failure mid-way could leave orphaned posts.
    Everything is now removed in a single transaction.
    """
    board = Borda.query.get_or_404(id)
    for post in Post.query.filter_by(bord_id=board.bord_name).all():
        db.session.delete(post)
    db.session.delete(board)
    db.session.commit()
    return redirect("/")
@app.route('/posts/delete/<int:id>')
def delete(id):
    """Delete a single post, then redirect back to its board.

    The original fetched the same row twice (get_or_404 plus filter_by) and
    read its board id only after delete/commit; one lookup suffices and the
    board id is captured before the row is removed.
    """
    post = Post.query.get_or_404(id)
    bord_id = post.bord_id
    db.session.delete(post)
    db.session.commit()
    return redirect(f"/{bord_id}")
@app.route('/posts/edit/<int:id>', methods=['GET', 'POST'])
def edit(id):
    """Edit a post's body: GET renders the form, POST saves the change.

    Bug fix: the original flashed "your post has been edited" unconditionally,
    so simply opening the edit form (GET) announced an edit that never
    happened. The flash now fires only after a successful save.
    """
    post = Post.query.get_or_404(id)
    if request.method == 'POST':
        post.body = request.form['content']
        db.session.commit()
        flash("your post has been edited")
        return redirect(f"/{post.bord_id}")
    return render_template('edit.html', post=post, title="EDIT")
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and forward them to the page they asked for."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template('login.html', title='Sign In', form=form)
    user = User.query.filter_by(username=form.username.data).first()
    if user is None or not user.check_password(form.password.data):
        flash('Invalid username or password')
        return redirect(url_for('login'))
    login_user(user, remember=form.remember_me.data)
    target = request.args.get('next')
    # Only follow same-site "next" targets; anything external falls back home.
    if not target or url_parse(target).netloc != '':
        target = url_for('index')
    return redirect(target)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account, then send the user to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        return render_template('register.html', title='registration', form=form)
    new_user = User(username=form.username.data, email=form.email.data)
    new_user.set_password(form.password.data)
    db.session.add(new_user)
    db.session.commit()
    flash('Congratulations, you are now a registered user!')
    return redirect(url_for('login'))
@app.route('/logout')
def logout():
    """End the current session and go back to the front page."""
    logout_user()
    home = url_for('index')
    return redirect(home)
|
16,274 | 62d5059149e3373028d2bc0bc5ef478dfa9a70f5 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from collections import OrderedDict
import inspect
import re
from rest_framework.fields import empty, Field as SerializerField
from rest_framework.relations import RelatedField
from django_filters.filters import Filter as DjangoFilterField
from .documents import Document
from .resources import Action, Request, Resource, Response
from .structures import Field, Structure
from .types import get_raw_type
from .utils import (
get_accept_media_types,
get_allowed_methods,
get_filter_class,
get_filter_fields,
get_response_status,
get_serializer_class,
get_serializer_fields,
get_views,
is_api_view,
)
# Matches "{param}" placeholders in a URL path, capturing the parameter name.
URL_PARAMETER_PATTERN = re.compile(r'{(?P<param>.+?)}')
def create_document_from_urlpatterns(urlpatterns, **kwargs):
    """Build a Document whose resources mirror every API view in `urlpatterns`."""
    document = Document(**kwargs)
    resources = []
    for url, view in get_views(urlpatterns).items():
        if is_api_view(view):
            resources.append(create_resource_from_api_view(url, view))
    document.resources = resources
    return document
def create_resource_from_api_view(path, view):
    """Describe one URL path as a Resource with an Action per allowed method."""
    view_doc = inspect.getdoc(view.cls)
    resource = Resource(
        path=path,
        document=view_doc if view_doc else '',
        parameters=create_structure_from_path(path),
    )
    actions = []
    for method in get_allowed_methods(view.cls):
        actions.append(create_action_from_api_view(resource, method, view))
    resource.actions = actions
    return resource
def create_action_from_api_view(resource, method, view):
    """Describe one HTTP method on a resource, including request/response bodies."""
    handler_doc = inspect.getdoc(getattr(view.cls, method.lower()))
    serializer_class = get_serializer_class(view.cls)
    action = Action(
        resource=resource,
        method=method,
        document=handler_doc or '',
        parameters=create_structure_from_filter_class(get_filter_class(view.cls)),
    )
    # GET carries no request body; every other method submits writable fields.
    if method.lower() == 'get':
        request_body = None
    else:
        request_body = create_structure_from_serializer_class(
            serializer_class,
            only_writable=True,
        )
    action.requests = [
        Request(action=action, media_type=media_type, attribute=request_body)
        for media_type in get_accept_media_types(view.cls)
    ]
    readable = create_structure_from_serializer_class(
        serializer_class,
        only_readable=True,
    )
    action.responses = [
        Response(
            action=action,
            media_type='application/json',
            attribute=readable,
            status=get_response_status(method, view.cls),
        ),
    ]
    return action
def create_structure_from_serializer_class(
    serializer_class,
    only_readable=False,
    only_writable=False,
):
    """Wrap the serializer's converted fields in a Structure, or None if empty."""
    fields = create_fields_from_serializer_class(
        serializer_class,
        only_readable,
        only_writable,
    )
    return Structure(fields=fields) if fields else None
def create_structure_from_filter_class(filter_class):
    """Wrap the filter's converted fields in a Structure, or None if empty."""
    fields = create_fields_from_filter_class(filter_class)
    return Structure(fields=fields) if fields else None
def create_structure_from_path(path):
    """Turn "{param}" placeholders in `path` into a Structure of required fields."""
    params = URL_PARAMETER_PATTERN.findall(path)
    if not params:
        return None
    fields = []
    for param in params:
        # TODO: Resolve type — every URL parameter is currently assumed numeric.
        fields.append(Field(name=param, type='number', required=True))
    return Structure(fields=fields)
# TODO: Hack me
def create_fields_from_serializer_class(
    serializer_class,
    only_readable=False,
    only_writable=False,
):
    """Convert a serializer's fields, optionally restricted to one direction."""
    if serializer_class is None:
        return []
    serializer_fields = get_serializer_fields(serializer_class)
    if only_readable:
        serializer_fields = OrderedDict(
            (name, field)
            for name, field in serializer_fields.items()
            if field.read_only or not field.write_only
        )
    if only_writable:
        serializer_fields = OrderedDict(
            (name, field)
            for name, field in serializer_fields.items()
            if field.write_only or not field.read_only
        )
    if not serializer_fields:
        return []
    converted = [
        create_field_from_serializer_field(name, field)
        for name, field in serializer_fields.items()
    ]
    if only_readable:
        # Response bodies always contain every readable field.
        for field in converted:
            field.required = True
    return converted
def create_fields_from_filter_class(filter_class):
    """Convert each filter on `filter_class` into a Field; [] when absent."""
    if filter_class is None:
        return []
    converted = []
    for name, field in get_filter_fields(filter_class).items():
        converted.append(create_field_from_filter_field(name, field))
    return converted
def create_field_from_serializer_field(name, field):
    """Convert one DRF serializer field into a documentation Field.

    Args:
        name: the field's attribute name on the serializer.
        field: a rest_framework.fields.Field instance.
    Raises:
        TypeError: when `field` is not a DRF serializer field.

    Bug fix: the original tested ``issubclass(default, empty)``, which raises
    TypeError whenever the field has a concrete (non-class) default such as
    0 or ''. DRF signals "no default" with the `empty` sentinel itself, so an
    identity check is the correct test.
    """
    if not isinstance(field, SerializerField):
        raise TypeError('The argument is not rest_framework.fields.Field')
    # RelatedField exposes a queryset-backed `choices`; don't enumerate it.
    if not isinstance(field, RelatedField):
        choices = getattr(field, 'choices', None)
    else:
        choices = None
    default = field.default
    if default is empty or not default:
        default = ''
    return Field(
        name=name,
        type=get_raw_type(field),
        choices=choices,
        label=field.label or '',
        required=field.required,
        default=default,
        help_text=field.help_text or '',
    )
def create_field_from_filter_field(name, field):
    """Convert one django-filter Filter into a documentation Field.

    Args:
        name: the filter's attribute name on the FilterSet.
        field: a django_filters.filters.Filter instance.
    Raises:
        TypeError: when `field` is not a django-filter Filter.

    Fixes the "arguemnt" typo in the raised error message.
    """
    if not isinstance(field, DjangoFilterField):
        raise TypeError('The argument is not django_filters.filters.Filter')
    # TODO: Ignore queryset choices
    choices = getattr(field.field, 'choices', None)
    help_text = field.field.help_text
    # django-filter fills in a literal "Filter" placeholder; hide it.
    if help_text == 'Filter' or not help_text:
        help_text = ''
    return Field(
        name=name,
        type=get_raw_type(field),
        choices=choices,
        label=field.label or '',
        required=field.required,
        default=field.field.initial,
        help_text=help_text,
    )
|
16,275 | ef51150fccd35ec4ce118aed1cf45acb8ee0ec0e | #!/oasis/scratch/csd181/mdburns/python/bin/python
# Copyright (C) 2012 Matthew Burns <mdburns@ucsd.edu>
from datetime import datetime
import helpers
import helpers.float_open as fop
from scipy import stats, zeros, ones, signal
from scipy.io import loadmat, savemat
import numpy as np
from numpy import array
import os
import sys
import pickle
import argparse
import base64
import gc
import multiprocessing as mp
from hadoop.io import SequenceFile, Text
# Cross-validation fold count and dataset geometry; the NUM_*/SAMPLE_RATE
# globals below look like they are filled in after the EEG set is loaded.
NUMFOLDS = 5
NUM_SAMPLES = 0
SAMPLE_RATE = 0
NUM_EVENTS = 0
NUM_POINTS = 0
# Multiprocessing worker pool; presumably created by the entry point before
# any parallel map is used — TODO confirm against the rest of the script.
pool=None
def get_eeg(path, file_name):
print 'get_eeg: reading EEGLAB .set file '+ file_name
fullpath = path+file_name
try:
f = loadmat(fullpath+'.set', appendmat=False)
except:
print >> sys.stderr, 'get_eeg: could not load '+ file_name + '.set'
return 1
EEG = f['EEG']
events = {}
eeg = {}
label = []
latencyInFrame=[]
uniqueLabel=[]
event = EEG['event'][0][0][0]
gc.disable()
for t_event in event:
this_latency = str(t_event[0][0][0])
this_label = str(t_event[1][0][0])
latencyInFrame.append(this_latency)
label.append(this_label)
if this_label not in uniqueLabel:
uniqueLabel.append(this_label)
gc.enable()
uniqueLabel=[int(x) for x in uniqueLabel]
events['uniqueLabel'] = [str(x) for x in sorted(uniqueLabel)]
#-1 for Matlab indexing conversion
events['latencyInFrame'] = [(int(x)-1) for x in latencyInFrame]
events['label'] = label
eeg['events']=events
eeg['num_events']=len(events.keys())
eeg['sample_rate']=EEG['srate'][0][0][0][0]
eeg['num_samples']=EEG['pnts'][0][0][0][0]
eeg['num_channels']=EEG['nbchan'][0][0][0][0]
eeg['trials']=EEG['trials'][0][0][0][0]
eeg['ica_weights']=EEG['icaweights'][0][0]
eeg['ica_sphere']=EEG['icasphere'][0][0]
eeg['ica_winv=EEG']=['icawinv'][0][0]
eeg['file_name']=file_name
eeg['path']=path;
eeg['channel_locations']=EEG['chanlocs'][0][0]
eeg['prior_data_path']=EEG['data'][0][0][0]
return eeg
def find_artifact_indexes(eeg, data):
    # Flag frames of `data` (samples x channels, here ICA activations) that
    # look like artifacts: per-channel tied ranks are converted to two-sided
    # empirical p-values, averaged across channels as a log-likelihood, then
    # smoothed over a 200 ms window; frames whose smoothed score exceeds the
    # threshold (plus the surrounding window) are returned as 0-based indexes.
    windowTimeLength = 200;# in ms.
    windowFrameLength = int(round((eeg['sample_rate'] * windowTimeLength/1000)));
    coefs = ones((windowFrameLength,))
    threshold = 2.1
    # Rank each channel in parallel using the module-level worker pool
    # (must have been created in main() or injected via compile_data).
    args=[data[:,i] for i in np.arange(data.shape[1])]
    result = pool.map(tied_rank, args)
    tdrnk = array(result)/data.shape[0]
    # Two-sided empirical p-value of each sample's rank.
    twosidep = np.minimum(tdrnk, 1-tdrnk)
    logliklihood = -np.log(twosidep)
    meanLogLikelihood = np.mean(np.transpose(logliklihood),1)
    windowFrame = np.arange((int(round(-windowFrameLength/2))),int(round((windowFrameLength/2)))).reshape((1,-1))
    # Clamp NaN/inf from log(0) so filtfilt stays finite.
    meanLogLikelihood = np.nan_to_num(meanLogLikelihood)
    meanLogLikelihood[meanLogLikelihood > 1e20]=1e20
    smoothMeanLogLikelihood = signal.filtfilt(coefs, array([1]), meanLogLikelihood)/(np.power(windowFrameLength,2))
    isArtifactWindowCenter = np.where(smoothMeanLogLikelihood > threshold)[0].reshape((-1,1))
    print 'clean indexes: number of artifact frames detected = %d' % len(isArtifactWindowCenter)
    # Expand each flagged center to the whole surrounding window.
    # NOTE(review): windowFrame.shape[0] is always 1 after the (1,-1)
    # reshape; shape[1] looks like the intended tile width - confirm.
    artifactFrames = np.tile(windowFrame, (isArtifactWindowCenter.shape[0], 1)) + np.tile(isArtifactWindowCenter, (1 , windowFrame.shape[0]))
    artifactFrames = np.maximum(artifactFrames, 1)
    artifactFrames = np.minimum(artifactFrames, meanLogLikelihood.shape[0])
    artifactFrames = np.unique(artifactFrames[:])-1
    return artifactFrames
def tied_rank(x):
    """Return the 1-based ranks of the elements of *x*, averaging the rank
    over groups of tied values.

    (Adapted from https://github.com/benhamner/Metrics.)

    Parameters
    ----------
    x : list of numbers, numpy array

    Returns
    -------
    list of floats
        The tied rank of each element of x, in the original order.
    """
    order = sorted(range(len(x)), key=lambda idx: x[idx])
    ranks = [0] * len(x)
    n = len(order)
    start = 0
    while start < n:
        # Extend the group to cover every element tied with x[order[start]].
        end = start
        while end + 1 < n and x[order[end + 1]] == x[order[start]]:
            end += 1
        # Average of the 1-based positions start+1 .. end+1.
        shared_rank = (start + end + 2) / 2.0
        for pos in range(start, end + 1):
            ranks[order[pos]] = shared_rank
        start = end + 1
    return ranks
"""
compile_data: input_str is the path to your imput files, with the character '?' inserted where you want to specify different values.
For instance: 'X:\RSVP\exp?\realtime\exp?_continuous_with_ica' with substitute = range(44,61) will process files
'X:\RSVP\exp44\realtime\exp44_continuous_with_ica.set' ... all the way to 60. The sequence files will be created in the outputpath and
automatically uploaded to hdfs_target_path in the HDFS file system if those are specified. Assumes you are running this on the head node.
"""
def compile_data(input_str, substitute, outputpath='', compression=False, test_file=False, p=None):
temp = input_str.rpartition(os.sep)
path_temp = temp[0]
file_temp = temp[2]
if outputpath is not '':
try:
os.mkdir(outputpath)
except: pass
if not p==None:
global pool
pool=p
ica_key, ica_val, raw_key, raw_val = Text(), Text(), Text(), Text()
for i, v in enumerate(substitute):
path_to_data = path_temp.replace('?', str(v))
filename = file_temp.replace('?', str(v))
eeg = get_eeg(path_to_data + os.sep, filename)
if eeg is not 1:
raw_data, ica_act = read_full_float(eeg)
else:
continue
if raw_data is None:
continue
print(filename + ': identifying outliers')
artifact_indexes = find_artifact_indexes(eeg, ica_act)
eeg['artifact_indexes'] = artifact_indexes;
f=open('..\\artifact_indexes', 'w')
pickle.dump(artifact_indexes,f)
f.close()
eegstr = pickle.dumps(eeg, protocol=2)
print(filename + ': compiling dataset into hadoop sequence file')
if outputpath is '':
outputpath = path_to_data;
#Enable compression if requested
if compression:
comp_type=SequenceFile.CompressionType.RECORD
else:
comp_type=SequenceFile.CompressionType.NONE
writer = SequenceFile.createWriter(outputpath + os.sep + filename + '.seq', Text, Text, compression_type=comp_type)
for i in range(raw_data.shape[1]):
if test_file and i > 3:
break
this_raw = np.ascontiguousarray(raw_data[:,i], dtype=raw_data.dtype)
this_ica = np.ascontiguousarray(ica_act[:,i], dtype=ica_act.dtype)
ica_key.set(outputpath + os.sep + filename + '.ica.' + str(i+1))
raw_key.set(outputpath + os.sep + filename + '.raw.' + str(i+1))
ica_temp = pickle.dumps((this_ica, eegstr), protocol=2)
raw_temp = pickle.dumps((this_raw, eegstr), protocol=2)
ica = base64.b64encode(ica_temp)
raw = base64.b64encode(raw_temp)
ica_val.set(ica)
raw_val.set(raw)
writer.append(raw_key, raw_val)
writer.append(ica_key, ica_val)
print(filename + ': '+str(i+1))
writer.close()
print filename + ': finished writing file'
return 0
def read_full_float(eeg):
    """Read the raw recording from the EEGLAB sidecar .fdt file as a
    (num_samples, num_channels) float32 matrix and recompute the ICA
    activations from the stored unmixing matrices.
    Returns (raw_data, ica_act), or (None, None) if the file cannot be opened."""
    print(eeg['file_name'] + ': reading full float file')
    fn =eeg['path'] + eeg['file_name'] + '.fdt';
    try:
        # fop is a project helper; 'l' presumably selects little-endian
        # byte order - confirm against helpers.float_open.
        f = fop.fopen(fn, 'r', 'l')
    except:
        print eeg['file_name']+': could not open ' + fn
        return None, None
    raw_data = f.read((eeg['num_samples'], eeg['num_channels']), 'float32')
    f.close();
    #Recompute ICA activations
    # activations = (weights . sphere) . data^T, transposed back to
    # samples x components.
    print (eeg['file_name'] + ': recomputing ICA activations')
    ica_act= np.transpose(np.float32(np.dot(np.dot(eeg['ica_weights'], eeg['ica_sphere']), np.transpose(raw_data))))
    return raw_data, ica_act
def create_file_manifest(input_str, substitute, outputpath=''):
    """Write <outputpath>/manifest.txt listing one data-file location per
    line, for jobs that read files directly instead of going through HDFS.

    The locations are built exactly like compile_data's inputs: '?' in
    *input_str* is replaced by each value of *substitute*.

    Note: the original version opened the manifest before creating the
    output directory and never actually wrote or closed it.
    """
    temp = input_str.rpartition(os.sep)
    path_temp = temp[0]
    file_temp = temp[2]
    if outputpath != '':
        try:
            os.mkdir(outputpath)
        except OSError:
            pass  # directory already exists
    f = open(outputpath + os.sep + 'manifest.txt', 'w')
    for v in substitute:
        path_to_data = path_temp.replace('?', str(v))
        filename = file_temp.replace('?', str(v))
        f.write(path_to_data + os.sep + filename + '\n')
    f.close()
def hadoop2mat(directory):
    # Collect Hadoop 'part-*' output files from *directory* into a single
    # MATLAB file (<directory>/result.mat).  Each line of a part file is
    # '<key>\t<base64 pickled value>'; the key's basename becomes the
    # variable name in the .mat file.
    result={}
    for fl in os.listdir(directory):
        if fl.split('-')[0]=='part':
            current = os.path.join(directory, fl)
            print current
            if os.path.isfile(current):
                f = open(current, 'rb')
                result_str = f.read().strip('\n')
                f.close()
                if not result_str=='':
                    experiments = result_str.split('\n')
                    kvps = [exp.split('\t') for exp in experiments]
                    for kvp in kvps:
                        # SECURITY: pickle.loads on job output - only safe
                        # when the Hadoop output is fully trusted.
                        this_result = pickle.loads(base64.b64decode(kvp[1]))
                        path, name = kvp[0].rsplit('/', 1)
                        print name
                        result[name]=this_result
    savemat(directory+os.sep+'result.mat', result)
def main():
parser = argparse.ArgumentParser(description='Recompile EEGLAB files into sequence files for Hadoop')
parser.add_argument('file_str', type=str)
parser.add_argument('range', type=int, nargs=2)
parser.add_argument('outputpath', type=str)
parser.add_argument('--hdfs_target_path', type=str,default='', dest='hdfs_path')
parser.add_argument('--compression', help='compression on',action='store_true')
parser.add_argument('--manifest', help='compile the output as a list of file locations',action='store_true')
parser.add_argument('--sequencefile', help='compile the output as a hadoop sequencefile for use with hdfs',action='store_true')
parser.add_argument('--testfile', help='compile a small sequencefile for testing (~10 channels)',action='store_true')
parser.add_argument('--hadoop2mat', help='collect hadoop output files into a single .mat file',action='store_true')
#b= ['X:\RSVP\exp?\\realtime\exp?_continuous_with_ica','54' ,'54', 'X:\RSVP\hadoop\\']
theseargs = parser.parse_args()
if theseargs.range[0] is theseargs.range[1]:
trange = [theseargs.range[0]]
else:
trange = range(theseargs.range[0] ,theseargs.range[1]+1)
global pool
pool = mp.Pool(mp.cpu_count()-1)
ts = datetime.now()
#Creates full sequencefiles
if theseargs.sequencefile:
print 'eeglab2hadoop: creating sequence file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression)
#Creates list of file locations to bypass hdfs
if theseargs.manifest:
print 'eeglab2hadoop: creating manifest'
create_file_manifest(theseargs.file_str, trange, theseargs.outputpath)
#Creates small sequencefile for testing purposes
if theseargs.testfile:
print 'eeglab2hadoop: creating test file'
compile_data(theseargs.file_str, trange, theseargs.outputpath, compression=theseargs.compression, test_file=True)
#Puts the files created by hadoop into a JSON string for Matlab
if theseargs.hadoop2mat:
print 'eeglab2hadoop: consolidating hadoop parts into result.mat'
hadoop2mat(theseargs.file_str)
c=datetime.now()-ts
print ' '
print 'eeglab2hadoop: Completed processing in ' + str(c.seconds) + ' seconds'
pool.close()
pool.join()
return 0
if __name__ == "__main__":
mp.freeze_support()
main() |
16,276 | 7819e5b3fc142645fa3bd545f44c70ee721b99f8 | from unittest import mock
from know_me.profile import serializers, views
@mock.patch("know_me.profile.views.DRYPermissions.has_permission")
@mock.patch(
"know_me.profile.views.permissions.HasListEntryListPermissions.has_permission" # noqa
)
def test_check_permissions(mock_list_permissions, mock_dry_permissions):
"""
The view should use the appropriate permissions checks.
"""
view = views.ListEntryListView()
view.check_permissions(None)
assert mock_dry_permissions.call_count == 1
assert mock_list_permissions.call_count == 1
def test_get_queryset(list_entry_factory, profile_item_factory):
    """
    The view should only operate on profile items belonging to the
    specified topic.
    """
    item = profile_item_factory()
    list_entry_factory(profile_item=item)
    # A second, unrelated entry that must NOT appear in the queryset.
    list_entry_factory()
    view = views.ListEntryListView()
    # The view normally receives 'pk' from the URLconf kwargs.
    view.kwargs = {"pk": item.pk}
    assert list(view.get_queryset()) == list(item.list_entries.all())
def test_get_serializer_class():
    """The list view must serialize entries with ListEntrySerializer."""
    serializer_class = views.ListEntryListView().get_serializer_class()
    assert serializer_class == serializers.ListEntrySerializer
def test_perform_create(profile_item_factory):
    """
    Creating a new profile item with the view should associate the item
    with the topic whose ID is given in the URL.
    """
    item = profile_item_factory()
    view = views.ListEntryListView()
    view.kwargs = {"pk": item.pk}
    serializer = mock.Mock(name="Mock ListEntrySerializer")
    result = view.perform_create(serializer)
    assert result == serializer.save.return_value
    assert serializer.save.call_count == 1
    # The parent item resolved from the URL must be injected at save time.
    assert serializer.save.call_args[1] == {"profile_item": item}
16,277 | f1b3fb2fb0cc380c16087991bec5b03d98de013a | #!/usr/bin/env python
# setuptools is required for install_requires to take effect;
# distutils.core.setup silently ignores the keyword, so the declared
# dependencies were never recorded.  Fall back to distutils only on
# environments without setuptools.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='provisor',
    version='0.2',
    packages=['provisor'],
    scripts=[
        'bin/provisor-server',
        'bin/provisor-create',
        'bin/provisor-config'
    ],
    data_files=[('/etc',['provisor.conf'])],
    author='Hashbang Team',
    author_email='team@hashbang.sh',
    license='GPL 3.0',
    description='Server that provisions new users on a Linux system',
    long_description=open('README.md').read(),
    install_requires=[
        'flask',
        'python-ldap'
    ]
)
|
16,278 | 0e66979977c06ecddc6ea13c75073684c4a86a17 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import matplotlib.pyplot as plt
import tensorflow as tf
# Read the image file (raw JPEG bytes).  NOTE(review): mode 'br' is
# accepted but 'rb' is the conventional spelling.  This whole script uses
# the TensorFlow 1.x graph/session API.
image_data = tf.gfile.FastGFile("data/dog3.jpg", 'br').read()
#
with tf.Session() as sess:
    img_data = tf.image.decode_jpeg(image_data)
    plt.imshow(img_data.eval())
    plt.show()
    # Convert the image data to floats in [0, 1]
    img_data = tf.image.convert_image_dtype(img_data, dtype=tf.float32)
    # # Flip the image vertically
    # fliped1 = tf.image.flip_up_down(img_data)
    # plt.imshow(fliped1.eval())
    # Flip the image horizontally
    fliped = tf.image.flip_left_right(img_data)
    plt.imshow(fliped.eval())
    # Flip along the main diagonal (transpose)
    # transposed = tf.image.transpose_image(img_data)
    # plt.imshow(transposed.eval())
    # # Randomly flip vertically with 50% probability
    # fliped = tf.image.random_flip_up_down(img_data)
    # plt.imshow(fliped.eval())
    # # Randomly flip horizontally with 50% probability
    # fliped = tf.image.random_flip_left_right(img_data)
    # plt.imshow(fliped.eval())
    plt.show()
# 1.图像编码处理
# 读取图片
# image_data = tf.gfile.FastGFile("data/dog1.jpg",'br').read()
# # # print(image_data)
# with tf.Session() as sess:
# # 解码原图片并展示出来
# img_data = tf.image.decode_jpeg(image_data)
# plt.imshow(img_data.eval())
# plt.show()
# encoded_image = tf.image.encode_jpeg(img_data)
# # print(sess.run(encoded_image))
# with tf.gfile.GFile("data/output","wb") as f:
# f.write(encoded_image.eval())
# image_data = tf.gfile.GFile("data/output",'br').read()
# print(image_data)
# with tf.Session() as sess:
# img_data = tf.image.decode_jpeg(image_data)
# # print(img_data.eval())
# plt.imshow(img_data.eval())
# plt.show()
#
# 2.图像大小调整
# with tf.Session() as sess:
# img_data = tf.image.decode_jpeg(image_data)
# # 将图片数据转换成实数类型
# img_data = tf.image.convert_image_dtype(img_data, dtype=tf.float32)
# # 重新转换成300X900的图片,并展示出调整后的图片
# resized = tf.image.resize_images(img_data,[300,900],method=1)
# plt.imshow(resized.eval())
# plt.show()
#1920X1200
# with tf.Session() as sess:
# img_data = tf.image.decode_jpeg(image_data)
# img_data = tf.image.convert_image_dtype(img_data, dtype=tf.float32)
# # resized = tf.image.resize_images(img_data, [300, 900], method=1)
# # plt.imshow(resized.eval())
# # croped = tf.image.resize_image_with_crop_or_pad(img_data, 1000, 1000)
# # plt.imshow(croped.eval())
# # padded = tf.image.resize_image_with_crop_or_pad(img_data,3000,3000)
# # plt.imshow(padded.eval())
# central_croped = tf.image.central_crop(img_data,0.8)
# plt.imshow(central_croped.eval())
# plt.show()
# 3.图像翻转
# with tf.Session() as sess:
# img_data = tf.image.decode_jpeg(image_data)
# img_data = tf.image.convert_image_dtype(img_data, dtype=tf.float32)
# fliped1 = tf.image.flip_up_down(img_data)
# plt.imshow(fliped1.eval())
# fliped = tf.image.flip_left_right(img_data)
# plt.imshow(fliped.eval())
# transposed = tf.image.transpose_image(img_data)
# plt.imshow(transposed.eval())
# fliped = tf.image.random_flip_up_down(img_data)
# plt.imshow(fliped.eval())
# fliped = tf.image.random_flip_left_right(img_data)
# plt.imshow(fliped.eval())
# plt.show()
# 4.图像色彩调整
# with tf.Session() as sess:
# img_data = tf.image.decode_jpeg(image_data)
# img_data = tf.image.convert_image_dtype(img_data, dtype=tf.float32)
# # 亮度
# # adjusted = tf.image.adjust_brightness(img_data,0.5)
# # adjusted = tf.image.adjust_brightness(img_data, -0.5)
# # adjusted = tf.clip_by_value(adjusted,0.0,1.0)
# # adjusted = tf.image.random_brightness(img_data,0.5)
# #
# # 对比度
# # adjusted = tf.image.adjust_contrast(img_data,0.5)
# # adjusted = tf.image.adjust_contrast(img_data, 5)
# # adjusted = tf.image.random_contrast(img_data,0.1,5)
# # 色相
# # adjusted = tf.image.adjust_hue(img_data,0.3)
# # adjusted = tf.image.adjust_hue(img_data, 0.1)
# # adjusted = tf.image.adjust_hue(img_data, 0.9)
# # adjusted = tf.image.adjust_hue(img_data, 0.6)
# # adjusted = tf.image.random_hue(img_data,0.5)
# # 饱和度
# # adjusted = tf.image.adjust_saturation(img_data,-5)
# # adjusted = tf.image.adjust_saturation(img_data, 5)
# # adjusted = tf.image.random_saturation(img_data,2,5)
# # 图像标准化 均值为0 方差变为1
# adjusted = tf.image.per_image_standardization(img_data)
# plt.imshow(adjusted.eval())
# plt.show()
# 5.处理标注窗
#
# with tf.Session() as sess:
# img_data = tf.image.decode_jpeg(image_data)
# img_data = tf.image.convert_image_dtype(img_data, dtype=tf.float32)
# # 将图片缩小一些,这样可视化能让标注框更加清楚
# img_data = tf.image.resize_images(img_data,[180,267],method=1)
# batched = tf.expand_dims(tf.image.convert_image_dtype(img_data,tf.float32),0)
# boxes = tf.constant([[[0.05,0.05,0.9,0.7],[0.35,0.47,0.5,0.56]]])
# # result = tf.image.draw_bounding_boxes(batched,boxes=boxes)
# # plt.imshow(result[0].eval())
# # print(result)
# # 随机截取图片
# begin,size,bbox_for_draw = tf.image.sample_distorted_bounding_box(tf.shape(img_data),
# bounding_boxes=boxes,
# min_object_covered=0.4)
# batched = tf.expand_dims(tf.image.convert_image_dtype(img_data,tf.float32),0)
# image_with_box = tf.image.draw_bounding_boxes(batched,bbox_for_draw)
# distored_image = tf.slice(img_data,begin,size=size)
# plt.imshow(distored_image.eval())
# plt.show()
|
16,279 | a4b16f65894d994815628fbfd146115e32c18c2d | from serializer import UserSerializer
class NeonUserMiddleware(object):
    """Attach a serialized representation of the authenticated user to
    ``request.user.user_json``.

    Old-style Django middleware: only ``process_request`` is implemented.
    """

    def process_request(self, request):
        # Anonymous users get no extra attribute; returning None lets the
        # request continue through the middleware chain either way.
        if not request.user.is_authenticated():
            return None
        request.user.user_json = UserSerializer(request.user, many=False).data
        return None
16,280 | 8eefafb3c00af6ab0d6d97727f562131bfb1ffe0 | import matplotlib.pyplot as plt
from matplotlib import pylab
import numpy as np
import json
# Directories (relative to the working directory) holding the JSON result
# files for the two experiment suites parsed below.
APRCH_PATH = './results/approach_test/'
CWT_PATH = './results/context_window_test/'
# Training Output Parsing
def parse_approach_test_results(filenames):
    """Parse approach-test result JSON files.

    Parameters
    ----------
    filenames : iterable of str
        File names relative to APRCH_PATH.

    Returns
    -------
    dict
        Maps each file name to a tuple
        (val_loss, val_acc, loss, acc, val_precision, val_recall), where
        the precision/recall series are stacked into numpy arrays.
    """
    results = {}
    for fn in filenames:
        # Context manager guarantees the handle is closed even when the
        # JSON is malformed (the original leaked it on json.load errors).
        with open(APRCH_PATH + fn) as f:
            fj = json.load(f)
        vloss = fj['val_loss']
        vacc = fj['val_acc']
        loss = fj['loss']
        acc = fj['acc']
        vprec = np.stack(fj['val_precision'])
        vrec = np.stack(fj['val_recall'])
        results[fn] = (vloss, vacc, loss, acc, vprec, vrec)
    return results
def parse_param_test_results(filenames, bot, top, step=1):
    """Parse parameter-sweep result JSON files.

    Each file maps a parameter value (as a string key) to a result dict;
    every parameter in range(bot, top, step) yields one entry keyed by
    ``filename + str(param)``.

    Returns a dict of
    (val_loss, val_acc, loss, acc, val_precision, val_recall) tuples.
    """
    results = {}
    for fn in filenames:
        # Close the handle deterministically even if json.load raises.
        with open(CWT_PATH + fn) as f:
            fj = json.load(f)
        for i in range(bot, top, step):
            vloss = fj[str(i)]['val_loss']
            vacc = fj[str(i)]['val_acc']
            loss = fj[str(i)]['loss']
            acc = fj[str(i)]['acc']
            vprec = np.stack(fj[str(i)]['val_precision'])
            vrec = np.stack(fj[str(i)]['val_recall'])
            results[fn + str(i)] = (vloss, vacc, loss, acc, vprec, vrec)
    return results
def pull_approach_results(results, param_name, filenames):
    """Extract one metric series per file from approach-test results.

    *results* is the mapping produced by `parse_approach_test_results`;
    *param_name* selects which element of each result tuple to pull.
    """
    idx = ('vloss', 'vacc', 'loss', 'acc', 'vprec', 'vrec').index(param_name)
    return [results[name][idx] for name in filenames]
def pull_param_test_results(results, param_name, val_f):
    """Apply *val_f* to the selected metric of every result entry.

    Entries are visited in the dict's insertion order; *param_name*
    selects which element of each result tuple *val_f* receives.
    """
    idx = ('vloss', 'vacc', 'loss', 'acc', 'vprec', 'vrec').index(param_name)
    return [val_f(entry[idx]) for entry in results.values()]
# Graphing Functions
def graph_approach_results(results, labels, y_label, title, x_range=(0,500)):
    # Plot each metric series in *results* against epoch number and show
    # the figure.  NOTE(review): x has x_range[1]-x_range[0] points but
    # y[:j] has up to x_range[1]; a nonzero x_range[0] would make the
    # lengths mismatch - confirm only (0, n) ranges are used.
    x = np.arange(x_range[0], x_range[1])
    j = x_range[1]
    for y, l in zip(results, labels):
        plt.plot(x, y[:j], label=l)
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(y_label)
    plt.legend(labels)
    plt.show()
def graph_approach_custom_results(results, labels, y_label, title, x_range=(0,500)):
    # Like graph_approach_results, but each series is 2-D (epochs x classes):
    # one line is drawn per class, labelled "<label> Class <i>".
    x = np.arange(x_range[0], x_range[1])
    j = x_range[1]
    legend_labels = []
    for y, l in zip(results, labels):
        y_arr = np.array(y[:j])
        for i in range(y_arr.shape[1]):
            plt.plot(x, y_arr[:,i], label=l + ' Class {}'.format(i))
            legend_labels.append(l + ' Class {}'.format(i))
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(y_label)
    plt.legend(legend_labels)
    plt.show()
def graph_param_test_results(results, labels, y_label, title, param_name):
    # Plot a flat result list split into exactly two equal halves, one per
    # label, against the swept parameter value.  Assumes len(results) is
    # even and that exactly two label groups were collected.
    h = int(len(results) / 2)
    x = np.arange(0, h)
    plt.plot(x, results[:h], label=labels[0])
    plt.plot(x, results[h:], label=labels[1])
    plt.title(title)
    plt.xlabel(param_name)
    plt.ylabel(y_label)
    plt.legend(labels)
    plt.show()
# Graphing and Visualization
def plot_keras_history(history, filename):
    # Save a train/validation accuracy curve from a Keras History object
    # to *filename*.  Expects the 'acc'/'val_acc' metric names used by
    # older Keras versions.
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig(filename)
def plot_keras_(history, filename, metric_desc, num_classes):
    # Unimplemented stub: intended to plot one line per class label.
    pass
    #TODO plot each label as a line on the plot (so num_classes == lines on plot)
    # https://stackoverflow.com/questions/24120023/strange-error-with-matplotlib-axes-labels
# Result files and display labels for the approach-comparison experiments.
aprch_filenames = [
    'binary-simple_lstm.json',
    'binary-three_layer_cnn.json',
    'combined-simple_lstm.json',
    'combined-three_layer_cnn.json',
    'sentiment-simple_lstm.json',
    'sentiment-three_layer_cnn.json'
]
aprch_labels = [
    'Binary Simple LSTM',
    'Binary Three Layer CNN',
    'Combined Simple LSTM',
    'Combined Three Layer CNN',
    'Sentiment Simple LSTM',
    'Sentiment Three Layer CNN'
]
# Result files and labels for the context-window-size sweep.
cwt_filenames = [
    'binary-three_layer_cnn.json',
    'combined-three_layer_cnn.json'
]
cwt_labels = [
    'Binary Three Layer CNN',
    'Combined Three Layer CNN'
]
if __name__ == '__main__':
    # For Fall 2018 Tests
    # Approach comparison: plot validation loss across training epochs.
    a_res = parse_approach_test_results(aprch_filenames)
    vloss_a_res = pull_approach_results(a_res, 'vloss', aprch_filenames)
    graph_approach_results(vloss_a_res, aprch_labels, 'Loss', 'Validation Loss over Training Epochs')
    # Context-window sweep: plot the minimum loss reached per window size.
    cwt_res = parse_param_test_results(cwt_filenames, 0, 3)
    vloss_cwt_res = pull_param_test_results(cwt_res, 'vloss', min)
    graph_param_test_results(vloss_cwt_res, cwt_labels, 'Loss', 'Min Loss over 500 Epochs for Context Window Size', 'Window Size')
16,281 | f0b2366aaf1908d0ddfbe0e8ef083bafd5b33d8f | # -*- coding: utf8 -*-
# ============LICENSE_START=======================================================
# org.onap.vvp/validation-scripts
# ===================================================================
# Copyright © 2019 AT&T Intellectual Property. All rights reserved.
# ===================================================================
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the "License");
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the "License");
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
#
import re
import pytest
from six import string_types
from tests.helpers import validates, get_environment_pair
from tests.structures import Heat
@validates("R-86476")
def test_vm_role_hardcoded(yaml_file):
"""
Validate vm_role value when hardcoded in the template
"""
heat = Heat(filepath=yaml_file)
servers = heat.get_resource_by_type("OS::Nova::Server")
errors = []
for r_id, server in servers.items():
props = server.get("properties") or {}
metadata = props.get("metadata") or {}
if "vm_role" not in metadata:
continue
vm_role_value = metadata["vm_role"]
if isinstance(vm_role_value, dict):
continue # Likely using get_param - validate separately
if not re.match(r"^\w+$", vm_role_value):
errors.append(
"OS::Nova::Server {} vm_role = {}".format(r_id, vm_role_value)
)
msg = (
"vm_role's value must only contain alphanumerics and underscores. "
+ "Invalid vm_role's detected: "
+ ". ".join(errors)
)
assert not errors, msg
@validates("R-86476")
def test_vm_role_from_env_file(yaml_file):
"""
Validate vm_role when using parameters and env file
"""
pair = get_environment_pair(yaml_file)
if not pair:
pytest.skip("Unable to resolve environment pair")
template_params = pair["yyml"].get("parameters") or {}
env_params = pair["eyml"].get("parameters") or {}
if "vm_role" not in template_params:
pytest.skip("vm_role not in parameters")
if "vm_role" not in env_params:
pytest.skip("vm_role not in environment file. Error checked elsewhere")
vm_role = env_params.get("vm_role", "")
if not isinstance(vm_role, string_types):
vm_role = str(vm_role)
msg = "vm_role {} contains non-alphanumeric or non-underscore characters".format(
vm_role
)
assert re.match(r"^\w+$", vm_role), msg
|
16,282 | d000679ddecf863cc24a1835e1ea5ab84ef4263a | print("Assalamualaikum")
print("Ayo semangat") |
16,283 | aafa99eb7742165ee4a64897e5d9cf2e597cde56 |
import requests
import os
import subprocess

# List all public repositories for a user via the GitHub REST API.
# Replace <repo_name> with the actual GitHub username before running.
response = requests.get('https://api.github.com/users/<repo_name>/repos').json()

for repo in response:
    url = str(repo['html_url'])  # web/clone URL of this repository
    # Argument-list form avoids shell interpretation of the untrusted URL
    # (os.system passed it straight to the shell).
    subprocess.run(['git', 'clone', url])

# Search the freshly cloned repos for the pattern once all clones finish;
# replace <pattern> with the text to look for.  '.' makes the recursive
# search target explicit (GNU grep defaulted to the working directory).
subprocess.run(['grep', '-r', '<pattern>', '.'])
|
16,284 | aa484cae880de326e852c5bb292bf030325955e2 | from collections import OrderedDict
class Transacao:
    """A single transfer of value from a sender to a recipient."""

    # Fixed field order used by dict_ordenado (presumably to keep the
    # serialized form deterministic).
    _CAMPOS = ('remetente', 'destinatario', 'valor')

    def __init__(self, remetente, destinatario, valor):
        self.remetente = remetente
        self.destinatario = destinatario
        self.valor = valor

    def dict_ordenado(self):
        """Return the transaction fields as an OrderedDict in canonical order."""
        return OrderedDict((campo, getattr(self, campo)) for campo in self._CAMPOS)
16,285 | b116a26312998b52c7a095e681e5aa396c687088 | import collections
import heapq
import re

ipset = set()
prodcount = collections.defaultdict(int)

# Dotted-quad IPv4 matcher; each octet is 0-255.  The original pattern's
# trailing empty alternative ("|)") also matched empty octets, so strings
# like "..." were accepted as valid addresses.
ipv4_pattern = re.compile(
    r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"
    r"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
)


def _scan(path='weblog.txt'):
    """Print every whitespace-separated token in *path* that is a valid
    IPv4 address.  The with-statement closes the log (the original bound
    the handle to the builtin name `file` and never closed it)."""
    with open(path) as logfile:
        for line in logfile:
            for word in line.split():
                if ipv4_pattern.match(word):
                    print("valid IP:" + word)


if __name__ == "__main__":
    _scan()
|
16,286 | 5513ca4cbd3d9e5ea3ad3c2e84862ecfb9327e07 | # Chapter 7 - Reading Files
# OPENING A FILE
# before we can read the contents of the file, we must tell Python which file we are going to work with and what we will be doing with the file
# this is done with the 'open()' function
# open() returns a "file handle" - a variable used to perform operations on the file
# Similar to "file -> Open" in a Word Processor
# USING OPEN
# handle = open(filename, mode)
# fhand = open('mbox.txt', 'r')
# returns a handle use to manipulate the file
# filename is a string
# mode is option and should be 'r' if we are planning to read the file and 'w' if we want to write the file
fhand = open('file/self-driving-car')
print(fhand)
# New line Characters
# we use a special character called "newline" to indicate when a line ends
# we represent it as '\n' in strings
# 'Newline' is still a open character not two
stuff = 'Hello\nWorld!'
print(stuff)
stuff = '\nHello\nWorld!'
print(stuff)
stuff = 'X\nY\n'
print(stuff)
len(stuff)
# READING - file Handle as a Sequence
# a file handle open for read can be treated as a sequence of strings where each line in the file is a string in the sequence
# we can use the for statement to iterate through a sequence
# Remember - a sequence is an ordered set
xfile = open('file/self-driving-car')
for line in xfile:
print(line)
# COUNTING LINES in a file
# open a file read-only
# use a 'for' loop to read each line
# count the lines and print out the number of lines
fhand = open('file/self-driving-car')
count = 0
for line in fhand:
count = count + 1
print('Line Count:', count)
# Reading the 'WHOLE' file
# we can read the whole file (newlines and all) into a 'Single String'
fhand = open('file/self-driving-car')
inp = fhand.read()
print(len(inp))
print(inp[:18])
# SEARCHING THOROUGH A FILE
# we can put an if statement in our for loop to ony print lines that meet some criteria
fhand = open('file/self-driving-car')
for line in fhand:
if line.startswith('Our'):
print(line)
# the problem will be - 'each line from the file will have a newline at the end'
# also the 'print statement will add a newline to each line'
# so there will be two '/n' including a blank line
# SEARCHING THOROUGH A FILE (fixed) - newline problem fixed
fhand = open('file/self-driving-car')
for line in fhand:
line = line.rstrip()
if line.startswith('Our'):
print(line)
# SKIPPING WITH CONTINUE
# we can conveniently skip a line by using the continue statement
fhand = open('file/self-driving-car')
for line in fhand:
line = line.rstrip()
if not line.startswith('From:'):
continue
print(line)
# USING IN TO SELECT LINES
# we can look for a string anywhere in a line as our selection criteria
fhand = open('file/self-driving-car')
for line in fhand:
line = line.rstrip()
if not 'From' in line:
continue
print(line)
# Prompt for File Name
fname = input('Enter the file name: ')
fhand = open(fname)
count = 0
for line in fhand:
    if line.startswith('Subject:'):
        count = count + 1
print('There were', count, 'subject lines in', fname)

# BAD FILE NAMES - the same program, but it survives a bad file name.
fname = input('Enter the file name: ')
try:
    fhand = open(fname)
# OSError covers missing files, bad permissions, etc.; the original bare
# except also swallowed typos in the try body and KeyboardInterrupt.
except OSError:
    print('File can not be opened:', fname)
    quit()
count = 0
for line in fhand:
    if line.startswith('Subject:'):
        count = count + 1
print('There were', count, 'subject lines in', fname)
|
16,287 | 2f427aa8740845d173872f21f9d9a36f9b03220f | from django.db import models
from accounts.models import Profile
# Create your models here.
class ContactMessage(models.Model):
    """A message submitted to the site owners through the contact form."""

    # on_delete=DO_NOTHING leaves message rows untouched when the profile
    # is deleted (the database may raise integrity errors).
    sender = models.ForeignKey(Profile, on_delete=models.DO_NOTHING)
    subject = models.CharField(max_length=200)
    email = models.EmailField()
    body = models.TextField()

    def __str__(self):
        return 'From: {} Title: {}'.format(self.sender.user.username, self.subject)
16,288 | b871b2019cfe1d984dbb9f27240c1bf84d16b406 | """
Unit and regression test for the openff_nagl_models package.
"""
import glob
import os
from pkg_resources import resource_filename
import pytest
from openff.nagl_models import validate_nagl_model_path, list_available_nagl_models
def find_model_files():
    """Return sorted absolute paths of all bundled *.pt model files.

    Asserts that at least one model is packaged, so parametrized tests
    never silently run zero cases.
    """
    # NOTE(review): pkg_resources is deprecated upstream; importlib.resources
    # is the modern replacement - confirm the minimum supported Python first.
    pattern = resource_filename('openff.nagl_models', 'models/*.pt')
    filenames = sorted([os.path.abspath(path) for path in glob.glob(pattern)])
    assert len(filenames) > 0
    return filenames
# Runs once per bundled model file (find_model_files is evaluated at
# collection time).
@pytest.mark.parametrize("model_name", find_model_files())
def test_validate_nagl_model_path(model_name):
    """Test that we can find files."""
    model_path = validate_nagl_model_path(model_name)
    assert os.path.exists(model_path)
def test_validate_nagl_model_path_failed():
    """A nonexistent model name must raise FileNotFoundError."""
    bogus_name = "does-not-exist.pt"
    with pytest.raises(FileNotFoundError):
        validate_nagl_model_path(bogus_name)
def test_local_validation(tmpdir):
    """Test that we can find local files."""
    # Run inside an empty temp dir so the lookup cannot hit a real model.
    with tmpdir.as_cwd():
        with pytest.raises(FileNotFoundError):
            validate_nagl_model_path("test.pt")
        # Once the file exists in the working directory, validation
        # resolves it to an existing path.
        with open("test.pt", "w") as f:
            f.write("test")
        model_path = validate_nagl_model_path("test.pt")
        assert os.path.exists(model_path)
def test_list_models():
    """Test that we can list models."""
    # The public listing must match the files actually shipped in the
    # package (both sides are sorted absolute paths).
    model_names = find_model_files()
    listed_model_names = list_available_nagl_models()
    assert listed_model_names == model_names
def test_entry_points():
    # Every directory advertised through the plugin entry point must exist;
    # each entry point loads a callable returning a list of paths.
    from pkg_resources import iter_entry_points
    for entry_point in iter_entry_points(group='openforcefield.nagl_model_directory'):
        paths = entry_point.load()()
        for path in paths:
            assert os.path.exists(path)
16,289 | 3bca927a2478c9c9fd458b72b80dc0f7a4551f58 | import sys
# Auto-generated paritybench scaffolding: collapse the original repository's
# module layout into this single module so class definitions below resolve.
_module = sys.modules[__name__]
del sys
main = _module
src = _module
client = _module
models = _module
server = _module
utils = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
# Patch torch.nn.functional so partially-supported ops do not abort the bench.
patch_functional()
# Replace file and CLI access with mocks: the benchmarked code must not touch
# the real filesystem, argv, or config files.
open = mock_open()
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
_global_config = args = argv = cfg = config = params = _mock_config()
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
# Python 2 compatibility shim for repositories that still use xrange.
xrange = range
wraps = functools.wraps
import time
import logging
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import torch
from torch.utils.data import DataLoader
import numpy as np
import torch.nn.functional as F
import copy
from collections import OrderedDict
import torch.nn.init as init
from torch.utils.data import Dataset
from torch.utils.data import TensorDataset
from torch.utils.data import ConcatDataset
from torchvision import datasets
from torchvision import transforms
class TwoNN(nn.Module):
    """Two-hidden-layer fully connected network with ReLU activations.

    4-D inputs (e.g. image batches) are flattened per sample before the
    first linear layer; 2-D inputs are used as-is.
    """

    def __init__(self, name, in_features, num_hiddens, num_classes):
        super(TwoNN, self).__init__()
        self.name = name
        self.activation = nn.ReLU(True)
        self.fc1 = nn.Linear(in_features=in_features, out_features=num_hiddens, bias=True)
        self.fc2 = nn.Linear(in_features=num_hiddens, out_features=num_hiddens, bias=True)
        self.fc3 = nn.Linear(in_features=num_hiddens, out_features=num_classes, bias=True)

    def forward(self, x):
        # Flatten (N, C, H, W) style input to (N, features).
        flat = x.view(x.size(0), -1) if x.ndim == 4 else x
        hidden = self.activation(self.fc2(self.activation(self.fc1(flat))))
        return self.fc3(hidden)
class CNN(nn.Module):
    """Two-conv-layer CNN head with two fully connected layers.

    The fc1 input size assumes 28x28 spatial input (e.g. MNIST): each 5x5
    conv (padding 1, stride 1) shrinks the map by 2, and each 2x2 max-pool
    (padding 1) halves it plus one, ending at 7x7.
    """

    def __init__(self, name, in_channels, hidden_channels, num_hiddens, num_classes):
        super(CNN, self).__init__()
        self.name = name
        self.activation = nn.ReLU(True)
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=hidden_channels,
                               kernel_size=(5, 5), padding=1, stride=1, bias=False)
        self.conv2 = nn.Conv2d(in_channels=hidden_channels, out_channels=hidden_channels * 2,
                               kernel_size=(5, 5), padding=1, stride=1, bias=False)
        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), padding=1)
        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), padding=1)
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(in_features=hidden_channels * 2 * (7 * 7),
                             out_features=num_hiddens, bias=False)
        self.fc2 = nn.Linear(in_features=num_hiddens, out_features=num_classes, bias=False)

    def forward(self, x):
        x = self.maxpool1(self.activation(self.conv1(x)))
        x = self.maxpool2(self.activation(self.conv2(x)))
        features = self.flatten(x)
        return self.fc2(self.activation(self.fc1(features)))
class CNN2(nn.Module):
    """Variant of CNN sized for 32x32 spatial input (e.g. CIFAR).

    Identical layer stack to CNN, but fc1 expects an 8x8 feature map:
    32 -> 30 (conv) -> 16 (pool) -> 14 (conv) -> 8 (pool).
    """

    def __init__(self, name, in_channels, hidden_channels, num_hiddens, num_classes):
        super(CNN2, self).__init__()
        self.name = name
        self.activation = nn.ReLU(True)
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=hidden_channels,
                               kernel_size=(5, 5), padding=1, stride=1, bias=False)
        self.conv2 = nn.Conv2d(in_channels=hidden_channels, out_channels=hidden_channels * 2,
                               kernel_size=(5, 5), padding=1, stride=1, bias=False)
        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), padding=1)
        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), padding=1)
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(in_features=hidden_channels * 2 * (8 * 8),
                             out_features=num_hiddens, bias=False)
        self.fc2 = nn.Linear(in_features=num_hiddens, out_features=num_classes, bias=False)

    def forward(self, x):
        x = self.maxpool1(self.activation(self.conv1(x)))
        x = self.maxpool2(self.activation(self.conv2(x)))
        features = self.flatten(x)
        return self.fc2(self.activation(self.fc1(features)))
import torch
from torch.nn import MSELoss, ReLU
from _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile
# Harness test cases consumed by paritybench's _check helper.
TESTCASES = [
    # (nn.Module, init_args, forward_args, jit_compiles)
    (TwoNN,
     lambda: ([], {'name': 4, 'in_features': 4, 'num_hiddens': 4, 'num_classes': 4}),
     lambda: ([torch.rand([4, 4])], {}),
     True),
]

class Test_vaseline555_Federated_Averaging_PyTorch(_paritybench_base):
    # Instantiates TwoNN per TESTCASES[0] and verifies forward + JIT compile.
    def test_000(self):
        self._check(*TESTCASES[0])
|
16,290 | 1ebadfcec17b6f68f82dc4f97449486dffde4151 | from setuptools import setup,find_packages
# Package metadata and dependency declaration for the PixelLib library.
setup(name= "pixellib",
    version='0.6.6',
    description='PixelLib is a library used for easy implementation of semantic and instance segmentation of objects in images and videos with few lines of code.PixelLib makes it possible to train a custom segmentation model using few lines of code.PixelLib supports background editing of images and videos using few lines of code. ',
    url="https://github.com/ayoolaolafenwa/PixelLib",
    author='Ayoola Olafenwa',
    license='MIT',
    packages= find_packages(),
    install_requires=['pillow','scikit-image','opencv-python','matplotlib','imgaug', 'labelme2coco', 'imantics'],
    zip_safe=False,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
16,291 | 0f24ef96cf82ffafee278f4c5a4cf252d34090da | from django import forms
class PoetryForm(forms.Form):
    """Form collecting a seed phrase and a word count for poetry generation."""
    # Free-text seed used to prime the generator.
    poetry_seed = forms.CharField(label='Poetry seed')
    # Number of words to generate.
    n_words = forms.IntegerField()
16,292 | 5562d872961516fdf036ea0118ba070298b64f00 | """
airPy is a flight controller based on pyboard and written in micropython.
The MIT License (MIT)
Copyright (c) 2016 Fabrizio Scimia, fabrizio.scimia@gmail.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# boot.py -- run on boot-up
# import machine
import pyb
from utils.airpy_config_utils import save_config_file, load_config_file
# Load persisted app config; 'serial_only' is recomputed on every boot.
config = load_config_file("app_config.json")
config['serial_only'] = False
boot_delay = 2000
if config['esc_calibration_mode']: # if esc calibration is true set esc_calibration script to run after this one
    pyb.main('./attitude/esc_calibration.py')
    # One-shot flag: clear it so the next boot returns to normal flight mode.
    config['esc_calibration_mode'] = False
    boot_delay = 0 # avoid the boot delay to proceed with esc calibration
else:
    pyb.main('main.py') # if esc calibration is false set main script to run after this one
# pyb.main('./aplink/test/test_ap_save_tx_settings.py') # TEST
pyb.LED(3).on() # indicate we are waiting for switch press
pyb.delay(boot_delay) # wait for user to maybe press the switch
switch_value = pyb.Switch()() # sample the switch at end of delay
pyb.LED(3).off() # indicate that we finished waiting for the switch
pyb.LED(4).on() # indicate that we are selecting the mode
if switch_value:
    # Switch pressed: expose USB mass storage alongside the serial console.
    pyb.usb_mode('CDC+MSC')
else:
    # Default: serial + HID only; remember this choice in the config file.
    pyb.usb_mode('CDC+HID')
    config['serial_only'] = True
save_config_file("app_config.json", config)
pyb.LED(4).off() # indicate that we finished selecting the mode
|
16,293 | b101ad7eff2cf7007509d640b7828cfbd3e40748 | from .curry import curry
from .reduce import reduce
@curry
def apply(fn, args):
    """Call *fn* with the elements of *args* unpacked as positional arguments.

    Curried: ``apply(fn)`` returns a function awaiting the argument sequence.
    """
    return fn(*args)
|
16,294 | 7f8c042600a0899f6d16754726aad39cbe3998a9 | # -*- coding: utf-8 -*-
import datetime, pytz
import dateutil, dateutil.tz, dateutil.parser
import logging
class Date:
    """Helper around datetime/pytz/dateutil for timezone-aware date handling."""

    def isAware(self, date):
        """
        Does the datetime have a timezone defined?
        """
        return (date.tzinfo != None) and (date.tzinfo.utcoffset(date) != None)

    def isNaive(self, date):
        """
        Does the datetime lack a timezone?
        """
        return not self.isAware(date)

    """ Convert a naive datetime into an aware one in the given timezone """
    def localize(self, timezone, naive):
        tz = pytz.timezone(timezone)
        local = tz.localize(naive)
        return local

    """ Return the server's local timezone """
    def getLocalTimezone(self):
        return dateutil.tz.tzlocal()

    """ Convert an aware datetime to the server's local timezone """
    def localizeAwareToLocal(self,aware):
        tz = dateutil.tz.tzlocal()
        return aware.astimezone(tz)

    """ Assume the date is in UTC and return a datetime tagged with the UTC zone """
    def localizeUtc(self,naive):
        return naive.replace(tzinfo=pytz.utc)

    """ Assume the date is in the local timezone (tags it; no conversion). """
    def localizeLocal(self,naive):
        tz = dateutil.tz.tzlocal()
        local = naive.replace(tzinfo=tz)
        return local

    """ Return the datetime converted to UTC """
    def awareToUtc(self, date):
        return date.astimezone(pytz.utc)

    """ Return the current date/time in the local timezone """
    def now(self):
        date = datetime.datetime.now()
        return self.localizeLocal(date)

    """ Return the current date/time with the UTC zone attached """
    def utcNow(self):
        return datetime.datetime.now(pytz.utc)

    def isUTC(self,date):
        # True only for aware datetimes whose offset is exactly zero.
        #logging.debug(date.tzinfo)
        return date.tzinfo != None and date.tzinfo.utcoffset(date) == datetime.timedelta(0)

    """
    Parse a date/time string and return it in the server's local timezone.
    If it comes without a timezone, assume it is in the server's zone.
    """
    def parse(self, datestr):
        dt = dateutil.parser.parse(datestr)
        if self.isNaive(dt):
            dt = self.localizeLocal(dt)
        return dt
|
16,295 | 7da967cefed4c62902107c45ba31cd0c5d9f425d | import wpilib
from wpilib import RobotDrive
driveTrain = None
def init(driveMotors):
    """Create the module-global RobotDrive from the given motor mapping.

    ``driveMotors`` is unpacked as keyword arguments to ``wpilib.RobotDrive``
    (presumably motor-position names mapped to controllers — confirm against
    the caller).
    """
    global driveTrain
    driveTrain = wpilib.RobotDrive(**driveMotors)
    driveTrain.setExpiration(0.2)  # motor-safety watchdog window, seconds
    driveTrain.setSafetyEnabled(False)  # watchdog disabled by this codebase
16,296 | 3ace4cedd67ecddd7ff66241d2fe47650c8a59ee | '''
Curva - A module for Elliptic-Curve Integrated Encryption Scheme (ECIES) based on AES and HKDF.
This module also provides interfaces for Elliptic-Curve Diffie-Hellman (ECDH) and Elliptic-Curve Digital Signature Algorithm (ECDSA).
The only interface exposed to the user is Curva, which integrates all functions mentioned above.
Usage:
>>> c1 = Curva()
>>> c2 = Curva()
>>> c1.load(c2.export()) # load / export as bytes
>>> c2.loadHex(c1.exportHex()) # load / export as hex string
>>> c1.secret() == c2.secret() # calculate shared secret
True
>>> signature = c1.sign(b'data') # digital signature
>>> c2.verify(b'data', signature) # verify
True
>>> c2.decrypt(c1.encrypt(b'data')) # encrypt / decrypt
b'data'
'''
__all__ = ['Curva', 'CurvaError']
class CurvaError(Exception):
    """Base class for all errors raised by the curva package."""
from .key import ECKeys
from .aes import *
class Curva(ECKeys):
    """Public ECIES interface: key handling from ECKeys plus encrypt/decrypt."""

    def encrypt(self, data: bytes) -> bytes:
        """Encrypt *data*; the ephemeral public key is prepended to the ciphertext."""
        eph_key, shared = self._ephemeral()
        return eph_key + encrypt(shared, data)

    def decrypt(self, data: bytes) -> bytes:
        """Decrypt *data* produced by :meth:`encrypt` (ephemeral key + ciphertext)."""
        shared, payload = self._ephemeral(data)
        return decrypt(shared, payload)
|
16,297 | fb8a0df34a52355a421f6ce2f74bae52d7ef25a1 | # 58A - Chat room
# http://codeforces.com/problemset/problem/58/A
import re


def contains_hello(s: str) -> bool:
    """Return True if the letters of 'hello' appear in *s* in order.

    Equivalent to matching the subsequence h.e.l.l.o anywhere in the
    string; re.search makes the leading/trailing '.*' of the original
    pattern unnecessary.
    """
    return re.search(r'h.*e.*l.*l.*o', s) is not None


if __name__ == '__main__':
    # Codeforces 58A: read one line, report whether it "says hello".
    print('YES' if contains_hello(input()) else 'NO')
|
16,298 | c48f402ca02ab7d1b37acce916fcc5025d97dfa7 | # -*- coding: utf-8 -*-
#
# Telefónica Digital - Product Development and Innovation
#
# THIS CODE AND INFORMATION ARE PROVIDED 'AS IS' WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
#
# Copyright (c) Telefónica Investigación y Desarrollo S.A.U.
# All rights reserved.
#
"""Reused test utilities like mocks"""
import StringIO
import sys
import mock
def mock_response(status_code=200, json=None, raw=None, text=None):
    """Build a MagicMock imitating a requests-style HTTP response.

    json=None makes .json() raise ValueError; a Mock is installed as the
    .json attribute itself; any other value becomes .json()'s return value.
    raw and text are attached only when provided.
    """
    response = mock.MagicMock()
    response.status_code = status_code
    if isinstance(json, mock.Mock):
        response.json = json
    elif json is None:
        response.json.side_effect = ValueError("Not JSON")
    else:
        response.json.return_value = json
    if raw is not None:
        response.raw = raw
    if text is not None:
        response.text = text
    return response
class collect_outputs(object):
    """Context manager that captures stdout and stderr into StringIO buffers."""

    def __init__(self):
        self.stdout = StringIO.StringIO()
        self.stderr = StringIO.StringIO()

    def __enter__(self):
        # Redirect both standard streams to the in-memory buffers.
        sys.stdout, sys.stderr = self.stdout, self.stderr
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Restore the interpreter's original streams unconditionally.
        sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
class mocked_standard_descriptors(collect_outputs):
    """Context manager mocking stdin (with canned input) plus stdout/stderr."""

    def __init__(self, canned_input):
        super(mocked_standard_descriptors, self).__init__()
        # Pre-loaded buffer served to any code reading from stdin.
        self.stdin = StringIO.StringIO(canned_input)

    def __enter__(self):
        sys.stdin = self.stdin
        return super(mocked_standard_descriptors, self).__enter__()

    def __exit__(self, *exc_info):
        sys.stdin = sys.__stdin__
        return super(mocked_standard_descriptors, self).__exit__(*exc_info)
|
16,299 | de68725fe5d3042304465f421c25b7c58fc13a34 | from django import forms
from captcha.fields import CaptchaField
class CaptchaForm(forms.Form):
    """Minimal form exposing a single CAPTCHA challenge field."""
    captcha = CaptchaField()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.